nfs4state.c
/*
*  Copyright (c) 2001 The Regents of the University of Michigan.
*  All rights reserved.
*
*  Kendrick Smith <kmsmith@umich.edu>
*  Andy Adamson <kandros@umich.edu>
*
*  Redistribution and use in source and binary forms, with or without
*  modification, are permitted provided that the following conditions
*  are met:
*
*  1. Redistributions of source code must retain the above copyright
*     notice, this list of conditions and the following disclaimer.
*  2. Redistributions in binary form must reproduce the above copyright
*     notice, this list of conditions and the following disclaimer in the
*     documentation and/or other materials provided with the distribution.
*  3. Neither the name of the University nor the names of its
*     contributors may be used to endorse or promote products derived
*     from this software without specific prior written permission.
*
*  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
*  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
*  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
*  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
*  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/ratelimit.h>
#include <linux/sunrpc/svcauth_gss.h>
#include <linux/sunrpc/addr.h>
#include <linux/hash.h>
#include "xdr4.h"
#include "xdr4cb.h"
#include "vfs.h"
#include "current_stateid.h"

#include "netns.h"

#define NFSDDBG_FACILITY                NFSDDBG_PROC

#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
static const stateid_t currentstateid = {
	.si_generation = 1,
};

static u64 current_sessionid = 1;

#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))

/* forward declarations */
static int check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner);

/* Locking: */

/* Currently used for almost all code touching nfsv4 state: */
static DEFINE_MUTEX(client_mutex);

/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
static DEFINE_SPINLOCK(state_lock);

static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;

void
nfs4_lock_state(void)
{
	mutex_lock(&client_mutex);
}

static void free_session(struct nfsd4_session *);

static bool is_session_dead(struct nfsd4_session *ses)
{
	return ses->se_flags & NFS4_SESSION_DEAD;
}

static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
{
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
}

void
nfs4_unlock_state(void)
{
	mutex_unlock(&client_mutex);
}

static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	clp->cl_time = 0;
	return nfs_ok;
}

static __be32 mark_client_expired(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	__be32 ret;

	spin_lock(&nn->client_lock);
	ret = mark_client_expired_locked(clp);
	spin_unlock(&nn->client_lock);
	return ret;
}

static __be32 get_client_locked(struct nfs4_client *clp)
{
	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();
}

static inline void
renew_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static void put_client_renew_locked(struct nfs4_client *clp)
{
	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}

static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}


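/* Simple multiplicative hash over an opaque byte string. */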
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

static void nfsd4_free_file(struct nfs4_file *f)
{
	kmem_cache_free(file_slab, f);
}

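/*
 * Drop a reference on an nfs4_file; the final put unhashes it under
 * state_lock, releases the inode and frees the structure.
 */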
static inline void
put_nfs4_file(struct nfs4_file *fi)
{
	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
		hlist_del(&fi->fi_hash);
		spin_unlock(&state_lock);
		iput(fi->fi_inode);
		nfsd4_free_file(fi);
	}
}

static inline void
get_nfs4_file(struct nfs4_file *fi)
{
	atomic_inc(&fi->fi_ref);
}

static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	if (f->fi_fds[oflag])
		return get_file(f->fi_fds[oflag]);
	return NULL;
}

static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_WRONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *ret;

	lockdep_assert_held(&f->fi_lock);

	ret = __nfs4_get_fd(f, O_RDONLY);
	if (!ret)
		ret = __nfs4_get_fd(f, O_RDWR);
	return ret;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return ret;
}

static struct file *
find_any_file(struct nfs4_file *f)
{
	struct file *ret;

	spin_lock(&f->fi_lock);
	ret = __nfs4_get_fd(f, O_RDWR);
	if (!ret) {
		ret = __nfs4_get_fd(f, O_WRONLY);
		if (!ret)
			ret = __nfs4_get_fd(f, O_RDONLY);
	}
	spin_unlock(&f->fi_lock);
	return ret;
}

static int num_delegations;
unsigned long max_delegations;

/*
 * Open owner state (share locks)
 */

/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)

static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	ret += clientid;
	return ret & OWNER_HASH_MASK;
}

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)

static unsigned int file_hashval(struct inode *ino)
{
	/* XXX: why are we hashing on inode pointer, anyway? */
	return hash_ptr(ino, FILE_HASH_BITS);
}

static struct hlist_head file_hashtbl[FILE_HASH_SIZE];

static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
}

static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
{
	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

	__nfs4_file_get_access(fp, access);
	return nfs_ok;
}

static struct file *nfs4_file_put_fd(struct nfs4_file *fp, int oflag)
{
	struct file *filp;

	filp = fp->fi_fds[oflag];
	fp->fi_fds[oflag] = NULL;
	return filp;
}

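/*
 * Drop one reference on the given access mode.  When a mode's count hits
 * zero its struct file is closed, along with the O_RDWR file once the
 * opposite mode is unused too; the fput()s happen after dropping fi_lock.
 */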
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
{
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

		f1 = nfs4_file_put_fd(fp, oflag);
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
			f2 = nfs4_file_put_fd(fp, O_RDWR);
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
	}
}

static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
{
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
		__nfs4_file_put_access(fp, O_WRONLY);
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
}

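/*
 * Allocate a generic stateid and install it in the client's stateid idr.
 * idr_alloc_cyclic() keeps newly issued ids increasing (mod INT_MAX) so
 * opaque stateid values are not reused too soon (see comment below).
 */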
static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *slab)
{
	struct idr *stateids = &cl->cl_stateids;
	struct nfs4_stid *stid;
	int new_id;

	stid = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!stid)
		return NULL;

	new_id = idr_alloc_cyclic(stateids, stid, 0, 0, GFP_KERNEL);
	if (new_id < 0)
		goto out_free;
	stid->sc_client = cl;
	stid->sc_type = 0;
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
	/* Will be incremented before return to client: */
	stid->sc_stateid.si_generation = 0;

	/*
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
	 */
	return stid;
out_free:
	kmem_cache_free(slab, stid);
	return NULL;
}

static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
{
	return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
}

/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appears in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
 * 'state_lock', which is always held when block_delegations() is called,
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
static struct bloom_pair {
	int	entries, old_entries;
	time_t	swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (seconds_since_boot() - bd->swap_time > 30) {
		spin_lock(&state_lock);
		if (seconds_since_boot() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = seconds_since_boot();
		}
		spin_unlock(&state_lock);
	}
	hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);

	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
}

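/*
 * Allocate a read delegation for this client and filehandle; fails if we
 * are over the delegation limit or the file has recently been recalled.
 */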
static struct nfs4_delegation *
alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh)
{
	struct nfs4_delegation *dp;

	dprintk("NFSD alloc_init_deleg\n");
	if (num_delegations > max_delegations)
		return NULL;
	if (delegation_blocked(&current_fh->fh_handle))
		return NULL;
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
	if (dp == NULL)
		return dp;
	/*
	 * delegation seqid's are never incremented.  The 4.1 special
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
	num_delegations++;
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
	INIT_LIST_HEAD(&dp->dl_recall_lru);
	dp->dl_file = NULL;
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
	fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
	dp->dl_time = 0;
	atomic_set(&dp->dl_count, 1);
	nfsd4_init_callback(&dp->dl_recall);
	return dp;
}

static void remove_stid(struct nfs4_stid *s)
{
	struct idr *stateids = &s->sc_client->cl_stateids;

	idr_remove(stateids, s->sc_stateid.si_opaque.so_id);
}

static void nfs4_free_stid(struct kmem_cache *slab, struct nfs4_stid *s)
{
	kmem_cache_free(slab, s);
}

void
nfs4_put_delegation(struct nfs4_delegation *dp)
{
	if (atomic_dec_and_test(&dp->dl_count)) {
		nfs4_free_stid(deleg_slab, &dp->dl_stid);
		num_delegations--;
	}
}

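/*
 * Drop a reference on the file's delegation lease; the last reference
 * removes the lease and releases the backing file.
 */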
static void nfs4_put_deleg_lease(struct nfs4_file *fp)
{
	if (!fp->fi_lease)
		return;
	if (atomic_dec_and_test(&fp->fi_delegees)) {
		vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
		fp->fi_lease = NULL;
		fput(fp->fi_deleg_file);
		fp->fi_deleg_file = NULL;
	}
}

static void unhash_stid(struct nfs4_stid *s)
{
	s->sc_type = 0;
}

static void
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	lockdep_assert_held(&state_lock);

	dp->dl_stid.sc_type = NFS4_DELEG_STID;
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
}

/* Called under the state lock. */
static void
unhash_delegation(struct nfs4_delegation *dp)
{
	spin_lock(&state_lock);
	list_del_init(&dp->dl_perclnt);
	list_del_init(&dp->dl_perfile);
	list_del_init(&dp->dl_recall_lru);
	spin_unlock(&state_lock);
	if (dp->dl_file) {
		nfs4_put_deleg_lease(dp->dl_file);
		put_nfs4_file(dp->dl_file);
		dp->dl_file = NULL;
	}
}



static void destroy_revoked_delegation(struct nfs4_delegation *dp)
{
	list_del_init(&dp->dl_recall_lru);
	remove_stid(&dp->dl_stid);
	nfs4_put_delegation(dp);
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
	unhash_delegation(dp);
	remove_stid(&dp->dl_stid);
	nfs4_put_delegation(dp);
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

	if (clp->cl_minorversion == 0)
		destroy_delegation(dp);
	else {
		unhash_delegation(dp);
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
	}
}

/* 
 * SETCLIENTID state 
 */

static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempts to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	int i;
	unsigned int access = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			access |= i;
	}
	return access;
}

static bool
test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
	unsigned int access, deny;

	access = bmap_to_share_mode(stp->st_access_bmap);
	deny = bmap_to_share_mode(stp->st_deny_bmap);
	if ((access & open->op_share_deny) || (deny & open->op_share_access))
		return false;
	return true;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	__set_bit(access, &stp->st_access_bmap);
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	__clear_bit(access, &stp->st_access_bmap);
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	return test_bit(access, &stp->st_access_bmap);
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 access, struct nfs4_ol_stateid *stp)
{
	__set_bit(access, &stp->st_deny_bmap);
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 access, struct nfs4_ol_stateid *stp)
{
	__clear_bit(access, &stp->st_deny_bmap);
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 access, struct nfs4_ol_stateid *stp)
{
	return test_bit(access, &stp->st_deny_bmap);
}

static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
			nfs4_file_put_access(stp->st_file, i);
		clear_access(i, stp);
	}
}

static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_file *fp = stp->st_file;

	spin_lock(&fp->fi_lock);
	list_del(&stp->st_perfile);
	spin_unlock(&fp->fi_lock);
	list_del(&stp->st_perstateowner);
}

static void close_generic_stateid(struct nfs4_ol_stateid *stp)
{
	release_all_access(stp);
	put_nfs4_file(stp->st_file);
	stp->st_file = NULL;
}

static void free_generic_stateid(struct nfs4_ol_stateid *stp)
{
	remove_stid(&stp->st_stid);
	nfs4_free_stid(stateid_slab, &stp->st_stid);
}

static void __release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct file *file;

	list_del(&stp->st_locks);
	unhash_generic_stateid(stp);
	unhash_stid(&stp->st_stid);
	file = find_any_file(stp->st_file);
	if (file)
		filp_close(file, (fl_owner_t)lockowner(stp->st_stateowner));
	close_generic_stateid(stp);
	free_generic_stateid(stp);
}

static void unhash_lockowner(struct nfs4_lockowner *lo)
{
	struct nfs4_ol_stateid *stp;

	list_del(&lo->lo_owner.so_strhash);
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		__release_lock_stateid(stp);
	}
}

static void nfs4_free_lockowner(struct nfs4_lockowner *lo)
{
	kfree(lo->lo_owner.so_owner.data);
	kmem_cache_free(lockowner_slab, lo);
}

static void release_lockowner(struct nfs4_lockowner *lo)
{
	unhash_lockowner(lo);
	nfs4_free_lockowner(lo);
}

static void release_lockowner_if_empty(struct nfs4_lockowner *lo)
{
	if (list_empty(&lo->lo_owner.so_stateids))
		release_lockowner(lo);
}

static void release_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_lockowner *lo;

	lo = lockowner(stp->st_stateowner);
	__release_lock_stateid(stp);
	release_lockowner_if_empty(lo);
}

static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp)
{
	struct nfs4_ol_stateid *stp;

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
		release_lock_stateid(stp);
	}
}

static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_generic_stateid(stp);
	release_open_stateid_locks(stp);
	close_generic_stateid(stp);
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_open_stateid(stp);
	free_generic_stateid(stp);
}

static void unhash_openowner(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;

	list_del(&oo->oo_owner.so_strhash);
	list_del(&oo->oo_perclient);
	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
		release_open_stateid(stp);
	}
}

static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;

	if (s) {
		free_generic_stateid(s);
		oo->oo_last_closed_stid = NULL;
	}
}

static void nfs4_free_openowner(struct nfs4_openowner *oo)
{
	kfree(oo->oo_owner.so_owner.data);
	kmem_cache_free(openowner_slab, oo);
}

static void release_openowner(struct nfs4_openowner *oo)
{
	unhash_openowner(oo);
	list_del(&oo->oo_close_lru);
	release_last_closed_stateid(oo);
	nfs4_free_openowner(oo);
}

static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef NFSD_DEBUG
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
		cstate->replay_owner = NULL;
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}

static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponsesize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponsesize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
		kfree(ses->se_slots[i]);
}

/*
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
{
	u32 size;

	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
}

/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
 * re-negotiate active sessions and reduce their slot usage to make
 * room for new connections. For now we just fail the create session.
 */
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
{
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
	int avail;

	spin_lock(&nfsd_drc_lock);
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
		    nfsd_drc_max_mem - nfsd_drc_mem_used);
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);

	return num;
}

static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
{
	int slotsize = slot_bytes(ca);

	spin_lock(&nfsd_drc_lock);
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
	spin_unlock(&nfsd_drc_lock);
}

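/*
 * Allocate a session and its reply-cache slots in one go; the slot array
 * is sized from the negotiated fore-channel attributes.
 */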
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
{
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
	struct nfsd4_session *new;
	int mem, i;

	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);

	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
	/* allocate each struct nfsd4_slot and data cache in one piece */
	for (i = 0; i < numslots; i++) {
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
		if (!new->se_slots[i])
			goto out_free;
	}

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
}

static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}

static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;

	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
	nfsd4_probe_callback(clp);
	spin_unlock(&clp->cl_lock);
}

static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
{
	struct nfsd4_conn *conn;

	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
		return NULL;
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
	conn->cn_flags = flags;
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}

static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
}

static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	spin_lock(&clp->cl_lock);
	__nfsd4_hash_conn(conn, ses);
	spin_unlock(&clp->cl_lock);
}

static int nfsd4_register_conn(struct nfsd4_conn *conn)
{
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
}

static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	int ret;

	nfsd4_hash_conn(conn, ses);
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
	if (conn->cn_flags & NFS4_CDFC4_BACK) {
		/* callback channel may be back up */
		nfsd4_probe_callback(ses->se_client);
	}
}

static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
{
	u32 dir = NFS4_CDFC4_FORE;

	if (cses->flags & SESSION4_BACK_CHAN)
		dir |= NFS4_CDFC4_BACK;
	return alloc_conn(rqstp, dir);
}

/* must be called under client_lock */
static void nfsd4_del_conns(struct nfsd4_session *s)
{
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;

	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);

		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);

		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
}

static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

static void free_session(struct nfsd4_session *ses)
{
	struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);
	nfsd4_del_conns(ses);
	nfsd4_put_drc_mem(&ses->se_fchannel);
	__free_session(ses);
}

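/*
 * Fill in a freshly allocated session from the CREATE_SESSION arguments
 * and hash it into the per-net sessionid table and the client's list.
 */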
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
{
	int idx;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	new->se_client = clp;
	gen_sessionid(new);

	INIT_LIST_HEAD(&new->se_conns);

	new->se_cb_seq_nr = 1;
	new->se_flags = cses->flags;
	new->se_cb_prog = cses->callback_prog;
	new->se_cb_sec = cses->cb_sec;
	atomic_set(&new->se_ref, 0);
	idx = hash_sessionid(&new->se_sessionid);
	spin_lock(&nn->client_lock);
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
	spin_lock(&clp->cl_lock);
	list_add(&new->se_perclnt, &clp->cl_sessions);
	spin_unlock(&clp->cl_lock);
	spin_unlock(&nn->client_lock);

	if (cses->flags & SESSION4_BACK_CHAN) {
		struct sockaddr *sa = svc_addr(rqstp);
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
}

/* caller must hold client_lock */
static struct nfsd4_session *
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
{
	struct nfsd4_session *elem;
	int idx;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

/* caller must hold client_lock */
static void
unhash_session(struct nfsd4_session *ses)
{
	list_del(&ses->se_hash);
	spin_lock(&ses->se_client->cl_lock);
	list_del(&ses->se_perclnt);
	spin_unlock(&ses->se_client->cl_lock);
}

/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
{
	if (clid->cl_boot == nn->boot_time)
		return 0;
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
		clid->cl_boot, clid->cl_id, nn->boot_time);
	return 1;
}

/* 
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
static struct nfs4_client *alloc_client(struct xdr_netobj name)
{
	struct nfs4_client *clp;

	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
	if (clp->cl_name.data == NULL) {
		kfree(clp);
		return NULL;
	}
	clp->cl_name.len = name.len;
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_callbacks);
	INIT_LIST_HEAD(&clp->cl_revoked);
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
	return clp;
}

static void
free_client(struct nfs4_client *clp)
{
	struct nfsd_net __maybe_unused *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
	}
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
	free_svc_cred(&clp->cl_cred);
	kfree(clp->cl_name.data);
	idr_destroy(&clp->cl_stateids);
	kfree(clp);
}

/* must be called under the client_lock */
static inline void
unhash_client_locked(struct nfs4_client *clp)
{
	struct nfsd4_session *ses;

	list_del(&clp->cl_lru);
	spin_lock(&clp->cl_lock);
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
	spin_unlock(&clp->cl_lock);
}

static void
destroy_client(struct nfs4_client *clp)
{
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head reaplist;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
		list_del_init(&dp->dl_perclnt);
		/* Ensure that deleg break won't try to requeue it */
		++dp->dl_time;
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		destroy_delegation(dp);
	}
	list_splice_init(&clp->cl_revoked, &reaplist);
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
		destroy_revoked_delegation(dp);
	}
	while (!list_empty(&clp->cl_openowners)) {
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
		release_openowner(oo);
	}
	nfsd4_shutdown_callback(clp);
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
	list_del(&clp->cl_idhash);
	if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
		rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
	else
		rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	spin_lock(&nn->client_lock);
	unhash_client_locked(clp);
	WARN_ON_ONCE(atomic_read(&clp->cl_refcount));
	free_client(clp);
	spin_unlock(&nn->client_lock);
}

static void expire_client(struct nfs4_client *clp)
{
	nfsd4_client_record_remove(clp);
	destroy_client(clp);
}

static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
}

static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 
	target->cl_clientid.cl_id = source->cl_clientid.cl_id; 
}

static int copy_cred(struct svc_cred *target, struct svc_cred *source)
{
	if (source->cr_principal) {
		target->cr_principal =
				kstrdup(source->cr_principal, GFP_KERNEL);
		if (target->cr_principal == NULL)
			return -ENOMEM;
	} else
		target->cr_principal = NULL;
	target->cr_flavor = source->cr_flavor;
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
	return 0;
}

static long long
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	long long res;

	res = o1->len - o2->len;
	if (res)
		return res;
	return (long long)memcmp(o1->data, o2->data, o1->len);
}

static int same_name(const char *n1, const char *n2)
{
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
}

static int
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
}

static int
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
}

static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i=0; i<g1->ngroups; i++)
		if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
			return false;
	return true;
}

/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a requests differs from that previously
 * used.  We use uid, gid's, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}


static bool
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
}

static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

	if (!cr->cr_gss_mech)
		return false;
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}

static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}

static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
{
	static u32 current_clientid = 1;

	clp->cl_clientid.cl_boot = nn->boot_time;
	clp->cl_clientid.cl_id = current_clientid++; 
}

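/*
 * Generate a confirm verifier, opaque to the client, from the current
 * time and a simple counter.
 */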
static void gen_confirm(struct nfs4_client *clp)
{
	__be32 verf[2];
	static u32 i;

	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)get_seconds();
	verf[1] = (__force __be32)i++;
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
}

static struct nfs4_stid *find_stateid(struct nfs4_client *cl, stateid_t *t)
{
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
}

static struct nfs4_stid *find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
{
	struct nfs4_stid *s;

	s = find_stateid(cl, t);
	if (!s)
		return NULL;
	if (typemask & s->sc_type)
		return s;
	return NULL;
}

static struct nfs4_client *create_client(struct xdr_netobj name,
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
	int ret;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
		spin_lock(&nn->client_lock);
		free_client(clp);
		spin_unlock(&nn->client_lock);
		return NULL;
	}
	nfsd4_init_callback(&clp->cl_cb_null);
	clp->cl_time = get_seconds();
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	gen_confirm(clp);
	clp->cl_cb_session = NULL;
	clp->net = net;
	return clp;
}

static void
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}

static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	long long cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}

static void
add_to_unconfirmed(struct nfs4_client *clp)
{
	unsigned int idhashval;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
	renew_client(clp);
}

static void
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
	renew_client(clp);
}

static struct nfs4_client *
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
		if (same_clid(&clp->cl_clientid, clid)) {
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client(clp);
			return clp;
		}
	}
	return NULL;
}

static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	return find_client_in_id_table(tbl, clid, sessions);
}

static struct nfs4_client *
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->unconf_id_hashtbl;

	return find_client_in_id_table(tbl, clid, sessions);
}

static bool clp_used_exchangeid(struct nfs4_client *clp)
{
	return clp->cl_exchange_flags != 0;
} 

static struct nfs4_client *
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
}

static struct nfs4_client *
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
{
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
}

static void
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
{
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
	struct sockaddr	*sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
L

1765
	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
1766
					    se->se_callback_addr_len,
1767 1768
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));
1769

1770
	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
L
1772

1773 1774
	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
1775

1776 1777
	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
1778
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
L
out_err:
1781 1782
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
N
L
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}

/*
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
 */
static void
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
	struct xdr_buf *buf = resp->xdr.buf;
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;

	dprintk("--> %s slot %p\n", __func__, slot);

	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;

	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
	if (nfsd4_not_cached(resp)) {
		slot->sl_datalen = 0;
		return;
	}
	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
		WARN(1, "%s: sessions DRC could not cache compound\n", __func__);
	return;
}

/*
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE, encode the uncached rep error on the next
 * operation, which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
 *
 */
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
{
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;

	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);

	/* Return nfserr_retry_uncached_rep in next operation. */
	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
	}
	return op->status;
}

/*
 * The sequence operation is not cached because we can use the slot and
 * session values.
 */
static __be32
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
{
	struct nfsd4_slot *slot = resp->cstate.slot;
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
	__be32 status;

	dprintk("--> %s slot %p\n", __func__, slot);

	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
	if (status)
		return status;

	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);

	resp->opcnt = slot->sl_opcnt;
	return slot->sl_status;
}

/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
	/* pNFS is not supported */
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

static bool client_has_state(struct nfs4_client *clp)
{
	/*
	 * Note clp->cl_openowners check isn't quite right: there's no
	 * need to count owners without stateid's.
	 *
	 * Also note we should probably be using this in 4.0 case too.
	 */
	return !list_empty(&clp->cl_openowners)
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
}

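/* EXCHANGE_ID: find or establish the client record (RFC 5661, section 18.35). */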
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
	struct nfs4_client *unconf, *conf, *new;
	__be32 status;
	char			addr_str[INET6_ADDRSTRLEN];
	nfs4_verifier		verf = exid->verifier;
	struct sockaddr		*sa = svc_addr(rqstp);
	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	rpc_ntop(sa, addr_str, sizeof(addr_str));
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
		"ip_addr=%s flags %x, spa_how %d\n",
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
		addr_str, exid->flags, exid->spa_how);

	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
		return nfserr_inval;

	switch (exid->spa_how) {
	case SP4_MACH_CRED:
		if (!svc_rqst_integrity_protected(rqstp))
			return nfserr_inval;
	case SP4_NONE:
		break;
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
	case SP4_SSV:
		return nfserr_encr_alg_unsupp;
	}

	/* Cases below refer to rfc 5661 section 18.35.4: */
	nfs4_lock_state();
	conf = find_confirmed_client_by_name(&exid->clname, nn);
	if (conf) {
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
				status = nfserr_inval;
				goto out;
			}
			if (!mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
			if (!creds_match) { /* case 9 */
				status = nfserr_perm;
				goto out;
			}
			if (!verfs_match) { /* case 8 */
				status = nfserr_not_same;
				goto out;
			}
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
		}
		if (!creds_match) { /* case 3 */
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
				goto out;
			}
			expire_client(conf);
			goto out_new;
		}
		if (verfs_match) { /* case 2 */
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
		}
		/* case 5, client reboot */
		goto out_new;
	}

	if (update) { /* case 7 */
		status = nfserr_noent;
		goto out;
	}

	unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
	if (unconf) /* case 4, possible retry or client restart */
		expire_client(unconf);

	/* case 1 (normal case) */
out_new:
	new = create_client(exid->clname, rqstp, &verf);
	if (new == NULL) {
		status = nfserr_jukebox;
		goto out;
	}
	new->cl_minorversion = cstate->minorversion;
	new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);

	gen_clid(new, nn);
	add_to_unconfirmed(new);
out_copy:
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

	exid->seqid = new->cl_cs_slot.sl_seqid + 1;
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
	status = nfs_ok;

out:
	nfs4_unlock_state();
	return status;
}

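/*
 * Compare an incoming seqid with the slot's last seen seqid: seqid + 1
 * starts a new request, an equal seqid on an idle slot is a replay, and
 * anything else is misordered.  A busy slot with the same seqid gets
 * nfserr_jukebox so the client retries later.
 */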
static __be32
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
{
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);

	/* The slot is in use, and no response has been sent. */
	if (slot_inuse) {
		if (seqid == slot_seqid)
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
	/* Note unsigned 32-bit arithmetic handles wraparound: */
	if (likely(seqid == slot_seqid + 1))
		return nfs_ok;
	if (seqid == slot_seqid)
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}

/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */\
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))

static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
{
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

	return nfs_ok;
}

#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))

static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
{
	ca->headerpadsz = 0;

	/*
	 * These RPC_MAX_HEADER macros are overkill, especially since we
	 * don't even do gss on the backchannel yet.  But this is still
	 * less than 1k.  Tighten up this estimate in the unlikely event
	 * it turns out to be a problem for some client:
	 */
	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
}

static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}

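/* CREATE_SESSION: set up a session and its first connection for a confirmed or unconfirmed client. */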
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
	struct sockaddr *sa = svc_addr(rqstp);
	struct nfs4_client *conf, *unconf;
	struct nfsd4_session *new;
	struct nfsd4_conn *conn;
	struct nfsd4_clid_slot *cs_slot = NULL;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
	if (status)
		goto out_release_drc_mem;
	status = nfserr_jukebox;
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
	if (!new)
		goto out_release_drc_mem;
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		status = nfserr_wrong_cred;
		if (!mach_creds_match(conf, rqstp))
			goto out_free_conn;
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status == nfserr_replay_cache) {
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
			goto out_free_conn;
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
	} else if (unconf) {
		struct nfs4_client *old;
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
			status = nfserr_clid_inuse;
			goto out_free_conn;
		}
		status = nfserr_wrong_cred;
		if (!mach_creds_match(unconf, rqstp))
			goto out_free_conn;
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
		if (status) {
			/* an unconfirmed replay returns misordered */
			status = nfserr_seq_misordered;
			goto out_free_conn;
		}
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (old) {
			status = mark_client_expired(old);
			if (status)
				goto out_free_conn;
			expire_client(old);
		}
		move_to_confirmed(unconf);
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
		goto out_free_conn;
	}
	status = nfs_ok;
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

	init_session(rqstp, new, conf, cr_ses);
	nfsd4_init_conn(rqstp, conn, new);

	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
	       NFS4_MAX_SESSIONID_LEN);
	cs_slot->sl_seqid++;
	cr_ses->seqid = cs_slot->sl_seqid;

	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
	nfs4_unlock_state();
	return status;
out_free_conn:
	nfs4_unlock_state();
	free_conn(conn);
out_free_session:
	__free_session(new);
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
	return status;
}

static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}
	return nfserr_inval;
}

__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
{
	struct nfsd4_session *session = cstate->session;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
	__be32 status;

	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
	spin_lock(&nn->client_lock);
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}

__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;
	struct nfsd4_conn *conn;
	struct nfsd4_session *session;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
	nfs4_lock_state();
	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
	spin_unlock(&nn->client_lock);
	if (!session)
		goto out_no_session;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(session->se_client, rqstp))
		goto out;
	status = nfsd4_map_bcts_dir(&bcts->dir);
	if (status)
		goto out;
	conn = alloc_conn(rqstp, bcts->dir);
	status = nfserr_jukebox;
	if (!conn)
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
	nfsd4_put_session(session);
out_no_session:
	nfs4_unlock_state();
	return status;
}

static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
{
	if (!session)
		return 0;
	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
}

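/* DESTROY_SESSION: tear down a session once the caller's credentials have been verified. */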
__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
	struct nfsd4_session *ses;
	__be32 status;
	int ref_held_by_me = 0;
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	nfs4_lock_state();
	status = nfserr_not_only_op;
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
		if (!nfsd4_last_compound_op(r))
			goto out;
		ref_held_by_me++;
	}
	dump_sessionid(__func__, &sessionid->sessionid);
	spin_lock(&nn->client_lock);
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
	if (!ses)
		goto out_client_lock;
	status = nfserr_wrong_cred;
	if (!mach_creds_match(ses->se_client, r))
		goto out_put_session;
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
	if (status)
		goto out_put_session;
	unhash_session(ses);
	spin_unlock(&nn->client_lock);

	nfsd4_probe_callback_sync(ses->se_client);

	spin_lock(&nn->client_lock);
	status = nfs_ok;
out_put_session:
	nfsd4_put_session_locked(ses);
out_client_lock:
	spin_unlock(&nn->client_lock);
out:
	nfs4_unlock_state();
	return status;
}

static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
		if (c->cn_xprt == xpt) {
			return c;
		}
	}
	return NULL;
}

static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_conn *c;
	__be32 status = nfs_ok;
	int ret;

	spin_lock(&clp->cl_lock);
	c = __nfsd4_find_conn(new->cn_xprt, ses);
	if (c)
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
}

static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}

static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;
}

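/*
 * SEQUENCE: validate the slot and seqid, replay the cached reply on a
 * seqid match, and otherwise mark the slot in use and bind this
 * connection to the session.
 */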
__be32
nfsd4_sequence(struct svc_rqst *rqstp,
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct xdr_stream *xdr = &resp->xdr;
	struct nfsd4_session *session;
	struct nfs4_client *clp;
	struct nfsd4_slot *slot;
	struct nfsd4_conn *conn;
	__be32 status;
	int buflen;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

	spin_lock(&nn->client_lock);
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
	if (!session)
		goto out_no_session;
	clp = session->se_client;

	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
		goto out_put_session;

	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
		goto out_put_session;

	status = nfserr_badslot;
	if (seq->slotid >= session->se_fchannel.maxreqs)
		goto out_put_session;

	slot = session->se_slots[seq->slotid];
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
	if (status == nfserr_replay_cache) {
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
			goto out_put_session;
		cstate->slot = slot;
		cstate->session = session;
		cstate->clp = clp;
		/* Return the cached reply status and set cstate->status
		 * for nfsd4_proc_compound processing */
		status = nfsd4_replay_cache_entry(resp, seq);
		cstate->status = nfserr_replay_cache;
		goto out;
	}
	if (status)
		goto out_put_session;

	status = nfsd4_sequence_check_conn(conn, session);
	conn = NULL;
	if (status)
		goto out_put_session;

	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
		goto out_put_session;
	svc_reserve(rqstp, buflen);

	status = nfs_ok;
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
	slot->sl_flags |= NFSD4_SLOT_INUSE;
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;

	cstate->slot = slot;
	cstate->session = session;
	cstate->clp = clp;

out:
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
	}
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
out_no_session:
	if (conn)
		free_conn(conn);
	spin_unlock(&nn->client_lock);
	return status;
out_put_session:
	nfsd4_put_session_locked(session);
	goto out_no_session;
}

void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}

__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
	struct nfs4_client *conf, *unconf, *clp;
	__be32 status = 0;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	nfs4_lock_state();
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
	conf = find_confirmed_client(&dc->clientid, true, nn);
	WARN_ON_ONCE(conf && unconf);

	if (conf) {
		clp = conf;

		if (client_has_state(conf)) {
			status = nfserr_clientid_busy;
			goto out;
		}
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
	if (!mach_creds_match(clp, rqstp)) {
		status = nfserr_wrong_cred;
		goto out;
	}
	expire_client(clp);
out:
	nfs4_unlock_state();
	return status;
}

__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
	__be32 status = 0;

	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		return nfs_ok;
	}

	nfs4_lock_state();
	status = nfserr_complete_already;
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
		goto out;

	status = nfs_ok;
	nfsd4_client_record_create(cstate->session->se_client);
out:
	nfs4_unlock_state();
	return status;
}

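/* SETCLIENTID: the NFSv4.0 counterpart of EXCHANGE_ID (RFC 3530, section 14.2.33). */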
__be32
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
{
	struct xdr_netobj	clname = setclid->se_name;
	nfs4_verifier		clverifier = setclid->se_verf;
	struct nfs4_client	*conf, *unconf, *new;
	__be32			status;
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	/* Cases below refer to rfc 3530 section 14.2.33: */
	nfs4_lock_state();
	conf = find_confirmed_client_by_name(&clname, nn);
	if (conf) {
		/* case 0: */
		status = nfserr_clid_inuse;
		if (clp_used_exchangeid(conf))
			goto out;
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
			goto out;
		}
	}
	unconf = find_unconfirmed_client_by_name(&clname, nn);
	if (unconf)
		expire_client(unconf);
	status = nfserr_jukebox;
	new = create_client(clname, rqstp, &clverifier);
	if (new == NULL)
		goto out;
	if (conf && same_verf(&conf->cl_verifier, &clverifier))
		/* case 1: probable callback update */
		copy_clid(new, conf);
	else /* case 4 (new client) or cases 2, 3 (client reboot): */
		gen_clid(new, nn);
	new->cl_minorversion = 0;
	gen_callback(new, setclid, rqstp);
	add_to_unconfirmed(new);
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}


__be32
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			 struct nfsd4_compound_state *cstate,
			 struct nfsd4_setclientid_confirm *setclientid_confirm)
{
	struct nfs4_client *conf, *unconf;
	nfs4_verifier confirm = setclientid_confirm->sc_confirm;
	clientid_t * clid = &setclientid_confirm->sc_clientid;
	__be32 status;
	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;
	nfs4_lock_state();

	conf = find_confirmed_client(clid, false, nn);
	unconf = find_unconfirmed_client(clid, false, nn);
	/*
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * there's a bug somewhere.  Let's charitably assume it's our
	 * bug.
	 */
	status = nfserr_serverfault;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
	/* cases below refer to rfc 3530 section 14.2.34: */
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && !unconf) /* case 2: probable retransmit */
			status = nfs_ok;
		else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
		nfsd4_probe_callback(conf);
		expire_client(unconf);
	} else { /* case 3: normal case; new or rebooted client */
		conf = find_confirmed_client_by_name(&unconf->cl_name, nn);
		if (conf) {
			status = mark_client_expired(conf);
			if (status)
				goto out;
			expire_client(conf);
		}
		move_to_confirmed(unconf);
		nfsd4_probe_callback(unconf);
	}
out:
	nfs4_unlock_state();
	return status;
}

static struct nfs4_file *nfsd4_alloc_file(void)
{
	return kmem_cache_alloc(file_slab, GFP_KERNEL);
}

/* OPEN Share state helper functions */
static void nfsd4_init_file(struct nfs4_file *fp, struct inode *ino)
{
	unsigned int hashval = file_hashval(ino);

	lockdep_assert_held(&state_lock);

	atomic_set(&fp->fi_ref, 1);
	spin_lock_init(&fp->fi_lock);
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
	ihold(ino);
	fp->fi_inode = ino;
	fp->fi_had_conflict = false;
	fp->fi_lease = NULL;
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
	hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
}

void
nfsd4_free_slabs(void)
{
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
}

int
nfsd4_init_slabs(void)
{
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
		goto out;
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
	if (lockowner_slab == NULL)
		goto out_free_openowner_slab;
	file_slab = kmem_cache_create("nfsd4_files",
			sizeof(struct nfs4_file), 0, 0, NULL);
	if (file_slab == NULL)
		goto out_free_lockowner_slab;
	stateid_slab = kmem_cache_create("nfsd4_stateids",
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
	if (stateid_slab == NULL)
		goto out_free_file_slab;
	deleg_slab = kmem_cache_create("nfsd4_delegations",
			sizeof(struct nfs4_delegation), 0, 0, NULL);
	if (deleg_slab == NULL)
		goto out_free_stateid_slab;
	return 0;

out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out:
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
}

static void init_nfs4_replay(struct nfs4_replay *rp)
{
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
}

static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
{
	struct nfs4_stateowner *sop;

	sop = kmem_cache_alloc(slab, GFP_KERNEL);
	if (!sop)
		return NULL;

	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
	if (!sop->so_owner.data) {
		kmem_cache_free(slab, sop);
		return NULL;
	}
	sop->so_owner.len = owner->len;

	INIT_LIST_HEAD(&sop->so_stateids);
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
	return sop;
}

static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
	list_add(&oo->oo_perclient, &clp->cl_openowners);
}

static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
			   struct nfsd4_compound_state *cstate)
{
	struct nfs4_client *clp = cstate->clp;
	struct nfs4_openowner *oo;

	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
		return NULL;
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
	oo->oo_flags = NFS4_OO_NEW;
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
	oo->oo_time = 0;
	oo->oo_last_closed_stid = NULL;
	INIT_LIST_HEAD(&oo->oo_close_lru);
	hash_openowner(oo, clp, strhashval);
	return oo;
}

static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open)
{
	struct nfs4_openowner *oo = open->op_openowner;

	stp->st_stid.sc_type = NFS4_OPEN_STID;
	INIT_LIST_HEAD(&stp->st_locks);
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
	stp->st_stateowner = &oo->oo_owner;
	get_nfs4_file(fp);
	stp->st_file = fp;
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
	set_access(open->op_share_access, stp);
	set_deny(open->op_share_deny, stp);
	stp->st_openstp = NULL;
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
}

static void
move_to_close_lru(struct nfs4_openowner *oo, struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);

	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
	oo->oo_time = get_seconds();
}

static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
							clientid_t *clid)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
		(sop->so_client->cl_clientid.cl_id == clid->cl_id);
}

static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			bool sessions, struct nfsd_net *nn)
{
	struct nfs4_stateowner *so;
	struct nfs4_openowner *oo;
	struct nfs4_client *clp;

	list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
			oo = openowner(so);
			clp = oo->oo_owner.so_client;
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
			renew_client(oo->oo_owner.so_client);
			return oo;
		}
	}
	return NULL;
}

/* search file_hashtbl[] for file */
static struct nfs4_file *
find_file_locked(struct inode *ino)
{
	unsigned int hashval = file_hashval(ino);
	struct nfs4_file *fp;

	lockdep_assert_held(&state_lock);

	hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
		if (fp->fi_inode == ino) {
			get_nfs4_file(fp);
			return fp;
		}
	}
	return NULL;
}

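/* Like find_file_locked(), but takes and drops state_lock itself. */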
static struct nfs4_file *
find_file(struct inode *ino)
{
	struct nfs4_file *fp;

	spin_lock(&state_lock);
	fp = find_file_locked(ino);
	spin_unlock(&state_lock);
	return fp;
}

static struct nfs4_file *
find_or_add_file(struct inode *ino, struct nfs4_file *new)
{
	struct nfs4_file *fp;

	spin_lock(&state_lock);
	fp = find_file_locked(ino);
	if (fp == NULL) {
		nfsd4_init_file(new, ino);
		fp = new;
	}
	spin_unlock(&state_lock);

	return fp;
}

/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid
 */
static __be32
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfs4_file *fp;
	struct nfs4_ol_stateid *stp;
	__be32 ret;

	fp = find_file(ino);
	if (!fp)
		return nfs_ok;
	ret = nfserr_locked;
	/* Search for conflicting share reservations */
	spin_lock(&fp->fi_lock);
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
		if (test_deny(deny_type, stp) ||
		    test_deny(NFS4_SHARE_DENY_BOTH, stp))
			goto out;
	}
	ret = nfs_ok;
out:
	spin_unlock(&fp->fi_lock);
	put_nfs4_file(fp);
	return ret;
}

static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	lockdep_assert_held(&state_lock);
	/* We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference: */
	atomic_inc(&dp->dl_count);

	/*
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
	if (dp->dl_time == 0) {
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
		dp->dl_time = get_seconds();
	}

	block_delegations(&dp->dl_fh);

	nfsd4_cb_recall(dp);
}

/* Called from break_lease() with i_lock held. */
static void nfsd_break_deleg_cb(struct file_lock *fl)
{
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;

	if (!fp) {
		WARN(1, "(%p)->fl_owner NULL\n", fl);
		return;
	}
	if (fp->fi_had_conflict) {
		WARN(1, "duplicate break on %p\n", fp);
		return;
	}
	/*
	 * We don't want the locks code to timeout the lease for us;
	 * we'll remove it ourself if a delegation isn't returned
	 * in time:
	 */
	fl->fl_break_time = 0;

	spin_lock(&state_lock);
	fp->fi_had_conflict = true;
	list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
		nfsd_break_one_deleg(dp);
	spin_unlock(&state_lock);
}

static
int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
{
	if (arg & F_UNLCK)
		return lease_modify(onlist, arg);
	else
		return -EAGAIN;
}

static const struct lock_manager_operations nfsd_lease_mng_ops = {
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
};

static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}

static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	struct nfs4_client *found;

	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for v4.0 and "sessions"
	 * will be false.
	 */
	WARN_ON_ONCE(cstate->session);
	found = find_confirmed_client(clid, false, nn);
	if (!found)
		return nfserr_expired;

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	atomic_inc(&found->cl_refcount);
	return nfs_ok;
}

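/*
 * First half of OPEN processing: look up (or create) the open owner and
 * preallocate the file and stateid that may be needed later.
 */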
__be32
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
		    struct nfsd4_open *open, struct nfsd_net *nn)
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
	struct nfs4_openowner *oo = NULL;
	__be32 status;

	if (STALE_CLIENTID(&open->op_clientid, nn))
		return nfserr_stale_clientid;
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;

	status = lookup_clientid(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

	strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
	oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn);
	open->op_openowner = oo;
	if (!oo) {
		goto new_owner;
	}
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Replace unconfirmed owners without checking for replay. */
		release_openowner(oo);
		open->op_openowner = NULL;
		goto new_owner;
	}
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
new_owner:
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
alloc_stateid:
	open->op_stp = nfs4_alloc_stateid(clp);
	if (!open->op_stp)
		return nfserr_jukebox;
	return nfs_ok;
}

static inline __be32
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

static int share_access_to_flags(u32 share_access)
{
	return share_access == NFS4_SHARE_ACCESS_READ ? RD_STATE : WR_STATE;
}

static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}

static __be32
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
		struct nfs4_delegation **dp)
{
	int flags;
	__be32 status = nfserr_bad_stateid;

	*dp = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (*dp == NULL)
		goto out;
	flags = share_access_to_flags(open->op_share_access);
	status = nfs4_check_delegmode(*dp, flags);
	if (status)
		*dp = NULL;
out:
	if (!nfsd4_is_deleg_cur(open))
		return nfs_ok;
	if (status)
		return status;
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
	return nfs_ok;
}

static __be32
nfs4_check_open(struct nfs4_file *fp, struct nfsd4_open *open, struct nfs4_ol_stateid **stpp)
{
	struct nfs4_ol_stateid *local;
	struct nfs4_openowner *oo = open->op_openowner;

	spin_lock(&fp->fi_lock);
	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
		/* remember if we have seen this open owner */
		if (local->st_stateowner == &oo->oo_owner)
			*stpp = local;
		/* check for conflicting share reservations */
		if (!test_share(local, open)) {
			spin_unlock(&fp->fi_lock);
			return nfserr_share_denied;
		}
	}
	spin_unlock(&fp->fi_lock);
	return nfs_ok;
}

static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}

static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
}

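/*
 * Open (or reuse) a struct file for the access the client requested and
 * record that access in the nfs4_file.
 */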
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
		struct svc_fh *cur_fh, struct nfsd4_open *open)
{
	struct file *filp = NULL;
	__be32 status;
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);

	spin_lock(&fp->fi_lock);
	if (!fp->fi_fds[oflag]) {
		spin_unlock(&fp->fi_lock);
		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
		if (status)
			goto out;
		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = filp;
			filp = NULL;
		}
	}
	status = nfs4_file_get_access(fp, open->op_share_access);
	spin_unlock(&fp->fi_lock);
	if (filp)
		fput(filp);
	if (status)
		goto out_put_access;

	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;

	return nfs_ok;

out_put_access:
	nfs4_file_put_access(fp, open->op_share_access);
out:
	return status;
}

static __be32
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
{
	u32 op_share_access = open->op_share_access;
	__be32 status;

	if (!test_access(op_share_access, stp))
		status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
	else
		status = nfsd4_truncate(rqstp, cur_fh, open);

	if (status)
		return status;

	/* remember the open */
	set_access(op_share_access, stp);
	set_deny(open->op_share_deny, stp);
	return nfs_ok;
}


static void
nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
{
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
}

/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}

static struct file_lock *nfs4_alloc_init_lease(struct nfs4_delegation *dp, int flag)
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	locks_init_lock(fl);
	fl->fl_lmops = &nfsd_lease_mng_ops;
	fl->fl_flags = FL_DELEG;
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
	fl->fl_end = OFFSET_MAX;
	fl->fl_owner = (fl_owner_t)(dp->dl_file);
	fl->fl_pid = current->tgid;
	return fl;
}

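/* Install a read-delegation lease on the file and hash the delegation. */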
static int nfs4_setlease(struct nfs4_delegation *dp)
{
	struct nfs4_file *fp = dp->dl_file;
	struct file_lock *fl;
	int status;

	fl = nfs4_alloc_init_lease(dp, NFS4_OPEN_DELEGATE_READ);
	if (!fl)
		return -ENOMEM;
	fl->fl_file = find_readable_file(fp);
	status = vfs_setlease(fl->fl_file, fl->fl_type, &fl);
	if (status)
		goto out_free;
	fp->fi_lease = fl;
	fp->fi_deleg_file = fl->fl_file;
	atomic_set(&fp->fi_delegees, 1);
	spin_lock(&state_lock);
	hash_delegation_locked(dp, fp);
	spin_unlock(&state_lock);
	return 0;
out_free:
	if (fl->fl_file)
		fput(fl->fl_file);
	locks_free_lock(fl);
	return status;
}

static int nfs4_set_delegation(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
	if (fp->fi_had_conflict)
		return -EAGAIN;
	get_nfs4_file(fp);
	dp->dl_file = fp;
	if (!fp->fi_lease)
		return nfs4_setlease(dp);
	spin_lock(&state_lock);
	atomic_inc(&fp->fi_delegees);
	if (fp->fi_had_conflict) {
		spin_unlock(&state_lock);
		return -EAGAIN;
	}
	hash_delegation_locked(dp, fp);
	spin_unlock(&state_lock);
	return 0;
}

static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN)
		open->op_why_no_deleg = WND4_CONTENTION;
	else {
		open->op_why_no_deleg = WND4_RESOURCE;
		switch (open->op_deleg_want) {
		case NFS4_SHARE_WANT_READ_DELEG:
		case NFS4_SHARE_WANT_WRITE_DELEG:
		case NFS4_SHARE_WANT_ANY_DELEG:
			break;
		case NFS4_SHARE_WANT_CANCEL:
			open->op_why_no_deleg = WND4_CANCELLED;
			break;
		case NFS4_SHARE_WANT_NO_DELEG:
3453
			WARN_ON_ONCE(1);
3454 3455 3456 3457
		}
	}
}

/*
 * Attempt to hand out a delegation.
 *
 * Note we don't support write delegations, and won't until the vfs has
 * proper support for them.
 */
static void
nfs4_open_delegation(struct net *net, struct svc_fh *fh,
		     struct nfsd4_open *open, struct nfs4_ol_stateid *stp)
{
	struct nfs4_delegation *dp;
	struct nfs4_openowner *oo = container_of(stp->st_stateowner, struct nfs4_openowner, oo_owner);
	int cb_up;
	int status = 0;

	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
	open->op_recall = 0;
	switch (open->op_claim_type) {
		case NFS4_OPEN_CLAIM_PREVIOUS:
			if (!cb_up)
				open->op_recall = 1;
			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
				goto out_no_deleg;
			break;
		case NFS4_OPEN_CLAIM_NULL:
		case NFS4_OPEN_CLAIM_FH:
			/*
			 * Let's not give out any delegations till everyone's
			 * had the chance to reclaim theirs....
			 */
			if (locks_in_grace(net))
				goto out_no_deleg;
			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
				goto out_no_deleg;
			/*
			 * Also, if the file was opened for write or
			 * create, there's a good chance the client's
			 * about to write to it, resulting in an
			 * immediate recall (since we don't support
			 * write delegations):
			 */
			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
				goto out_no_deleg;
			if (open->op_create == NFS4_OPEN_CREATE)
				goto out_no_deleg;
			break;
		default:
			goto out_no_deleg;
	}
	dp = alloc_init_deleg(oo->oo_owner.so_client, stp, fh);
	if (dp == NULL)
		goto out_no_deleg;
	status = nfs4_set_delegation(dp, stp->st_file);
	if (status)
		goto out_free;

	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));

	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
		STATEID_VAL(&dp->dl_stid.sc_stateid));
	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
	return;
out_free:
	destroy_delegation(dp);
out_no_deleg:
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
		open->op_recall = 1;
	}

	/* 4.1 client asking for a delegation? */
	if (open->op_deleg_want)
		nfsd4_open_deleg_none_ext(open, status);
	return;
}
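
/*
 * In short, nfs4_open_delegation() above only ever hands out read
 * delegations: for CLAIM_NULL/CLAIM_FH opens one is granted only when
 * the callback channel looks usable, we are out of the grace period,
 * the openowner is confirmed, and the open is neither for write nor a
 * create; CLAIM_PREVIOUS is honoured only when the client is reclaiming
 * a read delegation.
 */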

static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}

/*
 * called with nfs4_lock_state() held.
 */
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_or_add_file(ino, open->op_file);
	if (fp != open->op_file) {
		if ((status = nfs4_check_open(fp, open, &stp)))
			goto out;
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
	} else {
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
		status = nfserr_jukebox;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 */
	if (stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status)
			goto out;
	} else {
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, open);
		if (status)
			goto out;
		stp = open->op_stp;
		open->op_stp = NULL;
		init_open_stateid(stp, fp, open);
	}
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	if (nfsd4_has_session(&resp->cstate)) {
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	 * Attempt to hand out a delegation. No error return, because the
	 * OPEN succeeds even if we fail.
	 */
	nfs4_open_delegation(SVC_NET(rqstp), current_fh, open, stp);
nodeleg:
	status = nfs_ok;

	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
	/*
	 * To finish the open response, we just need to set the rflags.
	 */
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
	    !nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;

	return status;
}
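
/*
 * Note that the OPEN itself is fully established above (stateid bumped
 * and copied into the reply) before any delegation is attempted, so a
 * delegation failure can never fail the OPEN.
 */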

void nfsd4_cleanup_open_state(struct nfsd4_open *open, __be32 status)
{
	if (open->op_openowner) {
		struct nfs4_openowner *oo = open->op_openowner;

		if (!list_empty(&oo->oo_owner.so_stateids))
			list_del_init(&oo->oo_close_lru);
		if (oo->oo_flags & NFS4_OO_NEW) {
			if (status) {
				release_openowner(oo);
				open->op_openowner = NULL;
			} else
				oo->oo_flags &= ~NFS4_OO_NEW;
		}
	}
	if (open->op_file)
		nfsd4_free_file(open->op_file);
	if (open->op_stp)
		free_generic_stateid(open->op_stp);
}

__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    clientid_t *clid)
{
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	nfs4_lock_state();
	dprintk("process_renew(%08x/%08x): starting\n",
			clid->cl_boot, clid->cl_id);
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		goto out;
	clp = cstate->clp;
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}

static void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	dprintk("NFSD: end of grace period\n");
	nn->grace_ended = true;
	nfsd4_record_grace_done(nn, nn->boot_time);
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * Now that every NFSv4 client has had the chance to recover and
	 * to see the (possibly new, possibly shorter) lease time, we
	 * can safely set the next grace time to the current lease time:
	 */
	nn->nfsd4_grace = nn->nfsd4_lease;
}

3712
static time_t
3713
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
3716
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head *pos, *next, reaplist;
3719
	time_t cutoff = get_seconds() - nn->nfsd4_lease;
3720
	time_t t, new_timeo = nn->nfsd4_lease;
L
	nfs4_lock_state();

	dprintk("NFSD: laundromat service - starting\n");
3725
	nfsd4_end_grace(nn);
3726
	INIT_LIST_HEAD(&reaplist);
3727
	spin_lock(&nn->client_lock);
3728
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			t = clp->cl_time - cutoff;
3732
			new_timeo = min(new_timeo, t);
			break;
		}
3735
		if (mark_client_expired_locked(clp)) {
3736 3737 3738 3739
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
3740
		list_move(&clp->cl_lru, &reaplist);
3741
	}
3742
	spin_unlock(&nn->client_lock);
3743 3744
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		expire_client(clp);
	}
3749
	spin_lock(&state_lock);
3750
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3752 3753
		if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
			continue;
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
3755 3756
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		list_move(&dp->dl_recall_lru, &reaplist);
	}
3761
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
3764
		revoke_delegation(dp);
	}
3766
	list_for_each_safe(pos, next, &nn->close_lru) {
3767 3768
		oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
		if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
3769 3770
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
3773
		release_openowner(oo);
	}
3775
	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	nfs4_unlock_state();
3777
	return new_timeo;
}
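
/*
 * nfs4_laundromat() above returns the number of seconds until the next
 * sweep is due (at least NFSD_LAUNDROMAT_MINTIMEOUT); laundromat_main()
 * below uses that value to requeue itself on laundry_wq.
 */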

static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

static void
3784
laundromat_main(struct work_struct *laundry)
{
	time_t t;
3787 3788 3789 3790
	struct delayed_work *dwork = container_of(laundry, struct delayed_work,
						  work);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

3792
	t = nfs4_laundromat(nn);
	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
3794
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}

3797
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
{
3799 3800 3801
	if (fhp->fh_dentry->d_inode != stp->st_file->fi_inode)
		return nfserr_bad_stateid;
	return nfs_ok;
}

static inline int
3805
access_permit_read(struct nfs4_ol_stateid *stp)
{
3807 3808 3809
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
}

static inline int
3813
access_permit_write(struct nfs4_ol_stateid *stp)
{
3815 3816
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
}

static
3820
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
3822
        __be32 status = nfserr_openmode;

3824 3825 3826
	/* For lock stateid's, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
3827
	if ((flags & WR_STATE) && !access_permit_write(stp))
                goto out;
3829
	if ((flags & RD_STATE) && !access_permit_read(stp))
                goto out;
	status = nfs_ok;
out:
	return status;
}
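
/*
 * Lock stateids are checked against their parent open stateid
 * (st_openstp), so e.g. a WR_STATE check on a lock stateid requires the
 * underlying OPEN to have write (or both) access.
 */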

3836
static inline __be32
3837
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
3839
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
3841
	else if (locks_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}

/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
3858
grace_disallows_io(struct net *net, struct inode *inode)
{
3860
	return locks_in_grace(net) && mandatory_lock(inode);
}

3863 3864 3865
/* Returns true iff a is later than b: */
static bool stateid_generation_after(stateid_t *a, stateid_t *b)
{
	return (s32)(a->si_generation - b->si_generation) > 0;
3867 3868
}

static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
3870
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
3876 3877 3878 3879
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

3881
	/* If the client sends us a stateid from the future, it's buggy: */
3882
	if (stateid_generation_after(in, ref))
3883 3884
		return nfserr_bad_stateid;
	/*
3885 3886 3887 3888 3889 3890 3891 3892
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client.  For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight.  The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
3893
	 */
3894
	return nfserr_old_stateid;
3895 3896
}
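
/*
 * For example: if the reference stateid is at generation 5, an incoming
 * 5 is accepted, 6 (or anything the s32 subtraction above considers
 * newer) is nfserr_bad_stateid, and 4 is nfserr_old_stateid; a zero
 * incoming generation is accepted only on a sessions (4.1+) compound.
 */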

3897
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
3898
{
3899 3900 3901
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *ols;
	__be32 status;
3902

3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913
	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return nfserr_bad_stateid;
	/* Client debugging aid. */
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
		char addr_str[INET6_ADDRSTRLEN];
		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
				 sizeof(addr_str));
		pr_warn_ratelimited("NFSD: client %s testing state ID "
					"with incorrect client ID\n", addr_str);
		return nfserr_bad_stateid;
	}
3914
	s = find_stateid(cl, stateid);
3915
	if (!s)
3916
		return nfserr_bad_stateid;
3917
	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
3918
	if (status)
3919
		return status;
3920 3921
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
3922
		return nfs_ok;
3923 3924
	case NFS4_REVOKED_DELEG_STID:
		return nfserr_deleg_revoked;
3925 3926 3927 3928 3929 3930 3931
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		ols = openlockstateid(s);
		if (ols->st_stateowner->so_is_open_owner
	    			&& !(openowner(ols->st_stateowner)->oo_flags
						& NFS4_OO_CONFIRMED))
			return nfserr_bad_stateid;
3932
		return nfs_ok;
3933 3934 3935
	default:
		printk("unknown stateid type %x\n", s->sc_type);
	case NFS4_CLOSED_STID:
3936
		return nfserr_bad_stateid;
3937
	}
3938 3939
}

3940 3941 3942 3943
static __be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid, unsigned char typemask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
3944
{
	__be32 status;
3946 3947 3948

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return nfserr_bad_stateid;
3949
	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
3950
	if (status == nfserr_stale_clientid) {
3951
		if (cstate->session)
3952
			return nfserr_bad_stateid;
3953
		return nfserr_stale_stateid;
3954
	}
	if (status)
		return status;
3957
	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
3958 3959 3960 3961 3962
	if (!*s)
		return nfserr_bad_stateid;
	return nfs_ok;
}

/*
* Checks for stateid operations
*/
3966
__be32
3967
nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
3968
			   stateid_t *stateid, int flags, struct file **filpp)
{
3970
	struct nfs4_stid *s;
3971
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
3973
	struct svc_fh *current_fh = &cstate->current_fh;
	struct inode *ino = current_fh->fh_dentry->d_inode;
3975
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
3976
	struct file *file = NULL;
3977
	__be32 status;

	if (filpp)
		*filpp = NULL;

3982
	if (grace_disallows_io(net, ino))
		return nfserr_grace;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3986
		return check_special_stateids(net, current_fh, stateid, flags);

3988 3989
	nfs4_lock_state();

3990
	status = nfsd4_lookup_stateid(cstate, stateid,
3991
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
3992
				&s, nn);
3993
	if (status)
3994
		goto out;
3995 3996 3997
	status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto out;
3998 3999
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
4000
		dp = delegstateid(s);
4001 4002 4003
		status = nfs4_check_delegmode(dp, flags);
		if (status)
			goto out;
4004
		if (filpp) {
4005 4006
			file = dp->dl_file->fi_deleg_file;
			if (!file) {
4007 4008 4009 4010
				WARN_ON_ONCE(1);
				status = nfserr_serverfault;
				goto out;
			}
4011
			get_file(file);
4012
		}
4013 4014 4015
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
4016
		stp = openlockstateid(s);
4017 4018
		status = nfs4_check_fh(current_fh, stp);
		if (status)
			goto out;
4020
		if (stp->st_stateowner->so_is_open_owner
4021
		    && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
			goto out;
4023 4024
		status = nfs4_check_openmode(stp, flags);
		if (status)
			goto out;
4026 4027
		if (filpp) {
			if (flags & RD_STATE)
4028
				file = find_readable_file(stp->st_file);
4029
			else
4030
				file = find_writeable_file(stp->st_file);
4031
		}
4032 4033
		break;
	default:
4034 4035
		status = nfserr_bad_stateid;
		goto out;
	}
	status = nfs_ok;
4038
	if (file)
4039
		*filpp = file;
out:
4041
	nfs4_unlock_state();
	return status;
}
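
/*
 * Note the file returned above: for a delegation stateid it is the file
 * the lease was set on (fi_deleg_file); for open and lock stateids it is
 * a readable or writeable file depending on whether RD_STATE or WR_STATE
 * was requested.
 */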

4045
static __be32
4046
nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
4047
{
4048 4049 4050
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);

	if (check_for_locks(stp->st_file, lo))
4051
		return nfserr_locks_held;
4052
	release_lockowner_if_empty(lo);
4053 4054 4055
	return nfs_ok;
}

4056 4057 4058 4059 4060 4061 4062
/*
 * Test if the stateid is valid
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_test_stateid *test_stateid)
{
4063 4064 4065 4066 4067
	struct nfsd4_test_stateid_id *stateid;
	struct nfs4_client *cl = cstate->session->se_client;

	nfs4_lock_state();
	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
4068 4069
		stateid->ts_id_status =
			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
4070 4071
	nfs4_unlock_state();

4072 4073 4074
	return nfs_ok;
}

4075 4076 4077 4078 4079
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_free_stateid *free_stateid)
{
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
4081
	struct nfs4_delegation *dp;
4082
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;
4084 4085

	nfs4_lock_state();
4086
	s = find_stateid(cl, stateid);
	if (!s)
4088
		goto out;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
4091 4092
		ret = nfserr_locks_held;
		goto out;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			goto out;
		if (s->sc_type == NFS4_LOCK_STID)
			ret = nfsd4_free_lock_stateid(openlockstateid(s));
		else
			ret = nfserr_locks_held;
4102
		break;
4103 4104 4105 4106 4107
	case NFS4_REVOKED_DELEG_STID:
		dp = delegstateid(s);
		destroy_revoked_delegation(dp);
		ret = nfs_ok;
		break;
4108 4109
	default:
		ret = nfserr_bad_stateid;
4110 4111 4112 4113 4114 4115
	}
out:
	nfs4_unlock_state();
	return ret;
}
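
/*
 * So FREE_STATEID only succeeds for lock stateids with no locks still
 * held and for revoked delegations; open stateids and delegations that
 * are still in effect get nfserr_locks_held.
 */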

4116 4117 4118 4119 4120 4121
static inline int
setlkflg (int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}

4123
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
4124 4125 4126 4127 4128 4129 4130 4131
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
4132 4133
	if (stp->st_stid.sc_type == NFS4_CLOSED_STID
		|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4134 4135
		/*
		 * "Closed" stateid's exist *only* to return
4136 4137
		 * nfserr_replay_me from the previous step, and
		 * revoked delegations are kept only for free_stateid.
4138 4139 4140 4141 4142 4143
		 */
		return nfserr_bad_stateid;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		return status;
	return nfs4_check_fh(current_fh, stp);
4144 4145
}

/* 
 * Checks for sequence id mutating operations. 
 */
4149
static __be32
4150
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
4151
			 stateid_t *stateid, char typemask,
4152 4153
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
4155
	__be32 status;
4156
	struct nfs4_stid *s;
4157
	struct nfs4_ol_stateid *stp = NULL;

4159 4160
	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));
4161

	*stpp = NULL;
4163
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
4164 4165
	if (status)
		return status;
4166
	stp = openlockstateid(s);
4167
	if (!nfsd4_has_session(cstate))
4168
		cstate->replay_owner = stp->st_stateowner;

4170 4171 4172 4173
	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	return status;
4174
}
4175

4176 4177
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
4178 4179 4180
{
	__be32 status;
	struct nfs4_openowner *oo;
L
Linus Torvalds 已提交
4181

4182
	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
4183
						NFS4_OPEN_STID, stpp, nn);
4184 4185
	if (status)
		return status;
4186
	oo = openowner((*stpp)->st_stateowner);
4187
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED))
4188 4189
		return nfserr_bad_stateid;
	return nfs_ok;
L
Linus Torvalds 已提交
4190 4191
}

4192
__be32
4193
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4194
		   struct nfsd4_open_confirm *oc)
L
Linus Torvalds 已提交
4195
{
4196
	__be32 status;
4197
	struct nfs4_openowner *oo;
4198
	struct nfs4_ol_stateid *stp;
4199
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
L
Linus Torvalds 已提交
4200

A
Al Viro 已提交
4201 4202
	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);
L
Linus Torvalds 已提交
4203

4204
	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
4205 4206
	if (status)
		return status;
L
Linus Torvalds 已提交
4207 4208 4209

	nfs4_lock_state();

4210
	status = nfs4_preprocess_seqid_op(cstate,
4211
					oc->oc_seqid, &oc->oc_req_stateid,
4212
					NFS4_OPEN_STID, &stp, nn);
4213
	if (status)
4214
		goto out;
4215
	oo = openowner(stp->st_stateowner);
4216
	status = nfserr_bad_stateid;
4217
	if (oo->oo_flags & NFS4_OO_CONFIRMED)
4218
		goto out;
4219
	oo->oo_flags |= NFS4_OO_CONFIRMED;
4220 4221
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4222
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
4223
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));
4224

4225
	nfsd4_client_record_create(oo->oo_owner.so_client);
4226
	status = nfs_ok;
L
Linus Torvalds 已提交
4227
out:
4228
	nfsd4_bump_seqid(cstate, status);
4229 4230
	if (!cstate->replay_owner)
		nfs4_unlock_state();
L
Linus Torvalds 已提交
4231 4232 4233
	return status;
}

J
J. Bruce Fields 已提交
4234
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
L
Linus Torvalds 已提交
4235
{
4236
	if (!test_access(access, stp))
J
J. Bruce Fields 已提交
4237
		return;
4238
	nfs4_file_put_access(stp->st_file, access);
4239
	clear_access(access, stp);
J
J. Bruce Fields 已提交
4240
}
4241

J
J. Bruce Fields 已提交
4242 4243 4244 4245 4246 4247 4248 4249 4250 4251 4252 4253 4254 4255
static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
	switch (to_access) {
	case NFS4_SHARE_ACCESS_READ:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_WRITE:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
4256
		WARN_ON_ONCE(1);
L
Linus Torvalds 已提交
4257 4258 4259 4260
	}
}
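
/*
 * A downgrade to READ (or WRITE) drops the other access bits and their
 * file references via nfs4_file_put_access(); downgrading to BOTH is a
 * no-op, and any other target access is a server bug (WARN_ON_ONCE).
 */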

static void
4261
reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
L
Linus Torvalds 已提交
4262 4263 4264 4265
{
	int i;
	for (i = 0; i < 4; i++) {
		if ((i & deny) != i)
4266
			clear_deny(i, stp);
L
Linus Torvalds 已提交
4267 4268 4269
	}
}

4270
__be32
4271 4272
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
4273
		     struct nfsd4_open_downgrade *od)
L
Linus Torvalds 已提交
4274
{
4275
	__be32 status;
4276
	struct nfs4_ol_stateid *stp;
4277
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
L
Linus Torvalds 已提交
4278

A
Al Viro 已提交
4279 4280
	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 
			cstate->current_fh.fh_dentry);
L
Linus Torvalds 已提交
4281

4282
	/* We don't yet support WANT bits: */
4283 4284 4285
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);
L
Linus Torvalds 已提交
4286 4287

	nfs4_lock_state();
4288
	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
4289
					&od->od_stateid, &stp, nn);
4290
	if (status)
L
Linus Torvalds 已提交
4291 4292
		goto out; 
	status = nfserr_inval;
4293 4294
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n",
L
Linus Torvalds 已提交
4295 4296 4297
			stp->st_access_bmap, od->od_share_access);
		goto out;
	}
4298
	if (!test_deny(od->od_share_deny, stp)) {
L
Linus Torvalds 已提交
4299 4300 4301 4302
		dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto out;
	}
J
J. Bruce Fields 已提交
4303
	nfs4_stateid_downgrade(stp, od->od_share_access);
L
Linus Torvalds 已提交
4304

4305
	reset_union_bmap_deny(od->od_share_deny, stp);
L
Linus Torvalds 已提交
4306

4307 4308
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
L
Linus Torvalds 已提交
4309 4310
	status = nfs_ok;
out:
4311
	nfsd4_bump_seqid(cstate, status);
4312 4313
	if (!cstate->replay_owner)
		nfs4_unlock_state();
L
Linus Torvalds 已提交
4314 4315 4316
	return status;
}

4317 4318
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
4319 4320 4321
	struct nfs4_client *clp = s->st_stid.sc_client;
	struct nfs4_openowner *oo = openowner(s->st_stateowner);

4322
	s->st_stid.sc_type = NFS4_CLOSED_STID;
4323 4324 4325 4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337
	unhash_open_stateid(s);

	if (clp->cl_minorversion) {
		free_generic_stateid(s);
		if (list_empty(&oo->oo_owner.so_stateids))
			release_openowner(oo);
	} else {
		oo->oo_last_closed_stid = s;
		/*
		 * In the 4.0 case we need to keep the owners around a
		 * little while to handle CLOSE replay.
		 */
		if (list_empty(&oo->oo_owner.so_stateids))
			move_to_close_lru(oo, clp->net);
	}
4338 4339
}
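
/*
 * As the comment above notes, only NFSv4.0 has to keep the openowner
 * (and its last closed stateid) around after CLOSE so that a CLOSE
 * replay can still be answered; with 4.1 sessions the reply cache
 * handles replays, so the stateid can be freed immediately.
 */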

L
Linus Torvalds 已提交
4340 4341 4342
/*
 * nfs4_unlock_state() called after encode
 */
4343
__be32
4344
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4345
	    struct nfsd4_close *close)
L
Linus Torvalds 已提交
4346
{
4347
	__be32 status;
4348
	struct nfs4_ol_stateid *stp;
4349 4350
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
L
Linus Torvalds 已提交
4351

A
Al Viro 已提交
4352 4353
	dprintk("NFSD: nfsd4_close on file %pd\n", 
			cstate->current_fh.fh_dentry);
L
Linus Torvalds 已提交
4354 4355

	nfs4_lock_state();
4356 4357 4358
	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
4359
					&stp, nn);
4360
	nfsd4_bump_seqid(cstate, status);
4361
	if (status)
L
Linus Torvalds 已提交
4362
		goto out; 
4363 4364
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
L
Linus Torvalds 已提交
4365

4366
	nfsd4_close_open_stateid(stp);
L
Linus Torvalds 已提交
4367
out:
4368 4369
	if (!cstate->replay_owner)
		nfs4_unlock_state();
L
Linus Torvalds 已提交
4370 4371 4372
	return status;
}

4373
__be32
4374 4375
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_delegreturn *dr)
L
Linus Torvalds 已提交
4376
{
4377 4378
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
4379
	struct nfs4_stid *s;
4380
	__be32 status;
4381
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
L
Linus Torvalds 已提交
4382

4383
	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
4384
		return status;
L
Linus Torvalds 已提交
4385 4386

	nfs4_lock_state();
4387
	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
4388
	if (status)
4389
		goto out;
4390
	dp = delegstateid(s);
4391
	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
4392 4393 4394
	if (status)
		goto out;

4395
	destroy_delegation(dp);
L
Linus Torvalds 已提交
4396
out:
4397 4398
	nfs4_unlock_state();

L
Linus Torvalds 已提交
4399 4400 4401 4402 4403 4404
	return status;
}


#define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))

B
Benny Halevy 已提交
4405 4406 4407 4408 4409 4410 4411 4412 4413 4414 4415 4416 4417 4418 4419
static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end: NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

4420
	WARN_ON_ONCE(!len);
B
Benny Halevy 已提交
4421 4422 4423 4424
	end = start + len;
	return end > start ? end - 1: NFS4_MAX_UINT64;
}
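
/*
 * e.g. last_byte_offset(100, 10) is byte 109; if start + len wraps
 * around 2^64 the range is treated as running to NFS4_MAX_UINT64.
 * A zero length is a caller bug (WARN_ON_ONCE above).
 */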

L
Linus Torvalds 已提交
4425 4426 4427 4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441
/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}
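
/*
 * e.g. a full-file lock (offset 0, length NFS4_MAX_UINT64) yields an
 * fl_end that is negative when interpreted as loff_t, so it is clamped
 * to OFFSET_MAX here.
 */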

4442 4443
/* Hack!: For now, we're defining this just so we can use a pointer to it
 * as a unique cookie to identify our (NFSv4's) posix locks. */
4444
static const struct lock_manager_operations nfsd_posix_mng_ops  = {
4445
};
L
Linus Torvalds 已提交
4446 4447 4448 4449

static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
4450
	struct nfs4_lockowner *lo;
L
Linus Torvalds 已提交
4451

4452
	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
4453 4454 4455
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
					lo->lo_owner.so_owner.len, GFP_KERNEL);
4456 4457 4458
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
4459 4460
		deny->ld_owner.len = lo->lo_owner.so_owner.len;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
4461
	} else {
4462 4463 4464
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
4465 4466
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
L
Linus Torvalds 已提交
4467 4468
	}
	deny->ld_start = fl->fl_start;
B
Benny Halevy 已提交
4469 4470
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
L
Linus Torvalds 已提交
4471 4472 4473 4474 4475 4476
		deny->ld_length = fl->fl_end - fl->fl_start + 1;        
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}

4477
static struct nfs4_lockowner *
4478 4479
find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
		struct nfsd_net *nn)
L
Linus Torvalds 已提交
4480
{
4481 4482
	unsigned int strhashval = ownerstr_hashval(clid->cl_id, owner);
	struct nfs4_stateowner *so;
L
Linus Torvalds 已提交
4483

4484 4485 4486 4487 4488 4489
	list_for_each_entry(so, &nn->ownerstr_hashtbl[strhashval], so_strhash) {
		if (so->so_is_open_owner)
			continue;
		if (!same_owner_str(so, owner, clid))
			continue;
		return lockowner(so);
L
Linus Torvalds 已提交
4490 4491 4492 4493 4494 4495 4496
	}
	return NULL;
}

/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 
L
Lucas De Marchi 已提交
4497
 * occurred. 
L
Linus Torvalds 已提交
4498
 *
4499
 * strhashval = ownerstr_hashval
L
Linus Torvalds 已提交
4500
 */
4501
static struct nfs4_lockowner *
4502
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
4503
	struct nfs4_lockowner *lo;
4504
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
L
Linus Torvalds 已提交
4505

4506 4507
	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
L
Linus Torvalds 已提交
4508
		return NULL;
4509 4510
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
4511 4512
	/* It is the openowner seqid that will be incremented in encode in the
	 * case of new lockowners; so increment the lock seqid manually: */
4513
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid + 1;
4514
	list_add(&lo->lo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
4515
	return lo;
L
Linus Torvalds 已提交
4516 4517
}

4518 4519
static struct nfs4_ol_stateid *
alloc_init_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp, struct nfs4_ol_stateid *open_stp)
L
Linus Torvalds 已提交
4520
{
4521
	struct nfs4_ol_stateid *stp;
4522
	struct nfs4_client *clp = lo->lo_owner.so_client;
L
Linus Torvalds 已提交
4523

4524
	stp = nfs4_alloc_stateid(clp);
N
NeilBrown 已提交
4525
	if (stp == NULL)
J
J. Bruce Fields 已提交
4526
		return NULL;
J
J. Bruce Fields 已提交
4527
	stp->st_stid.sc_type = NFS4_LOCK_STID;
4528 4529
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
	stp->st_stateowner = &lo->lo_owner;
4530
	get_nfs4_file(fp);
L
Linus Torvalds 已提交
4531
	stp->st_file = fp;
J
J. Bruce Fields 已提交
4532
	stp->st_access_bmap = 0;
L
Linus Torvalds 已提交
4533
	stp->st_deny_bmap = open_stp->st_deny_bmap;
4534
	stp->st_openstp = open_stp;
4535
	list_add(&stp->st_locks, &open_stp->st_locks);
4536 4537 4538
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
L
Linus Torvalds 已提交
4539 4540 4541
	return stp;
}

4542 4543 4544 4545 4546 4547 4548 4549 4550 4551 4552 4553 4554
static struct nfs4_ol_stateid *
find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *lst;

	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
		if (lst->st_file == fp)
			return lst;
	}
	return NULL;
}


4555
static int
L
Linus Torvalds 已提交
4556 4557
check_lock_length(u64 offset, u64 length)
{
B
Benny Halevy 已提交
4558
	return ((length == 0)  || ((length != NFS4_MAX_UINT64) &&
L
Linus Torvalds 已提交
4559 4560 4561
	     LOFF_OVERFLOW(offset, length)));
}

4562
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
J
J. Bruce Fields 已提交
4563 4564 4565
{
	struct nfs4_file *fp = lock_stp->st_file;

4566
	if (test_access(access, lock_stp))
J
J. Bruce Fields 已提交
4567
		return;
4568
	__nfs4_file_get_access(fp, access);
4569
	set_access(access, lock_stp);
J
J. Bruce Fields 已提交
4570 4571
}

J
J. Bruce Fields 已提交
4572
static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
4573 4574 4575 4576 4577 4578
{
	struct nfs4_file *fi = ost->st_file;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
	struct nfs4_lockowner *lo;
	unsigned int strhashval;
4579
	struct nfsd_net *nn = net_generic(cl->net, nfsd_net_id);
4580

4581
	lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, nn);
4582 4583 4584 4585 4586 4587 4588 4589 4590 4591
	if (!lo) {
		strhashval = ownerstr_hashval(cl->cl_clientid.cl_id,
				&lock->v.new.owner);
		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
		if (lo == NULL)
			return nfserr_jukebox;
	} else {
		/* with an existing lockowner, seqids must be the same */
		if (!cstate->minorversion &&
		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
4592 4593
			return nfserr_bad_seqid;
	}
4594 4595

	*lst = find_lock_stateid(lo, fi);
4596
	if (*lst == NULL) {
4597 4598 4599 4600 4601 4602
		*lst = alloc_init_lock_stateid(lo, fi, ost);
		if (*lst == NULL) {
			release_lockowner_if_empty(lo);
			return nfserr_jukebox;
		}
		*new = true;
4603 4604 4605 4606
	}
	return nfs_ok;
}

L
Linus Torvalds 已提交
4607 4608 4609
/*
 *  LOCK operation 
 */
4610
__be32
4611
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4612
	   struct nfsd4_lock *lock)
L
Linus Torvalds 已提交
4613
{
4614 4615
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
4616
	struct nfs4_ol_stateid *lock_stp;
4617
	struct file *filp = NULL;
4618 4619
	struct file_lock *file_lock = NULL;
	struct file_lock *conflock = NULL;
4620
	__be32 status = 0;
4621
	bool new_state = false;
4622
	int lkflg;
4623
	int err;
4624 4625
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
L
Linus Torvalds 已提交
4626 4627 4628 4629 4630 4631 4632 4633

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		 return nfserr_inval;

4634
	if ((status = fh_verify(rqstp, &cstate->current_fh,
M
Miklos Szeredi 已提交
4635
				S_IFREG, NFSD_MAY_LOCK))) {
A
Andy Adamson 已提交
4636 4637 4638 4639
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

L
Linus Torvalds 已提交
4640 4641 4642
	nfs4_lock_state();

	if (lock->lk_is_new) {
4643
		struct nfs4_ol_stateid *open_stp = NULL;
4644 4645 4646 4647 4648 4649 4650

		if (nfsd4_has_session(cstate))
			/* See rfc 5661 18.10.3: given clientid is ignored: */
			memcpy(&lock->v.new.clientid,
				&cstate->session->se_client->cl_clientid,
				sizeof(clientid_t));

L
Linus Torvalds 已提交
4651
		status = nfserr_stale_clientid;
4652
		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
L
Linus Torvalds 已提交
4653 4654 4655
			goto out;

		/* validate and update open stateid and open seqid */
4656
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
L
Linus Torvalds 已提交
4657 4658
				        lock->lk_new_open_seqid,
		                        &lock->lk_new_open_stateid,
4659
					&open_stp, nn);
4660
		if (status)
L
Linus Torvalds 已提交
4661
			goto out;
4662
		open_sop = openowner(open_stp->st_stateowner);
4663
		status = nfserr_bad_stateid;
4664
		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
4665 4666
						&lock->v.new.clientid))
			goto out;
4667 4668
		status = lookup_or_create_lock_state(cstate, open_stp, lock,
							&lock_stp, &new_state);
J
J. Bruce Fields 已提交
4669
	} else
4670
		status = nfs4_preprocess_seqid_op(cstate,
4671 4672
				       lock->lk_old_lock_seqid,
				       &lock->lk_old_lock_stateid,
4673
				       NFS4_LOCK_STID, &lock_stp, nn);
J
J. Bruce Fields 已提交
4674 4675
	if (status)
		goto out;
4676
	lock_sop = lockowner(lock_stp->st_stateowner);
L
Linus Torvalds 已提交
4677

4678 4679 4680 4681 4682
	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

4683
	status = nfserr_grace;
4684
	if (locks_in_grace(net) && !lock->lk_reclaim)
4685 4686
		goto out;
	status = nfserr_no_grace;
4687
	if (!locks_in_grace(net) && lock->lk_reclaim)
4688 4689
		goto out;

4690 4691 4692 4693 4694 4695 4696 4697
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

	locks_init_lock(file_lock);
L
Linus Torvalds 已提交
4698 4699 4700
	switch (lock->lk_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
J
J. Bruce Fields 已提交
4701 4702 4703
			filp = find_readable_file(lock_stp->st_file);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
4704
			file_lock->fl_type = F_RDLCK;
4705
			break;
L
Linus Torvalds 已提交
4706 4707
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
J
J. Bruce Fields 已提交
4708 4709 4710
			filp = find_writeable_file(lock_stp->st_file);
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
4711
			file_lock->fl_type = F_WRLCK;
4712
			break;
L
Linus Torvalds 已提交
4713 4714 4715 4716
		default:
			status = nfserr_inval;
		goto out;
	}
4717 4718 4719 4720
	if (!filp) {
		status = nfserr_openmode;
		goto out;
	}
4721 4722 4723 4724 4725 4726 4727 4728 4729 4730 4731 4732 4733 4734 4735
	file_lock->fl_owner = (fl_owner_t)lock_sop;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = lock->lk_offset;
	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(file_lock);

	conflock = locks_alloc_lock();
	if (!conflock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}
L
Linus Torvalds 已提交
4736

4737
	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
4738
	switch (-err) {
L
Linus Torvalds 已提交
4739
	case 0: /* success! */
4740 4741
		update_stateid(&lock_stp->st_stid.sc_stateid);
		memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid, 
L
Linus Torvalds 已提交
4742
				sizeof(stateid_t));
4743
		status = 0;
4744 4745 4746 4747
		break;
	case (EAGAIN):		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
4748
		nfs4_set_lock_denied(conflock, &lock->lk_denied);
4749
		break;
L
Linus Torvalds 已提交
4750 4751
	case (EDEADLK):
		status = nfserr_deadlock;
4752
		break;
4753
	default:
4754
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
4755
		status = nfserrno(err);
4756
		break;
L
Linus Torvalds 已提交
4757 4758
	}
out:
4759 4760
	if (filp)
		fput(filp);
4761
	if (status && new_state)
4762
		release_lock_stateid(lock_stp);
4763
	nfsd4_bump_seqid(cstate, status);
4764 4765
	if (!cstate->replay_owner)
		nfs4_unlock_state();
4766 4767 4768 4769
	if (file_lock)
		locks_free_lock(file_lock);
	if (conflock)
		locks_free_lock(conflock);
L
Linus Torvalds 已提交
4770 4771 4772
	return status;
}
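
/*
 * Recap of the LOCK path above: a "new" lock request resolves the open
 * stateid and creates (or finds) a lock stateid for this lockowner and
 * file, while an existing one is looked up by its lock stateid; the
 * request is then handed to vfs_lock_file(), with conflock used to fill
 * lk_denied when a conflicting lock is reported (-EAGAIN).
 */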

4773 4774 4775 4776 4777 4778
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
 * inode operation.)
 */
4779
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
4780 4781
{
	struct file *file;
4782 4783 4784 4785 4786
	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
	if (!err) {
		err = nfserrno(vfs_test_lock(file, lock));
		nfsd_close(file);
	}
4787 4788 4789
	return err;
}

L
Linus Torvalds 已提交
4790 4791 4792
/*
 * LOCKT operation
 */
4793
__be32
4794 4795
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_lockt *lockt)
L
Linus Torvalds 已提交
4796
{
4797
	struct file_lock *file_lock = NULL;
4798
	struct nfs4_lockowner *lo;
4799
	__be32 status;
4800
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
L
Linus Torvalds 已提交
4801

4802
	if (locks_in_grace(SVC_NET(rqstp)))
L
Linus Torvalds 已提交
4803 4804 4805 4806 4807 4808 4809
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		 return nfserr_inval;

	nfs4_lock_state();

4810
	if (!nfsd4_has_session(cstate)) {
4811
		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
4812 4813 4814
		if (status)
			goto out;
	}
L
Linus Torvalds 已提交
4815

4816
	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
L
Linus Torvalds 已提交
4817 4818
		goto out;

4819 4820 4821 4822 4823 4824 4825
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}
	locks_init_lock(file_lock);
L
Linus Torvalds 已提交
4826 4827 4828
	switch (lockt->lt_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
4829
			file_lock->fl_type = F_RDLCK;
L
Linus Torvalds 已提交
4830 4831 4832
		break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
4833
			file_lock->fl_type = F_WRLCK;
L
Linus Torvalds 已提交
4834 4835
		break;
		default:
4836
			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
L
Linus Torvalds 已提交
4837 4838 4839 4840
			status = nfserr_inval;
		goto out;
	}

4841
	lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner, nn);
4842
	if (lo)
4843 4844 4845
		file_lock->fl_owner = (fl_owner_t)lo;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;
L
Linus Torvalds 已提交
4846

4847 4848
	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
L
Linus Torvalds 已提交
4849

4850
	nfs4_transform_lock_offset(file_lock);
L
Linus Torvalds 已提交
4851

4852
	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
4853
	if (status)
4854
		goto out;
4855

4856
	if (file_lock->fl_type != F_UNLCK) {
L
Linus Torvalds 已提交
4857
		status = nfserr_denied;
4858
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
L
Linus Torvalds 已提交
4859 4860 4861
	}
out:
	nfs4_unlock_state();
4862 4863
	if (file_lock)
		locks_free_lock(file_lock);
L
Linus Torvalds 已提交
4864 4865 4866
	return status;
}

4867
__be32
4868
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4869
	    struct nfsd4_locku *locku)
L
Linus Torvalds 已提交
4870
{
4871
	struct nfs4_ol_stateid *stp;
L
Linus Torvalds 已提交
4872
	struct file *filp = NULL;
4873
	struct file_lock *file_lock = NULL;
4874
	__be32 status;
4875
	int err;
4876 4877
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

L
Linus Torvalds 已提交
4878 4879 4880 4881 4882 4883 4884 4885 4886
	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		 return nfserr_inval;

	nfs4_lock_state();
									        
4887
	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
4888 4889
					&locku->lu_stateid, NFS4_LOCK_STID,
					&stp, nn);
4890
	if (status)
L
Linus Torvalds 已提交
4891
		goto out;
4892 4893 4894 4895 4896
	filp = find_any_file(stp->st_file);
	if (!filp) {
		status = nfserr_lock_range;
		goto out;
	}
4897 4898 4899 4900
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
4901
		goto fput;
4902 4903 4904
	}
	locks_init_lock(file_lock);
	file_lock->fl_type = F_UNLCK;
4905
	file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
4906 4907 4908 4909 4910 4911 4912 4913 4914
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
						locku->lu_length);
	nfs4_transform_lock_offset(file_lock);
L
Linus Torvalds 已提交
4915

4916
	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
4917
	if (err) {
4918
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
L
Linus Torvalds 已提交
4919 4920
		goto out_nfserr;
	}
4921 4922
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
4923 4924
fput:
	fput(filp);
L
Linus Torvalds 已提交
4925
out:
4926
	nfsd4_bump_seqid(cstate, status);
4927 4928
	if (!cstate->replay_owner)
		nfs4_unlock_state();
4929 4930
	if (file_lock)
		locks_free_lock(file_lock);
L
Linus Torvalds 已提交
4931 4932 4933
	return status;

out_nfserr:
4934
	status = nfserrno(err);
4935
	goto fput;
L
Linus Torvalds 已提交
4936 4937 4938 4939 4940 4941 4942 4943
}

/*
 * returns
 * 	1: locks held by lockowner
 * 	0: no locks held by lockowner
 */
static int
4944
check_for_locks(struct nfs4_file *filp, struct nfs4_lockowner *lowner)
L
Linus Torvalds 已提交
4945 4946
{
	struct file_lock **flpp;
4947
	struct inode *inode = filp->fi_inode;
L
Linus Torvalds 已提交
4948 4949
	int status = 0;

4950
	spin_lock(&inode->i_lock);
L
Linus Torvalds 已提交
4951
	for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
4952
		if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
L
Linus Torvalds 已提交
4953 4954
			status = 1;
			goto out;
4955
		}
L
Linus Torvalds 已提交
4956 4957
	}
out:
4958
	spin_unlock(&inode->i_lock);
L
Linus Torvalds 已提交
4959 4960 4961
	return status;
}
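
/*
 * check_for_locks() above walks the inode's i_flock list under i_lock
 * looking for any lock owned by the given lockowner; FREE_STATEID and
 * RELEASE_LOCKOWNER use it to refuse to drop state while posix locks
 * are still held.
 */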

4962
__be32
4963 4964 4965
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			struct nfsd4_release_lockowner *rlockowner)
L
Linus Torvalds 已提交
4966 4967
{
	clientid_t *clid = &rlockowner->rl_clientid;
4968
	struct nfs4_stateowner *sop = NULL, *tmp;
4969
	struct nfs4_lockowner *lo;
4970
	struct nfs4_ol_stateid *stp;
L
Linus Torvalds 已提交
4971
	struct xdr_netobj *owner = &rlockowner->rl_owner;
4972
	unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
4973
	__be32 status;
4974
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
L
Linus Torvalds 已提交
4975 4976 4977 4978 4979 4980

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	nfs4_lock_state();

4981
	status = lookup_clientid(clid, cstate, nn);
4982 4983 4984
	if (status)
		goto out;

4985
	status = nfserr_locks_held;
4986

4987 4988 4989
	/* Find the matching lock stateowner */
	list_for_each_entry(tmp, &nn->ownerstr_hashtbl[hashval], so_strhash) {
		if (tmp->so_is_open_owner)
4990
			continue;
4991 4992 4993
		if (same_owner_str(tmp, owner, clid)) {
			sop = tmp;
			break;
L
Linus Torvalds 已提交
4994
		}
4995
	}
4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007

	/* No matching owner found, maybe a replay? Just declare victory... */
	if (!sop) {
		status = nfs_ok;
		goto out;
	}

	lo = lockowner(sop);
	/* see if there are still any locks associated with it */
	list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
		if (check_for_locks(stp->st_file, lo))
			goto out;
L
Linus Torvalds 已提交
5008
	}
5009 5010 5011

	status = nfs_ok;
	release_lockowner(lo);
L
Linus Torvalds 已提交
5012 5013 5014 5015 5016 5017
out:
	nfs4_unlock_state();
	return status;
}

static inline struct nfs4_client_reclaim *
N
NeilBrown 已提交
5018
alloc_reclaim(void)
L
Linus Torvalds 已提交
5019
{
N
NeilBrown 已提交
5020
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
L
Linus Torvalds 已提交
5021 5022
}

5023
bool
5024
nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
5025
{
5026
	struct nfs4_client_reclaim *crp;
5027

5028
	crp = nfsd4_find_reclaim_client(name, nn);
5029
	return (crp && crp->cr_clp);
5030 5031
}

L
Linus Torvalds 已提交
5032 5033 5034
/*
 * failure => all reset bets are off, nfserr_no_grace...
 */
5035
struct nfs4_client_reclaim *
5036
nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
L
Linus Torvalds 已提交
5037 5038
{
	unsigned int strhashval;
5039
	struct nfs4_client_reclaim *crp;
L
Linus Torvalds 已提交
5040

N
NeilBrown 已提交
5041 5042
	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
	crp = alloc_reclaim();
5043 5044 5045
	if (crp) {
		strhashval = clientstr_hashval(name);
		INIT_LIST_HEAD(&crp->cr_strhash);
5046
		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
5047
		memcpy(crp->cr_recdir, name, HEXDIR_LEN);
5048
		crp->cr_clp = NULL;
5049
		nn->reclaim_str_hashtbl_size++;
5050 5051
	}
	return crp;
L
Linus Torvalds 已提交
5052 5053
}

5054
void
5055
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
5056 5057 5058
{
	list_del(&crp->cr_strhash);
	kfree(crp);
5059
	nn->reclaim_str_hashtbl_size--;
5060 5061
}

5062
void
5063
nfs4_release_reclaim(struct nfsd_net *nn)
L
Linus Torvalds 已提交
5064 5065 5066 5067 5068
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
5069 5070
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
L
Linus Torvalds 已提交
5071
			                struct nfs4_client_reclaim, cr_strhash);
5072
			nfs4_remove_reclaim_record(crp, nn);
L
Linus Torvalds 已提交
5073 5074
		}
	}
5075
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
L
Linus Torvalds 已提交
5076 5077 5078 5079
}

/*
 * called from OPEN, CLAIM_PREVIOUS with a new clientid.
 */
struct nfs4_client_reclaim *
nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

	dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);

	strhashval = clientstr_hashval(recdir);
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
		if (same_name(crp->cr_recdir, recdir)) {
			return crp;
		}
	}
	return NULL;
}

/*
 * Called from OPEN. Look for clientid in reclaim list.
 */
__be32
nfs4_check_open_reclaim(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	__be32 status;

	/* find clientid in conf_id_hashtbl */
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		return nfserr_reclaim_bad;

	if (nfsd4_client_record_check(cstate->clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
}

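/*
 * Fault-injection helpers: walk a client's locks, opens and delegations and
 * either count them or forcibly release them, for use by the NFSD
 * fault-injection testing interface.
 */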
#ifdef CONFIG_NFSD_FAULT_INJECTION

u64 nfsd_forget_client(struct nfs4_client *clp, u64 max)
{
	if (mark_client_expired(clp))
		return 0;
	expire_client(clp);
	return 1;
}

u64 nfsd_print_client(struct nfs4_client *clp, u64 num)
{
	char buf[INET6_ADDRSTRLEN];
	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
	printk(KERN_INFO "NFS Client: %s\n", buf);
	return 1;
}

static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
			     const char *type)
{
	char buf[INET6_ADDRSTRLEN];
	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
}

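/*
 * Walk every lock stateid hanging off each open stateid of each of the
 * client's openowners, applying @func (when non-NULL) to at most @max of
 * them.
 */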
static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
				    void (*func)(struct nfs4_ol_stateid *))
{
	struct nfs4_openowner *oop;
	struct nfs4_ol_stateid *stp, *st_next;
	struct nfs4_ol_stateid *lst, *lst_next;
	u64 count = 0;

	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
		list_for_each_entry_safe(stp, st_next,
				&oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lst, lst_next,
					&stp->st_locks, st_locks) {
				if (func)
					func(lst);
				if (++count == max)
					return count;
			}
		}
	}

	return count;
}

u64 nfsd_forget_client_locks(struct nfs4_client *clp, u64 max)
{
	return nfsd_foreach_client_lock(clp, max, release_lock_stateid);
}

u64 nfsd_print_client_locks(struct nfs4_client *clp, u64 max)
{
	u64 count = nfsd_foreach_client_lock(clp, max, NULL);
	nfsd_print_count(clp, count, "locked files");
	return count;
}

static u64 nfsd_foreach_client_open(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_openowner *))
{
	struct nfs4_openowner *oop, *next;
	u64 count = 0;

	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
		if (func)
			func(oop);
		if (++count == max)
			break;
	}

	return count;
}

u64 nfsd_forget_client_openowners(struct nfs4_client *clp, u64 max)
{
	return nfsd_foreach_client_open(clp, max, release_openowner);
}

u64 nfsd_print_client_openowners(struct nfs4_client *clp, u64 max)
{
	u64 count = nfsd_foreach_client_open(clp, max, NULL);
	nfsd_print_count(clp, count, "open files");
	return count;
}

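/*
 * Gather up to @max of the client's delegations onto @victims (when
 * non-NULL), skipping any that a delegation break may already be
 * processing.  Caller must hold state_lock.
 */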
static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
				     struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	u64 count = 0;

	lockdep_assert_held(&state_lock);
	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
		if (victims) {
			/*
			 * It's not safe to mess with delegations that have a
			 * non-zero dl_time. They might have already been broken
			 * and could be processed by the laundromat outside of
			 * the state_lock. Just leave them be.
			 */
			if (dp->dl_time != 0)
				continue;

			/*
			 * Increment dl_time to ensure that delegation breaks
			 * don't monkey with it now that we are.
			 */
			++dp->dl_time;
			list_move(&dp->dl_recall_lru, victims);
		}
		if (++count == max)
			break;
	}
	return count;
}

u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max)
{
	struct nfs4_delegation *dp, *next;
	LIST_HEAD(victims);
	u64 count;

	spin_lock(&state_lock);
	count = nfsd_find_all_delegations(clp, max, &victims);
	spin_unlock(&state_lock);

	list_for_each_entry_safe(dp, next, &victims, dl_recall_lru)
		revoke_delegation(dp);

	return count;
}

u64 nfsd_recall_client_delegations(struct nfs4_client *clp, u64 max)
{
	struct nfs4_delegation *dp;
	LIST_HEAD(victims);
	u64 count;

	spin_lock(&state_lock);
	count = nfsd_find_all_delegations(clp, max, &victims);
	while (!list_empty(&victims)) {
		dp = list_first_entry(&victims, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		dp->dl_time = 0;
		nfsd_break_one_deleg(dp);
	}
	spin_unlock(&state_lock);

	return count;
}

u64 nfsd_print_client_delegations(struct nfs4_client *clp, u64 max)
{
	u64 count = 0;

	spin_lock(&state_lock);
	count = nfsd_find_all_delegations(clp, max, NULL);
	spin_unlock(&state_lock);

	nfsd_print_count(clp, count, "delegations");
	return count;
}

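/*
 * Apply @func to the clients on this net namespace's LRU, stopping once
 * @max items of state have been processed (@max == 0 means no limit).
 */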
u64 nfsd_for_n_state(u64 max, u64 (*func)(struct nfs4_client *, u64))
{
	struct nfs4_client *clp, *next;
	u64 count = 0;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return 0;

	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
		count += func(clp, max - count);
		if ((max != 0) && (count >= max))
			break;
	}

	return count;
}

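/* Look up a client by socket address; used by the fault-injection code. */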
struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
{
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return NULL;

	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
			return clp;
	}
	return NULL;
}

#endif /* CONFIG_NFSD_FAULT_INJECTION */

/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached.  This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}

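/*
 * Allocate and initialize this net namespace's hash tables, LRU lists and
 * laundromat work, unwinding any earlier allocations on failure.
 */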
static int nfs4_state_create_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
		goto err;
	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
	nn->ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
			OWNER_HASH_SIZE, GFP_KERNEL);
	if (!nn->ownerstr_hashtbl)
		goto err_ownerstr;
	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
			SESSION_HASH_SIZE, GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
	}
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->ownerstr_hashtbl[i]);
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
	nn->conf_name_tree = RB_ROOT;
	nn->unconf_name_tree = RB_ROOT;
	INIT_LIST_HEAD(&nn->client_lru);
	INIT_LIST_HEAD(&nn->close_lru);
	INIT_LIST_HEAD(&nn->del_recall_lru);
	spin_lock_init(&nn->client_lock);

	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
	get_net(net);

	return 0;

err_sessionid:
	kfree(nn->ownerstr_hashtbl);
err_ownerstr:
	kfree(nn->unconf_id_hashtbl);
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
err:
	return -ENOMEM;
}

static void
nfs4_state_destroy_net(struct net *net)
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}

	kfree(nn->sessionid_hashtbl);
	kfree(nn->ownerstr_hashtbl);
	kfree(nn->unconf_id_hashtbl);
	kfree(nn->conf_id_hashtbl);
	put_net(net);
}

int
nfs4_state_start_net(struct net *net)
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int ret;

	ret = nfs4_state_create_net(net);
	if (ret)
		return ret;
	nfsd4_client_tracking_init(net);
	nn->boot_time = get_seconds();
	locks_start_grace(net, &nn->nfsd4_manager);
	nn->grace_ended = false;
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
	       nn->nfsd4_grace, net);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
	return 0;
}

/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

	ret = set_callback_cred();
	if (ret)
		return -ENOMEM;
	laundry_wq = create_singlethread_workqueue("nfsd4");
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out_recovery;
	}
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;

	set_max_delegations();

	return 0;

out_free_laundry:
	destroy_workqueue(laundry_wq);
out_recovery:
	return ret;
}

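/*
 * Per-net shutdown: stop the laundromat, end the grace period, destroy any
 * delegations still on the recall list, then tear down the per-net state.
 */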
void
nfs4_state_shutdown_net(struct net *net)
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);

	nfs4_lock_state();
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		list_move(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry(pos, struct nfs4_delegation, dl_recall_lru);
		destroy_delegation(dp);
	}

	nfsd4_client_tracking_exit(net);
	nfs4_state_destroy_net(net);
	nfs4_unlock_state();
}

void
nfs4_state_shutdown(void)
{
	destroy_workqueue(laundry_wq);
	nfsd4_destroy_callback_queue();
}

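/*
 * NFSv4.1 "current stateid" support: put_stateid() records the stateid an
 * operation just produced, and get_stateid() substitutes it when a later
 * operation in the compound passes the special current-stateid value.
 */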
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG) && CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (cstate->minorversion) {
		memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
		SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
	}
}

void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}

/*
 * functions to set current state id
 */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	put_stateid(cstate, &odp->od_stateid);
}

void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
	put_stateid(cstate, &open->op_stateid);
}

void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	put_stateid(cstate, &close->cl_stateid);
}

void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
{
	put_stateid(cstate, &lock->lk_resp_stateid);
}

/*
 * functions to consume current state id
 */

void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	get_stateid(cstate, &odp->od_stateid);
}

void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
{
	get_stateid(cstate, &drp->dr_stateid);
}

void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
{
	get_stateid(cstate, &fsp->fr_stateid);
}

void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
{
	get_stateid(cstate, &setattr->sa_stateid);
}

void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	get_stateid(cstate, &close->cl_stateid);
}

void
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
{
	get_stateid(cstate, &locku->lu_stateid);
}

void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
{
	get_stateid(cstate, &read->rd_stateid);
}

void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
{
	get_stateid(cstate, &write->wr_stateid);
}