nfs4state.c 154.4 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34
/*
*  Copyright (c) 2001 The Regents of the University of Michigan.
*  All rights reserved.
*
*  Kendrick Smith <kmsmith@umich.edu>
*  Andy Adamson <kandros@umich.edu>
*
*  Redistribution and use in source and binary forms, with or without
*  modification, are permitted provided that the following conditions
*  are met:
*
*  1. Redistributions of source code must retain the above copyright
*     notice, this list of conditions and the following disclaimer.
*  2. Redistributions in binary form must reproduce the above copyright
*     notice, this list of conditions and the following disclaimer in the
*     documentation and/or other materials provided with the distribution.
*  3. Neither the name of the University nor the names of its
*     contributors may be used to endorse or promote products derived
*     from this software without specific prior written permission.
*
*  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
*  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
*  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
*  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
*  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
*  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
*  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
*  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
*  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
*  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
*  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/

35
#include <linux/file.h>
36
#include <linux/fs.h>
37
#include <linux/slab.h>
38
#include <linux/namei.h>
39
#include <linux/swap.h>
40
#include <linux/pagemap.h>
41
#include <linux/ratelimit.h>
42
#include <linux/sunrpc/svcauth_gss.h>
43
#include <linux/sunrpc/addr.h>
44
#include <linux/hash.h>
45
#include "xdr4.h"
46
#include "xdr4cb.h"
47
#include "vfs.h"
48
#include "current_stateid.h"
L
Linus Torvalds 已提交
49

50 51
#include "netns.h"

L
Linus Torvalds 已提交
52 53
#define NFSDDBG_FACILITY                NFSDDBG_PROC

54 55 56 57 58 59 60 61
#define all_ones {{~0,~0},~0}
static const stateid_t one_stateid = {
	.si_generation = ~0,
	.si_opaque = all_ones,
};
static const stateid_t zero_stateid = {
	/* all fields zero */
};
62 63 64
static const stateid_t currentstateid = {
	.si_generation = 1,
};
65

A
Andy Adamson 已提交
66
static u64 current_sessionid = 1;
67

68 69
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid)  (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
70
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
L
Linus Torvalds 已提交
71 72

/* forward declarations */
73
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
74
static void nfs4_free_ol_stateid(struct nfs4_stid *stid);
L
Linus Torvalds 已提交
75

76 77 78
/* Locking: */

/* Currently used for almost all code touching nfsv4 state: */
I
Ingo Molnar 已提交
79
static DEFINE_MUTEX(client_mutex);
L
Linus Torvalds 已提交
80

81 82 83 84 85
/*
 * Currently used for the del_recall_lru and file hash table.  In an
 * effort to decrease the scope of the client_mutex, this spinlock may
 * eventually cover more:
 */
86
static DEFINE_SPINLOCK(state_lock);
87

88 89 90 91 92 93
/*
 * A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
 * the refcount on the open stateid to drop.
 */
static DECLARE_WAIT_QUEUE_HEAD(close_wq);

C
Christoph Hellwig 已提交
94 95 96 97 98
static struct kmem_cache *openowner_slab;
static struct kmem_cache *lockowner_slab;
static struct kmem_cache *file_slab;
static struct kmem_cache *stateid_slab;
static struct kmem_cache *deleg_slab;
N
NeilBrown 已提交
99

L
Linus Torvalds 已提交
100 101 102
void
nfs4_lock_state(void)
{
I
Ingo Molnar 已提交
103
	mutex_lock(&client_mutex);
L
Linus Torvalds 已提交
104 105
}

106
static void free_session(struct nfsd4_session *);
107

108
static bool is_session_dead(struct nfsd4_session *ses)
109
{
110
	return ses->se_flags & NFS4_SESSION_DEAD;
111 112
}

113
static __be32 mark_session_dead_locked(struct nfsd4_session *ses, int ref_held_by_me)
114
{
115
	if (atomic_read(&ses->se_ref) > ref_held_by_me)
116 117 118
		return nfserr_jukebox;
	ses->se_flags |= NFS4_SESSION_DEAD;
	return nfs_ok;
119 120
}

L
Linus Torvalds 已提交
121 122 123
void
nfs4_unlock_state(void)
{
I
Ingo Molnar 已提交
124
	mutex_unlock(&client_mutex);
L
Linus Torvalds 已提交
125 126
}

127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190
/* A client whose cl_time has been zeroed is considered expired. */
static bool is_client_expired(struct nfs4_client *clp)
{
	return clp->cl_time == 0;
}

/*
 * Mark a client expired; fails with nfserr_jukebox while the client
 * still has outstanding references.  Caller holds nn->client_lock.
 */
static __be32 mark_client_expired_locked(struct nfs4_client *clp)
{
	if (atomic_read(&clp->cl_refcount))
		return nfserr_jukebox;
	clp->cl_time = 0;
	return nfs_ok;
}

/* Locked wrapper around mark_client_expired_locked(). */
static __be32 mark_client_expired(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	__be32 ret;

	spin_lock(&nn->client_lock);
	ret = mark_client_expired_locked(clp);
	spin_unlock(&nn->client_lock);
	return ret;
}

/* Take a reference on @clp unless it has already expired. */
static __be32 get_client_locked(struct nfs4_client *clp)
{
	if (is_client_expired(clp))
		return nfserr_expired;
	atomic_inc(&clp->cl_refcount);
	return nfs_ok;
}

/* must be called under the client_lock */
static inline void
renew_client_locked(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	if (is_client_expired(clp)) {
		WARN_ON(1);
		printk("%s: client (clientid %08x/%08x) already expired\n",
			__func__,
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
		return;
	}

	dprintk("renewing client (clientid %08x/%08x)\n",
			clp->cl_clientid.cl_boot,
			clp->cl_clientid.cl_id);
	/* Move to the tail of the lease LRU and restart the lease clock. */
	list_move_tail(&clp->cl_lru, &nn->client_lru);
	clp->cl_time = get_seconds();
}

/* Locked wrapper around renew_client_locked(). */
static inline void
renew_client(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	renew_client_locked(clp);
	spin_unlock(&nn->client_lock);
}

191
static void put_client_renew_locked(struct nfs4_client *clp)
192 193 194 195 196 197 198
{
	if (!atomic_dec_and_test(&clp->cl_refcount))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
}

199 200 201 202
static void put_client_renew(struct nfs4_client *clp)
{
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

203 204 205 206
	if (!atomic_dec_and_lock(&clp->cl_refcount, &nn->client_lock))
		return;
	if (!is_client_expired(clp))
		renew_client_locked(clp);
207 208 209
	spin_unlock(&nn->client_lock);
}

210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
static __be32 nfsd4_get_session_locked(struct nfsd4_session *ses)
{
	__be32 status;

	if (is_session_dead(ses))
		return nfserr_badsession;
	status = get_client_locked(ses->se_client);
	if (status)
		return status;
	atomic_inc(&ses->se_ref);
	return nfs_ok;
}

static void nfsd4_put_session_locked(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;

	if (atomic_dec_and_test(&ses->se_ref) && is_session_dead(ses))
		free_session(ses);
	put_client_renew_locked(clp);
}

static void nfsd4_put_session(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	spin_lock(&nn->client_lock);
	nfsd4_put_session_locked(ses);
	spin_unlock(&nn->client_lock);
}

242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288
/*
 * Owner-string comparison: same owner octets and the same (short)
 * client id.
 */
static int
same_owner_str(struct nfs4_stateowner *sop, struct xdr_netobj *owner,
							clientid_t *clid)
{
	return (sop->so_owner.len == owner->len) &&
		0 == memcmp(sop->so_owner.data, owner->data, owner->len) &&
		(sop->so_client->cl_clientid.cl_id == clid->cl_id);
}

/*
 * Look up the openowner for @open in the per-net owner-string hash.
 * On success the owning client's lease is renewed and a reference is
 * taken on the stateowner.  Caller must hold nn->client_lock.
 */
static struct nfs4_openowner *
find_openstateowner_str_locked(unsigned int hashval, struct nfsd4_open *open,
			bool sessions, struct nfsd_net *nn)
{
	struct nfs4_stateowner *so;
	struct nfs4_openowner *oo;
	struct nfs4_client *clp;

	lockdep_assert_held(&nn->client_lock);

	list_for_each_entry(so, &nn->ownerstr_hashtbl[hashval], so_strhash) {
		if (!so->so_is_open_owner)
			continue;
		if (same_owner_str(so, &open->op_owner, &open->op_clientid)) {
			oo = openowner(so);
			clp = oo->oo_owner.so_client;
			/* client's minorversion must agree with @sessions */
			if ((bool)clp->cl_minorversion != sessions)
				break;
			renew_client_locked(clp);
			atomic_inc(&so->so_count);
			return oo;
		}
	}
	return NULL;
}

/* Locked wrapper around find_openstateowner_str_locked(). */
static struct nfs4_openowner *
find_openstateowner_str(unsigned int hashval, struct nfsd4_open *open,
			bool sessions, struct nfsd_net *nn)
{
	struct nfs4_openowner *oo;

	spin_lock(&nn->client_lock);
	oo = find_openstateowner_str_locked(hashval, open, sessions, nn);
	spin_unlock(&nn->client_lock);
	return oo;
}

289

L
Linus Torvalds 已提交
290 291 292 293 294 295 296 297 298 299 300 301 302
static inline u32
opaque_hashval(const void *ptr, int nbytes)
{
	unsigned char *cptr = (unsigned char *) ptr;

	u32 x = 0;
	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x;
}

303 304 305 306 307
static void nfsd4_free_file(struct nfs4_file *f)
{
	kmem_cache_free(file_slab, f);
}

308 309 310
static inline void
put_nfs4_file(struct nfs4_file *fi)
{
311 312
	might_lock(&state_lock);

313
	if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
314
		hlist_del(&fi->fi_hash);
315
		spin_unlock(&state_lock);
316
		nfsd4_free_file(fi);
317
	}
318 319 320 321 322
}

static inline void
get_nfs4_file(struct nfs4_file *fi)
{
323
	atomic_inc(&fi->fi_ref);
324 325
}

326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398
/* Return a new reference to the cached struct file for @oflag, if any. */
static struct file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
	struct file *filp = f->fi_fds[oflag];

	return filp ? get_file(filp) : NULL;
}

/* Prefer a write-only file, falling back to the read-write one. */
static struct file *
find_writeable_file_locked(struct nfs4_file *f)
{
	struct file *filp;

	lockdep_assert_held(&f->fi_lock);

	filp = __nfs4_get_fd(f, O_WRONLY);
	if (!filp)
		filp = __nfs4_get_fd(f, O_RDWR);
	return filp;
}

static struct file *
find_writeable_file(struct nfs4_file *f)
{
	struct file *filp;

	spin_lock(&f->fi_lock);
	filp = find_writeable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return filp;
}

/* Prefer a read-only file, falling back to the read-write one. */
static struct file *find_readable_file_locked(struct nfs4_file *f)
{
	struct file *filp;

	lockdep_assert_held(&f->fi_lock);

	filp = __nfs4_get_fd(f, O_RDONLY);
	if (!filp)
		filp = __nfs4_get_fd(f, O_RDWR);
	return filp;
}

static struct file *
find_readable_file(struct nfs4_file *f)
{
	struct file *filp;

	spin_lock(&f->fi_lock);
	filp = find_readable_file_locked(f);
	spin_unlock(&f->fi_lock);

	return filp;
}

/* Any open file will do: try O_RDWR, then O_WRONLY, then O_RDONLY. */
static struct file *
find_any_file(struct nfs4_file *f)
{
	struct file *filp;

	spin_lock(&f->fi_lock);
	filp = __nfs4_get_fd(f, O_RDWR);
	if (!filp)
		filp = __nfs4_get_fd(f, O_WRONLY);
	if (!filp)
		filp = __nfs4_get_fd(f, O_RDONLY);
	spin_unlock(&f->fi_lock);
	return filp;
}

399
static atomic_long_t num_delegations;
400
unsigned long max_delegations;
401 402 403 404 405

/*
 * Open owner state (share locks)
 */

406 407 408 409
/* hash tables for lock and open owners */
#define OWNER_HASH_BITS              8
#define OWNER_HASH_SIZE             (1 << OWNER_HASH_BITS)
#define OWNER_HASH_MASK             (OWNER_HASH_SIZE - 1)
410

411
static unsigned int ownerstr_hashval(u32 clientid, struct xdr_netobj *ownername)
412 413 414 415 416
{
	unsigned int ret;

	ret = opaque_hashval(ownername->data, ownername->len);
	ret += clientid;
417
	return ret & OWNER_HASH_MASK;
418
}
419 420 421 422

/* hash table for nfs4_file */
#define FILE_HASH_BITS                   8
#define FILE_HASH_SIZE                  (1 << FILE_HASH_BITS)
S
Shan Wei 已提交
423

424
static unsigned int nfsd_fh_hashval(struct knfsd_fh *fh)
425
{
426 427 428 429 430 431 432 433 434 435 436 437 438 439
	return jhash2(fh->fh_base.fh_pad, XDR_QUADLEN(fh->fh_size), 0);
}

static unsigned int file_hashval(struct knfsd_fh *fh)
{
	return nfsd_fh_hashval(fh) & (FILE_HASH_SIZE - 1);
}

static bool nfsd_fh_match(struct knfsd_fh *fh1, struct knfsd_fh *fh2)
{
	return fh1->fh_size == fh2->fh_size &&
		!memcmp(fh1->fh_base.fh_pad,
				fh2->fh_base.fh_pad,
				fh1->fh_size);
440 441
}

442
static struct hlist_head file_hashtbl[FILE_HASH_SIZE];
443

444 445
static void
__nfs4_file_get_access(struct nfs4_file *fp, u32 access)
446
{
447 448
	lockdep_assert_held(&fp->fi_lock);

449 450 451 452
	if (access & NFS4_SHARE_ACCESS_WRITE)
		atomic_inc(&fp->fi_access[O_WRONLY]);
	if (access & NFS4_SHARE_ACCESS_READ)
		atomic_inc(&fp->fi_access[O_RDONLY]);
453 454
}

455 456
static __be32
nfs4_file_get_access(struct nfs4_file *fp, u32 access)
457
{
458 459
	lockdep_assert_held(&fp->fi_lock);

460 461 462 463
	/* Does this access mode make sense? */
	if (access & ~NFS4_SHARE_ACCESS_BOTH)
		return nfserr_inval;

464 465 466 467
	/* Does it conflict with a deny mode already set? */
	if ((access & fp->fi_share_deny) != 0)
		return nfserr_share_denied;

468 469
	__nfs4_file_get_access(fp, access);
	return nfs_ok;
470 471
}

472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490
static __be32 nfs4_file_check_deny(struct nfs4_file *fp, u32 deny)
{
	/* Common case is that there is no deny mode. */
	if (deny) {
		/* Does this deny mode make sense? */
		if (deny & ~NFS4_SHARE_DENY_BOTH)
			return nfserr_inval;

		if ((deny & NFS4_SHARE_DENY_READ) &&
		    atomic_read(&fp->fi_access[O_RDONLY]))
			return nfserr_share_denied;

		if ((deny & NFS4_SHARE_DENY_WRITE) &&
		    atomic_read(&fp->fi_access[O_WRONLY]))
			return nfserr_share_denied;
	}
	return nfs_ok;
}

491
static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag)
492
{
493 494 495 496 497 498
	might_lock(&fp->fi_lock);

	if (atomic_dec_and_lock(&fp->fi_access[oflag], &fp->fi_lock)) {
		struct file *f1 = NULL;
		struct file *f2 = NULL;

J
Jeff Layton 已提交
499
		swap(f1, fp->fi_fds[oflag]);
500
		if (atomic_read(&fp->fi_access[1 - oflag]) == 0)
J
Jeff Layton 已提交
501
			swap(f2, fp->fi_fds[O_RDWR]);
502 503 504 505 506
		spin_unlock(&fp->fi_lock);
		if (f1)
			fput(f1);
		if (f2)
			fput(f2);
507 508 509
	}
}

510
static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
511
{
512 513 514
	WARN_ON_ONCE(access & ~NFS4_SHARE_ACCESS_BOTH);

	if (access & NFS4_SHARE_ACCESS_WRITE)
515
		__nfs4_file_put_access(fp, O_WRONLY);
516 517
	if (access & NFS4_SHARE_ACCESS_READ)
		__nfs4_file_put_access(fp, O_RDONLY);
518 519
}

520 521
static struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
					 struct kmem_cache *slab)
522
{
J
J. Bruce Fields 已提交
523
	struct nfs4_stid *stid;
J
J. Bruce Fields 已提交
524
	int new_id;
525

526
	stid = kmem_cache_zalloc(slab, GFP_KERNEL);
J
J. Bruce Fields 已提交
527 528 529
	if (!stid)
		return NULL;

530 531 532 533 534
	idr_preload(GFP_KERNEL);
	spin_lock(&cl->cl_lock);
	new_id = idr_alloc_cyclic(&cl->cl_stateids, stid, 0, 0, GFP_NOWAIT);
	spin_unlock(&cl->cl_lock);
	idr_preload_end();
T
Tejun Heo 已提交
535
	if (new_id < 0)
J
J. Bruce Fields 已提交
536
		goto out_free;
537
	stid->sc_client = cl;
J
J. Bruce Fields 已提交
538 539
	stid->sc_stateid.si_opaque.so_id = new_id;
	stid->sc_stateid.si_opaque.so_clid = cl->cl_clientid;
540
	/* Will be incremented before return to client: */
541
	atomic_set(&stid->sc_count, 1);
542 543

	/*
J
J. Bruce Fields 已提交
544 545 546 547 548 549 550
	 * It shouldn't be a problem to reuse an opaque stateid value.
	 * I don't think it is for 4.1.  But with 4.0 I worry that, for
	 * example, a stray write retransmission could be accepted by
	 * the server when it should have been rejected.  Therefore,
	 * adopt a trick from the sctp code to attempt to maximize the
	 * amount of time until an id is reused, by ensuring they always
	 * "increase" (mod INT_MAX):
551
	 */
J
J. Bruce Fields 已提交
552 553
	return stid;
out_free:
554
	kmem_cache_free(slab, stid);
J
J. Bruce Fields 已提交
555
	return NULL;
556 557
}

558
static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
559
{
560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575
	struct nfs4_stid *stid;
	struct nfs4_ol_stateid *stp;

	stid = nfs4_alloc_stid(clp, stateid_slab);
	if (!stid)
		return NULL;

	stp = openlockstateid(stid);
	stp->st_stid.sc_free = nfs4_free_ol_stateid;
	return stp;
}

static void nfs4_free_deleg(struct nfs4_stid *stid)
{
	kmem_cache_free(deleg_slab, stid);
	atomic_long_dec(&num_delegations);
576 577
}

578 579 580 581 582 583 584 585 586 587 588 589 590 591
/*
 * When we recall a delegation, we should be careful not to hand it
 * out again straight away.
 * To ensure this we keep a pair of bloom filters ('new' and 'old')
 * in which the filehandles of recalled delegations are "stored".
 * If a filehandle appear in either filter, a delegation is blocked.
 * When a delegation is recalled, the filehandle is stored in the "new"
 * filter.
 * Every 30 seconds we swap the filters and clear the "new" one,
 * unless both are empty of course.
 *
 * Each filter is 256 bits.  We hash the filehandle to 32bit and use the
 * low 3 bytes as hash-table indices.
 *
592
 * 'blocked_delegations_lock', which is always taken in block_delegations(),
593 594 595
 * is used to manage concurrent access.  Testing does not need the lock
 * except when swapping the two filters.
 */
596
static DEFINE_SPINLOCK(blocked_delegations_lock);
597 598 599 600 601 602 603 604 605 606 607 608 609 610 611
static struct bloom_pair {
	int	entries, old_entries;
	time_t	swap_time;
	int	new; /* index into 'set' */
	DECLARE_BITMAP(set[2], 256);
} blocked_delegations;

static int delegation_blocked(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	if (bd->entries == 0)
		return 0;
	if (seconds_since_boot() - bd->swap_time > 30) {
612
		spin_lock(&blocked_delegations_lock);
613 614 615 616 617 618 619 620
		if (seconds_since_boot() - bd->swap_time > 30) {
			bd->entries -= bd->old_entries;
			bd->old_entries = bd->entries;
			memset(bd->set[bd->new], 0,
			       sizeof(bd->set[0]));
			bd->new = 1-bd->new;
			bd->swap_time = seconds_since_boot();
		}
621
		spin_unlock(&blocked_delegations_lock);
622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643
	}
	hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
	if (test_bit(hash&255, bd->set[0]) &&
	    test_bit((hash>>8)&255, bd->set[0]) &&
	    test_bit((hash>>16)&255, bd->set[0]))
		return 1;

	if (test_bit(hash&255, bd->set[1]) &&
	    test_bit((hash>>8)&255, bd->set[1]) &&
	    test_bit((hash>>16)&255, bd->set[1]))
		return 1;

	return 0;
}

static void block_delegations(struct knfsd_fh *fh)
{
	u32 hash;
	struct bloom_pair *bd = &blocked_delegations;

	hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);

644
	spin_lock(&blocked_delegations_lock);
645 646 647 648 649 650
	__set_bit(hash&255, bd->set[bd->new]);
	__set_bit((hash>>8)&255, bd->set[bd->new]);
	__set_bit((hash>>16)&255, bd->set[bd->new]);
	if (bd->entries == 0)
		bd->swap_time = seconds_since_boot();
	bd->entries += 1;
651
	spin_unlock(&blocked_delegations_lock);
652 653
}

L
Linus Torvalds 已提交
654
static struct nfs4_delegation *
655
alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
L
Linus Torvalds 已提交
656 657
{
	struct nfs4_delegation *dp;
658
	long n;
L
Linus Torvalds 已提交
659 660

	dprintk("NFSD alloc_init_deleg\n");
661 662 663
	n = atomic_long_inc_return(&num_delegations);
	if (n < 0 || n > max_delegations)
		goto out_dec;
664
	if (delegation_blocked(&current_fh->fh_handle))
665
		goto out_dec;
666
	dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
N
NeilBrown 已提交
667
	if (dp == NULL)
668
		goto out_dec;
669 670

	dp->dl_stid.sc_free = nfs4_free_deleg;
671 672
	/*
	 * delegation seqid's are never incremented.  The 4.1 special
J
J. Bruce Fields 已提交
673 674
	 * meaning of seqid 0 isn't meaningful, really, but let's avoid
	 * 0 anyway just for consistency and use 1:
675 676
	 */
	dp->dl_stid.sc_stateid.si_generation = 1;
677 678
	INIT_LIST_HEAD(&dp->dl_perfile);
	INIT_LIST_HEAD(&dp->dl_perclnt);
L
Linus Torvalds 已提交
679
	INIT_LIST_HEAD(&dp->dl_recall_lru);
680
	dp->dl_type = NFS4_OPEN_DELEGATE_READ;
681
	INIT_WORK(&dp->dl_recall.cb_work, nfsd4_run_cb_recall);
L
Linus Torvalds 已提交
682
	return dp;
683 684 685
out_dec:
	atomic_long_dec(&num_delegations);
	return NULL;
L
Linus Torvalds 已提交
686 687 688
}

void
689
nfs4_put_stid(struct nfs4_stid *s)
L
Linus Torvalds 已提交
690
{
691
	struct nfs4_file *fp = s->sc_file;
692 693
	struct nfs4_client *clp = s->sc_client;

694 695
	might_lock(&clp->cl_lock);

696 697
	if (!atomic_dec_and_lock(&s->sc_count, &clp->cl_lock)) {
		wake_up_all(&close_wq);
698
		return;
699
	}
700
	idr_remove(&clp->cl_stateids, s->sc_stateid.si_opaque.so_id);
701
	spin_unlock(&clp->cl_lock);
702
	s->sc_free(s);
703 704
	if (fp)
		put_nfs4_file(fp);
L
Linus Torvalds 已提交
705 706
}

707
static void nfs4_put_deleg_lease(struct nfs4_file *fp)
L
Linus Torvalds 已提交
708
{
709 710
	lockdep_assert_held(&state_lock);

711 712
	if (!fp->fi_lease)
		return;
713 714 715
	if (atomic_dec_and_test(&fp->fi_delegees)) {
		vfs_setlease(fp->fi_deleg_file, F_UNLCK, &fp->fi_lease);
		fp->fi_lease = NULL;
716
		fput(fp->fi_deleg_file);
717 718
		fp->fi_deleg_file = NULL;
	}
L
Linus Torvalds 已提交
719 720
}

J
J. Bruce Fields 已提交
721 722
static void unhash_stid(struct nfs4_stid *s)
{
J
J. Bruce Fields 已提交
723
	s->sc_type = 0;
J
J. Bruce Fields 已提交
724 725
}

726 727 728
static void
hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
{
729
	lockdep_assert_held(&state_lock);
730
	lockdep_assert_held(&fp->fi_lock);
731

732
	atomic_inc(&dp->dl_stid.sc_count);
733
	dp->dl_stid.sc_type = NFS4_DELEG_STID;
734 735 736 737
	list_add(&dp->dl_perfile, &fp->fi_delegations);
	list_add(&dp->dl_perclnt, &dp->dl_stid.sc_client->cl_delegations);
}

L
Linus Torvalds 已提交
738
static void
739
unhash_delegation_locked(struct nfs4_delegation *dp)
L
Linus Torvalds 已提交
740
{
741
	struct nfs4_file *fp = dp->dl_stid.sc_file;
742

743 744
	lockdep_assert_held(&state_lock);

745
	dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
746 747
	/* Ensure that deleg break won't try to requeue it */
	++dp->dl_time;
748
	spin_lock(&fp->fi_lock);
749
	list_del_init(&dp->dl_perclnt);
L
Linus Torvalds 已提交
750
	list_del_init(&dp->dl_recall_lru);
751 752
	list_del_init(&dp->dl_perfile);
	spin_unlock(&fp->fi_lock);
753
	if (fp)
754
		nfs4_put_deleg_lease(fp);
755 756 757 758
}

static void destroy_delegation(struct nfs4_delegation *dp)
{
759 760 761
	spin_lock(&state_lock);
	unhash_delegation_locked(dp);
	spin_unlock(&state_lock);
762
	nfs4_put_stid(&dp->dl_stid);
763 764 765 766 767 768
}

static void revoke_delegation(struct nfs4_delegation *dp)
{
	struct nfs4_client *clp = dp->dl_stid.sc_client;

769 770
	WARN_ON(!list_empty(&dp->dl_recall_lru));

771
	if (clp->cl_minorversion == 0)
772
		nfs4_put_stid(&dp->dl_stid);
773 774
	else {
		dp->dl_stid.sc_type = NFS4_REVOKED_DELEG_STID;
775 776 777
		spin_lock(&clp->cl_lock);
		list_add(&dp->dl_recall_lru, &clp->cl_revoked);
		spin_unlock(&clp->cl_lock);
778 779 780
	}
}

L
Linus Torvalds 已提交
781 782 783 784
/* 
 * SETCLIENTID state 
 */

785 786 787 788 789 790 791 792 793 794
/* Hash a (short) client id into the client hash table. */
static unsigned int clientid_hashval(u32 id)
{
	return id & CLIENT_HASH_MASK;
}

/* Hash the first 8 bytes of an opaque client name string. */
static unsigned int clientstr_hashval(const char *name)
{
	return opaque_hashval(name, 8) & CLIENT_HASH_MASK;
}

795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812
/*
 * We store the NONE, READ, WRITE, and BOTH bits separately in the
 * st_{access,deny}_bmap field of the stateid, in order to track not
 * only what share bits are currently in force, but also what
 * combinations of share bits previous opens have used.  This allows us
 * to enforce the recommendation of rfc 3530 14.2.19 that the server
 * return an error if the client attempt to downgrade to a combination
 * of share bits not explicable by closing some of its previous opens.
 *
 * XXX: This enforcement is actually incomplete, since we don't keep
 * track of access/deny bit combinations; so, e.g., we allow:
 *
 *	OPEN allow read, deny write
 *	OPEN allow both, deny none
 *	DOWNGRADE allow read, deny none
 *
 * which we should reject.
 */
813 814
/* Collapse an st_{access,deny}_bmap into NFS4_SHARE_* mode bits. */
static unsigned int
bmap_to_share_mode(unsigned long bmap) {
	int i;
	unsigned int mode = 0;

	for (i = 1; i < 4; i++) {
		if (test_bit(i, &bmap))
			mode |= i;
	}
	return mode;
}

/* set share access for a given stateid */
static inline void
set_access(u32 access, struct nfs4_ol_stateid *stp)
{
	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap |= (unsigned char)(1 << access);
}

/* clear share access for a given stateid */
static inline void
clear_access(u32 access, struct nfs4_ol_stateid *stp)
{
	WARN_ON_ONCE(access > NFS4_SHARE_ACCESS_BOTH);
	stp->st_access_bmap &= ~(unsigned char)(1 << access);
}

/* test whether a given stateid has access */
static inline bool
test_access(u32 access, struct nfs4_ol_stateid *stp)
{
	return (stp->st_access_bmap & (1 << access)) != 0;
}

/* set share deny for a given stateid */
static inline void
set_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap |= (unsigned char)(1 << deny);
}

/* clear share deny for a given stateid */
static inline void
clear_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	WARN_ON_ONCE(deny > NFS4_SHARE_DENY_BOTH);
	stp->st_deny_bmap &= ~(unsigned char)(1 << deny);
}

/* test whether a given stateid is denying specific access */
static inline bool
test_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	return (stp->st_deny_bmap & (1 << deny)) != 0;
}

/* Map NFS4_SHARE_ACCESS_* bits to an open(2)-style access mode. */
static int nfs4_access_to_omode(u32 access)
{
	switch (access & NFS4_SHARE_ACCESS_BOTH) {
	case NFS4_SHARE_ACCESS_READ:
		return O_RDONLY;
	case NFS4_SHARE_ACCESS_WRITE:
		return O_WRONLY;
	case NFS4_SHARE_ACCESS_BOTH:
		return O_RDWR;
	}
	WARN_ON_ONCE(1);
	return O_RDONLY;
}

897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927
/*
 * A stateid that had a deny mode associated with it is being released
 * or downgraded. Recalculate the deny mode on the file.
 */
static void
recalculate_deny_mode(struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *stp;

	spin_lock(&fp->fi_lock);
	fp->fi_share_deny = 0;
	list_for_each_entry(stp, &fp->fi_stateids, st_perfile)
		fp->fi_share_deny |= bmap_to_share_mode(stp->st_deny_bmap);
	spin_unlock(&fp->fi_lock);
}

static void
reset_union_bmap_deny(u32 deny, struct nfs4_ol_stateid *stp)
{
	int i;
	bool change = false;

	for (i = 1; i < 4; i++) {
		if ((i & deny) != i) {
			change = true;
			clear_deny(i, stp);
		}
	}

	/* Recalculate per-file deny mode if there was a change */
	if (change)
928
		recalculate_deny_mode(stp->st_stid.sc_file);
929 930
}

931 932 933 934 935
/* release all access and file references for a given stateid */
static void
release_all_access(struct nfs4_ol_stateid *stp)
{
	int i;
936
	struct nfs4_file *fp = stp->st_stid.sc_file;
937 938 939

	if (fp && stp->st_deny_bmap != 0)
		recalculate_deny_mode(fp);
940 941 942

	for (i = 1; i < 4; i++) {
		if (test_access(i, stp))
943
			nfs4_file_put_access(stp->st_stid.sc_file, i);
944 945 946 947
		clear_access(i, stp);
	}
}

948 949 950 951
/* Drop a stateowner reference; the last put unhashes and frees it. */
static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
{
	if (!atomic_dec_and_test(&sop->so_count))
		return;
	sop->so_ops->so_unhash(sop);
	kfree(sop->so_owner.data);
	sop->so_ops->so_free(sop);
}

957
static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
958
{
959
	struct nfs4_file *fp = stp->st_stid.sc_file;
960

961 962
	lockdep_assert_held(&stp->st_stateowner->so_client->cl_lock);

963
	spin_lock(&fp->fi_lock);
964
	list_del(&stp->st_perfile);
965
	spin_unlock(&fp->fi_lock);
966 967 968
	list_del(&stp->st_perstateowner);
}

969
static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
970
{
971
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
972

973
	release_all_access(stp);
974 975
	if (stp->st_stateowner)
		nfs4_put_stateowner(stp->st_stateowner);
976
	kmem_cache_free(stateid_slab, stid);
977 978
}

979
static void nfs4_free_lock_stateid(struct nfs4_stid *stid)
980
{
981 982
	struct nfs4_ol_stateid *stp = openlockstateid(stid);
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);
983 984
	struct file *file;

985 986 987 988 989 990
	file = find_any_file(stp->st_stid.sc_file);
	if (file)
		filp_close(file, (fl_owner_t)lo);
	nfs4_free_ol_stateid(stid);
}

991
static void release_lock_stateid(struct nfs4_ol_stateid *stp)
992
{
993 994 995
	struct nfs4_openowner *oo = openowner(stp->st_openstp->st_stateowner);

	spin_lock(&oo->oo_owner.so_client->cl_lock);
996
	list_del(&stp->st_locks);
997
	unhash_generic_stateid(stp);
J
J. Bruce Fields 已提交
998
	unhash_stid(&stp->st_stid);
999
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
1000
	nfs4_put_stid(&stp->st_stid);
1001 1002
}

1003
static void unhash_lockowner(struct nfs4_lockowner *lo)
1004 1005 1006 1007 1008
{
	list_del_init(&lo->lo_owner.so_strhash);
}

static void release_lockowner_stateids(struct nfs4_lockowner *lo)
1009
{
1010
	struct nfs4_ol_stateid *stp;
1011

1012 1013
	while (!list_empty(&lo->lo_owner.so_stateids)) {
		stp = list_first_entry(&lo->lo_owner.so_stateids,
1014
				struct nfs4_ol_stateid, st_perstateowner);
1015
		release_lock_stateid(stp);
1016 1017 1018
	}
}

1019
static void release_lockowner(struct nfs4_lockowner *lo)
1020
{
1021
	unhash_lockowner(lo);
1022
	release_lockowner_stateids(lo);
1023
	nfs4_put_stateowner(&lo->lo_owner);
1024 1025
}

1026
static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp)
1027 1028
	__releases(&open_stp->st_stateowner->so_client->cl_lock)
	__acquires(&open_stp->st_stateowner->so_client->cl_lock)
1029 1030 1031 1032 1033 1034
{
	struct nfs4_ol_stateid *stp;

	while (!list_empty(&open_stp->st_locks)) {
		stp = list_entry(open_stp->st_locks.next,
				struct nfs4_ol_stateid, st_locks);
1035
		spin_unlock(&open_stp->st_stateowner->so_client->cl_lock);
1036
		release_lock_stateid(stp);
1037
		spin_lock(&open_stp->st_stateowner->so_client->cl_lock);
1038 1039 1040
	}
}

1041
static void unhash_open_stateid(struct nfs4_ol_stateid *stp)
1042
{
1043
	spin_lock(&stp->st_stateowner->so_client->cl_lock);
1044
	unhash_generic_stateid(stp);
1045
	release_open_stateid_locks(stp);
1046
	spin_unlock(&stp->st_stateowner->so_client->cl_lock);
1047 1048 1049 1050 1051
}

static void release_open_stateid(struct nfs4_ol_stateid *stp)
{
	unhash_open_stateid(stp);
1052
	nfs4_put_stid(&stp->st_stid);
1053 1054
}

1055
static void unhash_openowner_locked(struct nfs4_openowner *oo)
1056
{
1057 1058 1059 1060 1061
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
						nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);

1062 1063
	list_del_init(&oo->oo_owner.so_strhash);
	list_del_init(&oo->oo_perclient);
1064 1065
}

1066 1067 1068 1069 1070
static void release_last_closed_stateid(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *s = oo->oo_last_closed_stid;

	if (s) {
1071
		list_del_init(&oo->oo_close_lru);
1072
		oo->oo_last_closed_stid = NULL;
1073
		nfs4_put_stid(&s->st_stid);
1074 1075 1076
	}
}

1077 1078 1079
static void release_openowner_stateids(struct nfs4_openowner *oo)
{
	struct nfs4_ol_stateid *stp;
1080 1081 1082 1083
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
						nfsd_net_id);

	lockdep_assert_held(&nn->client_lock);
1084 1085 1086 1087

	while (!list_empty(&oo->oo_owner.so_stateids)) {
		stp = list_first_entry(&oo->oo_owner.so_stateids,
				struct nfs4_ol_stateid, st_perstateowner);
1088
		spin_unlock(&nn->client_lock);
1089
		release_open_stateid(stp);
1090
		spin_lock(&nn->client_lock);
1091 1092 1093
	}
}

1094
static void release_openowner(struct nfs4_openowner *oo)
1095
{
1096 1097 1098 1099 1100
	struct nfsd_net *nn = net_generic(oo->oo_owner.so_client->net,
						nfsd_net_id);

	spin_lock(&nn->client_lock);
	unhash_openowner_locked(oo);
1101
	release_openowner_stateids(oo);
1102
	spin_unlock(&nn->client_lock);
1103
	release_last_closed_stateid(oo);
1104
	nfs4_put_stateowner(&oo->oo_owner);
1105 1106
}

M
Marc Eshel 已提交
1107 1108 1109 1110 1111 1112 1113 1114
static inline int
hash_sessionid(struct nfs4_sessionid *sessionid)
{
	struct nfsd4_sessionid *sid = (struct nfsd4_sessionid *)sessionid;

	return sid->sequence % SESSION_HASH_SIZE;
}

#ifdef NFSD_DEBUG
/* Debug helper: print the four 32-bit words of a sessionid. */
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
	u32 *ptr = (u32 *)(&sessionid->data[0]);
	dprintk("%s: %u:%u:%u:%u\n", fn, ptr[0], ptr[1], ptr[2], ptr[3]);
}
#else
static inline void
dump_sessionid(const char *fn, struct nfs4_sessionid *sessionid)
{
}
#endif

1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140
/*
 * Bump the seqid on cstate->replay_owner, and clear replay_owner if it
 * won't be used for replay.
 */
void nfsd4_bump_seqid(struct nfsd4_compound_state *cstate, __be32 nfserr)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (nfserr == nfserr_replay_me)
		return;

	if (!seqid_mutating_err(ntohl(nfserr))) {
1141
		nfsd4_cstate_clear_replay(cstate);
1142 1143 1144 1145 1146 1147 1148 1149 1150
		return;
	}
	if (!so)
		return;
	if (so->so_is_open_owner)
		release_last_closed_stateid(openowner(so));
	so->so_seqid++;
	return;
}
M
Marc Eshel 已提交
1151

A
Andy Adamson 已提交
1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164
static void
gen_sessionid(struct nfsd4_session *ses)
{
	struct nfs4_client *clp = ses->se_client;
	struct nfsd4_sessionid *sid;

	sid = (struct nfsd4_sessionid *)ses->se_sessionid.data;
	sid->clientid = clp->cl_clientid;
	sid->sequence = current_sessionid++;
	sid->reserved = 0;
}

/*
 * The protocol defines ca_maxresponssize_cached to include the size of
 * the rpc header, but all we need to cache is the data starting after
 * the end of the initial SEQUENCE operation--the rest we regenerate
 * each time.  Therefore we can advertise a ca_maxresponssize_cached
 * value that is the number of bytes in our cache plus a few additional
 * bytes.  In order to stay on the safe side, and not promise more than
 * we can cache, those additional bytes must be the minimum possible: 24
 * bytes of rpc header (xid through accept state, with AUTH_NULL
 * verifier), 12 for the compound header (with zero-length tag), and 44
 * for the SEQUENCE op response:
 */
#define NFSD_MIN_HDR_SEQ_SZ  (24 + 12 + 44)

1178 1179 1180 1181 1182 1183 1184 1185 1186
static void
free_session_slots(struct nfsd4_session *ses)
{
	int i;

	for (i = 0; i < ses->se_fchannel.maxreqs; i++)
		kfree(ses->se_slots[i]);
}

1187
/*
1188 1189 1190
 * We don't actually need to cache the rpc and session headers, so we
 * can allocate a little less for each slot:
 */
1191
static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
1192
{
1193
	u32 size;
1194

1195 1196 1197 1198 1199
	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
		size = 0;
	else
		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
	return size + sizeof(struct nfsd4_slot);
1200
}
A
Andy Adamson 已提交
1201

1202 1203
/*
 * XXX: If we run out of reserved DRC memory we could (up to a point)
1204
 * re-negotiate active sessions and reduce their slot usage to make
1205
 * room for new connections. For now we just fail the create session.
A
Andy Adamson 已提交
1206
 */
1207
static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca)
A
Andy Adamson 已提交
1208
{
1209 1210
	u32 slotsize = slot_bytes(ca);
	u32 num = ca->maxreqs;
1211
	int avail;
A
Andy Adamson 已提交
1212

1213
	spin_lock(&nfsd_drc_lock);
1214 1215
	avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION,
		    nfsd_drc_max_mem - nfsd_drc_mem_used);
1216 1217 1218
	num = min_t(int, num, avail / slotsize);
	nfsd_drc_mem_used += num * slotsize;
	spin_unlock(&nfsd_drc_lock);
A
Andy Adamson 已提交
1219

1220 1221
	return num;
}
A
Andy Adamson 已提交
1222

1223
static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
1224
{
1225 1226
	int slotsize = slot_bytes(ca);

1227
	spin_lock(&nfsd_drc_lock);
1228
	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
1229
	spin_unlock(&nfsd_drc_lock);
1230
}
A
Andy Adamson 已提交
1231

1232 1233
static struct nfsd4_session *alloc_session(struct nfsd4_channel_attrs *fattrs,
					   struct nfsd4_channel_attrs *battrs)
1234
{
1235 1236
	int numslots = fattrs->maxreqs;
	int slotsize = slot_bytes(fattrs);
1237 1238
	struct nfsd4_session *new;
	int mem, i;
1239

1240 1241 1242
	BUILD_BUG_ON(NFSD_MAX_SLOTS_PER_SESSION * sizeof(struct nfsd4_slot *)
			+ sizeof(struct nfsd4_session) > PAGE_SIZE);
	mem = numslots * sizeof(struct nfsd4_slot *);
A
Andy Adamson 已提交
1243

1244 1245 1246
	new = kzalloc(sizeof(*new) + mem, GFP_KERNEL);
	if (!new)
		return NULL;
1247
	/* allocate each struct nfsd4_slot and data cache in one piece */
1248
	for (i = 0; i < numslots; i++) {
1249
		new->se_slots[i] = kzalloc(slotsize, GFP_KERNEL);
1250
		if (!new->se_slots[i])
1251 1252
			goto out_free;
	}
1253 1254 1255 1256

	memcpy(&new->se_fchannel, fattrs, sizeof(struct nfsd4_channel_attrs));
	memcpy(&new->se_bchannel, battrs, sizeof(struct nfsd4_channel_attrs));

1257 1258 1259 1260 1261 1262
	return new;
out_free:
	while (i--)
		kfree(new->se_slots[i]);
	kfree(new);
	return NULL;
A
Andy Adamson 已提交
1263 1264
}

1265 1266 1267 1268 1269
static void free_conn(struct nfsd4_conn *c)
{
	svc_xprt_put(c->cn_xprt);
	kfree(c);
}
A
Andy Adamson 已提交
1270

1271 1272 1273 1274
static void nfsd4_conn_lost(struct svc_xpt_user *u)
{
	struct nfsd4_conn *c = container_of(u, struct nfsd4_conn, cn_xpt_user);
	struct nfs4_client *clp = c->cn_session->se_client;
A
Andy Adamson 已提交
1275

1276 1277 1278 1279 1280
	spin_lock(&clp->cl_lock);
	if (!list_empty(&c->cn_persession)) {
		list_del(&c->cn_persession);
		free_conn(c);
	}
1281
	nfsd4_probe_callback(clp);
1282
	spin_unlock(&clp->cl_lock);
1283
}
A
Andy Adamson 已提交
1284

1285
static struct nfsd4_conn *alloc_conn(struct svc_rqst *rqstp, u32 flags)
1286 1287
{
	struct nfsd4_conn *conn;
A
Andy Adamson 已提交
1288

1289 1290
	conn = kmalloc(sizeof(struct nfsd4_conn), GFP_KERNEL);
	if (!conn)
1291
		return NULL;
1292 1293
	svc_xprt_get(rqstp->rq_xprt);
	conn->cn_xprt = rqstp->rq_xprt;
1294
	conn->cn_flags = flags;
1295 1296 1297
	INIT_LIST_HEAD(&conn->cn_xpt_user.list);
	return conn;
}
1298

1299 1300 1301 1302
static void __nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
{
	conn->cn_session = ses;
	list_add(&conn->cn_persession, &ses->se_conns);
A
Andy Adamson 已提交
1303 1304
}

1305
static void nfsd4_hash_conn(struct nfsd4_conn *conn, struct nfsd4_session *ses)
1306
{
1307
	struct nfs4_client *clp = ses->se_client;
1308

1309
	spin_lock(&clp->cl_lock);
1310
	__nfsd4_hash_conn(conn, ses);
1311
	spin_unlock(&clp->cl_lock);
1312 1313
}

1314
static int nfsd4_register_conn(struct nfsd4_conn *conn)
1315
{
1316
	conn->cn_xpt_user.callback = nfsd4_conn_lost;
1317
	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
1318 1319
}

1320
static void nfsd4_init_conn(struct svc_rqst *rqstp, struct nfsd4_conn *conn, struct nfsd4_session *ses)
A
Andy Adamson 已提交
1321
{
1322
	int ret;
A
Andy Adamson 已提交
1323

1324
	nfsd4_hash_conn(conn, ses);
1325 1326 1327 1328
	ret = nfsd4_register_conn(conn);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&conn->cn_xpt_user);
1329 1330
	/* We may have gained or lost a callback channel: */
	nfsd4_probe_callback_sync(ses->se_client);
1331
}
A
Andy Adamson 已提交
1332

1333
static struct nfsd4_conn *alloc_conn_from_crses(struct svc_rqst *rqstp, struct nfsd4_create_session *cses)
1334 1335 1336
{
	u32 dir = NFS4_CDFC4_FORE;

1337
	if (cses->flags & SESSION4_BACK_CHAN)
1338
		dir |= NFS4_CDFC4_BACK;
1339
	return alloc_conn(rqstp, dir);
1340 1341 1342
}

/* must be called under client_lock */
1343
static void nfsd4_del_conns(struct nfsd4_session *s)
1344
{
1345 1346
	struct nfs4_client *clp = s->se_client;
	struct nfsd4_conn *c;
A
Andy Adamson 已提交
1347

1348 1349 1350 1351 1352
	spin_lock(&clp->cl_lock);
	while (!list_empty(&s->se_conns)) {
		c = list_first_entry(&s->se_conns, struct nfsd4_conn, cn_persession);
		list_del_init(&c->cn_persession);
		spin_unlock(&clp->cl_lock);
1353

1354 1355
		unregister_xpt_user(c->cn_xprt, &c->cn_xpt_user);
		free_conn(c);
A
Andy Adamson 已提交
1356

1357 1358 1359
		spin_lock(&clp->cl_lock);
	}
	spin_unlock(&clp->cl_lock);
1360
}
/* Free slots and the session itself; no list/lock manipulation. */
static void __free_session(struct nfsd4_session *ses)
{
	free_session_slots(ses);
	kfree(ses);
}

1368
static void free_session(struct nfsd4_session *ses)
1369
{
1370
	struct nfsd_net *nn = net_generic(ses->se_client->net, nfsd_net_id);
1371 1372

	lockdep_assert_held(&nn->client_lock);
1373
	nfsd4_del_conns(ses);
1374
	nfsd4_put_drc_mem(&ses->se_fchannel);
1375
	__free_session(ses);
1376 1377
}

1378
static void init_session(struct svc_rqst *rqstp, struct nfsd4_session *new, struct nfs4_client *clp, struct nfsd4_create_session *cses)
1379 1380
{
	int idx;
1381
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
1382

A
Andy Adamson 已提交
1383 1384 1385
	new->se_client = clp;
	gen_sessionid(new);

1386 1387
	INIT_LIST_HEAD(&new->se_conns);

1388
	new->se_cb_seq_nr = 1;
A
Andy Adamson 已提交
1389
	new->se_flags = cses->flags;
1390
	new->se_cb_prog = cses->callback_prog;
1391
	new->se_cb_sec = cses->cb_sec;
1392
	atomic_set(&new->se_ref, 0);
1393
	idx = hash_sessionid(&new->se_sessionid);
1394
	spin_lock(&nn->client_lock);
1395
	list_add(&new->se_hash, &nn->sessionid_hashtbl[idx]);
1396
	spin_lock(&clp->cl_lock);
A
Andy Adamson 已提交
1397
	list_add(&new->se_perclnt, &clp->cl_sessions);
1398
	spin_unlock(&clp->cl_lock);
1399
	spin_unlock(&nn->client_lock);
1400

1401
	if (cses->flags & SESSION4_BACK_CHAN) {
1402
		struct sockaddr *sa = svc_addr(rqstp);
1403 1404 1405 1406 1407 1408 1409
		/*
		 * This is a little silly; with sessions there's no real
		 * use for the callback address.  Use the peer address
		 * as a reasonable default for now, but consider fixing
		 * the rpc client not to require an address in the
		 * future:
		 */
1410 1411 1412
		rpc_copy_addr((struct sockaddr *)&clp->cl_cb_conn.cb_addr, sa);
		clp->cl_cb_conn.cb_addrlen = svc_addr_len(sa);
	}
A
Andy Adamson 已提交
1413 1414
}

1415
/* caller must hold client_lock */
M
Marc Eshel 已提交
1416
static struct nfsd4_session *
1417
__find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net)
M
Marc Eshel 已提交
1418 1419 1420
{
	struct nfsd4_session *elem;
	int idx;
1421
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
M
Marc Eshel 已提交
1422 1423 1424 1425

	dump_sessionid(__func__, sessionid);
	idx = hash_sessionid(sessionid);
	/* Search in the appropriate list */
1426
	list_for_each_entry(elem, &nn->sessionid_hashtbl[idx], se_hash) {
M
Marc Eshel 已提交
1427 1428 1429 1430 1431 1432 1433 1434 1435 1436
		if (!memcmp(elem->se_sessionid.data, sessionid->data,
			    NFS4_MAX_SESSIONID_LEN)) {
			return elem;
		}
	}

	dprintk("%s: session not found\n", __func__);
	return NULL;
}

1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454
static struct nfsd4_session *
find_in_sessionid_hashtbl(struct nfs4_sessionid *sessionid, struct net *net,
		__be32 *ret)
{
	struct nfsd4_session *session;
	__be32 status = nfserr_badsession;

	session = __find_in_sessionid_hashtbl(sessionid, net);
	if (!session)
		goto out;
	status = nfsd4_get_session_locked(session);
	if (status)
		session = NULL;
out:
	*ret = status;
	return session;
}

1455
/* caller must hold client_lock */
A
Andy Adamson 已提交
1456
static void
M
Marc Eshel 已提交
1457
unhash_session(struct nfsd4_session *ses)
A
Andy Adamson 已提交
1458 1459
{
	list_del(&ses->se_hash);
1460
	spin_lock(&ses->se_client->cl_lock);
A
Andy Adamson 已提交
1461
	list_del(&ses->se_perclnt);
1462
	spin_unlock(&ses->se_client->cl_lock);
M
Marc Eshel 已提交
1463 1464
}

L
Linus Torvalds 已提交
1465 1466
/* SETCLIENTID and SETCLIENTID_CONFIRM Helper functions */
static int
1467
STALE_CLIENTID(clientid_t *clid, struct nfsd_net *nn)
L
Linus Torvalds 已提交
1468
{
1469
	if (clid->cl_boot == nn->boot_time)
L
Linus Torvalds 已提交
1470
		return 0;
A
Andy Adamson 已提交
1471
	dprintk("NFSD stale clientid (%08x/%08x) boot_time %08lx\n",
1472
		clid->cl_boot, clid->cl_id, nn->boot_time);
L
Linus Torvalds 已提交
1473 1474 1475 1476 1477 1478 1479 1480
	return 1;
}

/* 
 * XXX Should we use a slab cache ?
 * This type of memory management is somewhat inefficient, but we use it
 * anyway since SETCLIENTID is not a common operation.
 */
1481
static struct nfs4_client *alloc_client(struct xdr_netobj name)
L
Linus Torvalds 已提交
1482 1483 1484
{
	struct nfs4_client *clp;

1485 1486 1487
	clp = kzalloc(sizeof(struct nfs4_client), GFP_KERNEL);
	if (clp == NULL)
		return NULL;
1488
	clp->cl_name.data = kmemdup(name.data, name.len, GFP_KERNEL);
1489 1490 1491
	if (clp->cl_name.data == NULL) {
		kfree(clp);
		return NULL;
L
Linus Torvalds 已提交
1492
	}
1493
	clp->cl_name.len = name.len;
1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505
	INIT_LIST_HEAD(&clp->cl_sessions);
	idr_init(&clp->cl_stateids);
	atomic_set(&clp->cl_refcount, 0);
	clp->cl_cb_state = NFSD4_CB_UNKNOWN;
	INIT_LIST_HEAD(&clp->cl_idhash);
	INIT_LIST_HEAD(&clp->cl_openowners);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_lru);
	INIT_LIST_HEAD(&clp->cl_callbacks);
	INIT_LIST_HEAD(&clp->cl_revoked);
	spin_lock_init(&clp->cl_lock);
	rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
L
Linus Torvalds 已提交
1506 1507 1508
	return clp;
}

1509
static void
L
Linus Torvalds 已提交
1510 1511
free_client(struct nfs4_client *clp)
{
1512
	struct nfsd_net __maybe_unused *nn = net_generic(clp->net, nfsd_net_id);
1513 1514

	lockdep_assert_held(&nn->client_lock);
1515 1516 1517 1518 1519
	while (!list_empty(&clp->cl_sessions)) {
		struct nfsd4_session *ses;
		ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
				se_perclnt);
		list_del(&ses->se_perclnt);
1520 1521
		WARN_ON_ONCE(atomic_read(&ses->se_ref));
		free_session(ses);
1522
	}
1523
	rpc_destroy_wait_queue(&clp->cl_cb_waitq);
1524
	free_svc_cred(&clp->cl_cred);
L
Linus Torvalds 已提交
1525
	kfree(clp->cl_name.data);
M
majianpeng 已提交
1526
	idr_destroy(&clp->cl_stateids);
L
Linus Torvalds 已提交
1527 1528 1529
	kfree(clp);
}

B
Benny Halevy 已提交
1530 1531 1532 1533
/* must be called under the client_lock */
static inline void
unhash_client_locked(struct nfs4_client *clp)
{
1534 1535
	struct nfsd4_session *ses;

B
Benny Halevy 已提交
1536
	list_del(&clp->cl_lru);
1537
	spin_lock(&clp->cl_lock);
1538 1539
	list_for_each_entry(ses, &clp->cl_sessions, se_perclnt)
		list_del_init(&ses->se_hash);
1540
	spin_unlock(&clp->cl_lock);
B
Benny Halevy 已提交
1541 1542
}

L
Linus Torvalds 已提交
1543
static void
1544
destroy_client(struct nfs4_client *clp)
L
Linus Torvalds 已提交
1545
{
1546
	struct nfs4_openowner *oo;
L
Linus Torvalds 已提交
1547 1548
	struct nfs4_delegation *dp;
	struct list_head reaplist;
1549
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
L
Linus Torvalds 已提交
1550 1551

	INIT_LIST_HEAD(&reaplist);
1552
	spin_lock(&state_lock);
1553 1554
	while (!list_empty(&clp->cl_delegations)) {
		dp = list_entry(clp->cl_delegations.next, struct nfs4_delegation, dl_perclnt);
1555 1556
		unhash_delegation_locked(dp);
		list_add(&dp->dl_recall_lru, &reaplist);
L
Linus Torvalds 已提交
1557
	}
1558
	spin_unlock(&state_lock);
L
Linus Torvalds 已提交
1559 1560
	while (!list_empty(&reaplist)) {
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1561
		list_del_init(&dp->dl_recall_lru);
1562
		nfs4_put_stid(&dp->dl_stid);
L
Linus Torvalds 已提交
1563
	}
1564
	while (!list_empty(&clp->cl_revoked)) {
1565
		dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
1566
		list_del_init(&dp->dl_recall_lru);
1567
		nfs4_put_stid(&dp->dl_stid);
1568
	}
1569
	while (!list_empty(&clp->cl_openowners)) {
1570
		oo = list_entry(clp->cl_openowners.next, struct nfs4_openowner, oo_perclient);
1571
		atomic_inc(&oo->oo_owner.so_count);
1572
		release_openowner(oo);
L
Linus Torvalds 已提交
1573
	}
1574
	nfsd4_shutdown_callback(clp);
B
Benny Halevy 已提交
1575 1576
	if (clp->cl_cb_conn.cb_xprt)
		svc_xprt_put(clp->cl_cb_conn.cb_xprt);
1577
	list_del(&clp->cl_idhash);
1578
	if (test_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags))
1579
		rb_erase(&clp->cl_namenode, &nn->conf_name_tree);
1580
	else
1581
		rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1582
	spin_lock(&nn->client_lock);
B
Benny Halevy 已提交
1583
	unhash_client_locked(clp);
1584 1585
	WARN_ON_ONCE(atomic_read(&clp->cl_refcount));
	free_client(clp);
1586
	spin_unlock(&nn->client_lock);
L
Linus Torvalds 已提交
1587 1588
}

/* Remove the client's stable-storage record, then destroy it. */
static void expire_client(struct nfs4_client *clp)
{
	nfsd4_client_record_remove(clp);
	destroy_client(clp);
}

1595 1596 1597 1598
static void copy_verf(struct nfs4_client *target, nfs4_verifier *source)
{
	memcpy(target->cl_verifier.data, source->data,
			sizeof(target->cl_verifier.data));
L
Linus Torvalds 已提交
1599 1600
}

1601 1602
static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
{
L
Linus Torvalds 已提交
1603 1604 1605 1606
	target->cl_clientid.cl_boot = source->cl_clientid.cl_boot; 
	target->cl_clientid.cl_id = source->cl_clientid.cl_id; 
}

1607
static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1608
{
1609 1610 1611 1612 1613 1614 1615
	if (source->cr_principal) {
		target->cr_principal =
				kstrdup(source->cr_principal, GFP_KERNEL);
		if (target->cr_principal == NULL)
			return -ENOMEM;
	} else
		target->cr_principal = NULL;
1616
	target->cr_flavor = source->cr_flavor;
L
Linus Torvalds 已提交
1617 1618 1619 1620
	target->cr_uid = source->cr_uid;
	target->cr_gid = source->cr_gid;
	target->cr_group_info = source->cr_group_info;
	get_group_info(target->cr_group_info);
1621 1622 1623
	target->cr_gss_mech = source->cr_gss_mech;
	if (source->cr_gss_mech)
		gss_mech_get(source->cr_gss_mech);
1624
	return 0;
L
Linus Torvalds 已提交
1625 1626
}

1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637
static long long
compare_blob(const struct xdr_netobj *o1, const struct xdr_netobj *o2)
{
	long long res;

	res = o1->len - o2->len;
	if (res)
		return res;
	return (long long)memcmp(o1->data, o2->data, o1->len);
}

1638
static int same_name(const char *n1, const char *n2)
1639
{
N
NeilBrown 已提交
1640
	return 0 == memcmp(n1, n2, HEXDIR_LEN);
L
Linus Torvalds 已提交
1641 1642 1643
}

static int
1644 1645 1646
same_verf(nfs4_verifier *v1, nfs4_verifier *v2)
{
	return 0 == memcmp(v1->data, v2->data, sizeof(v1->data));
L
Linus Torvalds 已提交
1647 1648 1649
}

static int
1650 1651 1652
same_clid(clientid_t *cl1, clientid_t *cl2)
{
	return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
L
Linus Torvalds 已提交
1653 1654
}

1655 1656 1657 1658 1659 1660 1661
static bool groups_equal(struct group_info *g1, struct group_info *g2)
{
	int i;

	if (g1->ngroups != g2->ngroups)
		return false;
	for (i=0; i<g1->ngroups; i++)
1662
		if (!gid_eq(GROUP_AT(g1, i), GROUP_AT(g2, i)))
1663 1664 1665 1666
			return false;
	return true;
}

1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682
/*
 * RFC 3530 language requires clid_inuse be returned when the
 * "principal" associated with a requests differs from that previously
 * used.  We use uid, gid's, and gss principal string as our best
 * approximation.  We also don't want to allow non-gss use of a client
 * established using gss: in theory cr_principal should catch that
 * change, but in practice cr_principal can be null even in the gss case
 * since gssd doesn't always pass down a principal string.
 */
static bool is_gss_cred(struct svc_cred *cr)
{
	/* Is cr_flavor one of the gss "pseudoflavors"?: */
	return (cr->cr_flavor > RPC_AUTH_MAXFLAVOR);
}


1683
static bool
1684 1685
same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
{
1686
	if ((is_gss_cred(cr1) != is_gss_cred(cr2))
1687 1688
		|| (!uid_eq(cr1->cr_uid, cr2->cr_uid))
		|| (!gid_eq(cr1->cr_gid, cr2->cr_gid))
1689 1690 1691 1692 1693 1694
		|| !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
		return false;
	if (cr1->cr_principal == cr2->cr_principal)
		return true;
	if (!cr1->cr_principal || !cr2->cr_principal)
		return false;
1695
	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
L
Linus Torvalds 已提交
1696 1697
}

1698 1699 1700 1701 1702
static bool svc_rqst_integrity_protected(struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;
	u32 service;

1703 1704
	if (!cr->cr_gss_mech)
		return false;
1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724
	service = gss_pseudoflavor_to_service(cr->cr_gss_mech, cr->cr_flavor);
	return service == RPC_GSS_SVC_INTEGRITY ||
	       service == RPC_GSS_SVC_PRIVACY;
}

/* Enforce SP4_MACH_CRED: same mechanism, integrity-protected, same principal. */
static bool mach_creds_match(struct nfs4_client *cl, struct svc_rqst *rqstp)
{
	struct svc_cred *cr = &rqstp->rq_cred;

	if (!cl->cl_mach_cred)
		return true;
	if (cl->cl_cred.cr_gss_mech != cr->cr_gss_mech)
		return false;
	if (!svc_rqst_integrity_protected(rqstp))
		return false;
	if (!cr->cr_principal)
		return false;
	return 0 == strcmp(cl->cl_cred.cr_principal, cr->cr_principal);
}

1725
static void gen_clid(struct nfs4_client *clp, struct nfsd_net *nn)
1726 1727 1728
{
	static u32 current_clientid = 1;

1729
	clp->cl_clientid.cl_boot = nn->boot_time;
L
Linus Torvalds 已提交
1730 1731 1732
	clp->cl_clientid.cl_id = current_clientid++; 
}

1733 1734
static void gen_confirm(struct nfs4_client *clp)
{
1735
	__be32 verf[2];
1736
	static u32 i;
L
Linus Torvalds 已提交
1737

1738 1739 1740 1741 1742 1743
	/*
	 * This is opaque to client, so no need to byte-swap. Use
	 * __force to keep sparse happy
	 */
	verf[0] = (__force __be32)get_seconds();
	verf[1] = (__force __be32)i++;
1744
	memcpy(clp->cl_confirm.data, verf, sizeof(clp->cl_confirm.data));
L
Linus Torvalds 已提交
1745 1746
}

1747 1748
static struct nfs4_stid *
find_stateid_locked(struct nfs4_client *cl, stateid_t *t)
1749
{
J
J. Bruce Fields 已提交
1750 1751 1752 1753 1754 1755
	struct nfs4_stid *ret;

	ret = idr_find(&cl->cl_stateids, t->si_opaque.so_id);
	if (!ret || !ret->sc_type)
		return NULL;
	return ret;
J
J. Bruce Fields 已提交
1756 1757
}

1758 1759
static struct nfs4_stid *
find_stateid_by_type(struct nfs4_client *cl, stateid_t *t, char typemask)
1760 1761
{
	struct nfs4_stid *s;
J
J. Bruce Fields 已提交
1762

1763 1764
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, t);
1765 1766 1767 1768 1769 1770
	if (s != NULL) {
		if (typemask & s->sc_type)
			atomic_inc(&s->sc_count);
		else
			s = NULL;
	}
1771 1772
	spin_unlock(&cl->cl_lock);
	return s;
1773 1774
}

J
Jeff Layton 已提交
1775
static struct nfs4_client *create_client(struct xdr_netobj name,
1776 1777 1778 1779
		struct svc_rqst *rqstp, nfs4_verifier *verf)
{
	struct nfs4_client *clp;
	struct sockaddr *sa = svc_addr(rqstp);
1780
	int ret;
1781
	struct net *net = SVC_NET(rqstp);
1782
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1783 1784 1785 1786 1787

	clp = alloc_client(name);
	if (clp == NULL)
		return NULL;

1788 1789
	ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
	if (ret) {
1790
		spin_lock(&nn->client_lock);
1791
		free_client(clp);
1792
		spin_unlock(&nn->client_lock);
1793
		return NULL;
1794
	}
1795
	INIT_WORK(&clp->cl_cb_null.cb_work, nfsd4_run_cb_null);
B
Benny Halevy 已提交
1796
	clp->cl_time = get_seconds();
1797 1798 1799 1800
	clear_bit(0, &clp->cl_cb_slot_busy);
	copy_verf(clp, verf);
	rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
	gen_confirm(clp);
1801
	clp->cl_cb_session = NULL;
1802
	clp->net = net;
1803 1804 1805
	return clp;
}

1806
static void
1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847
add_clp_to_name_tree(struct nfs4_client *new_clp, struct rb_root *root)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	struct nfs4_client *clp;

	while (*new) {
		clp = rb_entry(*new, struct nfs4_client, cl_namenode);
		parent = *new;

		if (compare_blob(&clp->cl_name, &new_clp->cl_name) > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&new_clp->cl_namenode, parent, new);
	rb_insert_color(&new_clp->cl_namenode, root);
}

/* Find a client by name in an rbtree built by add_clp_to_name_tree(). */
static struct nfs4_client *
find_clp_in_name_tree(struct xdr_netobj *name, struct rb_root *root)
{
	long long cmp;
	struct rb_node *node = root->rb_node;
	struct nfs4_client *clp;

	while (node) {
		clp = rb_entry(node, struct nfs4_client, cl_namenode);
		cmp = compare_blob(&clp->cl_name, name);
		if (cmp > 0)
			node = node->rb_left;
		else if (cmp < 0)
			node = node->rb_right;
		else
			return clp;
	}
	return NULL;
}

static void
add_to_unconfirmed(struct nfs4_client *clp)
L
Linus Torvalds 已提交
1848 1849
{
	unsigned int idhashval;
1850
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
L
Linus Torvalds 已提交
1851

1852
	clear_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
1853
	add_clp_to_name_tree(clp, &nn->unconf_name_tree);
L
Linus Torvalds 已提交
1854
	idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1855
	list_add(&clp->cl_idhash, &nn->unconf_id_hashtbl[idhashval]);
1856
	renew_client(clp);
L
Linus Torvalds 已提交
1857 1858
}

1859
static void
L
Linus Torvalds 已提交
1860 1861 1862
move_to_confirmed(struct nfs4_client *clp)
{
	unsigned int idhashval = clientid_hashval(clp->cl_clientid.cl_id);
1863
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
L
Linus Torvalds 已提交
1864 1865

	dprintk("NFSD: move_to_confirm nfs4_client %p\n", clp);
1866
	list_move(&clp->cl_idhash, &nn->conf_id_hashtbl[idhashval]);
1867
	rb_erase(&clp->cl_namenode, &nn->unconf_name_tree);
1868
	add_clp_to_name_tree(clp, &nn->conf_name_tree);
1869
	set_bit(NFSD4_CLIENT_CONFIRMED, &clp->cl_flags);
L
Linus Torvalds 已提交
1870 1871 1872 1873
	renew_client(clp);
}

static struct nfs4_client *
J
J. Bruce Fields 已提交
1874
find_client_in_id_table(struct list_head *tbl, clientid_t *clid, bool sessions)
L
Linus Torvalds 已提交
1875 1876 1877 1878
{
	struct nfs4_client *clp;
	unsigned int idhashval = clientid_hashval(clid->cl_id);

J
J. Bruce Fields 已提交
1879
	list_for_each_entry(clp, &tbl[idhashval], cl_idhash) {
1880
		if (same_clid(&clp->cl_clientid, clid)) {
1881 1882
			if ((bool)clp->cl_minorversion != sessions)
				return NULL;
1883
			renew_client(clp);
L
Linus Torvalds 已提交
1884
			return clp;
1885
		}
L
Linus Torvalds 已提交
1886 1887 1888 1889
	}
	return NULL;
}

J
J. Bruce Fields 已提交
1890 1891 1892 1893 1894 1895 1896 1897
static struct nfs4_client *
find_confirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
{
	struct list_head *tbl = nn->conf_id_hashtbl;

	return find_client_in_id_table(tbl, clid, sessions);
}

L
Linus Torvalds 已提交
1898
static struct nfs4_client *
1899
find_unconfirmed_client(clientid_t *clid, bool sessions, struct nfsd_net *nn)
L
Linus Torvalds 已提交
1900
{
J
J. Bruce Fields 已提交
1901
	struct list_head *tbl = nn->unconf_id_hashtbl;
L
Linus Torvalds 已提交
1902

J
J. Bruce Fields 已提交
1903
	return find_client_in_id_table(tbl, clid, sessions);
L
Linus Torvalds 已提交
1904 1905
}

1906
static bool clp_used_exchangeid(struct nfs4_client *clp)
1907
{
1908
	return clp->cl_exchange_flags != 0;
1909
} 
1910

1911
static struct nfs4_client *
1912
find_confirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1913
{
1914
	return find_clp_in_name_tree(name, &nn->conf_name_tree);
1915 1916 1917
}

static struct nfs4_client *
1918
find_unconfirmed_client_by_name(struct xdr_netobj *name, struct nfsd_net *nn)
1919
{
1920
	return find_clp_in_name_tree(name, &nn->unconf_name_tree);
1921 1922
}

1923
static void
1924
gen_callback(struct nfs4_client *clp, struct nfsd4_setclientid *se, struct svc_rqst *rqstp)
L
Linus Torvalds 已提交
1925
{
1926
	struct nfs4_cb_conn *conn = &clp->cl_cb_conn;
1927 1928
	struct sockaddr	*sa = svc_addr(rqstp);
	u32 scopeid = rpc_get_scope_id(sa);
1929 1930 1931 1932 1933 1934 1935 1936 1937 1938
	unsigned short expected_family;

	/* Currently, we only support tcp and tcp6 for the callback channel */
	if (se->se_callback_netid_len == 3 &&
	    !memcmp(se->se_callback_netid_val, "tcp", 3))
		expected_family = AF_INET;
	else if (se->se_callback_netid_len == 4 &&
		 !memcmp(se->se_callback_netid_val, "tcp6", 4))
		expected_family = AF_INET6;
	else
L
Linus Torvalds 已提交
1939 1940
		goto out_err;

1941
	conn->cb_addrlen = rpc_uaddr2sockaddr(clp->net, se->se_callback_addr_val,
1942
					    se->se_callback_addr_len,
1943 1944
					    (struct sockaddr *)&conn->cb_addr,
					    sizeof(conn->cb_addr));
1945

1946
	if (!conn->cb_addrlen || conn->cb_addr.ss_family != expected_family)
L
Linus Torvalds 已提交
1947
		goto out_err;
1948

1949 1950
	if (conn->cb_addr.ss_family == AF_INET6)
		((struct sockaddr_in6 *)&conn->cb_addr)->sin6_scope_id = scopeid;
1951

1952 1953
	conn->cb_prog = se->se_callback_prog;
	conn->cb_ident = se->se_callback_ident;
1954
	memcpy(&conn->cb_saddr, &rqstp->rq_daddr, rqstp->rq_daddrlen);
L
Linus Torvalds 已提交
1955 1956
	return;
out_err:
1957 1958
	conn->cb_addr.ss_family = AF_UNSPEC;
	conn->cb_addrlen = 0;
N
Neil Brown 已提交
1959
	dprintk(KERN_INFO "NFSD: this client (clientid %08x/%08x) "
L
Linus Torvalds 已提交
1960 1961 1962 1963 1964 1965
		"will not receive delegations\n",
		clp->cl_clientid.cl_boot, clp->cl_clientid.cl_id);

	return;
}

1966
/*
1967
 * Cache a reply. nfsd4_check_resp_size() has bounded the cache size.
1968
 */
1969
static void
1970 1971
nfsd4_store_cache_entry(struct nfsd4_compoundres *resp)
{
1972
	struct xdr_buf *buf = resp->xdr.buf;
1973 1974
	struct nfsd4_slot *slot = resp->cstate.slot;
	unsigned int base;
1975

1976
	dprintk("--> %s slot %p\n", __func__, slot);
1977

1978 1979
	slot->sl_opcnt = resp->opcnt;
	slot->sl_status = resp->cstate.status;
1980

1981
	slot->sl_flags |= NFSD4_SLOT_INITIALIZED;
1982
	if (nfsd4_not_cached(resp)) {
1983
		slot->sl_datalen = 0;
1984
		return;
1985
	}
1986 1987 1988
	base = resp->cstate.data_offset;
	slot->sl_datalen = buf->len - base;
	if (read_bytes_from_xdr_buf(buf, base, slot->sl_data, slot->sl_datalen))
1989 1990
		WARN("%s: sessions DRC could not cache compound\n", __func__);
	return;
1991 1992 1993
}

/*
1994 1995 1996 1997
 * Encode the replay sequence operation from the slot values.
 * If cachethis is FALSE encode the uncached rep error on the next
 * operation which sets resp->p and increments resp->opcnt for
 * nfs4svc_encode_compoundres.
1998 1999
 *
 */
2000 2001 2002
static __be32
nfsd4_enc_sequence_replay(struct nfsd4_compoundargs *args,
			  struct nfsd4_compoundres *resp)
2003
{
2004 2005
	struct nfsd4_op *op;
	struct nfsd4_slot *slot = resp->cstate.slot;
2006

2007 2008 2009
	/* Encode the replayed sequence operation */
	op = &args->ops[resp->opcnt - 1];
	nfsd4_encode_operation(resp, op);
2010

2011
	/* Return nfserr_retry_uncached_rep in next operation. */
2012
	if (args->opcnt > 1 && !(slot->sl_flags & NFSD4_SLOT_CACHETHIS)) {
2013 2014 2015
		op = &args->ops[resp->opcnt++];
		op->status = nfserr_retry_uncached_rep;
		nfsd4_encode_operation(resp, op);
2016
	}
2017
	return op->status;
2018 2019 2020
}

/*
2021 2022
 * The sequence operation is not cached because we can use the slot and
 * session values.
2023
 */
2024
static __be32
2025 2026
nfsd4_replay_cache_entry(struct nfsd4_compoundres *resp,
			 struct nfsd4_sequence *seq)
2027
{
2028
	struct nfsd4_slot *slot = resp->cstate.slot;
2029 2030
	struct xdr_stream *xdr = &resp->xdr;
	__be32 *p;
2031 2032
	__be32 status;

2033
	dprintk("--> %s slot %p\n", __func__, slot);
2034

2035
	status = nfsd4_enc_sequence_replay(resp->rqstp->rq_argp, resp);
2036
	if (status)
2037
		return status;
2038

2039 2040 2041 2042 2043 2044 2045
	p = xdr_reserve_space(xdr, slot->sl_datalen);
	if (!p) {
		WARN_ON_ONCE(1);
		return nfserr_serverfault;
	}
	xdr_encode_opaque_fixed(p, slot->sl_data, slot->sl_datalen);
	xdr_commit_encode(xdr);
2046

2047
	resp->opcnt = slot->sl_opcnt;
2048
	return slot->sl_status;
2049 2050
}

A
Andy Adamson 已提交
2051 2052 2053 2054 2055 2056 2057 2058 2059 2060 2061 2062 2063 2064 2065 2066
/*
 * Set the exchange_id flags returned by the server.
 */
static void
nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
{
	/* pNFS is not supported */
	new->cl_exchange_flags |= EXCHGID4_FLAG_USE_NON_PNFS;

	/* Referrals are supported, Migration is not. */
	new->cl_exchange_flags |= EXCHGID4_FLAG_SUPP_MOVED_REFER;

	/* set the wire flags to return to client. */
	clid->flags = new->cl_exchange_flags;
}

2067 2068 2069 2070 2071 2072 2073 2074
static bool client_has_state(struct nfs4_client *clp)
{
	/*
	 * Note clp->cl_openowners check isn't quite right: there's no
	 * need to count owners without stateid's.
	 *
	 * Also note we should probably be using this in 4.0 case too.
	 */
2075 2076 2077
	return !list_empty(&clp->cl_openowners)
		|| !list_empty(&clp->cl_delegations)
		|| !list_empty(&clp->cl_sessions);
2078 2079
}

A
Andy Adamson 已提交
2080 2081 2082 2083 2084
__be32
nfsd4_exchange_id(struct svc_rqst *rqstp,
		  struct nfsd4_compound_state *cstate,
		  struct nfsd4_exchange_id *exid)
{
A
Andy Adamson 已提交
2085
	struct nfs4_client *unconf, *conf, *new;
J
J. Bruce Fields 已提交
2086
	__be32 status;
2087
	char			addr_str[INET6_ADDRSTRLEN];
A
Andy Adamson 已提交
2088
	nfs4_verifier		verf = exid->verifier;
2089
	struct sockaddr		*sa = svc_addr(rqstp);
2090
	bool	update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
2091
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
A
Andy Adamson 已提交
2092

2093
	rpc_ntop(sa, addr_str, sizeof(addr_str));
A
Andy Adamson 已提交
2094
	dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
2095
		"ip_addr=%s flags %x, spa_how %d\n",
A
Andy Adamson 已提交
2096
		__func__, rqstp, exid, exid->clname.len, exid->clname.data,
2097
		addr_str, exid->flags, exid->spa_how);
A
Andy Adamson 已提交
2098

2099
	if (exid->flags & ~EXCHGID4_FLAG_MASK_A)
A
Andy Adamson 已提交
2100 2101 2102
		return nfserr_inval;

	switch (exid->spa_how) {
2103 2104 2105
	case SP4_MACH_CRED:
		if (!svc_rqst_integrity_protected(rqstp))
			return nfserr_inval;
A
Andy Adamson 已提交
2106 2107
	case SP4_NONE:
		break;
2108 2109
	default:				/* checked by xdr code */
		WARN_ON_ONCE(1);
A
Andy Adamson 已提交
2110
	case SP4_SSV:
2111
		return nfserr_encr_alg_unsupp;
A
Andy Adamson 已提交
2112 2113
	}

2114
	/* Cases below refer to rfc 5661 section 18.35.4: */
A
Andy Adamson 已提交
2115
	nfs4_lock_state();
2116
	conf = find_confirmed_client_by_name(&exid->clname, nn);
A
Andy Adamson 已提交
2117
	if (conf) {
2118 2119 2120
		bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
		bool verfs_match = same_verf(&verf, &conf->cl_verifier);

2121 2122
		if (update) {
			if (!clp_used_exchangeid(conf)) { /* buggy client */
2123
				status = nfserr_inval;
2124 2125
				goto out;
			}
2126 2127 2128 2129
			if (!mach_creds_match(conf, rqstp)) {
				status = nfserr_wrong_cred;
				goto out;
			}
2130
			if (!creds_match) { /* case 9 */
2131
				status = nfserr_perm;
2132 2133 2134
				goto out;
			}
			if (!verfs_match) { /* case 8 */
A
Andy Adamson 已提交
2135 2136 2137
				status = nfserr_not_same;
				goto out;
			}
2138 2139 2140 2141
			/* case 6 */
			exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
			new = conf;
			goto out_copy;
A
Andy Adamson 已提交
2142
		}
2143
		if (!creds_match) { /* case 3 */
2144 2145
			if (client_has_state(conf)) {
				status = nfserr_clid_inuse;
A
Andy Adamson 已提交
2146 2147 2148 2149 2150
				goto out;
			}
			expire_client(conf);
			goto out_new;
		}
2151
		if (verfs_match) { /* case 2 */
2152
			conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
2153 2154 2155 2156 2157
			new = conf;
			goto out_copy;
		}
		/* case 5, client reboot */
		goto out_new;
2158 2159
	}

2160
	if (update) { /* case 7 */
2161 2162
		status = nfserr_noent;
		goto out;
A
Andy Adamson 已提交
2163 2164
	}

2165
	unconf  = find_unconfirmed_client_by_name(&exid->clname, nn);
2166
	if (unconf) /* case 4, possible retry or client restart */
A
Andy Adamson 已提交
2167 2168
		expire_client(unconf);

2169
	/* case 1 (normal case) */
A
Andy Adamson 已提交
2170
out_new:
J
Jeff Layton 已提交
2171
	new = create_client(exid->clname, rqstp, &verf);
A
Andy Adamson 已提交
2172
	if (new == NULL) {
2173
		status = nfserr_jukebox;
A
Andy Adamson 已提交
2174 2175
		goto out;
	}
2176
	new->cl_minorversion = cstate->minorversion;
2177
	new->cl_mach_cred = (exid->spa_how == SP4_MACH_CRED);
A
Andy Adamson 已提交
2178

2179
	gen_clid(new, nn);
2180
	add_to_unconfirmed(new);
A
Andy Adamson 已提交
2181 2182 2183 2184
out_copy:
	exid->clientid.cl_boot = new->cl_clientid.cl_boot;
	exid->clientid.cl_id = new->cl_clientid.cl_id;

2185
	exid->seqid = new->cl_cs_slot.sl_seqid + 1;
A
Andy Adamson 已提交
2186 2187 2188
	nfsd4_set_ex_flags(new, exid);

	dprintk("nfsd4_exchange_id seqid %d flags %x\n",
2189
		new->cl_cs_slot.sl_seqid, new->cl_exchange_flags);
A
Andy Adamson 已提交
2190 2191 2192 2193 2194
	status = nfs_ok;

out:
	nfs4_unlock_state();
	return status;
A
Andy Adamson 已提交
2195 2196
}

J
J. Bruce Fields 已提交
2197
static __be32
2198
check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
B
Benny Halevy 已提交
2199
{
2200 2201
	dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
		slot_seqid);
B
Benny Halevy 已提交
2202 2203

	/* The slot is in use, and no response has been sent. */
2204 2205
	if (slot_inuse) {
		if (seqid == slot_seqid)
B
Benny Halevy 已提交
2206 2207 2208 2209
			return nfserr_jukebox;
		else
			return nfserr_seq_misordered;
	}
2210
	/* Note unsigned 32-bit arithmetic handles wraparound: */
2211
	if (likely(seqid == slot_seqid + 1))
B
Benny Halevy 已提交
2212
		return nfs_ok;
2213
	if (seqid == slot_seqid)
B
Benny Halevy 已提交
2214 2215 2216 2217
		return nfserr_replay_cache;
	return nfserr_seq_misordered;
}

2218 2219 2220 2221 2222 2223 2224
/*
 * Cache the create session result into the create session single DRC
 * slot cache by saving the xdr structure. sl_seqid has been set.
 * Do this for solo or embedded create session operations.
 */
static void
nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
			   struct nfsd4_clid_slot *slot, __be32 nfserr)
{
	slot->sl_status = nfserr;
	memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
}

/* Replay a cached CREATE_SESSION reply from the clientid slot. */
static __be32
nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
			    struct nfsd4_clid_slot *slot)
{
	memcpy(cr_ses, &slot->sl_cr_ses, sizeof(*cr_ses));
	return slot->sl_status;
}

2239 2240 2241 2242 2243 2244 2245 2246 2247 2248 2249 2250 2251 2252 2253 2254 2255
/* Minimum XDR sizes (in bytes) of a compound containing a SEQUENCE op. */
#define NFSD_MIN_REQ_HDR_SEQ_SZ	((\
			2 * 2 + /* credential,verifier: AUTH_NULL, length 0 */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* version, opcount, opcode */ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, cache */ \
			4 ) * sizeof(__be32))

#define NFSD_MIN_RESP_HDR_SEQ_SZ ((\
			2 +	/* verifier: AUTH_NULL, length 0 */\
			1 +	/* status */ \
			1 +	/* MIN tag is length with zero, only length */ \
			3 +	/* opcount, opcode, opstatus*/ \
			XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
				/* seqid, slotID, slotID, slotID, status */ \
			5 ) * sizeof(__be32))

2256
static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
2257
{
2258 2259
	u32 maxrpc = nn->nfsd_serv->sv_max_mesg;

2260 2261 2262 2263
	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
		return nfserr_toosmall;
	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
		return nfserr_toosmall;
2264 2265 2266 2267 2268 2269 2270 2271 2272 2273 2274 2275 2276 2277 2278 2279 2280 2281 2282
	ca->headerpadsz = 0;
	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
			NFSD_SLOT_CACHE_SIZE + NFSD_MIN_HDR_SEQ_SZ);
	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
	/*
	 * Note decreasing slot size below client's request may make it
	 * difficult for client to function correctly, whereas
	 * decreasing the number of slots will (just?) affect
	 * performance.  When short on memory we therefore prefer to
	 * decrease number of slots instead of their size.  Clients that
	 * request larger slots than they need will get poor results:
	 */
	ca->maxreqs = nfsd4_get_drc_mem(ca);
	if (!ca->maxreqs)
		return nfserr_jukebox;

2283
	return nfs_ok;
2284 2285
}

2286 2287 2288 2289 2290
#define NFSD_CB_MAX_REQ_SZ	((NFS4_enc_cb_recall_sz + \
				 RPC_MAX_HEADER_WITH_AUTH) * sizeof(__be32))
#define NFSD_CB_MAX_RESP_SZ	((NFS4_dec_cb_recall_sz + \
				 RPC_MAX_REPHEADER_WITH_AUTH) * sizeof(__be32))

2291
static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
2292
{
2293 2294 2295 2296 2297 2298 2299 2300
	ca->headerpadsz = 0;

	/*
	 * These RPC_MAX_HEADER macros are overkill, especially since we
	 * don't even do gss on the backchannel yet.  But this is still
	 * less than 1k.  Tighten up this estimate in the unlikely event
	 * it turns out to be a problem for some client:
	 */
2301
	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
2302
		return nfserr_toosmall;
2303
	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
2304 2305 2306 2307 2308 2309
		return nfserr_toosmall;
	ca->maxresp_cached = 0;
	if (ca->maxops < 2)
		return nfserr_toosmall;

	return nfs_ok;
2310 2311
}

2312 2313 2314 2315 2316 2317 2318 2319 2320 2321 2322 2323 2324 2325 2326 2327 2328 2329
/* Validate the callback security parms; only AUTH_NULL/AUTH_UNIX work. */
static __be32 nfsd4_check_cb_sec(struct nfsd4_cb_sec *cbs)
{
	switch (cbs->flavor) {
	case RPC_AUTH_NULL:
	case RPC_AUTH_UNIX:
		return nfs_ok;
	default:
		/*
		 * GSS case: the spec doesn't allow us to return this
		 * error.  But it also doesn't allow us not to support
		 * GSS.
		 * I'd rather this fail hard than return some error the
		 * client might think it can already handle:
		 */
		return nfserr_encr_alg_unsupp;
	}
}

A
Andy Adamson 已提交
2330 2331 2332 2333 2334
__be32
nfsd4_create_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_create_session *cr_ses)
{
2335
	struct sockaddr *sa = svc_addr(rqstp);
A
Andy Adamson 已提交
2336
	struct nfs4_client *conf, *unconf;
2337
	struct nfsd4_session *new;
2338
	struct nfsd4_conn *conn;
2339
	struct nfsd4_clid_slot *cs_slot = NULL;
J
J. Bruce Fields 已提交
2340
	__be32 status = 0;
2341
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
A
Andy Adamson 已提交
2342

2343 2344
	if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
		return nfserr_inval;
2345 2346 2347
	status = nfsd4_check_cb_sec(&cr_ses->cb_sec);
	if (status)
		return status;
2348
	status = check_forechannel_attrs(&cr_ses->fore_channel, nn);
2349 2350 2351
	if (status)
		return status;
	status = check_backchannel_attrs(&cr_ses->back_channel);
2352
	if (status)
2353
		goto out_release_drc_mem;
2354
	status = nfserr_jukebox;
2355
	new = alloc_session(&cr_ses->fore_channel, &cr_ses->back_channel);
2356 2357
	if (!new)
		goto out_release_drc_mem;
2358 2359 2360
	conn = alloc_conn_from_crses(rqstp, cr_ses);
	if (!conn)
		goto out_free_session;
2361

A
Andy Adamson 已提交
2362
	nfs4_lock_state();
2363
	unconf = find_unconfirmed_client(&cr_ses->clientid, true, nn);
2364
	conf = find_confirmed_client(&cr_ses->clientid, true, nn);
2365
	WARN_ON_ONCE(conf && unconf);
A
Andy Adamson 已提交
2366 2367

	if (conf) {
2368 2369 2370
		status = nfserr_wrong_cred;
		if (!mach_creds_match(conf, rqstp))
			goto out_free_conn;
2371 2372
		cs_slot = &conf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2373
		if (status == nfserr_replay_cache) {
2374
			status = nfsd4_replay_create_session(cr_ses, cs_slot);
2375
			goto out_free_conn;
2376
		} else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
A
Andy Adamson 已提交
2377
			status = nfserr_seq_misordered;
2378
			goto out_free_conn;
A
Andy Adamson 已提交
2379 2380
		}
	} else if (unconf) {
J
J. Bruce Fields 已提交
2381
		struct nfs4_client *old;
A
Andy Adamson 已提交
2382
		if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred) ||
2383
		    !rpc_cmp_addr(sa, (struct sockaddr *) &unconf->cl_addr)) {
A
Andy Adamson 已提交
2384
			status = nfserr_clid_inuse;
2385
			goto out_free_conn;
A
Andy Adamson 已提交
2386
		}
2387 2388 2389
		status = nfserr_wrong_cred;
		if (!mach_creds_match(unconf, rqstp))
			goto out_free_conn;
2390 2391
		cs_slot = &unconf->cl_cs_slot;
		status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
2392 2393
		if (status) {
			/* an unconfirmed replay returns misordered */
A
Andy Adamson 已提交
2394
			status = nfserr_seq_misordered;
2395
			goto out_free_conn;
A
Andy Adamson 已提交
2396
		}
2397
		old = find_confirmed_client_by_name(&unconf->cl_name, nn);
2398 2399 2400 2401
		if (old) {
			status = mark_client_expired(old);
			if (status)
				goto out_free_conn;
J
J. Bruce Fields 已提交
2402
			expire_client(old);
2403
		}
J
J. Bruce Fields 已提交
2404
		move_to_confirmed(unconf);
A
Andy Adamson 已提交
2405 2406 2407
		conf = unconf;
	} else {
		status = nfserr_stale_clientid;
2408
		goto out_free_conn;
A
Andy Adamson 已提交
2409
	}
2410
	status = nfs_ok;
2411 2412 2413 2414 2415 2416
	/*
	 * We do not support RDMA or persistent sessions
	 */
	cr_ses->flags &= ~SESSION4_PERSIST;
	cr_ses->flags &= ~SESSION4_RDMA;

2417 2418 2419
	init_session(rqstp, new, conf, cr_ses);
	nfsd4_init_conn(rqstp, conn, new);

2420
	memcpy(cr_ses->sessionid.data, new->se_sessionid.data,
A
Andy Adamson 已提交
2421
	       NFS4_MAX_SESSIONID_LEN);
2422
	cs_slot->sl_seqid++;
2423
	cr_ses->seqid = cs_slot->sl_seqid;
A
Andy Adamson 已提交
2424

2425 2426
	/* cache solo and embedded create sessions under the state lock */
	nfsd4_cache_create_session(cr_ses, cs_slot, status);
A
Andy Adamson 已提交
2427 2428
	nfs4_unlock_state();
	return status;
2429
out_free_conn:
2430
	nfs4_unlock_state();
2431 2432 2433
	free_conn(conn);
out_free_session:
	__free_session(new);
2434 2435
out_release_drc_mem:
	nfsd4_put_drc_mem(&cr_ses->fore_channel);
J
J. Bruce Fields 已提交
2436
	return status;
A
Andy Adamson 已提交
2437 2438
}

2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452
/* Normalize a BIND_CONN_TO_SESSION direction; *_OR_BOTH maps to BOTH. */
static __be32 nfsd4_map_bcts_dir(u32 *dir)
{
	switch (*dir) {
	case NFS4_CDFC4_FORE:
	case NFS4_CDFC4_BACK:
		return nfs_ok;
	case NFS4_CDFC4_FORE_OR_BOTH:
	case NFS4_CDFC4_BACK_OR_BOTH:
		*dir = NFS4_CDFC4_BOTH;
		return nfs_ok;
	}	/* dropped stray ';' that followed the switch */
	return nfserr_inval;
}

2453 2454 2455
__be32 nfsd4_backchannel_ctl(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_backchannel_ctl *bc)
{
	struct nfsd4_session *session = cstate->session;
2456
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2457
	__be32 status;
2458

2459 2460 2461
	status = nfsd4_check_cb_sec(&bc->bc_cb_sec);
	if (status)
		return status;
2462
	spin_lock(&nn->client_lock);
2463 2464
	session->se_cb_prog = bc->bc_cb_program;
	session->se_cb_sec = bc->bc_cb_sec;
2465
	spin_unlock(&nn->client_lock);
2466 2467 2468 2469 2470 2471

	nfsd4_probe_callback(session->se_client);

	return nfs_ok;
}

2472 2473 2474 2475 2476
__be32 nfsd4_bind_conn_to_session(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_bind_conn_to_session *bcts)
{
	__be32 status;
2477
	struct nfsd4_conn *conn;
2478
	struct nfsd4_session *session;
2479 2480
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
2481 2482 2483

	if (!nfsd4_last_compound_op(rqstp))
		return nfserr_not_only_op;
2484
	nfs4_lock_state();
2485
	spin_lock(&nn->client_lock);
2486
	session = find_in_sessionid_hashtbl(&bcts->sessionid, net, &status);
2487
	spin_unlock(&nn->client_lock);
2488
	if (!session)
2489
		goto out_no_session;
2490 2491 2492
	status = nfserr_wrong_cred;
	if (!mach_creds_match(session->se_client, rqstp))
		goto out;
2493
	status = nfsd4_map_bcts_dir(&bcts->dir);
2494
	if (status)
2495
		goto out;
2496
	conn = alloc_conn(rqstp, bcts->dir);
2497
	status = nfserr_jukebox;
2498
	if (!conn)
2499 2500 2501 2502
		goto out;
	nfsd4_init_conn(rqstp, conn, session);
	status = nfs_ok;
out:
2503 2504
	nfsd4_put_session(session);
out_no_session:
2505 2506
	nfs4_unlock_state();
	return status;
2507 2508
}

2509 2510 2511 2512 2513 2514 2515
static bool nfsd4_compound_in_session(struct nfsd4_session *session, struct nfs4_sessionid *sid)
{
	if (!session)
		return 0;
	return !memcmp(sid, &session->se_sessionid, sizeof(*sid));
}

A
Andy Adamson 已提交
2516 2517 2518 2519 2520
__be32
nfsd4_destroy_session(struct svc_rqst *r,
		      struct nfsd4_compound_state *cstate,
		      struct nfsd4_destroy_session *sessionid)
{
B
Benny Halevy 已提交
2521
	struct nfsd4_session *ses;
2522
	__be32 status;
2523
	int ref_held_by_me = 0;
2524 2525
	struct net *net = SVC_NET(r);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
B
Benny Halevy 已提交
2526

2527 2528
	nfs4_lock_state();
	status = nfserr_not_only_op;
2529
	if (nfsd4_compound_in_session(cstate->session, &sessionid->sessionid)) {
2530
		if (!nfsd4_last_compound_op(r))
2531
			goto out;
2532
		ref_held_by_me++;
2533
	}
B
Benny Halevy 已提交
2534
	dump_sessionid(__func__, &sessionid->sessionid);
2535
	spin_lock(&nn->client_lock);
2536
	ses = find_in_sessionid_hashtbl(&sessionid->sessionid, net, &status);
2537 2538
	if (!ses)
		goto out_client_lock;
2539 2540
	status = nfserr_wrong_cred;
	if (!mach_creds_match(ses->se_client, r))
2541
		goto out_put_session;
2542
	status = mark_session_dead_locked(ses, 1 + ref_held_by_me);
2543
	if (status)
2544
		goto out_put_session;
B
Benny Halevy 已提交
2545
	unhash_session(ses);
2546
	spin_unlock(&nn->client_lock);
B
Benny Halevy 已提交
2547

2548
	nfsd4_probe_callback_sync(ses->se_client);
2549

2550
	spin_lock(&nn->client_lock);
B
Benny Halevy 已提交
2551
	status = nfs_ok;
2552
out_put_session:
2553
	nfsd4_put_session_locked(ses);
2554 2555
out_client_lock:
	spin_unlock(&nn->client_lock);
B
Benny Halevy 已提交
2556
out:
2557
	nfs4_unlock_state();
B
Benny Halevy 已提交
2558
	return status;
A
Andy Adamson 已提交
2559 2560
}

2561
static struct nfsd4_conn *__nfsd4_find_conn(struct svc_xprt *xpt, struct nfsd4_session *s)
2562 2563 2564 2565
{
	struct nfsd4_conn *c;

	list_for_each_entry(c, &s->se_conns, cn_persession) {
2566
		if (c->cn_xprt == xpt) {
2567 2568 2569 2570 2571 2572
			return c;
		}
	}
	return NULL;
}

2573
static __be32 nfsd4_sequence_check_conn(struct nfsd4_conn *new, struct nfsd4_session *ses)
2574 2575
{
	struct nfs4_client *clp = ses->se_client;
2576
	struct nfsd4_conn *c;
2577
	__be32 status = nfs_ok;
2578
	int ret;
2579 2580

	spin_lock(&clp->cl_lock);
2581
	c = __nfsd4_find_conn(new->cn_xprt, ses);
2582 2583 2584 2585 2586
	if (c)
		goto out_free;
	status = nfserr_conn_not_bound_to_session;
	if (clp->cl_mach_cred)
		goto out_free;
2587 2588
	__nfsd4_hash_conn(new, ses);
	spin_unlock(&clp->cl_lock);
2589 2590 2591 2592
	ret = nfsd4_register_conn(new);
	if (ret)
		/* oops; xprt is already down: */
		nfsd4_conn_lost(&new->cn_xpt_user);
2593 2594 2595 2596 2597
	return nfs_ok;
out_free:
	spin_unlock(&clp->cl_lock);
	free_conn(new);
	return status;
2598 2599
}

2600 2601 2602 2603 2604 2605 2606
/* Does the compound exceed the session's negotiated maxops? */
static bool nfsd4_session_too_many_ops(struct svc_rqst *rqstp, struct nfsd4_session *session)
{
	struct nfsd4_compoundargs *args = rqstp->rq_argp;

	return args->opcnt > session->se_fchannel.maxops;
}

M
Mi Jinlong 已提交
2607 2608 2609 2610 2611 2612 2613 2614
/* Does the request exceed the session's negotiated maxreq_sz? */
static bool nfsd4_request_too_big(struct svc_rqst *rqstp,
				  struct nfsd4_session *session)
{
	struct xdr_buf *xb = &rqstp->rq_arg;

	return xb->len > session->se_fchannel.maxreq_sz;
}

A
Andy Adamson 已提交
2615
__be32
B
Benny Halevy 已提交
2616
nfsd4_sequence(struct svc_rqst *rqstp,
A
Andy Adamson 已提交
2617 2618 2619
	       struct nfsd4_compound_state *cstate,
	       struct nfsd4_sequence *seq)
{
2620
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
2621
	struct xdr_stream *xdr = &resp->xdr;
B
Benny Halevy 已提交
2622
	struct nfsd4_session *session;
2623
	struct nfs4_client *clp;
B
Benny Halevy 已提交
2624
	struct nfsd4_slot *slot;
2625
	struct nfsd4_conn *conn;
J
J. Bruce Fields 已提交
2626
	__be32 status;
2627
	int buflen;
2628 2629
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
B
Benny Halevy 已提交
2630

2631 2632 2633
	if (resp->opcnt != 1)
		return nfserr_sequence_pos;

2634 2635 2636 2637 2638 2639 2640 2641
	/*
	 * Will be either used or freed by nfsd4_sequence_check_conn
	 * below.
	 */
	conn = alloc_conn(rqstp, NFS4_CDFC4_FORE);
	if (!conn)
		return nfserr_jukebox;

2642
	spin_lock(&nn->client_lock);
2643
	session = find_in_sessionid_hashtbl(&seq->sessionid, net, &status);
B
Benny Halevy 已提交
2644
	if (!session)
2645 2646
		goto out_no_session;
	clp = session->se_client;
B
Benny Halevy 已提交
2647

2648 2649
	status = nfserr_too_many_ops;
	if (nfsd4_session_too_many_ops(rqstp, session))
2650
		goto out_put_session;
2651

M
Mi Jinlong 已提交
2652 2653
	status = nfserr_req_too_big;
	if (nfsd4_request_too_big(rqstp, session))
2654
		goto out_put_session;
M
Mi Jinlong 已提交
2655

B
Benny Halevy 已提交
2656
	status = nfserr_badslot;
2657
	if (seq->slotid >= session->se_fchannel.maxreqs)
2658
		goto out_put_session;
B
Benny Halevy 已提交
2659

2660
	slot = session->se_slots[seq->slotid];
B
Benny Halevy 已提交
2661 2662
	dprintk("%s: slotid %d\n", __func__, seq->slotid);

2663 2664 2665 2666 2667
	/* We do not negotiate the number of slots yet, so set the
	 * maxslots to the session maxreqs which is used to encode
	 * sr_highest_slotid and the sr_target_slot id to maxslots */
	seq->maxslots = session->se_fchannel.maxreqs;

2668 2669
	status = check_slot_seqid(seq->seqid, slot->sl_seqid,
					slot->sl_flags & NFSD4_SLOT_INUSE);
B
Benny Halevy 已提交
2670
	if (status == nfserr_replay_cache) {
2671 2672
		status = nfserr_seq_misordered;
		if (!(slot->sl_flags & NFSD4_SLOT_INITIALIZED))
2673
			goto out_put_session;
B
Benny Halevy 已提交
2674 2675
		cstate->slot = slot;
		cstate->session = session;
2676
		cstate->clp = clp;
A
Andy Adamson 已提交
2677
		/* Return the cached reply status and set cstate->status
2678
		 * for nfsd4_proc_compound processing */
2679
		status = nfsd4_replay_cache_entry(resp, seq);
A
Andy Adamson 已提交
2680
		cstate->status = nfserr_replay_cache;
2681
		goto out;
B
Benny Halevy 已提交
2682 2683
	}
	if (status)
2684
		goto out_put_session;
B
Benny Halevy 已提交
2685

2686
	status = nfsd4_sequence_check_conn(conn, session);
2687
	conn = NULL;
2688 2689
	if (status)
		goto out_put_session;
2690

2691 2692 2693 2694 2695
	buflen = (seq->cachethis) ?
			session->se_fchannel.maxresp_cached :
			session->se_fchannel.maxresp_sz;
	status = (seq->cachethis) ? nfserr_rep_too_big_to_cache :
				    nfserr_rep_too_big;
2696
	if (xdr_restrict_buflen(xdr, buflen - rqstp->rq_auth_slack))
2697
		goto out_put_session;
2698
	svc_reserve(rqstp, buflen);
2699 2700

	status = nfs_ok;
B
Benny Halevy 已提交
2701 2702
	/* Success! bump slot seqid */
	slot->sl_seqid = seq->seqid;
2703
	slot->sl_flags |= NFSD4_SLOT_INUSE;
2704 2705
	if (seq->cachethis)
		slot->sl_flags |= NFSD4_SLOT_CACHETHIS;
2706 2707
	else
		slot->sl_flags &= ~NFSD4_SLOT_CACHETHIS;
B
Benny Halevy 已提交
2708 2709 2710

	cstate->slot = slot;
	cstate->session = session;
2711
	cstate->clp = clp;
B
Benny Halevy 已提交
2712 2713

out:
2714 2715 2716 2717 2718 2719 2720 2721 2722
	switch (clp->cl_cb_state) {
	case NFSD4_CB_DOWN:
		seq->status_flags = SEQ4_STATUS_CB_PATH_DOWN;
		break;
	case NFSD4_CB_FAULT:
		seq->status_flags = SEQ4_STATUS_BACKCHANNEL_FAULT;
		break;
	default:
		seq->status_flags = 0;
2723
	}
2724 2725
	if (!list_empty(&clp->cl_revoked))
		seq->status_flags |= SEQ4_STATUS_RECALLABLE_STATE_REVOKED;
2726
out_no_session:
2727 2728
	if (conn)
		free_conn(conn);
2729
	spin_unlock(&nn->client_lock);
B
Benny Halevy 已提交
2730
	return status;
2731
out_put_session:
2732
	nfsd4_put_session_locked(session);
2733
	goto out_no_session;
A
Andy Adamson 已提交
2734 2735
}

2736 2737 2738 2739 2740 2741 2742 2743 2744 2745
/*
 * Compound post-processing: cache the reply in the slot (unless this
 * was itself a replay) and drop the references nfsd4_sequence() /
 * lookup_clientid took.
 */
void
nfsd4_sequence_done(struct nfsd4_compoundres *resp)
{
	struct nfsd4_compound_state *cs = &resp->cstate;

	if (nfsd4_has_session(cs)) {
		if (cs->status != nfserr_replay_cache) {
			nfsd4_store_cache_entry(resp);
			cs->slot->sl_flags &= ~NFSD4_SLOT_INUSE;
		}
		/* Drop session reference that was taken in nfsd4_sequence() */
		nfsd4_put_session(cs->session);
	} else if (cs->clp)
		put_client_renew(cs->clp);
}

2752 2753 2754 2755
__be32
nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
{
	struct nfs4_client *conf, *unconf, *clp;
J
J. Bruce Fields 已提交
2756
	__be32 status = 0;
2757
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
2758 2759

	nfs4_lock_state();
2760
	unconf = find_unconfirmed_client(&dc->clientid, true, nn);
2761
	conf = find_confirmed_client(&dc->clientid, true, nn);
2762
	WARN_ON_ONCE(conf && unconf);
2763 2764 2765 2766

	if (conf) {
		clp = conf;

2767
		if (client_has_state(conf)) {
2768 2769 2770 2771 2772 2773 2774 2775 2776
			status = nfserr_clientid_busy;
			goto out;
		}
	} else if (unconf)
		clp = unconf;
	else {
		status = nfserr_stale_clientid;
		goto out;
	}
2777 2778 2779 2780
	if (!mach_creds_match(clp, rqstp)) {
		status = nfserr_wrong_cred;
		goto out;
	}
2781 2782 2783 2784 2785 2786
	expire_client(clp);
out:
	nfs4_unlock_state();
	return status;
}

2787 2788 2789
__be32
nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
{
J
J. Bruce Fields 已提交
2790
	__be32 status = 0;
2791

2792 2793 2794 2795 2796 2797 2798 2799 2800
	if (rc->rca_one_fs) {
		if (!cstate->current_fh.fh_dentry)
			return nfserr_nofilehandle;
		/*
		 * We don't take advantage of the rca_one_fs case.
		 * That's OK, it's optional, we can safely ignore it.
		 */
		 return nfs_ok;
	}
2801

2802
	nfs4_lock_state();
2803
	status = nfserr_complete_already;
2804 2805
	if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
			     &cstate->session->se_client->cl_flags))
2806 2807 2808 2809
		goto out;

	status = nfserr_stale_clientid;
	if (is_client_expired(cstate->session->se_client))
2810 2811 2812 2813 2814 2815 2816
		/*
		 * The following error isn't really legal.
		 * But we only get here if the client just explicitly
		 * destroyed the client.  Surely it no longer cares what
		 * error it gets back on an operation for the dead
		 * client.
		 */
2817 2818 2819
		goto out;

	status = nfs_ok;
2820
	nfsd4_client_record_create(cstate->session->se_client);
2821
out:
2822
	nfs4_unlock_state();
2823
	return status;
2824 2825
}

2826
__be32
2827 2828
nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_setclientid *setclid)
L
Linus Torvalds 已提交
2829
{
2830
	struct xdr_netobj 	clname = setclid->se_name;
L
Linus Torvalds 已提交
2831
	nfs4_verifier		clverifier = setclid->se_verf;
2832
	struct nfs4_client	*conf, *unconf, *new;
2833
	__be32 			status;
2834 2835
	struct nfsd_net		*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

2836
	/* Cases below refer to rfc 3530 section 14.2.33: */
L
Linus Torvalds 已提交
2837
	nfs4_lock_state();
2838
	conf = find_confirmed_client_by_name(&clname, nn);
2839
	if (conf) {
2840
		/* case 0: */
L
Linus Torvalds 已提交
2841
		status = nfserr_clid_inuse;
2842 2843
		if (clp_used_exchangeid(conf))
			goto out;
2844
		if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) {
2845 2846 2847 2848 2849
			char addr_str[INET6_ADDRSTRLEN];
			rpc_ntop((struct sockaddr *) &conf->cl_addr, addr_str,
				 sizeof(addr_str));
			dprintk("NFSD: setclientid: string in use by client "
				"at %s\n", addr_str);
L
Linus Torvalds 已提交
2850 2851 2852
			goto out;
		}
	}
2853
	unconf = find_unconfirmed_client_by_name(&clname, nn);
2854 2855
	if (unconf)
		expire_client(unconf);
2856
	status = nfserr_jukebox;
J
Jeff Layton 已提交
2857
	new = create_client(clname, rqstp, &clverifier);
2858 2859
	if (new == NULL)
		goto out;
2860
	if (conf && same_verf(&conf->cl_verifier, &clverifier))
2861
		/* case 1: probable callback update */
L
Linus Torvalds 已提交
2862
		copy_clid(new, conf);
2863
	else /* case 4 (new client) or cases 2, 3 (client reboot): */
2864
		gen_clid(new, nn);
2865
	new->cl_minorversion = 0;
2866
	gen_callback(new, setclid, rqstp);
2867
	add_to_unconfirmed(new);
L
Linus Torvalds 已提交
2868 2869 2870 2871 2872 2873 2874 2875 2876 2877
	setclid->se_clientid.cl_boot = new->cl_clientid.cl_boot;
	setclid->se_clientid.cl_id = new->cl_clientid.cl_id;
	memcpy(setclid->se_confirm.data, new->cl_confirm.data, sizeof(setclid->se_confirm.data));
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}


2878
__be32
2879 2880 2881
nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
			 struct nfsd4_compound_state *cstate,
			 struct nfsd4_setclientid_confirm *setclientid_confirm)
L
Linus Torvalds 已提交
2882
{
2883
	struct nfs4_client *conf, *unconf;
L
Linus Torvalds 已提交
2884 2885
	nfs4_verifier confirm = setclientid_confirm->sc_confirm; 
	clientid_t * clid = &setclientid_confirm->sc_clientid;
2886
	__be32 status;
2887
	struct nfsd_net	*nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
L
Linus Torvalds 已提交
2888

2889
	if (STALE_CLIENTID(clid, nn))
L
Linus Torvalds 已提交
2890 2891
		return nfserr_stale_clientid;
	nfs4_lock_state();
2892

2893
	conf = find_confirmed_client(clid, false, nn);
2894
	unconf = find_unconfirmed_client(clid, false, nn);
2895
	/*
2896 2897 2898 2899
	 * We try hard to give out unique clientid's, so if we get an
	 * attempt to confirm the same clientid with a different cred,
	 * there's a bug somewhere.  Let's charitably assume it's our
	 * bug.
2900
	 */
2901 2902 2903 2904 2905
	status = nfserr_serverfault;
	if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
		goto out;
	if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
		goto out;
2906
	/* cases below refer to rfc 3530 section 14.2.34: */
2907 2908
	if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
		if (conf && !unconf) /* case 2: probable retransmit */
L
Linus Torvalds 已提交
2909
			status = nfs_ok;
2910 2911 2912 2913 2914 2915
		else /* case 4: client hasn't noticed we rebooted yet? */
			status = nfserr_stale_clientid;
		goto out;
	}
	status = nfs_ok;
	if (conf) { /* case 1: callback update */
2916 2917 2918
		nfsd4_change_callback(conf, &unconf->cl_cb_conn);
		nfsd4_probe_callback(conf);
		expire_client(unconf);
2919
	} else { /* case 3: normal case; new or rebooted client */
2920
		conf = find_confirmed_client_by_name(&unconf->cl_name, nn);
2921 2922 2923 2924
		if (conf) {
			status = mark_client_expired(conf);
			if (status)
				goto out;
2925
			expire_client(conf);
2926
		}
2927
		move_to_confirmed(unconf);
2928
		nfsd4_probe_callback(unconf);
2929
	}
L
Linus Torvalds 已提交
2930 2931 2932 2933 2934
out:
	nfs4_unlock_state();
	return status;
}

2935 2936 2937 2938 2939
/* Allocate an (uninitialized) nfs4_file from its dedicated slab cache. */
static struct nfs4_file *nfsd4_alloc_file(void)
{
	struct nfs4_file *fp;

	fp = kmem_cache_alloc(file_slab, GFP_KERNEL);
	return fp;
}

L
Linus Torvalds 已提交
2940
/* OPEN Share state helper functions */
2941
static void nfsd4_init_file(struct nfs4_file *fp, struct knfsd_fh *fh)
L
Linus Torvalds 已提交
2942
{
2943
	unsigned int hashval = file_hashval(fh);
L
Linus Torvalds 已提交
2944

2945 2946
	lockdep_assert_held(&state_lock);

2947
	atomic_set(&fp->fi_ref, 1);
2948
	spin_lock_init(&fp->fi_lock);
2949 2950
	INIT_LIST_HEAD(&fp->fi_stateids);
	INIT_LIST_HEAD(&fp->fi_delegations);
2951
	fh_copy_shallow(&fp->fi_fhandle, fh);
2952 2953
	fp->fi_had_conflict = false;
	fp->fi_lease = NULL;
2954
	fp->fi_share_deny = 0;
2955 2956
	memset(fp->fi_fds, 0, sizeof(fp->fi_fds));
	memset(fp->fi_access, 0, sizeof(fp->fi_access));
2957
	hlist_add_head(&fp->fi_hash, &file_hashtbl[hashval]);
L
Linus Torvalds 已提交
2958 2959
}

2960
void
L
Linus Torvalds 已提交
2961 2962
nfsd4_free_slabs(void)
{
C
Christoph Hellwig 已提交
2963 2964 2965 2966 2967
	kmem_cache_destroy(openowner_slab);
	kmem_cache_destroy(lockowner_slab);
	kmem_cache_destroy(file_slab);
	kmem_cache_destroy(stateid_slab);
	kmem_cache_destroy(deleg_slab);
N
NeilBrown 已提交
2968
}
L
Linus Torvalds 已提交
2969

2970
int
N
NeilBrown 已提交
2971 2972
nfsd4_init_slabs(void)
{
2973 2974 2975
	openowner_slab = kmem_cache_create("nfsd4_openowners",
			sizeof(struct nfs4_openowner), 0, 0, NULL);
	if (openowner_slab == NULL)
C
Christoph Hellwig 已提交
2976
		goto out;
2977
	lockowner_slab = kmem_cache_create("nfsd4_lockowners",
2978
			sizeof(struct nfs4_lockowner), 0, 0, NULL);
2979
	if (lockowner_slab == NULL)
C
Christoph Hellwig 已提交
2980
		goto out_free_openowner_slab;
N
NeilBrown 已提交
2981
	file_slab = kmem_cache_create("nfsd4_files",
2982
			sizeof(struct nfs4_file), 0, 0, NULL);
N
NeilBrown 已提交
2983
	if (file_slab == NULL)
C
Christoph Hellwig 已提交
2984
		goto out_free_lockowner_slab;
N
NeilBrown 已提交
2985
	stateid_slab = kmem_cache_create("nfsd4_stateids",
2986
			sizeof(struct nfs4_ol_stateid), 0, 0, NULL);
N
NeilBrown 已提交
2987
	if (stateid_slab == NULL)
C
Christoph Hellwig 已提交
2988
		goto out_free_file_slab;
N
NeilBrown 已提交
2989
	deleg_slab = kmem_cache_create("nfsd4_delegations",
2990
			sizeof(struct nfs4_delegation), 0, 0, NULL);
N
NeilBrown 已提交
2991
	if (deleg_slab == NULL)
C
Christoph Hellwig 已提交
2992
		goto out_free_stateid_slab;
N
NeilBrown 已提交
2993
	return 0;
C
Christoph Hellwig 已提交
2994 2995 2996 2997 2998 2999 3000 3001 3002 3003

out_free_stateid_slab:
	kmem_cache_destroy(stateid_slab);
out_free_file_slab:
	kmem_cache_destroy(file_slab);
out_free_lockowner_slab:
	kmem_cache_destroy(lockowner_slab);
out_free_openowner_slab:
	kmem_cache_destroy(openowner_slab);
out:
N
NeilBrown 已提交
3004 3005
	dprintk("nfsd4: out of memory while initializing nfsv4\n");
	return -ENOMEM;
L
Linus Torvalds 已提交
3006 3007
}

3008
static void init_nfs4_replay(struct nfs4_replay *rp)
L
Linus Torvalds 已提交
3009
{
3010 3011 3012
	rp->rp_status = nfserr_serverfault;
	rp->rp_buflen = 0;
	rp->rp_buf = rp->rp_ibuf;
3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027 3028 3029 3030 3031 3032 3033 3034
	mutex_init(&rp->rp_mutex);
}

/*
 * For v4.0 (no session) requests, pin the stateowner for replay handling:
 * take its replay mutex and an extra reference, released later by
 * nfsd4_cstate_clear_replay().
 */
static void nfsd4_cstate_assign_replay(struct nfsd4_compound_state *cstate,
		struct nfs4_stateowner *so)
{
	if (!nfsd4_has_session(cstate)) {
		mutex_lock(&so->so_replay.rp_mutex);
		cstate->replay_owner = so;
		atomic_inc(&so->so_count);
	}
}

/* Undo nfsd4_cstate_assign_replay(): drop the replay mutex and reference. */
void nfsd4_cstate_clear_replay(struct nfsd4_compound_state *cstate)
{
	struct nfs4_stateowner *so = cstate->replay_owner;

	if (so != NULL) {
		cstate->replay_owner = NULL;
		mutex_unlock(&so->so_replay.rp_mutex);
		nfs4_put_stateowner(so);
	}
}

3037
static inline void *alloc_stateowner(struct kmem_cache *slab, struct xdr_netobj *owner, struct nfs4_client *clp)
3038
{
L
Linus Torvalds 已提交
3039 3040
	struct nfs4_stateowner *sop;

3041
	sop = kmem_cache_alloc(slab, GFP_KERNEL);
3042 3043 3044 3045 3046
	if (!sop)
		return NULL;

	sop->so_owner.data = kmemdup(owner->data, owner->len, GFP_KERNEL);
	if (!sop->so_owner.data) {
3047
		kmem_cache_free(slab, sop);
L
Linus Torvalds 已提交
3048
		return NULL;
3049 3050 3051
	}
	sop->so_owner.len = owner->len;

3052
	INIT_LIST_HEAD(&sop->so_stateids);
3053 3054
	sop->so_client = clp;
	init_nfs4_replay(&sop->so_replay);
3055
	atomic_set(&sop->so_count, 1);
3056 3057 3058
	return sop;
}

3059
static void hash_openowner(struct nfs4_openowner *oo, struct nfs4_client *clp, unsigned int strhashval)
3060
{
3061 3062 3063
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);

	list_add(&oo->oo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
3064
	list_add(&oo->oo_perclient, &clp->cl_openowners);
3065 3066
}

3067 3068 3069
static void nfs4_unhash_openowner(struct nfs4_stateowner *so)
{
	struct nfs4_openowner *oo = openowner(so);
3070
	struct nfsd_net *nn = net_generic(so->so_client->net, nfsd_net_id);
3071

3072 3073 3074
	spin_lock(&nn->client_lock);
	unhash_openowner_locked(oo);
	spin_unlock(&nn->client_lock);
3075 3076
}

3077 3078 3079 3080 3081 3082 3083 3084
/* so_ops->so_free: return an openowner to its slab cache. */
static void nfs4_free_openowner(struct nfs4_stateowner *so)
{
	kmem_cache_free(openowner_slab, openowner(so));
}

static const struct nfs4_stateowner_operations openowner_ops = {
3085 3086
	.so_unhash =	nfs4_unhash_openowner,
	.so_free =	nfs4_free_openowner,
3087 3088
};

3089
static struct nfs4_openowner *
3090
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
3091 3092
			   struct nfsd4_compound_state *cstate)
{
3093
	struct nfs4_client *clp = cstate->clp;
3094 3095
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
	struct nfs4_openowner *oo, *ret;
3096

3097 3098
	oo = alloc_stateowner(openowner_slab, &open->op_owner, clp);
	if (!oo)
3099
		return NULL;
3100
	oo->oo_owner.so_ops = &openowner_ops;
3101 3102
	oo->oo_owner.so_is_open_owner = 1;
	oo->oo_owner.so_seqid = open->op_seqid;
3103
	oo->oo_flags = 0;
3104 3105
	if (nfsd4_has_session(cstate))
		oo->oo_flags |= NFS4_OO_CONFIRMED;
3106
	oo->oo_time = 0;
3107
	oo->oo_last_closed_stid = NULL;
3108
	INIT_LIST_HEAD(&oo->oo_close_lru);
3109 3110 3111 3112 3113 3114 3115 3116 3117
	spin_lock(&nn->client_lock);
	ret = find_openstateowner_str_locked(strhashval,
			open, clp->cl_minorversion, nn);
	if (ret == NULL) {
		hash_openowner(oo, clp, strhashval);
		ret = oo;
	} else
		nfs4_free_openowner(&oo->oo_owner);
	spin_unlock(&nn->client_lock);
3118
	return oo;
L
Linus Torvalds 已提交
3119 3120
}

3121
static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
3122
	struct nfs4_openowner *oo = open->op_openowner;
L
Linus Torvalds 已提交
3123

3124
	atomic_inc(&stp->st_stid.sc_count);
J
J. Bruce Fields 已提交
3125
	stp->st_stid.sc_type = NFS4_OPEN_STID;
3126
	INIT_LIST_HEAD(&stp->st_locks);
3127
	stp->st_stateowner = &oo->oo_owner;
3128
	atomic_inc(&stp->st_stateowner->so_count);
3129
	get_nfs4_file(fp);
3130
	stp->st_stid.sc_file = fp;
L
Linus Torvalds 已提交
3131 3132
	stp->st_access_bmap = 0;
	stp->st_deny_bmap = 0;
3133
	stp->st_openstp = NULL;
3134 3135
	spin_lock(&oo->oo_owner.so_client->cl_lock);
	list_add(&stp->st_perstateowner, &oo->oo_owner.so_stateids);
3136 3137 3138
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
3139
	spin_unlock(&oo->oo_owner.so_client->cl_lock);
L
Linus Torvalds 已提交
3140 3141
}

3142 3143 3144 3145 3146
/*
 * In the 4.0 case we need to keep the owners around a little while to handle
 * CLOSE replay. We still do need to release any file access that is held by
 * them before returning however.
 */
3147
static void
3148
move_to_close_lru(struct nfs4_ol_stateid *s, struct net *net)
L
Linus Torvalds 已提交
3149
{
3150 3151 3152
	struct nfs4_openowner *oo = openowner(s->st_stateowner);
	struct nfsd_net *nn = net_generic(s->st_stid.sc_client->net,
						nfsd_net_id);
3153

3154
	dprintk("NFSD: move_to_close_lru nfs4_openowner %p\n", oo);
L
Linus Torvalds 已提交
3155

3156 3157 3158 3159 3160 3161 3162 3163 3164 3165 3166
	/*
	 * We know that we hold one reference via nfsd4_close, and another
	 * "persistent" reference for the client. If the refcount is higher
	 * than 2, then there are still calls in progress that are using this
	 * stateid. We can't put the sc_file reference until they are finished.
	 * Wait for the refcount to drop to 2. Since it has been unhashed,
	 * there should be no danger of the refcount going back up again at
	 * this point.
	 */
	wait_event(close_wq, atomic_read(&s->st_stid.sc_count) == 2);

3167 3168 3169 3170 3171 3172 3173
	release_all_access(s);
	if (s->st_stid.sc_file) {
		put_nfs4_file(s->st_stid.sc_file);
		s->st_stid.sc_file = NULL;
	}
	release_last_closed_stateid(oo);
	oo->oo_last_closed_stid = s;
3174
	list_move_tail(&oo->oo_close_lru, &nn->close_lru);
3175
	oo->oo_time = get_seconds();
L
Linus Torvalds 已提交
3176 3177 3178 3179
}

/* search file_hashtbl[] for file */
static struct nfs4_file *
3180
find_file_locked(struct knfsd_fh *fh)
L
Linus Torvalds 已提交
3181
{
3182
	unsigned int hashval = file_hashval(fh);
L
Linus Torvalds 已提交
3183 3184
	struct nfs4_file *fp;

3185 3186
	lockdep_assert_held(&state_lock);

3187
	hlist_for_each_entry(fp, &file_hashtbl[hashval], fi_hash) {
3188
		if (nfsd_fh_match(&fp->fi_fhandle, fh)) {
3189
			get_nfs4_file(fp);
L
Linus Torvalds 已提交
3190
			return fp;
3191
		}
L
Linus Torvalds 已提交
3192 3193 3194 3195
	}
	return NULL;
}

3196
static struct nfs4_file *
3197
find_file(struct knfsd_fh *fh)
3198 3199 3200 3201
{
	struct nfs4_file *fp;

	spin_lock(&state_lock);
3202
	fp = find_file_locked(fh);
3203 3204 3205 3206 3207
	spin_unlock(&state_lock);
	return fp;
}

static struct nfs4_file *
3208
find_or_add_file(struct nfs4_file *new, struct knfsd_fh *fh)
3209 3210 3211 3212
{
	struct nfs4_file *fp;

	spin_lock(&state_lock);
3213
	fp = find_file_locked(fh);
3214
	if (fp == NULL) {
3215
		nfsd4_init_file(new, fh);
3216 3217 3218 3219 3220 3221 3222
		fp = new;
	}
	spin_unlock(&state_lock);

	return fp;
}

L
Linus Torvalds 已提交
3223 3224 3225 3226
/*
 * Called to check deny when READ with all zero stateid or
 * WRITE with all zero or all one stateid
 */
3227
static __be32
L
Linus Torvalds 已提交
3228 3229 3230
nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
{
	struct nfs4_file *fp;
3231
	__be32 ret = nfs_ok;
L
Linus Torvalds 已提交
3232

3233
	fp = find_file(&current_fh->fh_handle);
3234
	if (!fp)
3235 3236
		return ret;
	/* Check for conflicting share reservations */
3237
	spin_lock(&fp->fi_lock);
3238 3239
	if (fp->fi_share_deny & deny_type)
		ret = nfserr_locked;
3240
	spin_unlock(&fp->fi_lock);
3241 3242
	put_nfs4_file(fp);
	return ret;
L
Linus Torvalds 已提交
3243 3244
}

3245
void nfsd4_prepare_cb_recall(struct nfs4_delegation *dp)
L
Linus Torvalds 已提交
3246
{
3247 3248
	struct nfsd_net *nn = net_generic(dp->dl_stid.sc_client->net,
					  nfsd_net_id);
3249

3250
	block_delegations(&dp->dl_stid.sc_file->fi_fhandle);
3251

3252
	/*
3253 3254 3255
	 * We can't do this in nfsd_break_deleg_cb because it is
	 * already holding inode->i_lock.
	 *
3256 3257 3258
	 * If the dl_time != 0, then we know that it has already been
	 * queued for a lease break. Don't queue it again.
	 */
3259
	spin_lock(&state_lock);
3260 3261
	if (dp->dl_time == 0) {
		dp->dl_time = get_seconds();
3262
		list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
3263
	}
3264 3265
	spin_unlock(&state_lock);
}
L
Linus Torvalds 已提交
3266

3267 3268 3269 3270 3271 3272 3273 3274 3275
static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
{
	/*
	 * We're assuming the state code never drops its reference
	 * without first removing the lease.  Since we're in this lease
	 * callback (and since the lease code is serialized by the kernel
	 * lock) we know the server hasn't removed the lease yet, we know
	 * it's safe to take a reference.
	 */
3276
	atomic_inc(&dp->dl_stid.sc_count);
3277 3278 3279
	nfsd4_cb_recall(dp);
}

3280
/* Called from break_lease() with i_lock held. */
3281 3282
static void nfsd_break_deleg_cb(struct file_lock *fl)
{
3283 3284
	struct nfs4_file *fp = (struct nfs4_file *)fl->fl_owner;
	struct nfs4_delegation *dp;
3285

3286 3287 3288 3289 3290 3291 3292 3293
	if (!fp) {
		WARN(1, "(%p)->fl_owner NULL\n", fl);
		return;
	}
	if (fp->fi_had_conflict) {
		WARN(1, "duplicate break on %p\n", fp);
		return;
	}
3294 3295
	/*
	 * We don't want the locks code to timeout the lease for us;
3296
	 * we'll remove it ourself if a delegation isn't returned
3297
	 * in time:
3298 3299
	 */
	fl->fl_break_time = 0;
L
Linus Torvalds 已提交
3300

3301
	spin_lock(&fp->fi_lock);
3302 3303 3304 3305 3306 3307 3308 3309 3310 3311 3312 3313
	fp->fi_had_conflict = true;
	/*
	 * If there are no delegations on the list, then we can't count on this
	 * lease ever being cleaned up. Set the fl_break_time to jiffies so that
	 * time_out_leases will do it ASAP. The fact that fi_had_conflict is now
	 * true should keep any new delegations from being hashed.
	 */
	if (list_empty(&fp->fi_delegations))
		fl->fl_break_time = jiffies;
	else
		list_for_each_entry(dp, &fp->fi_delegations, dl_perfile)
			nfsd_break_one_deleg(dp);
3314
	spin_unlock(&fp->fi_lock);
L
Linus Torvalds 已提交
3315 3316 3317 3318 3319 3320 3321 3322 3323 3324 3325
}

/* lm_change callback: only allow the lease to be removed, never converted. */
static
int nfsd_change_deleg_cb(struct file_lock **onlist, int arg)
{
	if (arg & F_UNLCK)
		return lease_modify(onlist, arg);
	return -EAGAIN;
}

3326
static const struct lock_manager_operations nfsd_lease_mng_ops = {
J
J. Bruce Fields 已提交
3327 3328
	.lm_break = nfsd_break_deleg_cb,
	.lm_change = nfsd_change_deleg_cb,
L
Linus Torvalds 已提交
3329 3330
};

3331 3332 3333 3334 3335 3336 3337 3338 3339 3340
/*
 * Validate a v4.0 seqid against the owner's current sequence:
 * one behind means a retransmit (replay), equal is the expected next op,
 * anything else is a protocol error.  Sessions (v4.1+) don't use seqids.
 */
static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4_stateowner *so, u32 seqid)
{
	if (nfsd4_has_session(cstate))
		return nfs_ok;
	if (seqid == so->so_seqid - 1)
		return nfserr_replay_me;
	if (seqid == so->so_seqid)
		return nfs_ok;
	return nfserr_bad_seqid;
}
L
Linus Torvalds 已提交
3341

3342 3343 3344 3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355 3356 3357 3358 3359 3360 3361 3362 3363 3364 3365 3366 3367 3368 3369 3370 3371 3372 3373
/*
 * Resolve @clid to a confirmed client and cache it in @cstate.
 * Returns nfserr_stale_clientid for pre-reboot ids and nfserr_expired
 * if no confirmed client matches.
 */
static __be32 lookup_clientid(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
{
	struct nfs4_client *found;

	if (cstate->clp) {
		found = cstate->clp;
		if (!same_clid(&found->cl_clientid, clid))
			return nfserr_stale_clientid;
		return nfs_ok;
	}

	if (STALE_CLIENTID(clid, nn))
		return nfserr_stale_clientid;

	/*
	 * For v4.1+ we get the client in the SEQUENCE op. If we don't have one
	 * cached already then we know this is for v4.0 and "sessions"
	 * will be false.
	 */
	WARN_ON_ONCE(cstate->session);
	found = find_confirmed_client(clid, false, nn);
	if (!found)
		return nfserr_expired;

	/* Cache the nfs4_client in cstate! */
	cstate->clp = found;
	atomic_inc(&found->cl_refcount);
	return nfs_ok;
}

3374
__be32
A
Andy Adamson 已提交
3375
nfsd4_process_open1(struct nfsd4_compound_state *cstate,
3376
		    struct nfsd4_open *open, struct nfsd_net *nn)
L
Linus Torvalds 已提交
3377 3378 3379 3380
{
	clientid_t *clientid = &open->op_clientid;
	struct nfs4_client *clp = NULL;
	unsigned int strhashval;
3381
	struct nfs4_openowner *oo = NULL;
3382
	__be32 status;
L
Linus Torvalds 已提交
3383

3384
	if (STALE_CLIENTID(&open->op_clientid, nn))
L
Linus Torvalds 已提交
3385
		return nfserr_stale_clientid;
3386 3387 3388 3389 3390 3391 3392
	/*
	 * In case we need it later, after we've already created the
	 * file and don't want to risk a further failure:
	 */
	open->op_file = nfsd4_alloc_file();
	if (open->op_file == NULL)
		return nfserr_jukebox;
L
Linus Torvalds 已提交
3393

3394 3395 3396 3397 3398
	status = lookup_clientid(clientid, cstate, nn);
	if (status)
		return status;
	clp = cstate->clp;

3399
	strhashval = ownerstr_hashval(clientid->cl_id, &open->op_owner);
3400
	oo = find_openstateowner_str(strhashval, open, cstate->minorversion, nn);
3401 3402
	open->op_openowner = oo;
	if (!oo) {
3403
		goto new_owner;
L
Linus Torvalds 已提交
3404
	}
3405
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
3406
		/* Replace unconfirmed owners without checking for replay. */
3407 3408
		release_openowner(oo);
		open->op_openowner = NULL;
3409
		goto new_owner;
3410
	}
3411 3412 3413 3414
	status = nfsd4_check_seqid(cstate, &oo->oo_owner, open->op_seqid);
	if (status)
		return status;
	goto alloc_stateid;
3415
new_owner:
3416
	oo = alloc_init_open_stateowner(strhashval, open, cstate);
3417 3418 3419
	if (oo == NULL)
		return nfserr_jukebox;
	open->op_openowner = oo;
3420
alloc_stateid:
3421
	open->op_stp = nfs4_alloc_open_stateid(clp);
3422 3423
	if (!open->op_stp)
		return nfserr_jukebox;
3424
	return nfs_ok;
L
Linus Torvalds 已提交
3425 3426
}

3427
static inline __be32
N
NeilBrown 已提交
3428 3429 3430 3431 3432 3433 3434 3435
nfs4_check_delegmode(struct nfs4_delegation *dp, int flags)
{
	if ((flags & WR_STATE) && (dp->dl_type == NFS4_OPEN_DELEGATE_READ))
		return nfserr_openmode;
	else
		return nfs_ok;
}

3436
/* Map an NFSv4 share-access value to the internal RD_STATE/WR_STATE flag. */
static int share_access_to_flags(u32 share_access)
{
	if (share_access == NFS4_SHARE_ACCESS_READ)
		return RD_STATE;
	return WR_STATE;
}

3441
/* Look up a delegation stateid for client @cl; NULL if not found. */
static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, stateid_t *s)
{
	struct nfs4_stid *ret;

	ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
	if (!ret)
		return NULL;
	return delegstateid(ret);
}

3451 3452 3453 3454 3455 3456
/* Is this OPEN a CLAIM_DELEGATE_CUR-style claim (by name or by filehandle)? */
static bool nfsd4_is_deleg_cur(struct nfsd4_open *open)
{
	return open->op_claim_type == NFS4_OPEN_CLAIM_DELEGATE_CUR ||
	       open->op_claim_type == NFS4_OPEN_CLAIM_DELEG_CUR_FH;
}

3457
static __be32
3458
nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
3459 3460 3461
		struct nfs4_delegation **dp)
{
	int flags;
3462
	__be32 status = nfserr_bad_stateid;
3463
	struct nfs4_delegation *deleg;
3464

3465 3466
	deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
	if (deleg == NULL)
3467
		goto out;
3468
	flags = share_access_to_flags(open->op_share_access);
3469 3470 3471 3472 3473 3474
	status = nfs4_check_delegmode(deleg, flags);
	if (status) {
		nfs4_put_stid(&deleg->dl_stid);
		goto out;
	}
	*dp = deleg;
3475
out:
3476
	if (!nfsd4_is_deleg_cur(open))
3477 3478 3479
		return nfs_ok;
	if (status)
		return status;
3480
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
3481
	return nfs_ok;
3482 3483
}

3484 3485
static struct nfs4_ol_stateid *
nfsd4_find_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
L
Linus Torvalds 已提交
3486
{
3487
	struct nfs4_ol_stateid *local, *ret = NULL;
3488
	struct nfs4_openowner *oo = open->op_openowner;
L
Linus Torvalds 已提交
3489

3490
	spin_lock(&fp->fi_lock);
3491
	list_for_each_entry(local, &fp->fi_stateids, st_perfile) {
L
Linus Torvalds 已提交
3492 3493 3494
		/* ignore lock owners */
		if (local->st_stateowner->so_is_open_owner == 0)
			continue;
3495
		if (local->st_stateowner == &oo->oo_owner) {
3496
			ret = local;
3497
			atomic_inc(&ret->st_stid.sc_count);
3498
			break;
3499
		}
L
Linus Torvalds 已提交
3500
	}
3501
	spin_unlock(&fp->fi_lock);
3502
	return ret;
L
Linus Torvalds 已提交
3503 3504
}

3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515
/* Translate NFSv4 share-access bits into NFSD_MAY_* open flags. */
static inline int nfs4_access_to_access(u32 nfs4_access)
{
	int flags = 0;

	if (nfs4_access & NFS4_SHARE_ACCESS_READ)
		flags |= NFSD_MAY_READ;
	if (nfs4_access & NFS4_SHARE_ACCESS_WRITE)
		flags |= NFSD_MAY_WRITE;
	return flags;
}

3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529 3530
/*
 * Honor OPEN4's truncate-on-open: a size-0 setattr, valid only when the
 * open requests write access.
 */
static inline __be32
nfsd4_truncate(struct svc_rqst *rqstp, struct svc_fh *fh,
		struct nfsd4_open *open)
{
	struct iattr iattr = {
		.ia_valid = ATTR_SIZE,
		.ia_size = 0,
	};
	if (!open->op_truncate)
		return 0;
	if (!(open->op_share_access & NFS4_SHARE_ACCESS_WRITE))
		return nfserr_inval;
	return nfsd_setattr(rqstp, fh, &iattr, 0, (time_t)0);
}

3531
static __be32 nfs4_get_vfs_file(struct svc_rqst *rqstp, struct nfs4_file *fp,
3532 3533
		struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp,
		struct nfsd4_open *open)
3534
{
3535
	struct file *filp = NULL;
3536
	__be32 status;
3537 3538
	int oflag = nfs4_access_to_omode(open->op_share_access);
	int access = nfs4_access_to_access(open->op_share_access);
3539
	unsigned char old_access_bmap, old_deny_bmap;
3540

3541
	spin_lock(&fp->fi_lock);
3542 3543 3544 3545 3546 3547 3548 3549 3550 3551 3552 3553 3554 3555 3556 3557 3558 3559 3560 3561 3562 3563 3564 3565 3566 3567 3568

	/*
	 * Are we trying to set a deny mode that would conflict with
	 * current access?
	 */
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* set access to the file */
	status = nfs4_file_get_access(fp, open->op_share_access);
	if (status != nfs_ok) {
		spin_unlock(&fp->fi_lock);
		goto out;
	}

	/* Set access bits in stateid */
	old_access_bmap = stp->st_access_bmap;
	set_access(open->op_share_access, stp);

	/* Set new deny mask */
	old_deny_bmap = stp->st_deny_bmap;
	set_deny(open->op_share_deny, stp);
	fp->fi_share_deny |= (open->op_share_deny & NFS4_SHARE_DENY_BOTH);

3569
	if (!fp->fi_fds[oflag]) {
3570 3571
		spin_unlock(&fp->fi_lock);
		status = nfsd_open(rqstp, cur_fh, S_IFREG, access, &filp);
3572
		if (status)
3573
			goto out_put_access;
3574 3575 3576 3577 3578
		spin_lock(&fp->fi_lock);
		if (!fp->fi_fds[oflag]) {
			fp->fi_fds[oflag] = filp;
			filp = NULL;
		}
3579
	}
3580 3581 3582
	spin_unlock(&fp->fi_lock);
	if (filp)
		fput(filp);
3583

3584 3585 3586 3587 3588
	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status)
		goto out_put_access;
out:
	return status;
3589 3590 3591 3592 3593
out_put_access:
	stp->st_access_bmap = old_access_bmap;
	nfs4_file_put_access(fp, open->op_share_access);
	reset_union_bmap_deny(bmap_to_share_mode(old_deny_bmap), stp);
	goto out;
L
Linus Torvalds 已提交
3594 3595
}

3596
static __be32
3597
nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *cur_fh, struct nfs4_ol_stateid *stp, struct nfsd4_open *open)
L
Linus Torvalds 已提交
3598
{
3599
	__be32 status;
3600
	unsigned char old_deny_bmap;
L
Linus Torvalds 已提交
3601

3602
	if (!test_access(open->op_share_access, stp))
3603
		return nfs4_get_vfs_file(rqstp, fp, cur_fh, stp, open);
3604

3605 3606 3607 3608 3609 3610 3611 3612 3613 3614 3615 3616
	/* test and set deny mode */
	spin_lock(&fp->fi_lock);
	status = nfs4_file_check_deny(fp, open->op_share_deny);
	if (status == nfs_ok) {
		old_deny_bmap = stp->st_deny_bmap;
		set_deny(open->op_share_deny, stp);
		fp->fi_share_deny |=
				(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
	}
	spin_unlock(&fp->fi_lock);

	if (status != nfs_ok)
L
Linus Torvalds 已提交
3617 3618
		return status;

3619 3620 3621 3622 3623
	status = nfsd4_truncate(rqstp, cur_fh, open);
	if (status != nfs_ok)
		reset_union_bmap_deny(old_deny_bmap, stp);
	return status;
}
L
Linus Torvalds 已提交
3624 3625

static void
3626
nfs4_set_claim_prev(struct nfsd4_open *open, bool has_session)
L
Linus Torvalds 已提交
3627
{
3628
	open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
L
Linus Torvalds 已提交
3629 3630
}

3631 3632 3633 3634 3635 3636 3637 3638 3639 3640 3641 3642 3643
/* Should we give out recallable state?: */
static bool nfsd4_cb_channel_good(struct nfs4_client *clp)
{
	if (clp->cl_cb_state == NFSD4_CB_UP)
		return true;
	/*
	 * In the sessions case, since we don't have to establish a
	 * separate connection for callbacks, we assume it's OK
	 * until we hear otherwise:
	 */
	return clp->cl_minorversion && clp->cl_cb_state == NFSD4_CB_UNKNOWN;
}

3644
static struct file_lock *nfs4_alloc_init_lease(struct nfs4_file *fp, int flag)
3645 3646 3647 3648 3649 3650 3651 3652
{
	struct file_lock *fl;

	fl = locks_alloc_lock();
	if (!fl)
		return NULL;
	locks_init_lock(fl);
	fl->fl_lmops = &nfsd_lease_mng_ops;
3653
	fl->fl_flags = FL_DELEG;
3654 3655
	fl->fl_type = flag == NFS4_OPEN_DELEGATE_READ? F_RDLCK: F_WRLCK;
	fl->fl_end = OFFSET_MAX;
3656
	fl->fl_owner = (fl_owner_t)fp;
3657 3658 3659 3660
	fl->fl_pid = current->tgid;
	return fl;
}

3661
static int nfs4_setlease(struct nfs4_delegation *dp)
3662
{
3663
	struct nfs4_file *fp = dp->dl_stid.sc_file;
3664
	struct file_lock *fl;
3665 3666
	struct file *filp;
	int status = 0;
3667

3668
	fl = nfs4_alloc_init_lease(fp, NFS4_OPEN_DELEGATE_READ);
3669 3670
	if (!fl)
		return -ENOMEM;
3671 3672 3673 3674 3675 3676 3677 3678 3679 3680 3681 3682 3683 3684 3685 3686 3687 3688 3689 3690 3691 3692 3693 3694 3695
	filp = find_readable_file(fp);
	if (!filp) {
		/* We should always have a readable file here */
		WARN_ON_ONCE(1);
		return -EBADF;
	}
	fl->fl_file = filp;
	status = vfs_setlease(filp, fl->fl_type, &fl);
	if (status) {
		locks_free_lock(fl);
		goto out_fput;
	}
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
	/* Did the lease get broken before we took the lock? */
	status = -EAGAIN;
	if (fp->fi_had_conflict)
		goto out_unlock;
	/* Race breaker */
	if (fp->fi_lease) {
		status = 0;
		atomic_inc(&fp->fi_delegees);
		hash_delegation_locked(dp, fp);
		goto out_unlock;
	}
3696
	fp->fi_lease = fl;
3697
	fp->fi_deleg_file = filp;
3698
	atomic_set(&fp->fi_delegees, 1);
3699
	hash_delegation_locked(dp, fp);
3700
	spin_unlock(&fp->fi_lock);
3701
	spin_unlock(&state_lock);
3702
	return 0;
3703 3704 3705 3706 3707
out_unlock:
	spin_unlock(&fp->fi_lock);
	spin_unlock(&state_lock);
out_fput:
	fput(filp);
3708
	return status;
3709 3710
}

J
Jeff Layton 已提交
3711 3712 3713
static struct nfs4_delegation *
nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
		    struct nfs4_file *fp)
3714
{
J
Jeff Layton 已提交
3715 3716
	int status;
	struct nfs4_delegation *dp;
3717

3718
	if (fp->fi_had_conflict)
J
Jeff Layton 已提交
3719 3720 3721 3722 3723 3724
		return ERR_PTR(-EAGAIN);

	dp = alloc_init_deleg(clp, fh);
	if (!dp)
		return ERR_PTR(-ENOMEM);

3725
	get_nfs4_file(fp);
3726 3727
	spin_lock(&state_lock);
	spin_lock(&fp->fi_lock);
3728
	dp->dl_stid.sc_file = fp;
3729 3730 3731
	if (!fp->fi_lease) {
		spin_unlock(&fp->fi_lock);
		spin_unlock(&state_lock);
J
Jeff Layton 已提交
3732 3733
		status = nfs4_setlease(dp);
		goto out;
3734
	}
3735
	atomic_inc(&fp->fi_delegees);
3736
	if (fp->fi_had_conflict) {
3737 3738
		status = -EAGAIN;
		goto out_unlock;
3739
	}
3740
	hash_delegation_locked(dp, fp);
J
Jeff Layton 已提交
3741
	status = 0;
3742 3743
out_unlock:
	spin_unlock(&fp->fi_lock);
3744
	spin_unlock(&state_lock);
J
Jeff Layton 已提交
3745 3746
out:
	if (status) {
3747
		nfs4_put_stid(&dp->dl_stid);
J
Jeff Layton 已提交
3748 3749 3750
		return ERR_PTR(status);
	}
	return dp;
3751 3752
}

3753 3754 3755 3756 3757 3758 3759 3760 3761 3762 3763 3764 3765 3766 3767 3768
/*
 * Fill in the 4.1 "why no delegation" result: contention if the attempt
 * failed with -EAGAIN, otherwise a resource/cancel reason derived from
 * what the client asked for.
 */
static void nfsd4_open_deleg_none_ext(struct nfsd4_open *open, int status)
{
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
	if (status == -EAGAIN) {
		open->op_why_no_deleg = WND4_CONTENTION;
		return;
	}
	open->op_why_no_deleg = WND4_RESOURCE;
	switch (open->op_deleg_want) {
	case NFS4_SHARE_WANT_READ_DELEG:
	case NFS4_SHARE_WANT_WRITE_DELEG:
	case NFS4_SHARE_WANT_ANY_DELEG:
		break;
	case NFS4_SHARE_WANT_CANCEL:
		open->op_why_no_deleg = WND4_CANCELLED;
		break;
	case NFS4_SHARE_WANT_NO_DELEG:
		WARN_ON_ONCE(1);
	}
}

L
Linus Torvalds 已提交
3774 3775
/*
 * Attempt to hand out a delegation.
3776 3777 3778
 *
 * Note we don't support write delegations, and won't until the vfs has
 * proper support for them.
L
Linus Torvalds 已提交
3779 3780
 */
static void
3781 3782
nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
			struct nfs4_ol_stateid *stp)
L
Linus Torvalds 已提交
3783 3784
{
	struct nfs4_delegation *dp;
3785 3786
	struct nfs4_openowner *oo = openowner(stp->st_stateowner);
	struct nfs4_client *clp = stp->st_stid.sc_client;
3787
	int cb_up;
3788
	int status = 0;
L
Linus Torvalds 已提交
3789

3790
	cb_up = nfsd4_cb_channel_good(oo->oo_owner.so_client);
3791 3792 3793
	open->op_recall = 0;
	switch (open->op_claim_type) {
		case NFS4_OPEN_CLAIM_PREVIOUS:
3794
			if (!cb_up)
3795
				open->op_recall = 1;
3796 3797
			if (open->op_delegate_type != NFS4_OPEN_DELEGATE_READ)
				goto out_no_deleg;
3798 3799
			break;
		case NFS4_OPEN_CLAIM_NULL:
3800
		case NFS4_OPEN_CLAIM_FH:
3801 3802 3803 3804
			/*
			 * Let's not give out any delegations till everyone's
			 * had the chance to reclaim theirs....
			 */
3805
			if (locks_in_grace(clp->net))
3806
				goto out_no_deleg;
3807
			if (!cb_up || !(oo->oo_flags & NFS4_OO_CONFIRMED))
3808
				goto out_no_deleg;
3809 3810 3811 3812 3813 3814 3815
			/*
			 * Also, if the file was opened for write or
			 * create, there's a good chance the client's
			 * about to write to it, resulting in an
			 * immediate recall (since we don't support
			 * write delegations):
			 */
3816
			if (open->op_share_access & NFS4_SHARE_ACCESS_WRITE)
3817 3818 3819
				goto out_no_deleg;
			if (open->op_create == NFS4_OPEN_CREATE)
				goto out_no_deleg;
3820 3821
			break;
		default:
3822
			goto out_no_deleg;
3823
	}
3824
	dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file);
J
Jeff Layton 已提交
3825
	if (IS_ERR(dp))
3826
		goto out_no_deleg;
L
Linus Torvalds 已提交
3827

3828
	memcpy(&open->op_delegate_stateid, &dp->dl_stid.sc_stateid, sizeof(dp->dl_stid.sc_stateid));
L
Linus Torvalds 已提交
3829

3830
	dprintk("NFSD: delegation stateid=" STATEID_FMT "\n",
3831
		STATEID_VAL(&dp->dl_stid.sc_stateid));
3832
	open->op_delegate_type = NFS4_OPEN_DELEGATE_READ;
3833
	nfs4_put_stid(&dp->dl_stid);
3834 3835
	return;
out_no_deleg:
3836 3837
	open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE;
	if (open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS &&
3838
	    open->op_delegate_type != NFS4_OPEN_DELEGATE_NONE) {
3839
		dprintk("NFSD: WARNING: refusing delegation reclaim\n");
3840 3841
		open->op_recall = 1;
	}
3842 3843 3844 3845 3846

	/* 4.1 client asking for a delegation? */
	if (open->op_deleg_want)
		nfsd4_open_deleg_none_ext(open, status);
	return;
L
Linus Torvalds 已提交
3847 3848
}

3849 3850 3851 3852 3853 3854 3855 3856 3857 3858 3859 3860 3861 3862 3863 3864 3865 3866
static void nfsd4_deleg_xgrade_none_ext(struct nfsd4_open *open,
					struct nfs4_delegation *dp)
{
	if (open->op_deleg_want == NFS4_SHARE_WANT_READ_DELEG &&
	    dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_DOWNGRADE;
	} else if (open->op_deleg_want == NFS4_SHARE_WANT_WRITE_DELEG &&
		   dp->dl_type == NFS4_OPEN_DELEGATE_WRITE) {
		open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
		open->op_why_no_deleg = WND4_NOT_SUPP_UPGRADE;
	}
	/* Otherwise the client must be confused wanting a delegation
	 * it already has, therefore we don't return
	 * NFS4_OPEN_DELEGATE_NONE_EXT and reason.
	 */
}

/*
 * Second phase of OPEN processing: look up or create the nfs4_file,
 * create or upgrade the open stateid, and optionally hand out a read
 * delegation.  Called with nfs4_lock_state() held.
 *
 * Returns nfs_ok on success; the OPEN itself succeeds even when
 * delegation hand-out fails.
 */
__be32
nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_open *open)
{
	struct nfsd4_compoundres *resp = rqstp->rq_resp;
	struct nfs4_client *cl = open->op_openowner->oo_owner.so_client;
	struct nfs4_file *fp = NULL;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	__be32 status;

	/*
	 * Lookup file; if found, lookup stateid and check open request,
	 * and check for delegations in the process of being recalled.
	 * If not found, create the nfs4_file struct
	 */
	fp = find_or_add_file(open->op_file, &current_fh->fh_handle);
	if (fp != open->op_file) {
		/* An existing nfs4_file was found for this filehandle. */
		status = nfs4_check_deleg(cl, open, &dp);
		if (status)
			goto out;
		stp = nfsd4_find_existing_open(fp, open);
	} else {
		/* Our preallocated nfs4_file was inserted; ownership
		 * transferred to the hash table, so clear op_file. */
		open->op_file = NULL;
		status = nfserr_bad_stateid;
		if (nfsd4_is_deleg_cur(open))
			goto out;
		status = nfserr_jukebox;
	}

	/*
	 * OPEN the file, or upgrade an existing OPEN.
	 * If truncate fails, the OPEN fails.
	 */
	if (stp) {
		/* Stateid was found, this is an OPEN upgrade */
		status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
		if (status)
			goto out;
	} else {
		/* Consume the preallocated stateid from the open args. */
		stp = open->op_stp;
		open->op_stp = NULL;
		init_open_stateid(stp, fp, open);
		status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
		if (status) {
			release_open_stateid(stp);
			goto out;
		}
	}
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	/* A 4.1 client may explicitly decline delegations: */
	if (nfsd4_has_session(&resp->cstate)) {
		if (open->op_deleg_want & NFS4_SHARE_WANT_NO_DELEG) {
			open->op_delegate_type = NFS4_OPEN_DELEGATE_NONE_EXT;
			open->op_why_no_deleg = WND4_NOT_WANTED;
			goto nodeleg;
		}
	}

	/*
	* Attempt to hand out a delegation. No error return, because the
	* OPEN succeeds even if we fail.
	*/
	nfs4_open_delegation(current_fh, open, stp);
nodeleg:
	status = nfs_ok;

	dprintk("%s: stateid=" STATEID_FMT "\n", __func__,
		STATEID_VAL(&stp->st_stid.sc_stateid));
out:
	/* 4.1 client trying to upgrade/downgrade delegation? */
	if (open->op_delegate_type == NFS4_OPEN_DELEGATE_NONE && dp &&
	    open->op_deleg_want)
		nfsd4_deleg_xgrade_none_ext(open, dp);

	if (fp)
		put_nfs4_file(fp);
	if (status == 0 && open->op_claim_type == NFS4_OPEN_CLAIM_PREVIOUS)
		nfs4_set_claim_prev(open, nfsd4_has_session(&resp->cstate));
	/*
	* To finish the open response, we just need to set the rflags.
	*/
	open->op_rflags = NFS4_OPEN_RESULT_LOCKTYPE_POSIX;
	if (!(open->op_openowner->oo_flags & NFS4_OO_CONFIRMED) &&
	    !nfsd4_has_session(&resp->cstate))
		open->op_rflags |= NFS4_OPEN_RESULT_CONFIRM;
	if (dp)
		nfs4_put_stid(&dp->dl_stid);
	if (stp)
		nfs4_put_stid(&stp->st_stid);

	return status;
}

/*
 * Release resources still referenced by the open args after OPEN
 * processing: stash the stateowner for seqid replay handling, and free
 * any preallocated file/stateid that nfsd4_process_open2 did not consume.
 */
void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
			      struct nfsd4_open *open, __be32 status)
{
	if (open->op_openowner) {
		struct nfs4_stateowner *so = &open->op_openowner->oo_owner;

		/* Keep the owner around for replay detection: */
		nfsd4_cstate_assign_replay(cstate, so);
		nfs4_put_stateowner(so);
	}
	if (open->op_file)
		nfsd4_free_file(open->op_file);
	if (open->op_stp)
		nfs4_put_stid(&open->op_stp->st_stid);
}

/*
 * RENEW (NFSv4.0): refresh the client's lease.  Fails with
 * nfserr_cb_path_down when the client holds delegations but its
 * callback channel is not up, so it can return them.
 */
__be32
nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    clientid_t *clid)
{
	struct nfs4_client *clp;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	nfs4_lock_state();
	dprintk("process_renew(%08x/%08x): starting\n", 
			clid->cl_boot, clid->cl_id);
	/* lookup_clientid also renews the lease on success: */
	status = lookup_clientid(clid, cstate, nn);
	if (status)
		goto out;
	clp = cstate->clp;
	status = nfserr_cb_path_down;
	if (!list_empty(&clp->cl_delegations)
			&& clp->cl_cb_state != NFSD4_CB_UP)
		goto out;
	status = nfs_ok;
out:
	nfs4_unlock_state();
	return status;
}

/*
 * End this net namespace's NFSv4 grace period (idempotent): record the
 * fact on stable storage and stop blocking lock/open reclaims.
 */
static void
nfsd4_end_grace(struct nfsd_net *nn)
{
	/* do nothing if grace period already ended */
	if (nn->grace_ended)
		return;

	dprintk("NFSD: end of grace period\n");
	nn->grace_ended = true;
	nfsd4_record_grace_done(nn, nn->boot_time);
	locks_end_grace(&nn->nfsd4_manager);
	/*
	 * Now that every NFSv4 client has had the chance to recover and
	 * to see the (possibly new, possibly shorter) lease time, we
	 * can safely set the next grace time to the current lease time:
	 */
	nn->nfsd4_grace = nn->nfsd4_lease;
}

/*
 * Periodic state reaper: expire clients whose lease ran out, revoke
 * timed-out delegations on the recall list, and release openowners that
 * lingered past close.  Returns the number of seconds until the next
 * run (at least NFSD_LAUNDROMAT_MINTIMEOUT).
 *
 * Lock ordering: nfs4_lock_state, then nn->client_lock (for the client
 * LRU) or the global state_lock (for the delegation recall LRU); each
 * reap list is drained only after the corresponding spinlock is dropped.
 */
static time_t
nfs4_laundromat(struct nfsd_net *nn)
{
	struct nfs4_client *clp;
	struct nfs4_openowner *oo;
	struct nfs4_delegation *dp;
	struct list_head *pos, *next, reaplist;
	time_t cutoff = get_seconds() - nn->nfsd4_lease;
	time_t t, new_timeo = nn->nfsd4_lease;

	nfs4_lock_state();

	dprintk("NFSD: laundromat service - starting\n");
	nfsd4_end_grace(nn);
	INIT_LIST_HEAD(&reaplist);
	spin_lock(&nn->client_lock);
	list_for_each_safe(pos, next, &nn->client_lru) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		if (time_after((unsigned long)clp->cl_time, (unsigned long)cutoff)) {
			/* LRU is time-ordered; the rest are newer still. */
			t = clp->cl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		if (mark_client_expired_locked(clp)) {
			dprintk("NFSD: client in use (clientid %08x)\n",
				clp->cl_clientid.cl_id);
			continue;
		}
		list_move(&clp->cl_lru, &reaplist);
	}
	spin_unlock(&nn->client_lock);
	/* Expire outside the spinlock; expire_client may sleep. */
	list_for_each_safe(pos, next, &reaplist) {
		clp = list_entry(pos, struct nfs4_client, cl_lru);
		dprintk("NFSD: purging unused client (clientid %08x)\n",
			clp->cl_clientid.cl_id);
		expire_client(clp);
	}
	spin_lock(&state_lock);
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
		/* del_recall_lru is global; skip other namespaces: */
		if (net_generic(dp->dl_stid.sc_client->net, nfsd_net_id) != nn)
			continue;
		if (time_after((unsigned long)dp->dl_time, (unsigned long)cutoff)) {
			t = dp->dl_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		unhash_delegation_locked(dp);
		list_add(&dp->dl_recall_lru, &reaplist);
	}
	spin_unlock(&state_lock);
	while (!list_empty(&reaplist)) {
		dp = list_first_entry(&reaplist, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		revoke_delegation(dp);
	}
	list_for_each_safe(pos, next, &nn->close_lru) {
		oo = container_of(pos, struct nfs4_openowner, oo_close_lru);
		if (time_after((unsigned long)oo->oo_time, (unsigned long)cutoff)) {
			t = oo->oo_time - cutoff;
			new_timeo = min(new_timeo, t);
			break;
		}
		release_last_closed_stateid(oo);
	}
	new_timeo = max_t(time_t, new_timeo, NFSD_LAUNDROMAT_MINTIMEOUT);
	nfs4_unlock_state();
	return new_timeo;
}

static struct workqueue_struct *laundry_wq;
static void laundromat_main(struct work_struct *);

/*
 * Workqueue entry point for the laundromat: run one reaping pass for
 * this net namespace and re-queue ourselves for the next one.
 */
static void
laundromat_main(struct work_struct *laundry)
{
	time_t t;
	struct delayed_work *dwork = container_of(laundry, struct delayed_work,
						  work);
	struct nfsd_net *nn = container_of(dwork, struct nfsd_net,
					   laundromat_work);

	t = nfs4_laundromat(nn);
	dprintk("NFSD: laundromat_main - sleeping for %ld seconds\n", t);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
}

4111
static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp)
L
Linus Torvalds 已提交
4112
{
4113
	if (!nfsd_fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle))
4114 4115
		return nfserr_bad_stateid;
	return nfs_ok;
L
Linus Torvalds 已提交
4116 4117 4118
}

static inline int
4119
access_permit_read(struct nfs4_ol_stateid *stp)
L
Linus Torvalds 已提交
4120
{
4121 4122 4123
	return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
		test_access(NFS4_SHARE_ACCESS_WRITE, stp);
L
Linus Torvalds 已提交
4124 4125 4126
}

static inline int
4127
access_permit_write(struct nfs4_ol_stateid *stp)
L
Linus Torvalds 已提交
4128
{
4129 4130
	return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
		test_access(NFS4_SHARE_ACCESS_BOTH, stp);
L
Linus Torvalds 已提交
4131 4132 4133
}

/*
 * Check that the stateid's open mode covers the access (RD_STATE and/or
 * WR_STATE) the caller is requesting; nfserr_openmode otherwise.
 */
static
__be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
{
        __be32 status = nfserr_openmode;

	/* For lock stateid's, we test the parent open, not the lock: */
	if (stp->st_openstp)
		stp = stp->st_openstp;
	if ((flags & WR_STATE) && !access_permit_write(stp))
                goto out;
	if ((flags & RD_STATE) && !access_permit_read(stp))
                goto out;
	status = nfs_ok;
out:
	return status;
}

/*
 * Handle I/O using the special all-zeros / all-ones stateids, which
 * carry no open state: allow reads with the ONE stateid, defer during
 * grace, and otherwise check for conflicting share reservations.
 */
static inline __be32
check_special_stateids(struct net *net, svc_fh *current_fh, stateid_t *stateid, int flags)
{
	if (ONE_STATEID(stateid) && (flags & RD_STATE))
		return nfs_ok;
	else if (locks_in_grace(net)) {
		/* Answer in remaining cases depends on existence of
		 * conflicting state; so we must wait out the grace period. */
		return nfserr_grace;
	} else if (flags & WR_STATE)
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_WRITE);
	else /* (flags & RD_STATE) && ZERO_STATEID(stateid) */
		return nfs4_share_conflict(current_fh,
				NFS4_SHARE_DENY_READ);
}

/*
 * Allow READ/WRITE during grace period on recovered state only for files
 * that are not able to provide mandatory locking.
 */
static inline int
grace_disallows_io(struct net *net, struct inode *inode)
{
	if (!locks_in_grace(net))
		return 0;
	return mandatory_lock(inode) != 0;
}

4177 4178 4179
/* Returns true iff a is later than b: */
static bool stateid_generation_after(stateid_t *a, stateid_t *b)
{
J
Jim Rees 已提交
4180
	return (s32)(a->si_generation - b->si_generation) > 0;
4181 4182
}

/*
 * Compare an incoming stateid's generation against our reference copy:
 * nfs_ok when current, nfserr_bad_stateid from the future,
 * nfserr_old_stateid from the past.
 */
static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
{
	/*
	 * When sessions are used the stateid generation number is ignored
	 * when it is zero.
	 */
	if (has_session && in->si_generation == 0)
		return nfs_ok;

	if (in->si_generation == ref->si_generation)
		return nfs_ok;

	/* If the client sends us a stateid from the future, it's buggy: */
	if (stateid_generation_after(in, ref))
		return nfserr_bad_stateid;
	/*
	 * However, we could see a stateid from the past, even from a
	 * non-buggy client.  For example, if the client sends a lock
	 * while some IO is outstanding, the lock may bump si_generation
	 * while the IO is still in flight.  The client could avoid that
	 * situation by waiting for responses on all the IO requests,
	 * but better performance may result in retrying IO that
	 * receives an old_stateid error if requests are rarely
	 * reordered in flight:
	 */
	return nfserr_old_stateid;
}

/*
 * TEST_STATEID helper: classify one stateid for the given client and
 * return the per-stateid status to report (nfs_ok, nfserr_bad_stateid,
 * nfserr_deleg_revoked, or a generation error).
 */
static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
{
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *ols;
	__be32 status = nfserr_bad_stateid;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return status;
	/* Client debugging aid. */
	if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
		char addr_str[INET6_ADDRSTRLEN];
		rpc_ntop((struct sockaddr *)&cl->cl_addr, addr_str,
				 sizeof(addr_str));
		pr_warn_ratelimited("NFSD: client %s testing state ID "
					"with incorrect client ID\n", addr_str);
		return status;
	}
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	/* TEST_STATEID is 4.1-only, so has_session is always true: */
	status = check_stateid_generation(stateid, &s->sc_stateid, 1);
	if (status)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		status = nfs_ok;
		break;
	case NFS4_REVOKED_DELEG_STID:
		status = nfserr_deleg_revoked;
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		/* Stateids from an unconfirmed open owner are not valid: */
		ols = openlockstateid(s);
		if (ols->st_stateowner->so_is_open_owner
	    			&& !(openowner(ols->st_stateowner)->oo_flags
						& NFS4_OO_CONFIRMED))
			status = nfserr_bad_stateid;
		else
			status = nfs_ok;
		break;
	default:
		printk("unknown stateid type %x\n", s->sc_type);
		/* Fallthrough */
	case NFS4_CLOSED_STID:
	case NFS4_CLOSED_DELEG_STID:
		status = nfserr_bad_stateid;
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
	return status;
}

/*
 * Find the nfs4_stid for a stateid, restricted to the given typemask.
 * On success, *s holds a reference the caller must put.
 */
static __be32
nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
		     stateid_t *stateid, unsigned char typemask,
		     struct nfs4_stid **s, struct nfsd_net *nn)
{
	__be32 status;

	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return nfserr_bad_stateid;
	status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
	if (status == nfserr_stale_clientid) {
		/* With a session, the clientid is implicit, so a stale
		 * embedded clientid just makes the stateid bad: */
		if (cstate->session)
			return nfserr_bad_stateid;
		return nfserr_stale_stateid;
	}
	if (status)
		return status;
	*s = find_stateid_by_type(cstate->clp, stateid, typemask);
	if (!*s)
		return nfserr_bad_stateid;
	return nfs_ok;
}

/*
* Checks for stateid operations
*/
/*
 * Validate the stateid an I/O operation (READ/WRITE/SETATTR...) carries
 * and, when filpp is non-NULL, return a referenced struct file to do
 * the I/O with.
 */
__be32
nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
			   stateid_t *stateid, int flags, struct file **filpp)
{
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;
	struct nfs4_delegation *dp = NULL;
	struct svc_fh *current_fh = &cstate->current_fh;
	struct inode *ino = current_fh->fh_dentry->d_inode;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	struct file *file = NULL;
	__be32 status;

	if (filpp)
		*filpp = NULL;

	if (grace_disallows_io(net, ino))
		return nfserr_grace;

	/* The special stateids carry no open state at all: */
	if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
		return check_special_stateids(net, current_fh, stateid, flags);

	nfs4_lock_state();

	status = nfsd4_lookup_stateid(cstate, stateid,
				NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
				&s, nn);
	if (status)
		goto unlock_state;
	status = check_stateid_generation(stateid, &s->sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto out;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		dp = delegstateid(s);
		status = nfs4_check_delegmode(dp, flags);
		if (status)
			goto out;
		if (filpp) {
			file = dp->dl_stid.sc_file->fi_deleg_file;
			if (!file) {
				/* A delegation without its file is a bug: */
				WARN_ON_ONCE(1);
				status = nfserr_serverfault;
				goto out;
			}
			get_file(file);
		}
		break;
	case NFS4_OPEN_STID:
	case NFS4_LOCK_STID:
		stp = openlockstateid(s);
		status = nfs4_check_fh(current_fh, stp);
		if (status)
			goto out;
		/*
		 * NOTE(review): an unconfirmed open owner bails out here
		 * with whatever 'status' currently holds (nfs_ok after the
		 * fh check) — looks like it should be an error; confirm
		 * against upstream before changing.
		 */
		if (stp->st_stateowner->so_is_open_owner
		    && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
			goto out;
		status = nfs4_check_openmode(stp, flags);
		if (status)
			goto out;
		if (filpp) {
			struct nfs4_file *fp = stp->st_stid.sc_file;

			if (flags & RD_STATE)
				file = find_readable_file(fp);
			else
				file = find_writeable_file(fp);
		}
		break;
	default:
		status = nfserr_bad_stateid;
		goto out;
	}
	status = nfs_ok;
	if (file)
		*filpp = file;
out:
	nfs4_put_stid(s);
unlock_state:
	nfs4_unlock_state();
	return status;
}

/*
 * FREE_STATEID helper for lock stateids: refuse while the lockowner
 * still holds locks on the file, otherwise release the stateid.
 */
static __be32
nfsd4_free_lock_stateid(struct nfs4_ol_stateid *stp)
{
	struct nfs4_lockowner *lo = lockowner(stp->st_stateowner);

	if (check_for_locks(stp->st_stid.sc_file, lo))
		return nfserr_locks_held;
	release_lock_stateid(stp);
	return nfs_ok;
}

/*
 * Test if the stateid is valid
 */
__be32
nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_test_stateid *test_stateid)
{
	struct nfsd4_test_stateid_id *stateid;
	struct nfs4_client *cl = cstate->session->se_client;

	nfs4_lock_state();
	/* Record a per-stateid status; the op itself always succeeds. */
	list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
		stateid->ts_id_status =
			nfsd4_validate_stateid(cl, &stateid->ts_id_stateid);
	nfs4_unlock_state();

	return nfs_ok;
}

/*
 * FREE_STATEID (4.1): let the client dispose of a stateid it no longer
 * needs — chiefly revoked delegations and lock stateids with no locks.
 * Note the LOCK and REVOKED_DELEG cases drop cl_lock before finishing
 * and jump past out_unlock.
 */
__be32
nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_free_stateid *free_stateid)
{
	stateid_t *stateid = &free_stateid->fr_stateid;
	struct nfs4_stid *s;
	struct nfs4_delegation *dp;
	struct nfs4_client *cl = cstate->session->se_client;
	__be32 ret = nfserr_bad_stateid;

	nfs4_lock_state();
	spin_lock(&cl->cl_lock);
	s = find_stateid_locked(cl, stateid);
	if (!s)
		goto out_unlock;
	switch (s->sc_type) {
	case NFS4_DELEG_STID:
		/* Live delegations can't be freed this way: */
		ret = nfserr_locks_held;
		break;
	case NFS4_OPEN_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		/* Open stateids are freed via CLOSE, not FREE_STATEID: */
		ret = nfserr_locks_held;
		break;
	case NFS4_LOCK_STID:
		ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
		if (ret)
			break;
		spin_unlock(&cl->cl_lock);
		ret = nfsd4_free_lock_stateid(openlockstateid(s));
		goto out;
	case NFS4_REVOKED_DELEG_STID:
		dp = delegstateid(s);
		list_del_init(&dp->dl_recall_lru);
		spin_unlock(&cl->cl_lock);
		nfs4_put_stid(s);
		ret = nfs_ok;
		goto out;
	/* Default falls through and returns nfserr_bad_stateid */
	}
out_unlock:
	spin_unlock(&cl->cl_lock);
out:
	nfs4_unlock_state();
	return ret;
}

4451 4452 4453 4454 4455 4456
static inline int
setlkflg (int type)
{
	return (type == NFS4_READW_LT || type == NFS4_READ_LT) ?
		RD_STATE : WR_STATE;
}
/*
 * Shared validation for seqid-mutating ops: check the owner's seqid,
 * reject closed/revoked stateids, verify the generation, and make sure
 * the stateid belongs to the current filehandle.
 */
static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_t *stateid, u32 seqid, struct nfs4_ol_stateid *stp)
{
	struct svc_fh *current_fh = &cstate->current_fh;
	struct nfs4_stateowner *sop = stp->st_stateowner;
	__be32 status;

	status = nfsd4_check_seqid(cstate, sop, seqid);
	if (status)
		return status;
	if (stp->st_stid.sc_type == NFS4_CLOSED_STID
		|| stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
		/*
		 * "Closed" stateid's exist *only* to return
		 * nfserr_replay_me from the previous step, and
		 * revoked delegations are kept only for free_stateid.
		 */
		return nfserr_bad_stateid;
	status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		return status;
	return nfs4_check_fh(current_fh, stp);
}

/* 
 * Checks for sequence id mutating operations. 
 */
/*
 * Look up the open/lock stateid for a seqid-mutating op and run the
 * standard checks.  On success, *stpp holds a reference the caller must
 * put; the stateowner is always stashed for replay handling.
 */
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
			 stateid_t *stateid, char typemask,
			 struct nfs4_ol_stateid **stpp,
			 struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_stid *s;
	struct nfs4_ol_stateid *stp = NULL;

	dprintk("NFSD: %s: seqid=%d stateid = " STATEID_FMT "\n", __func__,
		seqid, STATEID_VAL(stateid));

	*stpp = NULL;
	status = nfsd4_lookup_stateid(cstate, stateid, typemask, &s, nn);
	if (status)
		return status;
	stp = openlockstateid(s);
	nfsd4_cstate_assign_replay(cstate, stp->st_stateowner);

	status = nfs4_seqid_op_checks(cstate, stateid, seqid, stp);
	if (!status)
		*stpp = stp;
	else
		nfs4_put_stid(&stp->st_stid);
	return status;
}
4511

/*
 * Like nfs4_preprocess_seqid_op for OPEN stateids, but additionally
 * requires the open owner to have been confirmed.
 */
static __be32 nfs4_preprocess_confirmed_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,
						 stateid_t *stateid, struct nfs4_ol_stateid **stpp, struct nfsd_net *nn)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;

	status = nfs4_preprocess_seqid_op(cstate, seqid, stateid,
						NFS4_OPEN_STID, &stp, nn);
	if (status)
		return status;
	oo = openowner(stp->st_stateowner);
	if (!(oo->oo_flags & NFS4_OO_CONFIRMED)) {
		/* Drop the reference taken by nfs4_preprocess_seqid_op: */
		nfs4_put_stid(&stp->st_stid);
		return nfserr_bad_stateid;
	}
	*stpp = stp;
	return nfs_ok;
}

/*
 * OPEN_CONFIRM (NFSv4.0 only): confirm a new open owner, bump the
 * stateid, and record the client for lease recovery.  A second confirm
 * of the same owner fails with nfserr_bad_stateid.
 */
__be32
nfsd4_open_confirm(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		   struct nfsd4_open_confirm *oc)
{
	__be32 status;
	struct nfs4_openowner *oo;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_confirm on file %pd\n",
			cstate->current_fh.fh_dentry);

	status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0);
	if (status)
		return status;

	nfs4_lock_state();

	status = nfs4_preprocess_seqid_op(cstate,
					oc->oc_seqid, &oc->oc_req_stateid,
					NFS4_OPEN_STID, &stp, nn);
	if (status)
		goto out;
	oo = openowner(stp->st_stateowner);
	status = nfserr_bad_stateid;
	if (oo->oo_flags & NFS4_OO_CONFIRMED)
		goto put_stateid;
	oo->oo_flags |= NFS4_OO_CONFIRMED;
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&oc->oc_resp_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	dprintk("NFSD: %s: success, seqid=%d stateid=" STATEID_FMT "\n",
		__func__, oc->oc_seqid, STATEID_VAL(&stp->st_stid.sc_stateid));

	/* First confirmed open: record the client on stable storage. */
	nfsd4_client_record_create(oo->oo_owner.so_client);
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	nfs4_unlock_state();
	return status;
}

/*
 * Drop one share-access bit from an open stateid, releasing the
 * corresponding reference on the file's access count.  No-op when the
 * bit isn't set.
 */
static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
{
	if (!test_access(access, stp))
		return;
	nfs4_file_put_access(stp->st_stid.sc_file, access);
	clear_access(access, stp);
}
4582

/*
 * Downgrade an open stateid's share access to exactly to_access by
 * clearing every other access bit.  BOTH keeps everything; anything
 * else is a caller bug.
 */
static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
{
	switch (to_access) {
	case NFS4_SHARE_ACCESS_READ:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_WRITE);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_WRITE:
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_READ);
		nfs4_stateid_downgrade_bit(stp, NFS4_SHARE_ACCESS_BOTH);
		break;
	case NFS4_SHARE_ACCESS_BOTH:
		break;
	default:
		WARN_ON_ONCE(1);
	}
}

/*
 * OPEN_DOWNGRADE: reduce the share access/deny of an open stateid.  The
 * requested modes must be a subset of what the stateid already holds,
 * else nfserr_inval.
 */
__be32
nfsd4_open_downgrade(struct svc_rqst *rqstp,
		     struct nfsd4_compound_state *cstate,
		     struct nfsd4_open_downgrade *od)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	dprintk("NFSD: nfsd4_open_downgrade on file %pd\n", 
			cstate->current_fh.fh_dentry);

	/* We don't yet support WANT bits: */
	if (od->od_deleg_want)
		dprintk("NFSD: %s: od_deleg_want=0x%x ignored\n", __func__,
			od->od_deleg_want);

	nfs4_lock_state();
	status = nfs4_preprocess_confirmed_seqid_op(cstate, od->od_seqid,
					&od->od_stateid, &stp, nn);
	if (status)
		goto out; 
	status = nfserr_inval;
	if (!test_access(od->od_share_access, stp)) {
		dprintk("NFSD: access not a subset of current bitmap: 0x%hhx, input access=%08x\n",
			stp->st_access_bmap, od->od_share_access);
		goto put_stateid;
	}
	if (!test_deny(od->od_share_deny, stp)) {
		dprintk("NFSD: deny not a subset of current bitmap: 0x%hhx, input deny=%08x\n",
			stp->st_deny_bmap, od->od_share_deny);
		goto put_stateid;
	}
	nfs4_stateid_downgrade(stp, od->od_share_access);

	reset_union_bmap_deny(od->od_share_deny, stp);

	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
	status = nfs_ok;
put_stateid:
	nfs4_put_stid(&stp->st_stid);
out:
	nfsd4_bump_seqid(cstate, status);
	nfs4_unlock_state();
	return status;
}

/*
 * Tear down an open stateid at CLOSE.  For 4.0 clients the closed
 * stateid is parked on the close LRU so a seqid replay can still find
 * it; 4.1+ clients don't replay, so drop it immediately.
 */
static void nfsd4_close_open_stateid(struct nfs4_ol_stateid *s)
{
	struct nfs4_client *clp = s->st_stid.sc_client;

	s->st_stid.sc_type = NFS4_CLOSED_STID;
	unhash_open_stateid(s);

	if (clp->cl_minorversion)
		nfs4_put_stid(&s->st_stid);
	else
		move_to_close_lru(s, clp->net);
}

/*
 * nfs4_unlock_state() called after encode
 */
/*
 * CLOSE: run the seqid checks, bump the stateid for the reply, and tear
 * down the open state.
 */
__be32
nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_close *close)
{
	__be32 status;
	struct nfs4_ol_stateid *stp;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	dprintk("NFSD: nfsd4_close on file %pd\n", 
			cstate->current_fh.fh_dentry);

	nfs4_lock_state();
	/* NFS4_CLOSED_STID is allowed so a replayed CLOSE is detected: */
	status = nfs4_preprocess_seqid_op(cstate, close->cl_seqid,
					&close->cl_stateid,
					NFS4_OPEN_STID|NFS4_CLOSED_STID,
					&stp, nn);
	nfsd4_bump_seqid(cstate, status);
	if (status)
		goto out; 
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));

	nfsd4_close_open_stateid(stp);

	/* put reference from nfs4_preprocess_seqid_op */
	nfs4_put_stid(&stp->st_stid);
out:
	nfs4_unlock_state();
	return status;
}

/*
 * DELEGRETURN: the client hands back a delegation.  Validates the
 * stateid (must be a delegation stateid with current generation) and
 * destroys the delegation.
 */
__be32
nfsd4_delegreturn(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
		  struct nfsd4_delegreturn *dr)
{
	struct nfs4_delegation *dp;
	stateid_t *stateid = &dr->dr_stateid;
	struct nfs4_stid *s;
	__be32 status;
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
		return status;

	nfs4_lock_state();
	status = nfsd4_lookup_stateid(cstate, stateid, NFS4_DELEG_STID, &s, nn);
	if (status)
		goto out;
	dp = delegstateid(s);
	status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
	if (status)
		goto put_stateid;

	destroy_delegation(dp);
put_stateid:
	/* Drop the lookup reference: */
	nfs4_put_stid(&dp->dl_stid);
out:
	nfs4_unlock_state();

	return status;
}


/* True when start + len would overflow a u64: */
#define LOFF_OVERFLOW(start, len)      ((u64)(len) > ~(u64)(start))

/* First octet past a range, saturating at NFS4_MAX_UINT64 on overflow: */
static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end: NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	/* A zero-length range has no last byte; callers must not do that. */
	WARN_ON_ONCE(!len);
	end = start + len;
	return end > start ? end - 1: NFS4_MAX_UINT64;
}

/*
 * TODO: Linux file offsets are _signed_ 64-bit quantities, which means that
 * we can't properly handle lock requests that go beyond the (2^63 - 1)-th
 * byte, because of sign extension problems.  Since NFSv4 calls for 64-bit
 * locking, this prevents us from being completely protocol-compliant.  The
 * real solution to this problem is to start using unsigned file offsets in
 * the VFS, but this is a very deep change!
 */
static inline void
nfs4_transform_lock_offset(struct file_lock *lock)
{
	/* Clamp negative (sign-extended) offsets to the VFS maximum: */
	if (lock->fl_start < 0)
		lock->fl_start = OFFSET_MAX;
	if (lock->fl_end < 0)
		lock->fl_end = OFFSET_MAX;
}

4768 4769
/* Hack!: For now, we're defining this just so we can use a pointer to it
 * as a unique cookie to identify our (NFSv4's) posix locks. */
4770
static const struct lock_manager_operations nfsd_posix_mng_ops  = {
4771
};
L
Linus Torvalds 已提交
4772 4773 4774 4775

static inline void
nfs4_set_lock_denied(struct file_lock *fl, struct nfsd4_lock_denied *deny)
{
4776
	struct nfs4_lockowner *lo;
L
Linus Torvalds 已提交
4777

4778
	if (fl->fl_lmops == &nfsd_posix_mng_ops) {
4779 4780 4781
		lo = (struct nfs4_lockowner *) fl->fl_owner;
		deny->ld_owner.data = kmemdup(lo->lo_owner.so_owner.data,
					lo->lo_owner.so_owner.len, GFP_KERNEL);
4782 4783 4784
		if (!deny->ld_owner.data)
			/* We just don't care that much */
			goto nevermind;
4785 4786
		deny->ld_owner.len = lo->lo_owner.so_owner.len;
		deny->ld_clientid = lo->lo_owner.so_client->cl_clientid;
4787
	} else {
4788 4789 4790
nevermind:
		deny->ld_owner.len = 0;
		deny->ld_owner.data = NULL;
4791 4792
		deny->ld_clientid.cl_boot = 0;
		deny->ld_clientid.cl_id = 0;
L
Linus Torvalds 已提交
4793 4794
	}
	deny->ld_start = fl->fl_start;
B
Benny Halevy 已提交
4795 4796
	deny->ld_length = NFS4_MAX_UINT64;
	if (fl->fl_end != NFS4_MAX_UINT64)
L
Linus Torvalds 已提交
4797 4798 4799 4800 4801 4802
		deny->ld_length = fl->fl_end - fl->fl_start + 1;        
	deny->ld_type = NFS4_READ_LT;
	if (fl->fl_type != F_RDLCK)
		deny->ld_type = NFS4_WRITE_LT;
}

4803
static struct nfs4_lockowner *
4804 4805
find_lockowner_str(clientid_t *clid, struct xdr_netobj *owner,
		struct nfsd_net *nn)
L
Linus Torvalds 已提交
4806
{
4807 4808
	unsigned int strhashval = ownerstr_hashval(clid->cl_id, owner);
	struct nfs4_stateowner *so;
L
Linus Torvalds 已提交
4809

4810 4811 4812 4813 4814
	list_for_each_entry(so, &nn->ownerstr_hashtbl[strhashval], so_strhash) {
		if (so->so_is_open_owner)
			continue;
		if (!same_owner_str(so, owner, clid))
			continue;
4815
		atomic_inc(&so->so_count);
4816
		return lockowner(so);
L
Linus Torvalds 已提交
4817 4818 4819 4820
	}
	return NULL;
}

/* stateowner op: unhash the lockowner embedded in @sop */
static void nfs4_unhash_lockowner(struct nfs4_stateowner *sop)
{
	unhash_lockowner(lockowner(sop));
}

4826 4827 4828 4829 4830 4831 4832 4833
static void nfs4_free_lockowner(struct nfs4_stateowner *sop)
{
	struct nfs4_lockowner *lo = lockowner(sop);

	kmem_cache_free(lockowner_slab, lo);
}

static const struct nfs4_stateowner_operations lockowner_ops = {
4834 4835
	.so_unhash =	nfs4_unhash_lockowner,
	.so_free =	nfs4_free_lockowner,
4836 4837
};

L
Linus Torvalds 已提交
4838 4839 4840
/*
 * Alloc a lock owner structure.
 * Called in nfsd4_lock - therefore, OPEN and OPEN_CONFIRM (if needed) has 
L
Lucas De Marchi 已提交
4841
 * occurred. 
L
Linus Torvalds 已提交
4842
 *
4843
 * strhashval = ownerstr_hashval
L
Linus Torvalds 已提交
4844
 */
4845
static struct nfs4_lockowner *
4846
alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp, struct nfs4_ol_stateid *open_stp, struct nfsd4_lock *lock) {
4847
	struct nfs4_lockowner *lo;
4848
	struct nfsd_net *nn = net_generic(clp->net, nfsd_net_id);
L
Linus Torvalds 已提交
4849

4850 4851
	lo = alloc_stateowner(lockowner_slab, &lock->lk_new_owner, clp);
	if (!lo)
L
Linus Torvalds 已提交
4852
		return NULL;
4853 4854
	INIT_LIST_HEAD(&lo->lo_owner.so_stateids);
	lo->lo_owner.so_is_open_owner = 0;
4855
	lo->lo_owner.so_seqid = lock->lk_new_lock_seqid;
4856
	lo->lo_owner.so_ops = &lockowner_ops;
4857
	list_add(&lo->lo_owner.so_strhash, &nn->ownerstr_hashtbl[strhashval]);
4858
	return lo;
L
Linus Torvalds 已提交
4859 4860
}

4861 4862 4863 4864
static void
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
		  struct nfs4_file *fp, struct inode *inode,
		  struct nfs4_ol_stateid *open_stp)
L
Linus Torvalds 已提交
4865
{
4866
	struct nfs4_client *clp = lo->lo_owner.so_client;
L
Linus Torvalds 已提交
4867

4868 4869
	lockdep_assert_held(&clp->cl_lock);

4870
	atomic_inc(&stp->st_stid.sc_count);
J
J. Bruce Fields 已提交
4871
	stp->st_stid.sc_type = NFS4_LOCK_STID;
4872
	stp->st_stateowner = &lo->lo_owner;
4873
	atomic_inc(&lo->lo_owner.so_count);
4874
	get_nfs4_file(fp);
4875
	stp->st_stid.sc_file = fp;
4876
	stp->st_stid.sc_free = nfs4_free_lock_stateid;
J
J. Bruce Fields 已提交
4877
	stp->st_access_bmap = 0;
L
Linus Torvalds 已提交
4878
	stp->st_deny_bmap = open_stp->st_deny_bmap;
4879
	stp->st_openstp = open_stp;
4880
	list_add(&stp->st_locks, &open_stp->st_locks);
4881
	list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
4882 4883 4884
	spin_lock(&fp->fi_lock);
	list_add(&stp->st_perfile, &fp->fi_stateids);
	spin_unlock(&fp->fi_lock);
L
Linus Torvalds 已提交
4885 4886
}

4887 4888 4889 4890
static struct nfs4_ol_stateid *
find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
{
	struct nfs4_ol_stateid *lst;
4891 4892 4893
	struct nfs4_client *clp = lo->lo_owner.so_client;

	lockdep_assert_held(&clp->cl_lock);
4894 4895

	list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
4896 4897
		if (lst->st_stid.sc_file == fp) {
			atomic_inc(&lst->st_stid.sc_count);
4898
			return lst;
4899
		}
4900 4901 4902 4903
	}
	return NULL;
}

4904 4905 4906 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4924 4925 4926 4927 4928 4929 4930 4931 4932 4933 4934 4935
/*
 * Return an existing lock stateid for (lo, fi), or install and initialize
 * a new one.  The returned stateid carries a reference (sc_count is bumped
 * by find_lock_stateid()/init_lock_stateid()).  *new is set to true only
 * when a fresh stateid was installed.  Returns NULL on allocation failure.
 */
static struct nfs4_ol_stateid *
find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
			    struct inode *inode, struct nfs4_ol_stateid *ost,
			    bool *new)
{
	struct nfs4_stid *ns = NULL;
	struct nfs4_ol_stateid *lst;
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *clp = oo->oo_owner.so_client;

	spin_lock(&clp->cl_lock);
	lst = find_lock_stateid(lo, fi);
	if (lst == NULL) {
		/* cl_lock is dropped across the allocation ... */
		spin_unlock(&clp->cl_lock);
		ns = nfs4_alloc_stid(clp, stateid_slab);
		if (ns == NULL)
			return NULL;

		/* ... so we must recheck for a racing insertion afterward */
		spin_lock(&clp->cl_lock);
		lst = find_lock_stateid(lo, fi);
		if (likely(!lst)) {
			lst = openlockstateid(ns);
			init_lock_stateid(lst, lo, fi, inode, ost);
			ns = NULL;
			*new = true;
		}
	}
	spin_unlock(&clp->cl_lock);
	if (ns)
		/* lost the race: drop the stid we allocated but didn't use */
		nfs4_put_stid(ns);
	return lst;
}
4936

4937
static int
L
Linus Torvalds 已提交
4938 4939
check_lock_length(u64 offset, u64 length)
{
B
Benny Halevy 已提交
4940
	return ((length == 0)  || ((length != NFS4_MAX_UINT64) &&
L
Linus Torvalds 已提交
4941 4942 4943
	     LOFF_OVERFLOW(offset, length)));
}

4944
static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
J
J. Bruce Fields 已提交
4945
{
4946
	struct nfs4_file *fp = lock_stp->st_stid.sc_file;
J
J. Bruce Fields 已提交
4947

4948 4949
	lockdep_assert_held(&fp->fi_lock);

4950
	if (test_access(access, lock_stp))
J
J. Bruce Fields 已提交
4951
		return;
4952
	__nfs4_file_get_access(fp, access);
4953
	set_access(access, lock_stp);
J
J. Bruce Fields 已提交
4954 4955
}

4956 4957 4958 4959 4960
static __be32
lookup_or_create_lock_state(struct nfsd4_compound_state *cstate,
			    struct nfs4_ol_stateid *ost,
			    struct nfsd4_lock *lock,
			    struct nfs4_ol_stateid **lst, bool *new)
4961
{
4962
	__be32 status;
4963
	struct nfs4_file *fi = ost->st_stid.sc_file;
4964 4965
	struct nfs4_openowner *oo = openowner(ost->st_stateowner);
	struct nfs4_client *cl = oo->oo_owner.so_client;
4966
	struct inode *inode = cstate->current_fh.fh_dentry->d_inode;
4967 4968
	struct nfs4_lockowner *lo;
	unsigned int strhashval;
4969
	struct nfsd_net *nn = net_generic(cl->net, nfsd_net_id);
4970

4971
	lo = find_lockowner_str(&cl->cl_clientid, &lock->v.new.owner, nn);
4972 4973 4974 4975 4976 4977 4978 4979
	if (!lo) {
		strhashval = ownerstr_hashval(cl->cl_clientid.cl_id,
				&lock->v.new.owner);
		lo = alloc_init_lock_stateowner(strhashval, cl, ost, lock);
		if (lo == NULL)
			return nfserr_jukebox;
	} else {
		/* with an existing lockowner, seqids must be the same */
4980
		status = nfserr_bad_seqid;
4981 4982
		if (!cstate->minorversion &&
		    lock->lk_new_lock_seqid != lo->lo_owner.so_seqid)
4983
			goto out;
4984
	}
4985

4986
	*lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
4987
	if (*lst == NULL) {
4988 4989
		status = nfserr_jukebox;
		goto out;
4990
	}
4991 4992 4993 4994
	status = nfs_ok;
out:
	nfs4_put_stateowner(&lo->lo_owner);
	return status;
4995 4996
}

L
Linus Torvalds 已提交
4997 4998 4999
/*
 *  LOCK operation 
 */
5000
__be32
5001
nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5002
	   struct nfsd4_lock *lock)
L
Linus Torvalds 已提交
5003
{
5004 5005
	struct nfs4_openowner *open_sop = NULL;
	struct nfs4_lockowner *lock_sop = NULL;
5006
	struct nfs4_ol_stateid *lock_stp = NULL;
5007
	struct nfs4_ol_stateid *open_stp = NULL;
5008
	struct nfs4_file *fp;
5009
	struct file *filp = NULL;
5010 5011
	struct file_lock *file_lock = NULL;
	struct file_lock *conflock = NULL;
5012
	__be32 status = 0;
5013
	int lkflg;
5014
	int err;
5015
	bool new = false;
5016 5017
	struct net *net = SVC_NET(rqstp);
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
L
Linus Torvalds 已提交
5018 5019 5020 5021 5022 5023 5024 5025

	dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
		(long long) lock->lk_offset,
		(long long) lock->lk_length);

	if (check_lock_length(lock->lk_offset, lock->lk_length))
		 return nfserr_inval;

5026
	if ((status = fh_verify(rqstp, &cstate->current_fh,
M
Miklos Szeredi 已提交
5027
				S_IFREG, NFSD_MAY_LOCK))) {
A
Andy Adamson 已提交
5028 5029 5030 5031
		dprintk("NFSD: nfsd4_lock: permission denied!\n");
		return status;
	}

L
Linus Torvalds 已提交
5032 5033 5034
	nfs4_lock_state();

	if (lock->lk_is_new) {
5035 5036 5037 5038 5039 5040
		if (nfsd4_has_session(cstate))
			/* See rfc 5661 18.10.3: given clientid is ignored: */
			memcpy(&lock->v.new.clientid,
				&cstate->session->se_client->cl_clientid,
				sizeof(clientid_t));

L
Linus Torvalds 已提交
5041
		status = nfserr_stale_clientid;
5042
		if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
L
Linus Torvalds 已提交
5043 5044 5045
			goto out;

		/* validate and update open stateid and open seqid */
5046
		status = nfs4_preprocess_confirmed_seqid_op(cstate,
L
Linus Torvalds 已提交
5047 5048
				        lock->lk_new_open_seqid,
		                        &lock->lk_new_open_stateid,
5049
					&open_stp, nn);
5050
		if (status)
L
Linus Torvalds 已提交
5051
			goto out;
5052
		open_sop = openowner(open_stp->st_stateowner);
5053
		status = nfserr_bad_stateid;
5054
		if (!same_clid(&open_sop->oo_owner.so_client->cl_clientid,
5055 5056
						&lock->v.new.clientid))
			goto out;
5057
		status = lookup_or_create_lock_state(cstate, open_stp, lock,
5058
							&lock_stp, &new);
5059
	} else {
5060
		status = nfs4_preprocess_seqid_op(cstate,
5061 5062
				       lock->lk_old_lock_seqid,
				       &lock->lk_old_lock_stateid,
5063
				       NFS4_LOCK_STID, &lock_stp, nn);
5064
	}
J
J. Bruce Fields 已提交
5065 5066
	if (status)
		goto out;
5067
	lock_sop = lockowner(lock_stp->st_stateowner);
L
Linus Torvalds 已提交
5068

5069 5070 5071 5072 5073
	lkflg = setlkflg(lock->lk_type);
	status = nfs4_check_openmode(lock_stp, lkflg);
	if (status)
		goto out;

5074
	status = nfserr_grace;
5075
	if (locks_in_grace(net) && !lock->lk_reclaim)
5076 5077
		goto out;
	status = nfserr_no_grace;
5078
	if (!locks_in_grace(net) && lock->lk_reclaim)
5079 5080
		goto out;

5081 5082 5083 5084 5085 5086 5087
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}

5088
	fp = lock_stp->st_stid.sc_file;
5089
	locks_init_lock(file_lock);
L
Linus Torvalds 已提交
5090 5091 5092
	switch (lock->lk_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
5093 5094
			spin_lock(&fp->fi_lock);
			filp = find_readable_file_locked(fp);
J
J. Bruce Fields 已提交
5095 5096
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_READ);
5097
			spin_unlock(&fp->fi_lock);
5098
			file_lock->fl_type = F_RDLCK;
5099
			break;
L
Linus Torvalds 已提交
5100 5101
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
5102 5103
			spin_lock(&fp->fi_lock);
			filp = find_writeable_file_locked(fp);
J
J. Bruce Fields 已提交
5104 5105
			if (filp)
				get_lock_access(lock_stp, NFS4_SHARE_ACCESS_WRITE);
5106
			spin_unlock(&fp->fi_lock);
5107
			file_lock->fl_type = F_WRLCK;
5108
			break;
L
Linus Torvalds 已提交
5109 5110 5111 5112
		default:
			status = nfserr_inval;
		goto out;
	}
5113 5114 5115 5116
	if (!filp) {
		status = nfserr_openmode;
		goto out;
	}
5117 5118 5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131
	file_lock->fl_owner = (fl_owner_t)lock_sop;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = lock->lk_offset;
	file_lock->fl_end = last_byte_offset(lock->lk_offset, lock->lk_length);
	nfs4_transform_lock_offset(file_lock);

	conflock = locks_alloc_lock();
	if (!conflock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}
L
Linus Torvalds 已提交
5132

5133
	err = vfs_lock_file(filp, F_SETLK, file_lock, conflock);
5134
	switch (-err) {
L
Linus Torvalds 已提交
5135
	case 0: /* success! */
5136 5137
		update_stateid(&lock_stp->st_stid.sc_stateid);
		memcpy(&lock->lk_resp_stateid, &lock_stp->st_stid.sc_stateid, 
L
Linus Torvalds 已提交
5138
				sizeof(stateid_t));
5139
		status = 0;
5140 5141 5142 5143
		break;
	case (EAGAIN):		/* conflock holds conflicting lock */
		status = nfserr_denied;
		dprintk("NFSD: nfsd4_lock: conflicting lock found!\n");
5144
		nfs4_set_lock_denied(conflock, &lock->lk_denied);
5145
		break;
L
Linus Torvalds 已提交
5146 5147
	case (EDEADLK):
		status = nfserr_deadlock;
5148
		break;
5149
	default:
5150
		dprintk("NFSD: nfsd4_lock: vfs_lock_file() failed! status %d\n",err);
5151
		status = nfserrno(err);
5152
		break;
L
Linus Torvalds 已提交
5153 5154
	}
out:
5155 5156
	if (filp)
		fput(filp);
5157 5158 5159 5160 5161 5162 5163 5164 5165 5166 5167 5168 5169 5170
	if (lock_stp) {
		/* Bump seqid manually if the 4.0 replay owner is openowner */
		if (cstate->replay_owner &&
		    cstate->replay_owner != &lock_sop->lo_owner &&
		    seqid_mutating_err(ntohl(status)))
			lock_sop->lo_owner.so_seqid++;

		/*
		 * If this is a new, never-before-used stateid, and we are
		 * returning an error, then just go ahead and release it.
		 */
		if (status && new)
			release_lock_stateid(lock_stp);

5171
		nfs4_put_stid(&lock_stp->st_stid);
5172
	}
5173 5174
	if (open_stp)
		nfs4_put_stid(&open_stp->st_stid);
5175
	nfsd4_bump_seqid(cstate, status);
5176
	nfs4_unlock_state();
5177 5178 5179 5180
	if (file_lock)
		locks_free_lock(file_lock);
	if (conflock)
		locks_free_lock(conflock);
L
Linus Torvalds 已提交
5181 5182 5183
	return status;
}

5184 5185 5186 5187 5188 5189
/*
 * The NFSv4 spec allows a client to do a LOCKT without holding an OPEN,
 * so we do a temporary open here just to get an open file to pass to
 * vfs_test_lock.  (Arguably perhaps test_lock should be done with an
 * inode operation.)
 */
5190
static __be32 nfsd_test_lock(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file_lock *lock)
5191 5192
{
	struct file *file;
5193 5194 5195 5196 5197
	__be32 err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
	if (!err) {
		err = nfserrno(vfs_test_lock(file, lock));
		nfsd_close(file);
	}
5198 5199 5200
	return err;
}

L
Linus Torvalds 已提交
5201 5202 5203
/*
 * LOCKT operation
 */
5204
__be32
5205 5206
nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
	    struct nfsd4_lockt *lockt)
L
Linus Torvalds 已提交
5207
{
5208
	struct file_lock *file_lock = NULL;
5209
	struct nfs4_lockowner *lo = NULL;
5210
	__be32 status;
5211
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
L
Linus Torvalds 已提交
5212

5213
	if (locks_in_grace(SVC_NET(rqstp)))
L
Linus Torvalds 已提交
5214 5215 5216 5217 5218 5219 5220
		return nfserr_grace;

	if (check_lock_length(lockt->lt_offset, lockt->lt_length))
		 return nfserr_inval;

	nfs4_lock_state();

5221
	if (!nfsd4_has_session(cstate)) {
5222
		status = lookup_clientid(&lockt->lt_clientid, cstate, nn);
5223 5224 5225
		if (status)
			goto out;
	}
L
Linus Torvalds 已提交
5226

5227
	if ((status = fh_verify(rqstp, &cstate->current_fh, S_IFREG, 0)))
L
Linus Torvalds 已提交
5228 5229
		goto out;

5230 5231 5232 5233 5234 5235 5236
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
		goto out;
	}
	locks_init_lock(file_lock);
L
Linus Torvalds 已提交
5237 5238 5239
	switch (lockt->lt_type) {
		case NFS4_READ_LT:
		case NFS4_READW_LT:
5240
			file_lock->fl_type = F_RDLCK;
L
Linus Torvalds 已提交
5241 5242 5243
		break;
		case NFS4_WRITE_LT:
		case NFS4_WRITEW_LT:
5244
			file_lock->fl_type = F_WRLCK;
L
Linus Torvalds 已提交
5245 5246
		break;
		default:
5247
			dprintk("NFSD: nfs4_lockt: bad lock type!\n");
L
Linus Torvalds 已提交
5248 5249 5250 5251
			status = nfserr_inval;
		goto out;
	}

5252
	lo = find_lockowner_str(&lockt->lt_clientid, &lockt->lt_owner, nn);
5253
	if (lo)
5254 5255 5256
		file_lock->fl_owner = (fl_owner_t)lo;
	file_lock->fl_pid = current->tgid;
	file_lock->fl_flags = FL_POSIX;
L
Linus Torvalds 已提交
5257

5258 5259
	file_lock->fl_start = lockt->lt_offset;
	file_lock->fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
L
Linus Torvalds 已提交
5260

5261
	nfs4_transform_lock_offset(file_lock);
L
Linus Torvalds 已提交
5262

5263
	status = nfsd_test_lock(rqstp, &cstate->current_fh, file_lock);
5264
	if (status)
5265
		goto out;
5266

5267
	if (file_lock->fl_type != F_UNLCK) {
L
Linus Torvalds 已提交
5268
		status = nfserr_denied;
5269
		nfs4_set_lock_denied(file_lock, &lockt->lt_denied);
L
Linus Torvalds 已提交
5270 5271
	}
out:
5272 5273
	if (lo)
		nfs4_put_stateowner(&lo->lo_owner);
L
Linus Torvalds 已提交
5274
	nfs4_unlock_state();
5275 5276
	if (file_lock)
		locks_free_lock(file_lock);
L
Linus Torvalds 已提交
5277 5278 5279
	return status;
}

5280
__be32
5281
nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
5282
	    struct nfsd4_locku *locku)
L
Linus Torvalds 已提交
5283
{
5284
	struct nfs4_ol_stateid *stp;
L
Linus Torvalds 已提交
5285
	struct file *filp = NULL;
5286
	struct file_lock *file_lock = NULL;
5287
	__be32 status;
5288
	int err;
5289 5290
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);

L
Linus Torvalds 已提交
5291 5292 5293 5294 5295 5296 5297 5298 5299
	dprintk("NFSD: nfsd4_locku: start=%Ld length=%Ld\n",
		(long long) locku->lu_offset,
		(long long) locku->lu_length);

	if (check_lock_length(locku->lu_offset, locku->lu_length))
		 return nfserr_inval;

	nfs4_lock_state();
									        
5300
	status = nfs4_preprocess_seqid_op(cstate, locku->lu_seqid,
5301 5302
					&locku->lu_stateid, NFS4_LOCK_STID,
					&stp, nn);
5303
	if (status)
L
Linus Torvalds 已提交
5304
		goto out;
5305
	filp = find_any_file(stp->st_stid.sc_file);
5306 5307
	if (!filp) {
		status = nfserr_lock_range;
5308
		goto put_stateid;
5309
	}
5310 5311 5312 5313
	file_lock = locks_alloc_lock();
	if (!file_lock) {
		dprintk("NFSD: %s: unable to allocate lock!\n", __func__);
		status = nfserr_jukebox;
5314
		goto fput;
5315 5316 5317
	}
	locks_init_lock(file_lock);
	file_lock->fl_type = F_UNLCK;
5318
	file_lock->fl_owner = (fl_owner_t)lockowner(stp->st_stateowner);
5319 5320 5321 5322 5323 5324 5325 5326 5327
	file_lock->fl_pid = current->tgid;
	file_lock->fl_file = filp;
	file_lock->fl_flags = FL_POSIX;
	file_lock->fl_lmops = &nfsd_posix_mng_ops;
	file_lock->fl_start = locku->lu_offset;

	file_lock->fl_end = last_byte_offset(locku->lu_offset,
						locku->lu_length);
	nfs4_transform_lock_offset(file_lock);
L
Linus Torvalds 已提交
5328

5329
	err = vfs_lock_file(filp, F_SETLK, file_lock, NULL);
5330
	if (err) {
5331
		dprintk("NFSD: nfs4_locku: vfs_lock_file failed!\n");
L
Linus Torvalds 已提交
5332 5333
		goto out_nfserr;
	}
5334 5335
	update_stateid(&stp->st_stid.sc_stateid);
	memcpy(&locku->lu_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
5336 5337
fput:
	fput(filp);
5338 5339
put_stateid:
	nfs4_put_stid(&stp->st_stid);
L
Linus Torvalds 已提交
5340
out:
5341
	nfsd4_bump_seqid(cstate, status);
5342
	nfs4_unlock_state();
5343 5344
	if (file_lock)
		locks_free_lock(file_lock);
L
Linus Torvalds 已提交
5345 5346 5347
	return status;

out_nfserr:
5348
	status = nfserrno(err);
5349
	goto fput;
L
Linus Torvalds 已提交
5350 5351 5352 5353
}

/*
 * returns
5354 5355
 * 	true:  locks held by lockowner
 * 	false: no locks held by lockowner
L
Linus Torvalds 已提交
5356
 */
5357 5358
static bool
check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
L
Linus Torvalds 已提交
5359 5360
{
	struct file_lock **flpp;
5361 5362 5363 5364 5365 5366 5367 5368 5369 5370 5371
	int status = false;
	struct file *filp = find_any_file(fp);
	struct inode *inode;

	if (!filp) {
		/* Any valid lock stateid should have some sort of access */
		WARN_ON_ONCE(1);
		return status;
	}

	inode = file_inode(filp);
L
Linus Torvalds 已提交
5372

5373
	spin_lock(&inode->i_lock);
L
Linus Torvalds 已提交
5374
	for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
5375
		if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
5376 5377
			status = true;
			break;
5378
		}
L
Linus Torvalds 已提交
5379
	}
5380
	spin_unlock(&inode->i_lock);
5381
	fput(filp);
L
Linus Torvalds 已提交
5382 5383 5384
	return status;
}

5385
__be32
5386 5387 5388
nfsd4_release_lockowner(struct svc_rqst *rqstp,
			struct nfsd4_compound_state *cstate,
			struct nfsd4_release_lockowner *rlockowner)
L
Linus Torvalds 已提交
5389 5390
{
	clientid_t *clid = &rlockowner->rl_clientid;
5391
	struct nfs4_stateowner *sop = NULL, *tmp;
5392
	struct nfs4_lockowner *lo;
5393
	struct nfs4_ol_stateid *stp;
L
Linus Torvalds 已提交
5394
	struct xdr_netobj *owner = &rlockowner->rl_owner;
5395
	unsigned int hashval = ownerstr_hashval(clid->cl_id, owner);
5396
	__be32 status;
5397
	struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
L
Linus Torvalds 已提交
5398 5399 5400 5401 5402 5403

	dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
		clid->cl_boot, clid->cl_id);

	nfs4_lock_state();

5404
	status = lookup_clientid(clid, cstate, nn);
5405 5406 5407
	if (status)
		goto out;

5408
	status = nfserr_locks_held;
5409

5410 5411 5412
	/* Find the matching lock stateowner */
	list_for_each_entry(tmp, &nn->ownerstr_hashtbl[hashval], so_strhash) {
		if (tmp->so_is_open_owner)
5413
			continue;
5414 5415
		if (same_owner_str(tmp, owner, clid)) {
			sop = tmp;
5416
			atomic_inc(&sop->so_count);
5417
			break;
L
Linus Torvalds 已提交
5418
		}
5419
	}
5420 5421 5422 5423 5424 5425 5426 5427 5428 5429

	/* No matching owner found, maybe a replay? Just declare victory... */
	if (!sop) {
		status = nfs_ok;
		goto out;
	}

	lo = lockowner(sop);
	/* see if there are still any locks associated with it */
	list_for_each_entry(stp, &sop->so_stateids, st_perstateowner) {
5430 5431
		if (check_for_locks(stp->st_stid.sc_file, lo)) {
			nfs4_put_stateowner(sop);
5432
			goto out;
5433
		}
L
Linus Torvalds 已提交
5434
	}
5435 5436 5437

	status = nfs_ok;
	release_lockowner(lo);
L
Linus Torvalds 已提交
5438 5439 5440 5441 5442 5443
out:
	nfs4_unlock_state();
	return status;
}

static inline struct nfs4_client_reclaim *
N
NeilBrown 已提交
5444
alloc_reclaim(void)
L
Linus Torvalds 已提交
5445
{
N
NeilBrown 已提交
5446
	return kmalloc(sizeof(struct nfs4_client_reclaim), GFP_KERNEL);
L
Linus Torvalds 已提交
5447 5448
}

5449
bool
5450
nfs4_has_reclaimed_state(const char *name, struct nfsd_net *nn)
5451
{
5452
	struct nfs4_client_reclaim *crp;
5453

5454
	crp = nfsd4_find_reclaim_client(name, nn);
5455
	return (crp && crp->cr_clp);
5456 5457
}

L
Linus Torvalds 已提交
5458 5459 5460
/*
 * failure => all reset bets are off, nfserr_no_grace...
 */
5461
struct nfs4_client_reclaim *
5462
nfs4_client_to_reclaim(const char *name, struct nfsd_net *nn)
L
Linus Torvalds 已提交
5463 5464
{
	unsigned int strhashval;
5465
	struct nfs4_client_reclaim *crp;
L
Linus Torvalds 已提交
5466

N
NeilBrown 已提交
5467 5468
	dprintk("NFSD nfs4_client_to_reclaim NAME: %.*s\n", HEXDIR_LEN, name);
	crp = alloc_reclaim();
5469 5470 5471
	if (crp) {
		strhashval = clientstr_hashval(name);
		INIT_LIST_HEAD(&crp->cr_strhash);
5472
		list_add(&crp->cr_strhash, &nn->reclaim_str_hashtbl[strhashval]);
5473
		memcpy(crp->cr_recdir, name, HEXDIR_LEN);
5474
		crp->cr_clp = NULL;
5475
		nn->reclaim_str_hashtbl_size++;
5476 5477
	}
	return crp;
L
Linus Torvalds 已提交
5478 5479
}

5480
void
5481
nfs4_remove_reclaim_record(struct nfs4_client_reclaim *crp, struct nfsd_net *nn)
5482 5483 5484
{
	list_del(&crp->cr_strhash);
	kfree(crp);
5485
	nn->reclaim_str_hashtbl_size--;
5486 5487
}

5488
void
5489
nfs4_release_reclaim(struct nfsd_net *nn)
L
Linus Torvalds 已提交
5490 5491 5492 5493 5494
{
	struct nfs4_client_reclaim *crp = NULL;
	int i;

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
5495 5496
		while (!list_empty(&nn->reclaim_str_hashtbl[i])) {
			crp = list_entry(nn->reclaim_str_hashtbl[i].next,
L
Linus Torvalds 已提交
5497
			                struct nfs4_client_reclaim, cr_strhash);
5498
			nfs4_remove_reclaim_record(crp, nn);
L
Linus Torvalds 已提交
5499 5500
		}
	}
5501
	WARN_ON_ONCE(nn->reclaim_str_hashtbl_size);
L
Linus Torvalds 已提交
5502 5503 5504 5505
}

/*
 * called from OPEN, CLAIM_PREVIOUS with a new clientid. */
5506
struct nfs4_client_reclaim *
5507
nfsd4_find_reclaim_client(const char *recdir, struct nfsd_net *nn)
L
Linus Torvalds 已提交
5508 5509 5510 5511
{
	unsigned int strhashval;
	struct nfs4_client_reclaim *crp = NULL;

5512
	dprintk("NFSD: nfs4_find_reclaim_client for recdir %s\n", recdir);
L
Linus Torvalds 已提交
5513

5514
	strhashval = clientstr_hashval(recdir);
5515
	list_for_each_entry(crp, &nn->reclaim_str_hashtbl[strhashval], cr_strhash) {
5516
		if (same_name(crp->cr_recdir, recdir)) {
L
Linus Torvalds 已提交
5517 5518 5519 5520 5521 5522 5523 5524 5525
			return crp;
		}
	}
	return NULL;
}

/*
* Called from OPEN. Look for clientid in reclaim list.
*/
5526
__be32
5527 5528 5529
nfs4_check_open_reclaim(clientid_t *clid,
		struct nfsd4_compound_state *cstate,
		struct nfsd_net *nn)
L
Linus Torvalds 已提交
5530
{
5531
	__be32 status;
5532 5533

	/* find clientid in conf_id_hashtbl */
5534 5535
	status = lookup_clientid(clid, cstate, nn);
	if (status)
5536 5537
		return nfserr_reclaim_bad;

5538 5539 5540 5541
	if (nfsd4_client_record_check(cstate->clp))
		return nfserr_reclaim_bad;

	return nfs_ok;
L
Linus Torvalds 已提交
5542 5543
}

B
Bryan Schumaker 已提交
5544 5545
#ifdef CONFIG_NFSD_FAULT_INJECTION

5546 5547
u64 nfsd_forget_client(struct nfs4_client *clp, u64 max)
{
5548 5549
	if (mark_client_expired(clp))
		return 0;
5550 5551 5552 5553
	expire_client(clp);
	return 1;
}

5554 5555 5556
u64 nfsd_print_client(struct nfs4_client *clp, u64 num)
{
	char buf[INET6_ADDRSTRLEN];
5557
	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5558 5559 5560 5561 5562 5563 5564 5565
	printk(KERN_INFO "NFS Client: %s\n", buf);
	return 1;
}

static void nfsd_print_count(struct nfs4_client *clp, unsigned int count,
			     const char *type)
{
	char buf[INET6_ADDRSTRLEN];
5566
	rpc_ntop((struct sockaddr *)&clp->cl_addr, buf, sizeof(buf));
5567 5568 5569
	printk(KERN_INFO "NFS Client: %s has %u %s\n", buf, count, type);
}

5570 5571
static u64 nfsd_foreach_client_lock(struct nfs4_client *clp, u64 max,
				    void (*func)(struct nfs4_ol_stateid *))
B
Bryan Schumaker 已提交
5572 5573 5574
{
	struct nfs4_openowner *oop;
	struct nfs4_ol_stateid *stp, *st_next;
5575
	struct nfs4_ol_stateid *lst, *lst_next;
B
Bryan Schumaker 已提交
5576 5577 5578
	u64 count = 0;

	list_for_each_entry(oop, &clp->cl_openowners, oo_perclient) {
5579 5580 5581 5582
		list_for_each_entry_safe(stp, st_next,
				&oop->oo_owner.so_stateids, st_perstateowner) {
			list_for_each_entry_safe(lst, lst_next,
					&stp->st_locks, st_locks) {
B
Bryan Schumaker 已提交
5583
				if (func)
5584
					func(lst);
B
Bryan Schumaker 已提交
5585 5586 5587 5588 5589 5590 5591 5592 5593 5594 5595
				if (++count == max)
					return count;
			}
		}
	}

	return count;
}

/* Fault injection: release up to @max lock stateids of @clp. */
u64 nfsd_forget_client_locks(struct nfs4_client *clp, u64 max)
{
	return nfsd_foreach_client_lock(clp, max, release_lock_stateid);
}

5599 5600 5601 5602 5603 5604 5605
u64 nfsd_print_client_locks(struct nfs4_client *clp, u64 max)
{
	u64 count = nfsd_foreach_client_lock(clp, max, NULL);
	nfsd_print_count(clp, count, "locked files");
	return count;
}

5606 5607 5608 5609 5610 5611 5612 5613 5614 5615 5616 5617 5618 5619 5620 5621 5622 5623 5624 5625
static u64 nfsd_foreach_client_open(struct nfs4_client *clp, u64 max, void (*func)(struct nfs4_openowner *))
{
	struct nfs4_openowner *oop, *next;
	u64 count = 0;

	list_for_each_entry_safe(oop, next, &clp->cl_openowners, oo_perclient) {
		if (func)
			func(oop);
		if (++count == max)
			break;
	}

	return count;
}

/* Fault injection: release up to @max openowners of @clp. */
u64 nfsd_forget_client_openowners(struct nfs4_client *clp, u64 max)
{
	return nfsd_foreach_client_open(clp, max, release_openowner);
}

5626 5627 5628 5629 5630 5631 5632
u64 nfsd_print_client_openowners(struct nfs4_client *clp, u64 max)
{
	u64 count = nfsd_foreach_client_open(clp, max, NULL);
	nfsd_print_count(clp, count, "open files");
	return count;
}

5633 5634 5635 5636 5637 5638
static u64 nfsd_find_all_delegations(struct nfs4_client *clp, u64 max,
				     struct list_head *victims)
{
	struct nfs4_delegation *dp, *next;
	u64 count = 0;

5639
	lockdep_assert_held(&state_lock);
5640
	list_for_each_entry_safe(dp, next, &clp->cl_delegations, dl_perclnt) {
5641 5642 5643 5644 5645 5646 5647 5648 5649 5650
		if (victims) {
			/*
			 * It's not safe to mess with delegations that have a
			 * non-zero dl_time. They might have already been broken
			 * and could be processed by the laundromat outside of
			 * the state_lock. Just leave them be.
			 */
			if (dp->dl_time != 0)
				continue;

5651 5652
			unhash_delegation_locked(dp);
			list_add(&dp->dl_recall_lru, victims);
5653
		}
5654 5655 5656 5657 5658 5659 5660 5661 5662 5663 5664 5665
		if (++count == max)
			break;
	}
	return count;
}

/* Fault injection: unhash and revoke up to @max delegations of @clp. */
u64 nfsd_forget_client_delegations(struct nfs4_client *clp, u64 max)
{
	struct nfs4_delegation *dp, *next;
	LIST_HEAD(victims);
	u64 count;

	spin_lock(&state_lock);
	count = nfsd_find_all_delegations(clp, max, &victims);
	spin_unlock(&state_lock);

	/* revoke outside state_lock, off the private victims list */
	list_for_each_entry_safe(dp, next, &victims, dl_recall_lru) {
		list_del_init(&dp->dl_recall_lru);
		revoke_delegation(dp);
	}

	return count;
}

/* Fault injection: initiate recall of up to @max delegations of @clp. */
u64 nfsd_recall_client_delegations(struct nfs4_client *clp, u64 max)
{
	struct nfs4_delegation *dp;
	LIST_HEAD(victims);
	u64 count;

	spin_lock(&state_lock);
	count = nfsd_find_all_delegations(clp, max, &victims);
	while (!list_empty(&victims)) {
		dp = list_first_entry(&victims, struct nfs4_delegation,
					dl_recall_lru);
		list_del_init(&dp->dl_recall_lru);
		dp->dl_time = 0;
		nfsd_break_one_deleg(dp);
	}
	spin_unlock(&state_lock);

	return count;
}

5698 5699 5700 5701
u64 nfsd_print_client_delegations(struct nfs4_client *clp, u64 max)
{
	u64 count = 0;

5702
	spin_lock(&state_lock);
5703
	count = nfsd_find_all_delegations(clp, max, NULL);
5704
	spin_unlock(&state_lock);
5705 5706 5707 5708 5709

	nfsd_print_count(clp, count, "delegations");
	return count;
}

5710
u64 nfsd_for_n_state(u64 max, u64 (*func)(struct nfs4_client *, u64))
B
Bryan Schumaker 已提交
5711 5712
{
	struct nfs4_client *clp, *next;
5713
	u64 count = 0;
5714
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);
B
Bryan Schumaker 已提交
5715

5716 5717 5718
	if (!nfsd_netns_ready(nn))
		return 0;

5719
	list_for_each_entry_safe(clp, next, &nn->client_lru, cl_lru) {
5720 5721
		count += func(clp, max - count);
		if ((max != 0) && (count >= max))
B
Bryan Schumaker 已提交
5722 5723 5724
			break;
	}

5725 5726 5727
	return count;
}

5728 5729 5730 5731 5732 5733 5734 5735 5736 5737 5738 5739 5740 5741 5742
struct nfs4_client *nfsd_find_client(struct sockaddr_storage *addr, size_t addr_size)
{
	struct nfs4_client *clp;
	struct nfsd_net *nn = net_generic(current->nsproxy->net_ns, nfsd_net_id);

	if (!nfsd_netns_ready(nn))
		return NULL;

	list_for_each_entry(clp, &nn->client_lru, cl_lru) {
		if (memcmp(&clp->cl_addr, addr, addr_size) == 0)
			return clp;
	}
	return NULL;
}

B
Bryan Schumaker 已提交
5743 5744
#endif /* CONFIG_NFSD_FAULT_INJECTION */

5745 5746 5747 5748 5749 5750 5751 5752 5753 5754 5755 5756 5757 5758 5759 5760 5761 5762 5763 5764 5765
/*
 * Since the lifetime of a delegation isn't limited to that of an open, a
 * client may quite reasonably hang on to a delegation as long as it has
 * the inode cached.  This becomes an obvious problem the first time a
 * client's inode cache approaches the size of the server's total memory.
 *
 * For now we avoid this problem by imposing a hard limit on the number
 * of delegations, which varies according to the server's memory size.
 */
static void
set_max_delegations(void)
{
	/*
	 * Allow at most 4 delegations per megabyte of RAM.  Quick
	 * estimates suggest that in the worst case (where every delegation
	 * is for a different inode), a delegation could take about 1.5K,
	 * giving a worst case usage of about 6% of memory.
	 */
	max_delegations = nr_free_buffer_pages() >> (20 - 2 - PAGE_SHIFT);
}

5766
static int nfs4_state_create_net(struct net *net)
5767 5768 5769 5770 5771 5772 5773
{
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
	int i;

	nn->conf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->conf_id_hashtbl)
5774
		goto err;
5775 5776 5777 5778
	nn->unconf_id_hashtbl = kmalloc(sizeof(struct list_head) *
			CLIENT_HASH_SIZE, GFP_KERNEL);
	if (!nn->unconf_id_hashtbl)
		goto err_unconf_id;
5779 5780 5781 5782
	nn->ownerstr_hashtbl = kmalloc(sizeof(struct list_head) *
			OWNER_HASH_SIZE, GFP_KERNEL);
	if (!nn->ownerstr_hashtbl)
		goto err_ownerstr;
5783 5784 5785 5786
	nn->sessionid_hashtbl = kmalloc(sizeof(struct list_head) *
			SESSION_HASH_SIZE, GFP_KERNEL);
	if (!nn->sessionid_hashtbl)
		goto err_sessionid;
5787

5788
	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
5789
		INIT_LIST_HEAD(&nn->conf_id_hashtbl[i]);
5790
		INIT_LIST_HEAD(&nn->unconf_id_hashtbl[i]);
5791
	}
5792 5793
	for (i = 0; i < OWNER_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->ownerstr_hashtbl[i]);
5794 5795
	for (i = 0; i < SESSION_HASH_SIZE; i++)
		INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
5796
	nn->conf_name_tree = RB_ROOT;
5797
	nn->unconf_name_tree = RB_ROOT;
5798
	INIT_LIST_HEAD(&nn->client_lru);
5799
	INIT_LIST_HEAD(&nn->close_lru);
5800
	INIT_LIST_HEAD(&nn->del_recall_lru);
5801
	spin_lock_init(&nn->client_lock);
5802

5803
	INIT_DELAYED_WORK(&nn->laundromat_work, laundromat_main);
5804
	get_net(net);
5805

5806
	return 0;
5807

5808
err_sessionid:
5809
	kfree(nn->ownerstr_hashtbl);
5810 5811
err_ownerstr:
	kfree(nn->unconf_id_hashtbl);
5812 5813
err_unconf_id:
	kfree(nn->conf_id_hashtbl);
5814 5815
err:
	return -ENOMEM;
5816 5817 5818
}

static void
5819
nfs4_state_destroy_net(struct net *net)
5820 5821 5822 5823 5824 5825 5826 5827 5828 5829 5830
{
	int i;
	struct nfs4_client *clp = NULL;
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);

	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->conf_id_hashtbl[i])) {
			clp = list_entry(nn->conf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
	}
5831

5832 5833 5834 5835 5836
	for (i = 0; i < CLIENT_HASH_SIZE; i++) {
		while (!list_empty(&nn->unconf_id_hashtbl[i])) {
			clp = list_entry(nn->unconf_id_hashtbl[i].next, struct nfs4_client, cl_idhash);
			destroy_client(clp);
		}
5837 5838
	}

5839
	kfree(nn->sessionid_hashtbl);
5840
	kfree(nn->ownerstr_hashtbl);
5841
	kfree(nn->unconf_id_hashtbl);
5842
	kfree(nn->conf_id_hashtbl);
5843
	put_net(net);
5844 5845
}

5846
int
5847
nfs4_state_start_net(struct net *net)
5848
{
5849
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
5850 5851
	int ret;

5852
	ret = nfs4_state_create_net(net);
5853 5854
	if (ret)
		return ret;
5855
	nfsd4_client_tracking_init(net);
5856
	nn->boot_time = get_seconds();
5857
	locks_start_grace(net, &nn->nfsd4_manager);
5858
	nn->grace_ended = false;
5859
	printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
5860 5861
	       nn->nfsd4_grace, net);
	queue_delayed_work(laundry_wq, &nn->laundromat_work, nn->nfsd4_grace * HZ);
5862 5863 5864 5865 5866 5867 5868 5869 5870 5871
	return 0;
}

/* initialization to perform when the nfsd service is started: */

int
nfs4_state_start(void)
{
	int ret;

5872
	ret = set_callback_cred();
5873 5874
	if (ret)
		return -ENOMEM;
5875
	laundry_wq = create_singlethread_workqueue("nfsd4");
5876 5877 5878 5879
	if (laundry_wq == NULL) {
		ret = -ENOMEM;
		goto out_recovery;
	}
5880 5881 5882
	ret = nfsd4_create_callback_queue();
	if (ret)
		goto out_free_laundry;
5883

5884
	set_max_delegations();
5885

5886
	return 0;
5887

5888 5889
out_free_laundry:
	destroy_workqueue(laundry_wq);
5890
out_recovery:
5891
	return ret;
L
Linus Torvalds 已提交
5892 5893
}

5894
void
5895
nfs4_state_shutdown_net(struct net *net)
L
Linus Torvalds 已提交
5896 5897 5898
{
	struct nfs4_delegation *dp = NULL;
	struct list_head *pos, *next, reaplist;
5899
	struct nfsd_net *nn = net_generic(net, nfsd_net_id);
L
Linus Torvalds 已提交
5900

5901 5902
	cancel_delayed_work_sync(&nn->laundromat_work);
	locks_end_grace(&nn->nfsd4_manager);
5903

5904
	nfs4_lock_state();
L
Linus Torvalds 已提交
5905
	INIT_LIST_HEAD(&reaplist);
5906
	spin_lock(&state_lock);
5907
	list_for_each_safe(pos, next, &nn->del_recall_lru) {
L
Linus Torvalds 已提交
5908
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
5909 5910
		unhash_delegation_locked(dp);
		list_add(&dp->dl_recall_lru, &reaplist);
L
Linus Torvalds 已提交
5911
	}
5912
	spin_unlock(&state_lock);
L
Linus Torvalds 已提交
5913 5914
	list_for_each_safe(pos, next, &reaplist) {
		dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
5915
		list_del_init(&dp->dl_recall_lru);
5916
		nfs4_put_stid(&dp->dl_stid);
L
Linus Torvalds 已提交
5917 5918
	}

5919
	nfsd4_client_tracking_exit(net);
5920
	nfs4_state_destroy_net(net);
5921
	nfs4_unlock_state();
L
Linus Torvalds 已提交
5922 5923 5924 5925 5926
}

void
nfs4_state_shutdown(void)
{
5927
	destroy_workqueue(laundry_wq);
5928
	nfsd4_destroy_callback_queue();
L
Linus Torvalds 已提交
5929
}
5930 5931 5932 5933

/*
 * If the compound has a saved current stateid and the op carried the
 * special "current stateid" value, substitute the saved one.
 */
static void
get_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (!HAS_STATE_ID(cstate, CURRENT_STATE_ID_FLAG))
		return;
	if (CURRENT_STATEID(stateid))
		memcpy(stateid, &cstate->current_stateid, sizeof(stateid_t));
}

/*
 * Save @stateid as the compound's current stateid.  Only minorversion
 * (v4.1+) compounds track a current stateid.
 */
static void
put_stateid(struct nfsd4_compound_state *cstate, stateid_t *stateid)
{
	if (!cstate->minorversion)
		return;
	memcpy(&cstate->current_stateid, stateid, sizeof(stateid_t));
	SET_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}

/* Invalidate the compound's saved current stateid. */
void
clear_current_stateid(struct nfsd4_compound_state *cstate)
{
	CLEAR_STATE_ID(cstate, CURRENT_STATE_ID_FLAG);
}

5953 5954 5955
/*
 * functions to set current state id
 */
5956 5957 5958 5959 5960 5961
/* OPEN_DOWNGRADE: record the response stateid as the compound's current stateid. */
void
nfsd4_set_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	put_stateid(cstate, &odp->od_stateid);
}

5962 5963 5964 5965 5966 5967
/* OPEN: record the response stateid as the compound's current stateid. */
void
nfsd4_set_openstateid(struct nfsd4_compound_state *cstate, struct nfsd4_open *open)
{
	put_stateid(cstate, &open->op_stateid);
}

5968 5969 5970 5971 5972 5973 5974 5975 5976 5977 5978 5979 5980 5981 5982
/* CLOSE: record the response stateid as the compound's current stateid. */
void
nfsd4_set_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	put_stateid(cstate, &close->cl_stateid);
}

/* LOCK: record the response stateid as the compound's current stateid. */
void
nfsd4_set_lockstateid(struct nfsd4_compound_state *cstate, struct nfsd4_lock *lock)
{
	put_stateid(cstate, &lock->lk_resp_stateid);
}

/*
 * functions to consume current state id
 */
5983

5984 5985 5986 5987 5988 5989 5990 5991 5992 5993 5994 5995
/* OPEN_DOWNGRADE: substitute the saved current stateid if the op used the special value. */
void
nfsd4_get_opendowngradestateid(struct nfsd4_compound_state *cstate, struct nfsd4_open_downgrade *odp)
{
	get_stateid(cstate, &odp->od_stateid);
}

/* DELEGRETURN: substitute the saved current stateid if the op used the special value. */
void
nfsd4_get_delegreturnstateid(struct nfsd4_compound_state *cstate, struct nfsd4_delegreturn *drp)
{
	get_stateid(cstate, &drp->dr_stateid);
}

5996 5997 5998 5999 6000 6001 6002 6003 6004 6005 6006 6007
/* FREE_STATEID: substitute the saved current stateid if the op used the special value. */
void
nfsd4_get_freestateid(struct nfsd4_compound_state *cstate, struct nfsd4_free_stateid *fsp)
{
	get_stateid(cstate, &fsp->fr_stateid);
}

/* SETATTR: substitute the saved current stateid if the op used the special value. */
void
nfsd4_get_setattrstateid(struct nfsd4_compound_state *cstate, struct nfsd4_setattr *setattr)
{
	get_stateid(cstate, &setattr->sa_stateid);
}

6008 6009 6010 6011 6012 6013 6014
/* CLOSE: substitute the saved current stateid if the op used the special value. */
void
nfsd4_get_closestateid(struct nfsd4_compound_state *cstate, struct nfsd4_close *close)
{
	get_stateid(cstate, &close->cl_stateid);
}

void
6015
nfsd4_get_lockustateid(struct nfsd4_compound_state *cstate, struct nfsd4_locku *locku)
6016
{
6017
	get_stateid(cstate, &locku->lu_stateid);
6018
}
6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030

/* READ: substitute the saved current stateid if the op used the special value. */
void
nfsd4_get_readstateid(struct nfsd4_compound_state *cstate, struct nfsd4_read *read)
{
	get_stateid(cstate, &read->rd_stateid);
}

/* WRITE: substitute the saved current stateid if the op used the special value. */
void
nfsd4_get_writestateid(struct nfsd4_compound_state *cstate, struct nfsd4_write *write)
{
	get_stateid(cstate, &write->wr_stateid);
}