/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sort.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"
#include "nfs42.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);
static void pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
		struct list_head *free_me,
		const struct pnfs_layout_range *range,
		u32 seq);
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		                struct list_head *tmp_list);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * When the server sends a list of layout types, we choose one in the order
 * given in the list below.
 *
 * FIXME: should this list be configurable in some fashion? module param?
 * 	  mount option? something else?
 */
static const u32 ld_prefs[] = {
	LAYOUT_SCSI,
	LAYOUT_BLOCK_VOLUME,
	LAYOUT_OSD2_OBJECTS,
	LAYOUT_FLEX_FILES,
	LAYOUT_NFSV4_1_FILES,
	0
};

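/* Compare two layout types by their position in ld_prefs[]; a type listed
 * earlier sorts first, so sort() leaves the most preferred type at index 0.
 */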
static int
ld_cmp(const void *e1, const void *e2)
{
	u32 ld1 = *((u32 *)e1);
	u32 ld2 = *((u32 *)e2);
	int i;

	for (i = 0; ld_prefs[i] != 0; i++) {
		if (ld1 == ld_prefs[i])
			return -1;

		if (ld2 == ld_prefs[i])
			return 1;
	}
	return 0;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @ids array of layout types supported by MDS.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      struct nfs_fsinfo *fsinfo)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;
	u32 id;
	int i;

	if (fsinfo->nlayouttypes == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
			__func__, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}

	sort(fsinfo->layouttype, fsinfo->nlayouttypes,
		sizeof(*fsinfo->layouttype), ld_cmp, NULL);

	for (i = 0; i < fsinfo->nlayouttypes; i++) {
		id = fsinfo->layouttype[i];
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
					id);
			ld_type = find_pnfs_driver(id);
		}
		if (ld_type)
			break;
	}

	if (!ld_type) {
		dprintk("%s: No pNFS module found!\n", __func__);
		goto out_no_driver;
	}

	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	pnfs_layoutreturn_before_put_layout_hdr(lo);

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}

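/* Record that a layoutreturn is needed: remember the iomode (widened to
 * IOMODE_ANY if it differs from one already recorded) and the stateid
 * sequence id that the return should cover.
 */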
static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
			 u32 seq)
{
	if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
		iomode = IOMODE_ANY;
	lo->plh_return_iomode = iomode;
	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	if (seq != 0) {
		WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
		lo->plh_return_seq = seq;
	}
}

static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{
	lo->plh_return_iomode = 0;
	lo->plh_return_seq = 0;
	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
}

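/* Release the layoutreturn lock bits and wake up anyone waiting on them. */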
static void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
	clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
	smp_mb__after_atomic();
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}

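/* Strip a segment of its state bits and drop the references they pinned,
 * queueing the segment on @free_me once its refcount reaches zero.
 */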
static void
pnfs_clear_lseg_state(struct pnfs_layout_segment *lseg,
		struct list_head *free_me)
{
	clear_bit(NFS_LSEG_ROC, &lseg->pls_flags);
	clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags))
		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
	if (test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
		pnfs_lseg_dec_and_remove_zero(lseg, free_me);
}

/*
 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
 *
 * In order to continue using the pnfs_layout_hdr, a full recovery
 * is required.
 * Note that caller must hold inode->i_lock.
 */
int
pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
		struct list_head *lseg_list)
{
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	struct pnfs_layout_segment *lseg, *next;

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	pnfs_clear_layoutreturn_info(lo);
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		pnfs_clear_lseg_state(lseg, lseg_list);
	pnfs_free_returned_lsegs(lo, lseg_list, &range, 0);
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags) &&
	    !test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
		pnfs_clear_layoutreturn_waitbit(lo);
	return !list_empty(&lo->plh_segs);
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ?  "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}

static void
pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
		const struct pnfs_layout_range *range,
		const nfs4_stateid *stateid)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
	lseg->pls_range = *range;
	lseg->pls_seq = be32_to_cpu(stateid->seqid);
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	if (lseg != NULL) {
		struct inode *inode = lseg->pls_layout->plh_inode;
		NFS_SERVER(inode)->pnfs_curr_ld->free_lseg(lseg);
	}
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
		return;
	if (list_empty(&lo->plh_segs) &&
	    !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		if (atomic_read(&lo->plh_outstanding) == 0)
			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	}
}

static bool
pnfs_cache_lseg_for_layoutreturn(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	if (test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
	    pnfs_layout_is_valid(lo)) {
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
		list_move_tail(&lseg->pls_list, &lo->plh_return_segs);
		return true;
	}
	return false;
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

	lo = lseg->pls_layout;
	inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
			spin_unlock(&inode->i_lock);
			return;
		}
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		if (pnfs_cache_lseg_for_layoutreturn(lo, lseg))
			lseg = NULL;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

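/* Freeing an lseg may call into the layout driver, so defer it to a work
 * item when the caller is holding the inode spin lock.
 */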
static void pnfs_free_lseg_async_work(struct work_struct *work)
{
	struct pnfs_layout_segment *lseg;
	struct pnfs_layout_hdr *lo;

	lseg = container_of(work, struct pnfs_layout_segment, pls_work);
	lo = lseg->pls_layout;

	pnfs_free_lseg(lseg);
	pnfs_put_layout_hdr(lo);
}

static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
{
	INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
	schedule_work(&lseg->pls_work);
}

void
pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
{
	if (!lseg)
		return;

	assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	if (atomic_dec_and_test(&lseg->pls_refcount)) {
		struct pnfs_layout_hdr *lo = lseg->pls_layout;
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
			return;
		pnfs_layout_remove_lseg(lo, lseg);
		if (!pnfs_cache_lseg_for_layoutreturn(lo, lseg)) {
			pnfs_get_layout_hdr(lo);
			pnfs_free_lseg_async(lseg);
		}
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = pnfs_end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = pnfs_end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

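/* Drop one reference on @lseg; if it was the last, unlink it from its
 * layout and queue it on @tmp_list. Returns true if the segment was removed.
 */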
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}

static bool
pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool
pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
		const struct pnfs_layout_range *recall_range,
		u32 seq)
{
	if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
		return false;
	if (recall_range == NULL)
		return true;
	return pnfs_should_free_range(&lseg->pls_range, recall_range);
}

/**
 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
 * @lo: layout header containing the lsegs
 * @tmp_list: list head where doomed lsegs should go
 * @recall_range: optional recall range argument to match (may be NULL)
 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
 *
 * Walk the list of lsegs in the layout header, and tear down any that should
 * be destroyed. If "recall_range" is specified then the segment must match
 * that range. If "seq" is non-zero, then only match segments that were handed
 * out at or before that sequence.
 *
 * Returns number of matching invalid lsegs remaining in list after scanning
 * it and purging them.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    const struct pnfs_layout_range *recall_range,
			    u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
			dprintk("%s: freeing lseg %p iomode %d seq %u "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_seq,
				lseg->pls_range.offset, lseg->pls_range.length);
			if (!mark_lseg_invalid(lseg, tmp_list))
				remaining++;
		}
	dprintk("%s:Return %i\n", __func__, remaining);
	return remaining;
}

static void
pnfs_free_returned_lsegs(struct pnfs_layout_hdr *lo,
		struct list_head *free_me,
		const struct pnfs_layout_range *range,
		u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;

	list_for_each_entry_safe(lseg, next, &lo->plh_return_segs, pls_list) {
		if (pnfs_match_lseg_recall(lseg, range, seq))
			list_move_tail(&lseg->pls_list, free_me);
	}
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		pnfs_get_layout_hdr(lo);
		pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags))
			continue;
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
			if (is_bulk_recall)
				set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
			ret = -EAGAIN;
		}
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		/* Free all lsegs that are attached to commit buckets */
		nfs_commit_inode(inode, 0);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier = 0;

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);

	if (!pnfs_layout_is_valid(lo)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		lo->plh_barrier = newseq;
		pnfs_clear_layoutreturn_info(lo);
		clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
		return;
	}
	if (pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		/*
		 * Because of wraparound, we want to keep the barrier
		 * "close" to the current seqids.
		 */
		new_barrier = newseq - atomic_read(&lo->plh_outstanding);
	}
	if (update_barrier)
		new_barrier = be32_to_cpu(new->seqid);
	else if (new_barrier == 0)
		return;
	if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
		lo->plh_barrier = new_barrier;
}

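/* Return true if the stateid's seqid has not advanced past the layout barrier */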
static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* Return true if new layoutgets are currently blocked for this layout */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   nfs4_stateid *stateid,
	   const struct pnfs_layout_range *range,
	   long *timeout, gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	loff_t i_size;

	dprintk("--> %s\n", __func__);

	/*
	 * Synchronously retrieve layout information from server and
	 * store in lseg. If we race with a concurrent seqid morphing
	 * op, then re-send the LAYOUTGET.
	 */
	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return ERR_PTR(-ENOMEM);

	i_size = i_size_read(ino);

	lgp->args.minlength = PAGE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	if (range->iomode == IOMODE_READ) {
		if (range->offset >= i_size)
			lgp->args.minlength = 0;
		else if (i_size - range->offset < lgp->args.minlength)
			lgp->args.minlength = i_size - range->offset;
	}
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	pnfs_copy_range(&lgp->args.range, range);
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	nfs4_stateid_copy(&lgp->args.stateid, stateid);
	lgp->gfp_flags = gfp_flags;
	lgp->cred = lo->plh_lc_cred;

	return nfs4_proc_layoutget(lgp, timeout, gfp_flags);
}

static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}

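/* Process the result of a LAYOUTRETURN: free the matching segments and
 * either update the layout stateid or invalidate the whole layout.
 */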
void pnfs_layoutreturn_free_lsegs(struct pnfs_layout_hdr *lo,
		const nfs4_stateid *arg_stateid,
		const struct pnfs_layout_range *range,
		const nfs4_stateid *stateid)
{
	struct inode *inode = lo->plh_inode;
	LIST_HEAD(freeme);

	spin_lock(&inode->i_lock);
	if (!pnfs_layout_is_valid(lo) || !arg_stateid ||
	    !nfs4_stateid_match_other(&lo->plh_stateid, arg_stateid))
		goto out_unlock;
	if (stateid) {
		u32 seq = be32_to_cpu(arg_stateid->seqid);

		pnfs_mark_matching_lsegs_invalid(lo, &freeme, range, seq);
		pnfs_free_returned_lsegs(lo, &freeme, range, seq);
		pnfs_set_layout_stateid(lo, stateid, true);
	} else
		pnfs_mark_layout_stateid_invalid(lo, &freeme);
out_unlock:
	pnfs_clear_layoutreturn_waitbit(lo);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&freeme);

}

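/* Claim the right to send a LAYOUTRETURN. On success, takes a reference on
 * the layout header and copies out the stateid and iomode to be returned.
 */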
static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
		nfs4_stateid *stateid,
		enum pnfs_iomode *iomode)
{
	/* Serialise LAYOUTGET/LAYOUTRETURN */
	if (atomic_read(&lo->plh_outstanding) != 0)
		return false;
	if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
		return false;
	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
	pnfs_get_layout_hdr(lo);
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
		if (stateid != NULL) {
			nfs4_stateid_copy(stateid, &lo->plh_stateid);
			if (lo->plh_return_seq != 0)
				stateid->seqid = cpu_to_be32(lo->plh_return_seq);
		}
		if (iomode != NULL)
			*iomode = lo->plh_return_iomode;
		pnfs_clear_layoutreturn_info(lo);
		return true;
	}
	if (stateid != NULL)
		nfs4_stateid_copy(stateid, &lo->plh_stateid);
	if (iomode != NULL)
		*iomode = IOMODE_ANY;
	return true;
}

static void
pnfs_init_layoutreturn_args(struct nfs4_layoutreturn_args *args,
		struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid,
		enum pnfs_iomode iomode)
{
	struct inode *inode = lo->plh_inode;

	args->layout_type = NFS_SERVER(inode)->pnfs_curr_ld->id;
	args->inode = inode;
	args->range.iomode = iomode;
	args->range.offset = 0;
	args->range.length = NFS4_MAX_UINT64;
	args->layout = lo;
	nfs4_stateid_copy(&args->stateid, stateid);
}

static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
		       enum pnfs_iomode iomode, bool sync)
{
	struct inode *ino = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	struct nfs4_layoutreturn *lrp;
	int status = 0;

	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	pnfs_init_layoutreturn_args(&lrp->args, lo, stateid, iomode);
	lrp->args.ld_private = &lrp->ld_private;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;
	if (ld->prepare_layoutreturn)
		ld->prepare_layoutreturn(&lrp->args);

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}

/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layout_segment *s;

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return false;

	/* Defer layoutreturn until all lsegs are done */
	list_for_each_entry(s, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
			return false;
	}

	return true;
}

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return;
	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;
		bool send;

		send = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
		spin_unlock(&inode->i_lock);
		if (send) {
			/* Send an async layoutreturn so we don't deadlock */
			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
		}
	} else
		spin_unlock(&inode->i_lock);
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	nfs4_stateid stateid;
	int status = 0;
	bool send;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	/* Is there an outstanding layoutreturn ? */
	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		if (wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
					TASK_UNINTERRUPTIBLE))
			goto out_put_layout_hdr;
		spin_lock(&ino->i_lock);
	}
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		struct pnfs_layout_range range = {
			.iomode		= IOMODE_ANY,
			.offset		= 0,
			.length		= NFS4_MAX_UINT64,
		};
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
	}

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out_put_layout_hdr;
	}

	send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	if (send)
		status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}

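/* Return-on-close: decide whether the layout can be returned as part of
 * CLOSE, and if so set up the LAYOUTRETURN arguments for the compound.
 */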
bool pnfs_roc(struct inode *ino,
		struct nfs4_layoutreturn_args *args,
		struct nfs4_layoutreturn_res *res,
		const struct rpc_cred *cred)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *next;
	nfs4_stateid stateid;
	enum pnfs_iomode iomode = 0;
	bool layoutreturn = false, roc = false;

	if (!nfs_have_layout(ino))
		return false;
retry:
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || !pnfs_layout_is_valid(lo) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_noroc;
	if (test_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags)) {
		pnfs_get_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		wait_on_bit(&lo->plh_flags, NFS_LAYOUT_RETURN,
				TASK_UNINTERRUPTIBLE);
		pnfs_put_layout_hdr(lo);
		goto retry;
	}

	/* no roc if we hold a delegation */
	if (nfs4_check_delegation(ino, FMODE_READ))
		goto out_noroc;

	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		/* Don't return layout if there is open file state */
		if (state != NULL && state->state != 0)
			goto out_noroc;
	}


	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) {
		/* If we are sending layoutreturn, invalidate all valid lsegs */
		if (!test_and_clear_bit(NFS_LSEG_ROC, &lseg->pls_flags))
			continue;
		/*
		 * Note: mark lseg for return so pnfs_layout_remove_lseg
		 * doesn't invalidate the layout for us.
		 */
		set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
		if (!mark_lseg_invalid(lseg, &lo->plh_return_segs))
			continue;
		pnfs_set_plh_return_info(lo, lseg->pls_range.iomode, 0);
	}

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		goto out_noroc;

	/* ROC in two conditions:
	 * 1. there are ROC lsegs
	 * 2. we don't send layoutreturn
	 */
	/* lo ref dropped in pnfs_roc_release() */
	layoutreturn = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
	/* If the creds don't match, we can't compound the layoutreturn */
	if (!layoutreturn || cred != lo->plh_lc_cred)
		goto out_noroc;

	roc = layoutreturn;
	pnfs_init_layoutreturn_args(args, lo, &stateid, iomode);
	res->lrs_present = 0;
	layoutreturn = false;

out_noroc:
	spin_unlock(&ino->i_lock);
	pnfs_layoutcommit_inode(ino, true);
	if (roc) {
		struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
		if (ld->prepare_layoutreturn)
			ld->prepare_layoutreturn(args);
		return true;
	}
	if (layoutreturn)
		pnfs_send_layoutreturn(lo, &stateid, iomode, true);
	return false;
}

void pnfs_roc_release(struct nfs4_layoutreturn_args *args,
		struct nfs4_layoutreturn_res *res,
		int ret)
{
	struct pnfs_layout_hdr *lo = args->layout;
	const nfs4_stateid *arg_stateid = NULL;
	const nfs4_stateid *res_stateid = NULL;
	struct nfs4_xdr_opaque_data *ld_private = args->ld_private;

	if (ret == 0) {
		arg_stateid = &args->stateid;
		if (res->lrs_present)
			res_stateid = &res->stateid;
	}
	pnfs_layoutreturn_free_lsegs(lo, arg_stateid, &args->range,
			res_stateid);
	if (ld_private && ld_private->ops && ld_private->ops->free)
		ld_private->ops->free(ld_private);
	pnfs_put_layout_hdr(lo);
	trace_nfs4_layoutreturn_on_close(args->inode, 0);
}

bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *lo;
        bool sleep = false;

	/* we might not have grabbed lo reference. so need to check under
	 * i_lock */
        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
                rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
                sleep = true;
	}
        spin_unlock(&ino->i_lock);
        return sleep;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

static bool
pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	return pnfs_lseg_range_cmp(l1, l2) > 0;
}

static bool
pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_segment *old)
{
	return false;
}

void
pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   bool (*is_after)(const struct pnfs_layout_range *,
			   const struct pnfs_layout_range *),
		   bool (*do_merge)(struct pnfs_layout_segment *,
			   struct pnfs_layout_segment *),
		   struct list_head *free_me)
{
	struct pnfs_layout_segment *lp, *tmp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
			continue;
		if (do_merge(lseg, lp)) {
			mark_lseg_invalid(lp, free_me);
			continue;
		}
		if (is_after(&lseg->pls_range, &lp->pls_range))
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   struct list_head *free_me)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;

	if (ld->add_lseg != NULL)
		ld->add_lseg(lo, lseg, free_me);
	else
		pnfs_generic_layout_insert_lseg(lo, lseg,
				pnfs_lseg_range_is_after,
				pnfs_lseg_no_merge,
				free_me);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_return_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
	__releases(&ino->i_lock)
	__acquires(&ino->i_lock)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	strict match
 *                      iomode
 * -----	-----	------ -----
 * ANY		READ	N/A    true
 * ANY		RW	N/A    true
 * RW		READ	N/A    false
 * RW		RW	N/A    true
 * READ		READ	N/A    true
 * READ		RW	true   false
 * READ		RW	false  true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range,
		 bool strict_iomode)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    (range->iomode != ls_range->iomode &&
	     strict_iomode == true) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range,
		bool strict_iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range,
					  strict_iomode)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed  both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}

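/* Wait for an outstanding layoutreturn to complete before retrying LAYOUTGET */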
static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
	/*
	 * send layoutcommit as it can hold up layoutreturn due to lseg
	 * reference
	 */
	pnfs_layoutcommit_inode(lo->plh_inode, false);
	return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
				   nfs_wait_bit_killable,
				   TASK_UNINTERRUPTIBLE);
}

static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
	unsigned long *bitlock = &lo->plh_flags;

	clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   bool strict_iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset, seq;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo = NULL;
	struct pnfs_layout_segment *lseg = NULL;
	nfs4_stateid stateid;
	long timeout = 0;
	unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_NO_PNFS);
		goto out;
	}

	if (iomode == IOMODE_READ && i_size_read(ino) == 0) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_RD_ZEROLEN);
		goto out;
	}

	if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_MDSTHRESH);
		goto out;
	}

lookup_again:
	nfs4_client_recover_expired_lease(clp);
	first = false;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_NOMEM);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_BULK_RECALL);
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
		goto out_unlock;
	}

	lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
	if (lseg) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				PNFS_UPDATE_LAYOUT_FOUND_CACHED);
		goto out_unlock;
	}

	if (!nfs4_valid_open_stateid(ctx->state)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				PNFS_UPDATE_LAYOUT_INVALID_OPEN);
		goto out_unlock;
	}

	/*
	 * Choose a stateid for the LAYOUTGET. If we don't have a layout
	 * stateid, or it has been invalidated, then we must use the open
	 * stateid.
	 */
1766
	if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
1767 1768 1769

		/*
		 * The first layoutget for the file. Need to serialize per
1770 1771 1772 1773 1774 1775 1776 1777
		 * RFC 5661 Errata 3208.
		 */
		if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
				     &lo->plh_flags)) {
			spin_unlock(&ino->i_lock);
			wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
				    TASK_UNINTERRUPTIBLE);
			pnfs_put_layout_hdr(lo);
1778
			dprintk("%s retrying\n", __func__);
1779 1780
			goto lookup_again;
		}
1781 1782 1783 1784 1785 1786

		first = true;
		do {
			seq = read_seqbegin(&ctx->state->seqlock);
			nfs4_stateid_copy(&stateid, &ctx->state->stateid);
		} while (read_seqretry(&ctx->state->seqlock, seq));
1787
	} else {
1788
		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
1789
	}
1790

1791 1792 1793 1794
	/*
	 * Because we free lsegs before sending LAYOUTRETURN, we need to wait
	 * for LAYOUTRETURN even if first is true.
	 */
1795
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
1796 1797 1798
		spin_unlock(&ino->i_lock);
		dprintk("%s wait for layoutreturn\n", __func__);
		if (pnfs_prepare_to_retry_layoutget(lo)) {
1799 1800
			if (first)
				pnfs_clear_first_layoutget(lo);
1801 1802
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
1803 1804
			trace_pnfs_update_layout(ino, pos, count, iomode, lo,
					lseg, PNFS_UPDATE_LAYOUT_RETRY);
1805 1806
			goto lookup_again;
		}
1807
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1808
				PNFS_UPDATE_LAYOUT_RETURN);
1809 1810 1811
		goto out_put_layout_hdr;
	}

1812
	if (pnfs_layoutgets_blocked(lo)) {
1813
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
1814
				PNFS_UPDATE_LAYOUT_BLOCKED);
1815
		goto out_unlock;
1816
	}
1817
	atomic_inc(&lo->plh_outstanding);
1818
	spin_unlock(&ino->i_lock);
1819

1820
	if (list_empty(&lo->plh_layouts)) {
1821 1822 1823 1824
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
1825 1826
		if (list_empty(&lo->plh_layouts))
			list_add_tail(&lo->plh_layouts, &server->layouts);
1827 1828
		spin_unlock(&clp->cl_lock);
	}
1829

1830
	pg_offset = arg.offset & ~PAGE_MASK;
1831 1832 1833 1834
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
1835
	if (arg.length != NFS4_MAX_UINT64)
1836
		arg.length = PAGE_ALIGN(arg.length);
1837

1838 1839 1840
	lseg = send_layoutget(lo, ctx, &stateid, &arg, &timeout, gfp_flags);
	trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
1841
	atomic_dec(&lo->plh_outstanding);
1842
	if (IS_ERR(lseg)) {
1843
		switch(PTR_ERR(lseg)) {
1844
		case -EBUSY:
1845 1846
			if (time_after(jiffies, giveup))
				lseg = NULL;
1847 1848 1849 1850 1851 1852 1853 1854 1855 1856
			break;
		case -ERECALLCONFLICT:
			/* Huh? We hold no layouts, how is there a recall? */
			if (first) {
				lseg = NULL;
				break;
			}
			/* Destroy the existing layout and start over */
			if (time_after(jiffies, giveup))
				pnfs_destroy_layout(NFS_I(ino));
1857 1858
			/* Fallthrough */
		case -EAGAIN:
1859
			break;
1860 1861 1862 1863 1864
		default:
			if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
				pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
				lseg = NULL;
			}
1865 1866 1867 1868 1869 1870 1871 1872 1873
			goto out_put_layout_hdr;
		}
		if (lseg) {
			if (first)
				pnfs_clear_first_layoutget(lo);
			trace_pnfs_update_layout(ino, pos, count,
				iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
			pnfs_put_layout_hdr(lo);
			goto lookup_again;
1874 1875 1876 1877 1878
		}
	} else {
		pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	}

1879
out_put_layout_hdr:
1880 1881
	if (first)
		pnfs_clear_first_layoutget(lo);
1882
	pnfs_put_layout_hdr(lo);
1883
out:
1884 1885 1886 1887
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
1888
			IS_ERR_OR_NULL(lseg) ? "not found" : "found",
1889 1890 1891
			iomode==IOMODE_RW ?  "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
1892 1893 1894
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
1895
	goto out_put_layout_hdr;
1896
}
1897
EXPORT_SYMBOL_GPL(pnfs_update_layout);
1898

1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918
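/*
 * Sanity check a layout range returned by the server: the iomode must be
 * READ or RW, the offset must not be NFS4_MAX_UINT64, the length must be
 * non-zero, and a bounded range must not extend past NFS4_MAX_UINT64.
 */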
static bool
pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
{
	switch (range->iomode) {
	case IOMODE_READ:
	case IOMODE_RW:
		break;
	default:
		return false;
	}
	if (range->offset == NFS4_MAX_UINT64)
		return false;
	if (range->length == 0)
		return false;
	if (range->length != NFS4_MAX_UINT64 &&
	    range->length > NFS4_MAX_UINT64 - range->offset)
		return false;
	return true;
}

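/*
 * Process a LAYOUTGET reply: validate the returned range, have the layout
 * driver decode the layout body into a layout segment, check the reply
 * against the current layout stateid, and insert the new segment into the
 * layout header.  Returns the referenced segment or an ERR_PTR; -EAGAIN
 * tells the caller to forget this reply and retry.
 */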
struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);

	if (!pnfs_sanity_check_layout_range(&res->range))
		return ERR_PTR(-EINVAL);

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (IS_ERR_OR_NULL(lseg)) {
		if (!lseg)
			lseg = ERR_PTR(-ENOMEM);

		dprintk("%s: Could not allocate layout: error %ld\n",
		       __func__, PTR_ERR(lseg));
		return lseg;
	}

	pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);

	spin_lock(&ino->i_lock);
	if (pnfs_layoutgets_blocked(lo)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget;
	}

	if (!pnfs_layout_is_valid(lo)) {
		/* We have a completely new layout */
		pnfs_set_layout_stateid(lo, &res->stateid, true);
	} else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
		/* existing state ID, make sure the sequence number matches. */
		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
			dprintk("%s forget reply due to sequence\n", __func__);
			goto out_forget;
		}
		pnfs_set_layout_stateid(lo, &res->stateid, false);
	} else {
		/*
		 * We got an entirely new state ID.  Mark all segments for the
		 * inode invalid, and retry the layoutget
		 */
		pnfs_mark_layout_stateid_invalid(lo, &free_me);
		goto out_forget;
	}

	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg, &free_me);

	if (res->return_on_close)
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;

out_forget:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	return ERR_PTR(-EAGAIN);
}

/**
 * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
 * @lo: pointer to layout header
 * @tmp_list: list header to be used with pnfs_free_lseg_list()
 * @return_range: describe layout segment ranges to be returned
 *
 * This function is mainly intended for use by layoutrecall. It attempts
 * to free the layout segment immediately, or else to mark it for return
 * as soon as its reference count drops to zero.
 */
int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
				struct list_head *tmp_list,
				const struct pnfs_layout_range *return_range,
				u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;

	assert_spin_locked(&lo->plh_inode->i_lock);

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
			dprintk("%s: marking lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode,
				lseg->pls_range.offset,
				lseg->pls_range.length);
			if (mark_lseg_invalid(lseg, tmp_list))
				continue;
			remaining++;
			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
		}

	if (remaining)
		pnfs_set_plh_return_info(lo, return_range->iomode, seq);

	return remaining;
}

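/*
 * Mark the layout for return after an error on @lseg's iomode.  New
 * LAYOUTGETs are blocked; if no matching segments remain in use the
 * LAYOUTRETURN is sent immediately, otherwise outstanding commits are
 * kicked off and the return happens once the last segment is released.
 */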
void pnfs_error_mark_layout_for_return(struct inode *inode,
				       struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
	struct pnfs_layout_range range = {
		.iomode = lseg->pls_range.iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	bool return_now = false;

	spin_lock(&inode->i_lock);
	pnfs_set_plh_return_info(lo, range.iomode, 0);
	/* Block LAYOUTGET */
	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
	/*
	 * mark all matching lsegs so that we are sure to have no live
	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
	 * for how it works.
	 */
	if (!pnfs_mark_matching_lsegs_return(lo, &lo->plh_return_segs, &range, 0)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;

		return_now = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
		spin_unlock(&inode->i_lock);
		if (return_now)
			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
	} else {
		spin_unlock(&inode->i_lock);
		nfs_commit_inode(inode, 0);
	}
}
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);

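/*
 * Generic pg_init callback for pNFS reads: look up (or request) a layout
 * segment covering this request.  For buffered reads the requested range
 * extends from the request offset to the end of file; for O_DIRECT it
 * covers the remaining bytes of the direct request.  If no usable segment
 * is returned, the read falls back to the MDS.
 */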
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	if (pgio->pg_lseg == NULL) {
		if (pgio->pg_dreq == NULL)
			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
		else
			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   rd_size,
						   IOMODE_READ,
						   false,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

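/*
 * Generic pg_init callback for pNFS writes: look up (or request) an RW
 * layout segment covering @wb_size bytes at the request offset, falling
 * back to write through the MDS if none is available.
 */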
void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	if (pgio->pg_lseg == NULL) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   wb_size,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
{
	if (desc->pg_lseg) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are fewer bytes than 'size', return that instead.
	 *
	 * Please also note that 'seg_end' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 *
	 */
	if (pgio->pg_lseg) {
		seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
				     pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);
		WARN_ON_ONCE(req_start >= seg_end);
		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end) {
			/* reference the new lseg */
			if (pgio->pg_ops->pg_cleanup)
				pgio->pg_ops->pg_cleanup(pgio);
			if (pgio->pg_ops->pg_init)
				pgio->pg_ops->pg_init(pgio, req);
			return 0;
		}

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);

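/*
 * Resend a failed pNFS write through the MDS by replaying every request
 * in @hdr on a freshly initialized MDS pageio descriptor.
 */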
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
				hdr->mds_offset + hdr->res.count);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

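/*
 * The layout driver did not attempt this write: splice the requests back
 * onto the pageio mirror, reset the descriptor to write through the MDS,
 * and let it recoalesce.
 */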
static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_write_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
	hdr->release(hdr);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_write_through_mds(desc, hdr);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

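/*
 * Generic pNFS write submission: allocate a pageio header, take a
 * reference to the descriptor's layout segment, and issue the coalesced
 * requests through pnfs_do_write().
 */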
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);

	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_write(desc, hdr, desc->pg_ioflags);

	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

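/*
 * Resend a failed pNFS read through the MDS by replaying every request
 * in @hdr on a freshly initialized MDS pageio descriptor.
 */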
int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error))
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

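/*
 * The read was not attempted (or failed) via pNFS: splice the requests
 * back onto the pageio mirror, reset the descriptor to read through the
 * MDS, and let it recoalesce.
 */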
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
	hdr->release(hdr);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/* Resend all requests through pnfs. */
void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		/* Prevent deadlocks with layoutreturn! */
		pnfs_put_lseg(hdr->lseg);
		hdr->lseg = NULL;

		nfs_pageio_init_read(&pgio, hdr->inode, false,
					hdr->completion_ops);
		hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
	}
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);

static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	if (trypnfs == PNFS_TRY_AGAIN)
		pnfs_read_resend_pnfs(hdr);
	if (trypnfs == PNFS_NOT_ATTEMPTED || hdr->task.tk_status)
		pnfs_read_through_mds(desc, hdr);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

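/*
 * Generic pNFS read submission: allocate a pageio header, take a
 * reference to the descriptor's layout segment, and issue the coalesced
 * requests through pnfs_do_read().
 */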
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

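/*
 * Record that pNFS WRITEs have reached @end_pos on this inode so that a
 * later LAYOUTCOMMIT can report the last byte written to the MDS.  The
 * first call after a commit marks the inode dirty; subsequent calls only
 * extend plh_lwb and take a reference on the lseg for the commit.
 */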
void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		loff_t end_pos)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		nfsi->layout->plh_lwb = end_pos;
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	} else if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(lseg);
	}
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	if (end_pos != 0)
		data->args.lastbytewritten = end_pos - 1;
	else
		data->args.lastbytewritten = U64_MAX;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			put_rpccred(data->cred);
			spin_lock(&inode->i_lock);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			goto out_unlock;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);

int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}

#if IS_ENABLED(CONFIG_NFS_V4_2)
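/*
 * Send a LAYOUTSTATS report for @inode, provided the server advertises the
 * capability and the layout driver implements ->prepare_layoutstats.  The
 * NFS_INO_LAYOUTSTATS bit keeps at most one report in flight per inode.
 */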
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out_clear_layoutstats;
	}
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
out_clear_layoutstats:
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif

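/*
 * Tunable consumed by pNFS layout drivers when deciding how often to emit
 * LAYOUTSTATS reports; 0 leaves the reporting interval at the driver's
 * default, and the exact interpretation is up to the individual driver.
 */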
unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);