/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

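/*
 * Layout driver registration.  Illustrative use only (the driver name and
 * fields below are examples, not taken from this file): a layout driver
 * normally calls this from its module init hook, e.g.
 *
 *	static struct pnfs_layoutdriver_type mylayout_type = {
 *		.id		= LAYOUT_NFSV4_1_FILES,
 *		.name		= "mylayout",
 *		.alloc_lseg	= mylayout_alloc_lseg,
 *		.free_lseg	= mylayout_free_lseg,
 *	};
 *	pnfs_register_layoutdriver(&mylayout_type);
 *
 * A nonzero id and the alloc_lseg/free_lseg hooks are mandatory.
 */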
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ?  "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

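/*
 * Layout segments are reference counted; once the last reference is dropped
 * the segment is handed back to the layout driver via ->free_lseg().
 */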
static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	lo = lseg->pls_layout;
	inode = lo->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

static void pnfs_put_lseg_async_work(struct work_struct *work)
{
	struct pnfs_layout_segment *lseg;

	lseg = container_of(work, struct pnfs_layout_segment, pls_work);

	pnfs_put_lseg(lseg);
}

void
pnfs_put_lseg_async(struct pnfs_layout_segment *lseg)
{
	INIT_WORK(&lseg->pls_work, pnfs_put_lseg_async_work);
	schedule_work(&lseg->pls_work);
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_async);

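/* Clamp start + len: a range that would overflow extends to the end of file. */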
static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * is l1 and l2 intersecting?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
		    const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

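/*
 * Does recall_range apply to this segment?  IOMODE_ANY matches either iomode;
 * otherwise the iomodes must match and the byte ranges must intersect.
 */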
static bool
should_free_lseg(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;
		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		if (is_bulk_recall)
			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
			ret = -EAGAIN;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}

static void
pnfs_verify_layout_stateid(struct pnfs_layout_hdr *lo,
		const nfs4_stateid *new,
		struct list_head *free_me_list)
{
	if (nfs4_stateid_match_other(&lo->plh_stateid, new))
		return;
	/* Layout is new! Kill existing layout segments */
	pnfs_mark_matching_lsegs_invalid(lo, free_me_list, NULL);
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}

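/* True if the stateid's seqid is at or behind the current layoutget barrier. */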
static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}

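/*
 * Choose the stateid to send in LAYOUTGET: the open stateid while no layout
 * segments are held, otherwise the current layout stateid.
 */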
int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, 1)) {
		status = -EAGAIN;
	} else if (!nfs4_valid_open_stateid(open_state)) {
		status = -EBADF;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}

/*
* Get layout from server.
*    for now, assume that whole file layouts are requested.
*    arg->offset: 0
*    arg->length: all ones
*/
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->gfp_flags = gfp_flags;
	lgp->cred = lo->plh_lc_cred;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -ENOMEM:
		case -ERESTARTSYS:
			break;
		default:
			/* remember that LAYOUTGET failed and suspend trying */
			pnfs_layout_io_set_failed(lo, range->iomode);
		}
		return NULL;
	}

	return lseg;
}

static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0, empty;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out;
	}
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		lo->plh_block_lgets--;
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}

bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	u32 current_seqid;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
			found = true;
			goto out;
		}
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
	spin_unlock(&ino->i_lock);
	return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed  both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}

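/*
 * Illustrative example (numbers are hypothetical): with a server-supplied
 * rd_sz threshold of 1 MB, reads of a 64 KB file go to the MDS, while reads
 * of a 10 MB file may be sent through the pNFS data servers.
 */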
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		goto out;

	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
		goto out;

	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	first = list_empty(&lo->plh_layouts) ? true : false;
	spin_unlock(&ino->i_lock);

	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			lseg == NULL ? "not found" : "found",
			iomode==IOMODE_RW ?  "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);

struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, 1) ||
	    pnfs_layout_stateid_blocked(lo, &res->stateid)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}

	/* Check that the new stateid matches the old stateid */
	pnfs_verify_layout_stateid(lo, &res->stateid, &free_me);
	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);

	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;
out:
	return ERR_PTR(status);

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}

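/*
 * Pageio setup helpers: find or request a layout segment covering the request
 * and fall back to I/O through the MDS when none can be obtained.
 */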
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	WARN_ON_ONCE(pgio->pg_lseg != NULL);

	if (pgio->pg_dreq == NULL)
		rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
	else
		rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   rd_size,
					   IOMODE_READ,
					   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);

}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	WARN_ON_ONCE(pgio->pg_lseg != NULL);

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   wb_size,
					   IOMODE_RW,
					   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		     struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are less bytes than 'size', return that instead.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 *
	 */
	if (pgio->pg_lseg) {
		seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
				     pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);
		WARN_ON_ONCE(req_start > seg_end);
		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end)
			return 0;

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);

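/*
 * Write-side completion handling for layout drivers: on a pNFS error the I/O
 * is redone through the MDS (optionally returning the layout first).
 */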
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{

	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (!hdr->pnfs_error) {
		pnfs_set_layoutcommit(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	} else
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_write_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	desc->pg_lseg = NULL;
	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_write_through_mds(desc, hdr);
	pnfs_put_lseg(lseg);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return -ENOMEM;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_write(desc, hdr, desc->pg_ioflags);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	} else
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_read_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	desc->pg_lseg = NULL;
	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_read_through_mds(desc, hdr);
	pnfs_put_lseg(lseg);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);

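/*
 * Generic read path for pNFS: allocate the pgio header, attach the layout
 * segment and issue the I/O, falling back to the MDS on failure.
 */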
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		ret = -ENOMEM;
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return ret;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

void
pnfs_set_layoutcommit(struct nfs_pgio_header *hdr)
{
	struct inode *inode = hdr->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos = hdr->mds_offset + hdr->res.count;
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(hdr->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, hdr->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_commit_set_layoutcommit(struct nfs_commit_data *data)
{
	struct inode *inode = data->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &data->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(data->lseg);
	}
	if (data->lwb > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = data->lwb;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, data->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_commit_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}