/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
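
/*
 * Illustrative use of the registration interface above: a layout driver
 * module typically registers itself from its module init hook and
 * unregisters on exit.  The mylayout_* names below are hypothetical
 * placeholders; only pnfs_register_layoutdriver(),
 * pnfs_unregister_layoutdriver() and the listed struct fields are real.
 *
 *	static struct pnfs_layoutdriver_type mylayout_type = {
 *		.id		= LAYOUT_NFSV4_1_FILES,
 *		.name		= "LAYOUT_NFSV4_1_FILES",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= mylayout_alloc_lseg,
 *		.free_lseg	= mylayout_free_lseg,
 *	};
 *
 *	static int __init mylayout_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&mylayout_type);
 *	}
 *
 *	static void __exit mylayout_exit(void)
 *	{
 *		pnfs_unregister_layoutdriver(&mylayout_type);
 *	}
 */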

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ?  "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}
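
/*
 * Illustrative timeline for the fail bits above: if a LAYOUTGET for a
 * given iomode fails at time T, pnfs_layout_io_set_failed() records T in
 * plh_retry_timestamp and sets the fail bit, so pnfs_update_layout()
 * falls back to MDS I/O for that iomode.  Once jiffies has advanced past
 * T + PNFS_LAYOUTGET_RETRY_TIMEOUT (120 * HZ, i.e. roughly two minutes),
 * pnfs_layout_io_test_failed() clears the bit and LAYOUTGET is retried.
 */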

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	lo = lseg->pls_layout;
	inode = lo->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static inline int
lo_seg_contained(struct pnfs_layout_range *l1,
		 struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * are l1 and l2 intersecting?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static inline int
lo_seg_intersecting(struct pnfs_layout_range *l1,
		    struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}
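
/*
 * Worked example for the range helpers above (values are illustrative):
 * with l1 = { .offset = 0, .length = NFS4_MAX_UINT64 } and
 * l2 = { .offset = 4096, .length = 8192 }, end1 is NFS4_MAX_UINT64 and
 * end2 is 12288, so both lo_seg_contained(l1, l2) and
 * lo_seg_intersecting(l1, l2) are non-zero.  For disjoint ranges such as
 * l1 = { 0, 4096 } and l2 = { 8192, 4096 }, end1 (4096) is not greater
 * than start2 (8192), so lo_seg_intersecting() returns 0.
 */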

static bool
should_free_lseg(struct pnfs_layout_range *lseg_range,
		 struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       lo_seg_intersecting(lseg_range, recall_range);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;
		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		if (is_bulk_recall)
			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
			ret = -EAGAIN;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)s1 - (s32)s2 > 0;
}
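
/*
 * Illustrative example of the wraparound handling above: with s1 = 2 and
 * s2 = 0xfffffffe (the seqid recently wrapped), the signed difference
 * (s32)2 - (s32)0xfffffffe evaluates to 4, so s1 is correctly treated as
 * newer even though it is numerically smaller.
 */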

/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, 1)) {
		status = -EAGAIN;
	} else if (!nfs4_valid_open_stateid(open_state)) {
		status = -EBADF;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->gfp_flags = gfp_flags;
	lgp->cred = lo->plh_lc_cred;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -ENOMEM:
		case -ERESTARTSYS:
			break;
		default:
			/* remember that LAYOUTGET failed and suspend trying */
			pnfs_layout_io_set_failed(lo, range->iomode);
		}
		return NULL;
	}

	return lseg;
}

static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0, empty;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out;
	}
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		lo->plh_block_lgets--;
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}

bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	u32 current_seqid;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
			found = true;
			goto out;
		}
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
	spin_unlock(&ino->i_lock);
	return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(struct pnfs_layout_range *l1,
	   struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
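
/*
 * Illustrative ordering produced by cmp_layout(): for two segments that
 * both start at offset 0, the one with length NFS4_MAX_UINT64 sorts
 * before the one with length 4096 (longer ranges first), and for
 * otherwise identical ranges an IOMODE_RW segment sorts before an
 * IOMODE_READ one, which is what lets lookups prefer RW layouts.
 */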

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static int
is_matching_lseg(struct pnfs_layout_range *ls_range,
		 struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !lo_seg_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return lo_seg_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed  both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}
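
/*
 * Worked example (numbers are illustrative): suppose OPEN returned
 * mdsthreshold hints with THRESHOLD_RD and THRESHOLD_RD_IO set,
 * rd_sz = 1 MB and rd_io_sz = 64 KB.  For a READ against a 512 KB file
 * on an inode whose cumulative read_io counter is currently 16 KB, both
 * the file size and read_io are below their thresholds, so the function
 * returns true and the I/O is sent to the MDS.  Once read_io grows past
 * 64 KB (or the file exceeds 1 MB), one of the tests fails and pNFS I/O
 * is used again.
 */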

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		goto out;

	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
		goto out;

	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	first = list_empty(&lo->plh_layouts) ? true : false;
	spin_unlock(&ino->i_lock);

	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			lseg == NULL ? "not found" : "found",
			iomode==IOMODE_RW ?  "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);

struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, 1) ||
	    pnfs_layout_stateid_blocked(lo, &res->stateid)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);

	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	spin_unlock(&ino->i_lock);
	return lseg;
out:
	return ERR_PTR(status);

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}

void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	WARN_ON_ONCE(pgio->pg_lseg != NULL);

	if (req->wb_offset != req->wb_pgbase) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
	else
		rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   rd_size,
					   IOMODE_READ,
					   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);

}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	WARN_ON_ONCE(pgio->pg_lseg != NULL);

	if (req->wb_offset != req->wb_pgbase) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   wb_size,
					   IOMODE_RW,
					   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
		      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		nfs_pageio_init_read(pgio, inode, compl_ops);
	else
		nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops, server->rsize, 0);
}

void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
		       int ioflags,
		       const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		nfs_pageio_init_write(pgio, inode, ioflags, compl_ops);
	else
		nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops, server->wsize, ioflags);
}

bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		     struct nfs_page *req)
{
	if (pgio->pg_lseg == NULL)
		return nfs_generic_pg_test(pgio, prev, req);

	/*
	 * Test if a nfs_page is fully contained in the pnfs_layout_range.
	 * Note that this test makes several assumptions:
	 * - that the previous nfs_page in the struct nfs_pageio_descriptor
	 *   is known to lie within the range.
	 *   - that the nfs_page being tested is known to be contiguous with the
	 *   previous nfs_page.
	 *   - Layout ranges are page aligned, so we only have to test the
	 *   start offset of the request.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 *
	 */
	return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
					 pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);

int pnfs_write_done_resend_to_mds(struct inode *inode,
				struct list_head *head,
				const struct nfs_pgio_completion_ops *compl_ops,
				struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
	pgio.pg_dreq = dreq;
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		/* For some reason our attempt to resend pages failed. Mark the
		 * overall send request as having failed, and let
		 * nfs_writeback_release_full deal with the error.
		 */
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
							&hdr->pages,
							hdr->completion_ops,
							hdr->dreq);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!hdr->pnfs_error) {
		pnfs_set_layoutcommit(data);
		hdr->mds_ops->rpc_call_done(&data->task, data);
	} else
		pnfs_ld_handle_write_error(data);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_write_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_writedata_release(data);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_first_entry(head, struct nfs_write_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_write_through_mds(desc, data);
	}
	pnfs_put_lseg(lseg);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_writehdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_write_header *whdr;
	struct nfs_pgio_header *hdr;
	int ret;

	whdr = nfs_writehdr_alloc();
	if (!whdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return -ENOMEM;
	}
	hdr = &whdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_flush(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct inode *inode,
				struct list_head *head,
				const struct nfs_pgio_completion_ops *compl_ops,
				struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, inode, compl_ops);
	pgio.pg_dreq = dreq;
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
							&hdr->pages,
							hdr->completion_ops,
							hdr->dreq);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(data);
		hdr->mds_ops->rpc_call_done(&data->task, data);
	} else
		pnfs_ld_handle_read_error(data);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_read_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_readdata_release(data);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct nfs_pgio_header *hdr = rdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
	struct nfs_read_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_first_entry(head, struct nfs_read_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_read_through_mds(desc, data);
	}
	pnfs_put_lseg(lseg);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_readhdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_read_header *rhdr;
	struct nfs_pgio_header *hdr;
	int ret;

	rhdr = nfs_readhdr_alloc();
	if (!rhdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		ret = -ENOMEM;
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return ret;
	}
	hdr = &rhdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_pagein(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_multiple_reads(desc, &hdr->rpc_list);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;
	unsigned long *bitlock = &NFS_I(inode)->flags;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos = wdata->mds_offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(hdr->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, hdr->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		status = -ENOMEM;
		goto out;
	}

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_free;

	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync) {
			status = -EAGAIN;
			goto out_free;
		}
		status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
					nfs_wait_bit_killable, TASK_KILLABLE);
		if (status)
			goto out_free;
	}

	INIT_LIST_HEAD(&data->lseg_list);
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
		spin_unlock(&inode->i_lock);
		wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
		goto out_free;
	}

	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_free:
	kfree(data);
	goto out;
}

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}