/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/sort.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"
#include "delegation.h"
#include "nfs42.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * When the server sends a list of layout types, we choose one in the order
 * given in the list below.
 *
 * FIXME: should this list be configurable in some fashion? module param?
 * 	  mount option? something else?
 */
static const u32 ld_prefs[] = {
	LAYOUT_SCSI,
	LAYOUT_BLOCK_VOLUME,
	LAYOUT_OSD2_OBJECTS,
	LAYOUT_FLEX_FILES,
	LAYOUT_NFSV4_1_FILES,
	0
};

static int
ld_cmp(const void *e1, const void *e2)
{
	u32 ld1 = *((u32 *)e1);
	u32 ld2 = *((u32 *)e2);
	int i;

	for (i = 0; ld_prefs[i] != 0; i++) {
		if (ld1 == ld_prefs[i])
			return -1;

		if (ld2 == ld_prefs[i])
			return 1;
	}
	return 0;
}
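
/*
 * Example: if the server advertises {LAYOUT_NFSV4_1_FILES, LAYOUT_SCSI},
 * sorting with ld_cmp() yields {LAYOUT_SCSI, LAYOUT_NFSV4_1_FILES},
 * because LAYOUT_SCSI appears earlier in ld_prefs[]. Types that are not
 * listed in ld_prefs[] compare equal to each other and sort after every
 * listed type.
 */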

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @fsinfo contains the list of layout types supported by the MDS.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      struct nfs_fsinfo *fsinfo)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;
	u32 id;
	int i;

	if (fsinfo->nlayouttypes == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: cl_exchange_flags 0x%x\n",
			__func__, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}

	sort(fsinfo->layouttype, fsinfo->nlayouttypes,
		sizeof(*fsinfo->layouttype), ld_cmp, NULL);

	for (i = 0; i < fsinfo->nlayouttypes; i++) {
		id = fsinfo->layouttype[i];
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX,
					id);
			ld_type = find_pnfs_driver(id);
		}
		if (ld_type)
			break;
	}

	if (!ld_type) {
		dprintk("%s: No pNFS module found!\n", __func__);
		goto out_no_driver;
	}

	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;

	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);

	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	pnfs_layoutreturn_before_put_layout_hdr(lo);

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		if (!list_empty(&lo->plh_segs))
			WARN_ONCE(1, "NFS: BUG unfreed layout segments.\n");
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}

static void
pnfs_clear_layoutreturn_info(struct pnfs_layout_hdr *lo)
{
	lo->plh_return_iomode = 0;
	lo->plh_return_seq = 0;
	clear_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
}

/*
 * Mark a pnfs_layout_hdr and all associated layout segments as invalid
 *
 * In order to continue using the pnfs_layout_hdr, a full recovery
 * is required.
 * Note that caller must hold inode->i_lock.
 */
int
pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
		struct list_head *lseg_list)
{
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	pnfs_clear_layoutreturn_info(lo);
	return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range, 0);
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range, 0);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ?  "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}
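
/*
 * Note: pnfs_layout_set_fail_bit() stamps plh_retry_timestamp, so once a
 * LAYOUTGET fails, the test above routes I/O through the MDS for up to
 * PNFS_LAYOUTGET_RETRY_TIMEOUT (120 seconds) before clearing the fail
 * bit and letting the client retry pNFS for that iomode.
 */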

static void
pnfs_init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg,
		const struct pnfs_layout_range *range,
		const nfs4_stateid *stateid)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
	lseg->pls_range = *range;
	lseg->pls_seq = be32_to_cpu(stateid->seqid);
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs) &&
	    !test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags) &&
	    !test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		if (atomic_read(&lo->plh_outstanding) == 0)
			set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	}
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));

	lo = lseg->pls_layout;
	inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
			spin_unlock(&inode->i_lock);
			return;
		}
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

static void pnfs_free_lseg_async_work(struct work_struct *work)
{
	struct pnfs_layout_segment *lseg;
	struct pnfs_layout_hdr *lo;

	lseg = container_of(work, struct pnfs_layout_segment, pls_work);
	lo = lseg->pls_layout;

	pnfs_free_lseg(lseg);
	pnfs_put_layout_hdr(lo);
}

static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
{
	INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
	schedule_work(&lseg->pls_work);
}

void
pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
{
	if (!lseg)
		return;

	assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	if (atomic_dec_and_test(&lseg->pls_refcount)) {
		struct pnfs_layout_hdr *lo = lseg->pls_layout;
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
			return;
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		pnfs_free_lseg_async(lseg);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = pnfs_end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = pnfs_end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}
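
/*
 * The comparison above treats ranges as half-open intervals
 * [offset, end); pnfs_end_offset() is expected to saturate at
 * NFS4_MAX_UINT64 on overflow, so a length of "all ones" denotes a
 * range extending to the end of the file.
 */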

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}
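
/*
 * Example: after the 32-bit seqid wraps, s1 == 0x00000001 must still be
 * considered newer than s2 == 0xffffffff. The unsigned difference is 2,
 * which is positive when reinterpreted as an s32, so the test succeeds
 * even though s1 < s2 numerically.
 */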

static bool
pnfs_should_free_range(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool
pnfs_match_lseg_recall(const struct pnfs_layout_segment *lseg,
		const struct pnfs_layout_range *recall_range,
		u32 seq)
{
	if (seq != 0 && pnfs_seqid_is_newer(lseg->pls_seq, seq))
		return false;
	if (recall_range == NULL)
		return true;
	return pnfs_should_free_range(&lseg->pls_range, recall_range);
}

/**
 * pnfs_mark_matching_lsegs_invalid - tear down lsegs or mark them for later
 * @lo: layout header containing the lsegs
 * @tmp_list: list head where doomed lsegs should go
 * @recall_range: optional recall range argument to match (may be NULL)
 * @seq: only invalidate lsegs obtained prior to this sequence (may be 0)
 *
 * Walk the list of lsegs in the layout header, and tear down any that should
 * be destroyed. If "recall_range" is specified then the segment must match
 * that range. If "seq" is non-zero, then only match segments that were handed
 * out at or before that sequence.
 *
 * Returns number of matching invalid lsegs remaining in list after scanning
 * it and purging them.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    const struct pnfs_layout_range *recall_range,
			    u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (pnfs_match_lseg_recall(lseg, recall_range, seq)) {
			dprintk("%s: freeing lseg %p iomode %d seq %u "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_seq,
				lseg->pls_range.offset, lseg->pls_range.length);
			if (!mark_lseg_invalid(lseg, tmp_list))
				remaining++;
		}
	dprintk("%s:Return %i\n", __func__, remaining);
	return remaining;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		pnfs_get_layout_hdr(lo);
		pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}
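
/*
 * Note: dropping clp->cl_lock and the RCU read lock around iput() above
 * is what forces the -EAGAIN restart; iput() may sleep, so it cannot be
 * called with those locks held, and the server list may have changed by
 * the time they are re-taken.
 */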

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
			if (is_bulk_recall)
				set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
			ret = -EAGAIN;
		}
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		/* Free all lsegs that are attached to commit buckets */
		nfs_commit_inode(inode, 0);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier = 0;

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);

	if (!pnfs_layout_is_valid(lo)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		lo->plh_barrier = newseq;
		pnfs_clear_layoutreturn_info(lo);
		clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
		return;
	}
	if (pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		/*
		 * Because of wraparound, we want to keep the barrier
		 * "close" to the current seqids.
		 */
		new_barrier = newseq - atomic_read(&lo->plh_outstanding);
	}
	if (update_barrier)
		new_barrier = be32_to_cpu(new->seqid);
	else if (new_barrier == 0)
		return;
	if (pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
		lo->plh_barrier = new_barrier;
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* Return true if a new LAYOUTGET must not be sent for this layout */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   nfs4_stateid *stateid,
	   const struct pnfs_layout_range *range,
	   long *timeout, gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	loff_t i_size;

	dprintk("--> %s\n", __func__);

	/*
	 * Synchronously retrieve layout information from server and
	 * store in lseg. If we race with a concurrent seqid morphing
	 * op, then re-send the LAYOUTGET.
	 */
	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return ERR_PTR(-ENOMEM);

	i_size = i_size_read(ino);

	lgp->args.minlength = PAGE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	if (range->iomode == IOMODE_READ) {
		if (range->offset >= i_size)
			lgp->args.minlength = 0;
		else if (i_size - range->offset < lgp->args.minlength)
			lgp->args.minlength = i_size - range->offset;
	}
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	pnfs_copy_range(&lgp->args.range, range);
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	nfs4_stateid_copy(&lgp->args.stateid, stateid);
	lgp->gfp_flags = gfp_flags;
	lgp->cred = lo->plh_lc_cred;

	return nfs4_proc_layoutget(lgp, timeout, gfp_flags);
}
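
/*
 * Note on the minlength computation above: args.minlength is the
 * smallest layout the client is willing to accept, so it is clamped to
 * the requested length, and for reads it is further clamped to the
 * bytes remaining before EOF (zero when the read starts at or past EOF).
 */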

static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}

void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
{
	clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
	clear_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags);
	smp_mb__after_atomic();
	wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
	rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
}

static bool
pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo,
		nfs4_stateid *stateid,
		enum pnfs_iomode *iomode)
{
	/* Serialise LAYOUTGET/LAYOUTRETURN */
	if (atomic_read(&lo->plh_outstanding) != 0)
		return false;
	if (test_and_set_bit(NFS_LAYOUT_RETURN_LOCK, &lo->plh_flags))
		return false;
	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
	pnfs_get_layout_hdr(lo);
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags)) {
		if (stateid != NULL) {
			nfs4_stateid_copy(stateid, &lo->plh_stateid);
			if (lo->plh_return_seq != 0)
				stateid->seqid = cpu_to_be32(lo->plh_return_seq);
		}
		if (iomode != NULL)
			*iomode = lo->plh_return_iomode;
		pnfs_clear_layoutreturn_info(lo);
		return true;
	}
	if (stateid != NULL)
		nfs4_stateid_copy(stateid, &lo->plh_stateid);
	if (iomode != NULL)
		*iomode = IOMODE_ANY;
	return true;
}
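
/*
 * Note: NFS_LAYOUT_RETURN_LOCK acts as a try-lock above, so only one
 * caller wins the right to issue the LAYOUTRETURN; both it and
 * NFS_LAYOUT_RETURN are released again by
 * pnfs_clear_layoutreturn_waitbit().
 */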

static int
pnfs_send_layoutreturn(struct pnfs_layout_hdr *lo, const nfs4_stateid *stateid,
		       enum pnfs_iomode iomode, bool sync)
{
	struct inode *ino = lo->plh_inode;
	struct nfs4_layoutreturn *lrp;
	int status = 0;

	lrp = kzalloc(sizeof(*lrp), GFP_NOFS);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		pnfs_clear_layoutreturn_waitbit(lo);
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	nfs4_stateid_copy(&lrp->args.stateid, stateid);
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.range.iomode = iomode;
	lrp->args.range.offset = 0;
	lrp->args.range.length = NFS4_MAX_UINT64;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp, sync);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}

/* Return true if layoutreturn is needed */
static bool
pnfs_layout_need_return(struct pnfs_layout_hdr *lo)
{
	struct pnfs_layout_segment *s;

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return false;

	/* Defer layoutreturn until all lsegs are done */
	list_for_each_entry(s, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_LAYOUTRETURN, &s->pls_flags))
			return false;
	}

	return true;
}

static void pnfs_layoutreturn_before_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (!test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		return;
	spin_lock(&inode->i_lock);
	if (pnfs_layout_need_return(lo)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;
		bool send;

		send = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
		spin_unlock(&inode->i_lock);
		if (send) {
			/* Send an async layoutreturn so we don't deadlock */
			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
		}
	} else
		spin_unlock(&inode->i_lock);
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	nfs4_stateid stateid;
	int status = 0, empty;
	bool send;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL, 0);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		struct pnfs_layout_range range = {
			.iomode		= IOMODE_ANY,
			.offset		= 0,
			.length		= NFS4_MAX_UINT64,
		};
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
	}

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out_put_layout_hdr;
	}

	send = pnfs_prepare_layoutreturn(lo, &stateid, NULL);
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	if (send)
		status = pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}

bool pnfs_roc(struct inode *ino)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_open_context *ctx;
	struct nfs4_state *state;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	nfs4_stateid stateid;
	LIST_HEAD(tmp_list);
	bool found = false, layoutreturn = false, roc = false;

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo || test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_noroc;

	/* no roc if we hold a delegation */
	if (nfs4_check_delegation(ino, FMODE_READ))
		goto out_noroc;

	list_for_each_entry(ctx, &nfsi->open_files, list) {
		state = ctx->state;
		/* Don't return layout if there is open file state */
		if (state != NULL && state->state != 0)
			goto out_noroc;
	}

	/* always send layoutreturn if being marked so */
	if (test_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags))
		layoutreturn = pnfs_prepare_layoutreturn(lo,
				&stateid, NULL);

	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		/* If we are sending layoutreturn, invalidate all valid lsegs */
		if (layoutreturn || test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	/* ROC in two conditions:
	 * 1. there are ROC lsegs
	 * 2. we don't send layoutreturn
	 */
	if (found && !layoutreturn) {
		/* lo ref dropped in pnfs_roc_release() */
		pnfs_get_layout_hdr(lo);
		roc = true;
	}

out_noroc:
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	pnfs_layoutcommit_inode(ino, true);
	if (layoutreturn)
		pnfs_send_layoutreturn(lo, &stateid, IOMODE_ANY, true);
	return roc;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	pnfs_clear_layoutreturn_waitbit(lo);
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
	trace_nfs4_layoutreturn_on_close(ino, 0);
}

void pnfs_roc_get_barrier(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	u32 current_seqid;

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);
}
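
/*
 * Worst-case example: if the current layout stateid seqid is 10 and two
 * LAYOUTGETs are outstanding, their replies may raise the seqid to 12,
 * so the barrier is reported as 10 + 2 = 12 to cover stateids the
 * client may yet receive.
 */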

bool pnfs_wait_on_layoutreturn(struct inode *ino, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	bool sleep = false;

	/* we might not have grabbed lo reference. so need to check under
	 * i_lock */
	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (lo && test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
		sleep = true;
	}
	spin_unlock(&ino->i_lock);
	return sleep;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
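
/*
 * Example: two segments with equal offset and length, one IOMODE_RW and
 * one IOMODE_READ, compare as (0 - 1) = -1, so the RW segment sorts
 * first. Lower offsets sort earlier, and for equal offsets the longer
 * segment sorts earlier.
 */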

static bool
pnfs_lseg_range_is_after(const struct pnfs_layout_range *l1,
		const struct pnfs_layout_range *l2)
{
	return pnfs_lseg_range_cmp(l1, l2) > 0;
}

static bool
pnfs_lseg_no_merge(struct pnfs_layout_segment *lseg,
		struct pnfs_layout_segment *old)
{
	return false;
}

void
pnfs_generic_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   bool (*is_after)(const struct pnfs_layout_range *,
			   const struct pnfs_layout_range *),
		   bool (*do_merge)(struct pnfs_layout_segment *,
			   struct pnfs_layout_segment *),
		   struct list_head *free_me)
{
	struct pnfs_layout_segment *lp, *tmp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry_safe(lp, tmp, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lp->pls_flags) == 0)
			continue;
		if (do_merge(lseg, lp)) {
			mark_lseg_invalid(lp, free_me);
			continue;
		}
		if (is_after(&lseg->pls_range, &lp->pls_range))
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
EXPORT_SYMBOL_GPL(pnfs_generic_layout_insert_lseg);

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg,
		   struct list_head *free_me)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;

	if (ld->add_lseg != NULL)
		ld->add_lseg(lo, lseg, free_me);
	else
		pnfs_generic_layout_insert_lseg(lo, lseg,
				pnfs_lseg_range_is_after,
				pnfs_lseg_no_merge,
				free_me);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	lo->plh_flags |= 1 << NFS_LAYOUT_INVALID_STID;
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
	__releases(&ino->i_lock)
	__acquires(&ino->i_lock)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	strict match
 *			iomode
 * -----	-----	------ -----
 * ANY		READ	N/A    true
 * ANY		RW	N/A    true
 * RW		READ	N/A    false
 * RW		RW	N/A    true
 * READ		READ	N/A    true
 * READ		RW	true   false
 * READ		RW	false  true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range,
		 bool strict_iomode)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    (range->iomode != ls_range->iomode &&
	     strict_iomode == true) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range,
		bool strict_iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    !test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range,
					  strict_iomode)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed  both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}
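
/*
 * Worked example: if OPEN returned mdsthreshold hints with THRESHOLD_RD
 * set and rd_sz = 1 MB, a READ of a 64 KB file satisfies the file-size
 * test (64 KB < 1 MB); with no I/O-size hint set, the function returns
 * true and the read goes to the MDS rather than over pNFS.
 */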

static bool pnfs_prepare_to_retry_layoutget(struct pnfs_layout_hdr *lo)
{
	/*
	 * send layoutcommit as it can hold up layoutreturn due to lseg
	 * reference
	 */
	pnfs_layoutcommit_inode(lo->plh_inode, false);
	return !wait_on_bit_action(&lo->plh_flags, NFS_LAYOUT_RETURN,
				   nfs_wait_bit_killable,
				   TASK_UNINTERRUPTIBLE);
}

static void pnfs_clear_first_layoutget(struct pnfs_layout_hdr *lo)
{
	unsigned long *bitlock = &lo->plh_flags;

	clear_bit_unlock(NFS_LAYOUT_FIRST_LAYOUTGET, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_LAYOUT_FIRST_LAYOUTGET);
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   bool strict_iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset, seq;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo = NULL;
	struct pnfs_layout_segment *lseg = NULL;
	nfs4_stateid stateid;
	long timeout = 0;
	unsigned long giveup = jiffies + (clp->cl_lease_time << 1);
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino))) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_NO_PNFS);
		goto out;
	}

	if (iomode == IOMODE_READ && i_size_read(ino) == 0) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_RD_ZEROLEN);
		goto out;
	}

	if (pnfs_within_mdsthreshold(ctx, ino, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_MDSTHRESH);
		goto out;
	}

lookup_again:
	nfs4_client_recover_expired_lease(clp);
	first = false;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_NOMEM);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_BULK_RECALL);
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_IO_TEST_FAIL);
		goto out_unlock;
	}

	lseg = pnfs_find_lseg(lo, &arg, strict_iomode);
	if (lseg) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				PNFS_UPDATE_LAYOUT_FOUND_CACHED);
		goto out_unlock;
	}

	if (!nfs4_valid_open_stateid(ctx->state)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				PNFS_UPDATE_LAYOUT_INVALID_OPEN);
		goto out_unlock;
	}

	/*
	 * Choose a stateid for the LAYOUTGET. If we don't have a layout
	 * stateid, or it has been invalidated, then we must use the open
	 * stateid.
	 */
	if (test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {

		/*
		 * The first layoutget for the file. Need to serialize per
		 * RFC 5661 Errata 3208.
		 */
		if (test_and_set_bit(NFS_LAYOUT_FIRST_LAYOUTGET,
				     &lo->plh_flags)) {
			spin_unlock(&ino->i_lock);
			wait_on_bit(&lo->plh_flags, NFS_LAYOUT_FIRST_LAYOUTGET,
				    TASK_UNINTERRUPTIBLE);
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			goto lookup_again;
		}

		first = true;
		do {
			seq = read_seqbegin(&ctx->state->seqlock);
			nfs4_stateid_copy(&stateid, &ctx->state->stateid);
		} while (read_seqretry(&ctx->state->seqlock, seq));
	} else {
		nfs4_stateid_copy(&stateid, &lo->plh_stateid);
	}

	/*
	 * Because we free lsegs before sending LAYOUTRETURN, we need to wait
	 * for LAYOUTRETURN even if first is true.
	 */
	if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags)) {
		spin_unlock(&ino->i_lock);
		dprintk("%s wait for layoutreturn\n", __func__);
		if (pnfs_prepare_to_retry_layoutget(lo)) {
			if (first)
				pnfs_clear_first_layoutget(lo);
			pnfs_put_layout_hdr(lo);
			dprintk("%s retrying\n", __func__);
			trace_pnfs_update_layout(ino, pos, count, iomode, lo,
					lseg, PNFS_UPDATE_LAYOUT_RETRY);
			goto lookup_again;
		}
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				PNFS_UPDATE_LAYOUT_RETURN);
		goto out_put_layout_hdr;
	}

	if (pnfs_layoutgets_blocked(lo)) {
		trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				PNFS_UPDATE_LAYOUT_BLOCKED);
		goto out_unlock;
	}
	atomic_inc(&lo->plh_outstanding);
	spin_unlock(&ino->i_lock);

	if (list_empty(&lo->plh_layouts)) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		if (list_empty(&lo->plh_layouts))
			list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &stateid, &arg, &timeout, gfp_flags);
	trace_pnfs_update_layout(ino, pos, count, iomode, lo, lseg,
				 PNFS_UPDATE_LAYOUT_SEND_LAYOUTGET);
	atomic_dec(&lo->plh_outstanding);
	if (IS_ERR(lseg)) {
		switch(PTR_ERR(lseg)) {
		case -EBUSY:
			if (time_after(jiffies, giveup))
				lseg = NULL;
			break;
		case -ERECALLCONFLICT:
			/* Huh? We hold no layouts, how is there a recall? */
			if (first) {
				lseg = NULL;
				break;
			}
			/* Destroy the existing layout and start over */
			if (time_after(jiffies, giveup))
				pnfs_destroy_layout(NFS_I(ino));
			/* Fallthrough */
		case -EAGAIN:
			break;
		default:
			if (!nfs_error_is_fatal(PTR_ERR(lseg))) {
				pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
				lseg = NULL;
			}
			goto out_put_layout_hdr;
		}
		if (lseg) {
			if (first)
				pnfs_clear_first_layoutget(lo);
			trace_pnfs_update_layout(ino, pos, count,
				iomode, lo, lseg, PNFS_UPDATE_LAYOUT_RETRY);
			pnfs_put_layout_hdr(lo);
			goto lookup_again;
		}
	} else {
		pnfs_layout_clear_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	}

out_put_layout_hdr:
	if (first)
		pnfs_clear_first_layoutget(lo);
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			IS_ERR_OR_NULL(lseg) ? "not found" : "found",
			iomode==IOMODE_RW ?  "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);

static bool
pnfs_sanity_check_layout_range(struct pnfs_layout_range *range)
{
	switch (range->iomode) {
	case IOMODE_READ:
	case IOMODE_RW:
		break;
	default:
		return false;
	}
	if (range->offset == NFS4_MAX_UINT64)
		return false;
	if (range->length == 0)
		return false;
	if (range->length != NFS4_MAX_UINT64 &&
	    range->length > NFS4_MAX_UINT64 - range->offset)
		return false;
	return true;
}

struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);

	if (!pnfs_sanity_check_layout_range(&res->range))
		return ERR_PTR(-EINVAL);

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (IS_ERR_OR_NULL(lseg)) {
		if (!lseg)
			lseg = ERR_PTR(-ENOMEM);

		dprintk("%s: Could not allocate layout: error %ld\n",
		       __func__, PTR_ERR(lseg));
		return lseg;
	}

	pnfs_init_lseg(lo, lseg, &res->range, &res->stateid);

	spin_lock(&ino->i_lock);
	if (pnfs_layoutgets_blocked(lo)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget;
	}

	if (!pnfs_layout_is_valid(lo)) {
		/* We have a completely new layout */
		pnfs_set_layout_stateid(lo, &res->stateid, true);
	} else if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
		/* existing state ID, make sure the sequence number matches. */
		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
			dprintk("%s forget reply due to sequence\n", __func__);
			goto out_forget;
		}
		pnfs_set_layout_stateid(lo, &res->stateid, false);
	} else {
		/*
		 * We got an entirely new state ID.  Mark all segments for the
		 * inode invalid, and retry the layoutget
		 */
		pnfs_mark_layout_stateid_invalid(lo, &free_me);
		goto out_forget;
	}

	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg, &free_me);

	if (res->return_on_close)
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;

out_forget:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	return ERR_PTR(-EAGAIN);
}

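/*
 * Note that a layoutreturn is needed. If segments of both iomodes have
 * been marked, the return is widened to IOMODE_ANY; a nonzero @seq
 * records the stateid sequence number to use as a return barrier.
 */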
static void
pnfs_set_plh_return_info(struct pnfs_layout_hdr *lo, enum pnfs_iomode iomode,
			 u32 seq)
{
	if (lo->plh_return_iomode != 0 && lo->plh_return_iomode != iomode)
		iomode = IOMODE_ANY;
	lo->plh_return_iomode = iomode;
	set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
	if (seq != 0) {
		WARN_ON_ONCE(lo->plh_return_seq != 0 && lo->plh_return_seq != seq);
		lo->plh_return_seq = seq;
	}
}

/**
 * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
 * @lo: pointer to layout header
 * @tmp_list: list header to be used with pnfs_free_lseg_list()
 * @return_range: describe layout segment ranges to be returned
 * @seq: stateid seqid to match
 *
 * This function is mainly intended for use by layoutrecall. It attempts
 * to free the layout segment immediately, or else to mark it for return
 * as soon as its reference count drops to zero.
 */
int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
				struct list_head *tmp_list,
				const struct pnfs_layout_range *return_range,
				u32 seq)
{
	struct pnfs_layout_segment *lseg, *next;
	int remaining = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;

	assert_spin_locked(&lo->plh_inode->i_lock);

	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (pnfs_match_lseg_recall(lseg, return_range, seq)) {
			dprintk("%s: marking lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode,
				lseg->pls_range.offset,
				lseg->pls_range.length);
			if (mark_lseg_invalid(lseg, tmp_list))
				continue;
			remaining++;
			set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
		}

	if (remaining)
		pnfs_set_plh_return_info(lo, return_range->iomode, seq);

	return remaining;
}

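/*
 * After an I/O error, mark every lseg matching the failed iomode for
 * return. If none of them is still in use, send LAYOUTRETURN right away;
 * otherwise kick off a commit so the remaining references can drain.
 */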
void pnfs_error_mark_layout_for_return(struct inode *inode,
				       struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = NFS_I(inode)->layout;
	struct pnfs_layout_range range = {
		.iomode = lseg->pls_range.iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(free_me);
	bool return_now = false;

	spin_lock(&inode->i_lock);
	pnfs_set_plh_return_info(lo, range.iomode, 0);
	/* Block LAYOUTGET */
	set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
	/*
	 * mark all matching lsegs so that we are sure to have no live
	 * segments at hand when sending layoutreturn. See pnfs_put_lseg()
	 * for how it works.
	 */
	if (!pnfs_mark_matching_lsegs_return(lo, &free_me, &range, 0)) {
		nfs4_stateid stateid;
		enum pnfs_iomode iomode;

		return_now = pnfs_prepare_layoutreturn(lo, &stateid, &iomode);
		spin_unlock(&inode->i_lock);
		if (return_now)
			pnfs_send_layoutreturn(lo, &stateid, iomode, false);
	} else {
		spin_unlock(&inode->i_lock);
		nfs_commit_inode(inode, 0);
	}
	pnfs_free_lseg_list(&free_me);
}
EXPORT_SYMBOL_GPL(pnfs_error_mark_layout_for_return);

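/*
 * Attach a read layout segment to the pageio descriptor, sizing the
 * request from i_size for buffered I/O or from the remaining direct I/O
 * bytes. With no segment available, reads fall back to the MDS.
 */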
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	if (pgio->pg_lseg == NULL) {
		if (pgio->pg_dreq == NULL)
			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
		else
			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   rd_size,
						   IOMODE_READ,
						   false,
						   GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

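/*
 * The write-side counterpart of pnfs_generic_pg_init_read(): request an
 * IOMODE_RW segment of @wb_size bytes, falling back to writing through
 * the MDS when no segment is available.
 */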
void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	if (pgio->pg_lseg == NULL) {
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   wb_size,
						   IOMODE_RW,
						   false,
						   GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_generic_pg_cleanup(struct nfs_pageio_descriptor *desc)
{
	if (desc->pg_lseg) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	}
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_cleanup);

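/*
 * Worked example (hypothetical numbers): with a layout segment covering
 * [0, 1 MiB) and a request starting at byte 1 MiB - 512, seg_left is 512,
 * so at most 512 bytes are coalesced even if nfs_generic_pg_test() would
 * allow more.
 */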
/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio,
		     struct nfs_page *prev, struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are fewer bytes than 'size', return that instead.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 *
	 */
	if (pgio->pg_lseg) {
		seg_end = pnfs_end_offset(pgio->pg_lseg->pls_range.offset,
				     pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);
		WARN_ON_ONCE(req_start >= seg_end);
		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end) {
			/* reference the new lseg */
			if (pgio->pg_ops->pg_cleanup)
				pgio->pg_ops->pg_cleanup(pgio);
			if (pgio->pg_ops->pg_init)
				pgio->pg_ops->pg_init(pgio, req);
			return 0;
		}

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);

int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}

/*
 * Called by non-RPC-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error)) {
		pnfs_set_layoutcommit(hdr->inode, hdr->lseg,
				hdr->mds_offset + hdr->res.count);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	}
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
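
/*
 * Requeue the header's pages on the current mirror and reset the
 * descriptor so the I/O is recoalesced and resent through the MDS.
 */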
static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_write_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
	hdr->release(hdr);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

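/*
 * Hand a write off to the layout driver; if the driver does not attempt
 * it, the pages are redirected through the MDS.
 */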
static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_write_through_mds(desc, hdr);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);

	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_write(desc, hdr, desc->pg_ioflags);

	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

/*
 * Called by non-RPC-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	if (likely(!hdr->pnfs_error))
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (unlikely(hdr->pnfs_error))
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc);

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &mirror->pg_list);
		nfs_pageio_reset_read_mds(desc);
		mirror->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
	hdr->release(hdr);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/* Resend all requests through pnfs. */
void pnfs_read_resend_pnfs(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		/* Prevent deadlocks with layoutreturn! */
		pnfs_put_lseg(hdr->lseg);
		hdr->lseg = NULL;

		nfs_pageio_init_read(&pgio, hdr->inode, false,
					hdr->completion_ops);
		hdr->task.tk_status = nfs_pageio_resend(&pgio, hdr);
	}
}
EXPORT_SYMBOL_GPL(pnfs_read_resend_pnfs);

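/*
 * Hand a read off to the layout driver: PNFS_TRY_AGAIN retries through
 * pNFS itself, while unattempted or failed I/O is redirected through
 * the MDS.
 */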
static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	if (trypnfs == PNFS_TRY_AGAIN)
		pnfs_read_resend_pnfs(hdr);
	if (trypnfs == PNFS_NOT_ATTEMPTED || hdr->task.tk_status)
		pnfs_read_through_mds(desc, hdr);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_error = -ENOMEM;
		return desc->pg_error;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (!ret)
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

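/* Release the LAYOUTCOMMITTING bit and wake up any waiters. */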
static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

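/*
 * Record a new last-write byte (plh_lwb) for LAYOUTCOMMIT and pin the
 * lseg until the commit completes; the reference is dropped in
 * nfs4_layoutcommit_release().
 */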
void
pnfs_set_layoutcommit(struct inode *inode, struct pnfs_layout_segment *lseg,
		loff_t end_pos)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		nfsi->layout->plh_lwb = end_pos;
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	} else if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(lseg);
	}
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	if (end_pos != 0)
		data->args.lastbytewritten = end_pos - 1;
	else
		data->args.lastbytewritten = U64_MAX;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			put_rpccred(data->cred);
			spin_lock(&inode->i_lock);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			goto out_unlock;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_layoutcommit_inode);

int
pnfs_generic_sync(struct inode *inode, bool datasync)
{
	return pnfs_layoutcommit_inode(inode, true);
}
EXPORT_SYMBOL_GPL(pnfs_generic_sync);

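/* Allocate the structure that caches mdsthreshold hints from the server. */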
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}
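
/*
 * LAYOUTSTATS (NFSv4.2): report per-layout I/O statistics to the server.
 * At most one report per inode is in flight at a time, guarded by the
 * NFS_INO_LAYOUTSTATS bit.
 */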
#if IS_ENABLED(CONFIG_NFS_V4_2)
int
pnfs_report_layoutstat(struct inode *inode, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs_server *server = NFS_SERVER(inode);
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs42_layoutstat_data *data;
	struct pnfs_layout_hdr *hdr;
	int status = 0;

	if (!pnfs_enabled_sb(server) || !ld->prepare_layoutstats)
		goto out;

	if (!nfs_server_capable(inode, NFS_CAP_LAYOUTSTATS))
		goto out;

	if (test_and_set_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags))
		goto out;

	spin_lock(&inode->i_lock);
	if (!NFS_I(inode)->layout) {
		spin_unlock(&inode->i_lock);
		goto out_clear_layoutstats;
	}
	hdr = NFS_I(inode)->layout;
	pnfs_get_layout_hdr(hdr);
	spin_unlock(&inode->i_lock);

	data = kzalloc(sizeof(*data), gfp_flags);
	if (!data) {
		status = -ENOMEM;
		goto out_put;
	}

	data->args.fh = NFS_FH(inode);
	data->args.inode = inode;
	status = ld->prepare_layoutstats(&data->args);
	if (status)
		goto out_free;

	status = nfs42_proc_layoutstats_generic(NFS_SERVER(inode), data);

out:
	dprintk("%s returns %d\n", __func__, status);
	return status;

out_free:
	kfree(data);
out_put:
	pnfs_put_layout_hdr(hdr);
out_clear_layoutstats:
	smp_mb__before_atomic();
	clear_bit(NFS_INO_LAYOUTSTATS, &nfsi->flags);
	smp_mb__after_atomic();
	goto out;
}
EXPORT_SYMBOL_GPL(pnfs_report_layoutstat);
#endif

unsigned int layoutstats_timer;
module_param(layoutstats_timer, uint, 0644);
EXPORT_SYMBOL_GPL(layoutstats_timer);