/*
 *  linux/fs/nfs/blocklayout/blocklayout.c
 *
 *  Module for the NFSv4.1 pNFS block layout driver.
 *
 *  Copyright (c) 2006 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Andy Adamson <andros@citi.umich.edu>
 *  Fred Isaman <iisaman@umich.edu>
 *
 * permission is granted to use, copy, create derivative works and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the university of michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization.  if
 * the above copyright notice or any other identification of the
 * university of michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * this software is provided as is, without representation from the
 * university of michigan as to its fitness for any purpose, and without
 * warranty by the university of michigan of any kind, either express
 * or implied, including without limitation the implied warranties of
 * merchantability and fitness for a particular purpose.  the regents
 * of the university of michigan shall not be liable for any damages,
 * including special, indirect, incidental, or consequential damages,
 * with respect to any claim arising out or in connection with the use
 * of the software, even if it has been or is hereafter advised of the
 * possibility of such damages.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/bio.h>		/* struct bio */
#include <linux/prefetch.h>
#include <linux/pagevec.h>

#include "../pnfs.h"
#include "../nfs4session.h"
#include "../internal.h"
#include "blocklayout.h"

#define NFSDBG_FACILITY	NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andy Adamson <andros@citi.umich.edu>");
MODULE_DESCRIPTION("The NFSv4.1 pNFS Block layout driver");

/* Given the be associated with isect, determine if page data needs to be
 * initialized.
 */
static int is_hole(struct pnfs_block_extent *be, sector_t isect)
{
	if (be->be_state == PNFS_BLOCK_NONE_DATA)
		return 1;
	else if (be->be_state != PNFS_BLOCK_INVALID_DATA)
		return 0;
	else
		return !bl_is_sector_init(be->be_inval, isect);
}

/* Given the be associated with isect, determine if page data can be
 * written to disk.
 */
static int is_writable(struct pnfs_block_extent *be, sector_t isect)
{
	return (be->be_state == PNFS_BLOCK_READWRITE_DATA ||
		be->be_state == PNFS_BLOCK_INVALID_DATA);
}

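/*
 * A parallel_io tracks all bios issued for one read or write request.
 * The kref starts at 1 for the submitting thread, bl_submit_bio() takes
 * an extra reference for every bio it sends down, and each bio completion
 * drops one again, so pnfs_callback only runs from destroy_parallel()
 * once the last in-flight bio has finished and the submitter has called
 * put_parallel().
 */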
/* The data we are handed might be spread across several bios.  We need
 * to track when the last one is finished.
 */
struct parallel_io {
	struct kref refcnt;
	void (*pnfs_callback) (void *data, int num_se);
	void *data;
	int bse_count;
};

static inline struct parallel_io *alloc_parallel(void *data)
{
	struct parallel_io *rv;

	rv  = kmalloc(sizeof(*rv), GFP_NOFS);
	if (rv) {
		rv->data = data;
		kref_init(&rv->refcnt);
		rv->bse_count = 0;
	}
	return rv;
}

static inline void get_parallel(struct parallel_io *p)
{
	kref_get(&p->refcnt);
}

static void destroy_parallel(struct kref *kref)
{
	struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);

	dprintk("%s enter\n", __func__);
	p->pnfs_callback(p->data, p->bse_count);
	kfree(p);
}

static inline void put_parallel(struct parallel_io *p)
{
	kref_put(&p->refcnt, destroy_parallel);
}

static struct bio *
bl_submit_bio(int rw, struct bio *bio)
{
	if (bio) {
		get_parallel(bio->bi_private);
		dprintk("%s submitting %s bio %u@%llu\n", __func__,
			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
			(unsigned long long)bio->bi_iter.bi_sector);
		submit_bio(rw, bio);
	}
	return NULL;
}

static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
				     struct pnfs_block_extent *be,
				     void (*end_io)(struct bio *, int err),
				     struct parallel_io *par)
{
	struct bio *bio;

	npg = min(npg, BIO_MAX_PAGES);
	bio = bio_alloc(GFP_NOIO, npg);
	if (!bio && (current->flags & PF_MEMALLOC)) {
		while (!bio && (npg /= 2))
			bio = bio_alloc(GFP_NOIO, npg);
	}

	if (bio) {
		bio->bi_iter.bi_sector = isect - be->be_f_offset +
			be->be_v_offset;
		bio->bi_bdev = be->be_mdev;
		bio->bi_end_io = end_io;
		bio->bi_private = par;
	}
	return bio;
}

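/*
 * Append one page (or the offset/len part of it) to @bio, allocating a
 * bio against the extent's device first if needed.  When bio_add_page()
 * cannot take the full length (the bio is full or the device's limits
 * are hit), the partially built bio is submitted and the page is retried
 * on a fresh bio.
 */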
static struct bio *do_add_page_to_bio(struct bio *bio, int npg, int rw,
				      sector_t isect, struct page *page,
				      struct pnfs_block_extent *be,
				      void (*end_io)(struct bio *, int err),
				      struct parallel_io *par,
				      unsigned int offset, int len)
{
	isect = isect + (offset >> SECTOR_SHIFT);
	dprintk("%s: npg %d rw %d isect %llu offset %u len %d\n", __func__,
		npg, rw, (unsigned long long)isect, offset, len);
retry:
	if (!bio) {
		bio = bl_alloc_init_bio(npg, isect, be, end_io, par);
		if (!bio)
			return ERR_PTR(-ENOMEM);
	}
	if (bio_add_page(bio, page, len, offset) < len) {
		bio = bl_submit_bio(rw, bio);
		goto retry;
	}
	return bio;
}

static void bl_end_io_read(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;

	if (err) {
		struct nfs_pgio_header *header = par->data;

		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}

	bio_put(bio);
	put_parallel(par);
}

static void bl_read_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	pnfs_ld_read_done(hdr);
}

static void
bl_end_par_io_read(void *data, int unused)
{
	struct nfs_pgio_header *hdr = data;

	hdr->task.tk_status = hdr->pnfs_error;
	INIT_WORK(&hdr->task.u.tk_work, bl_read_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

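/*
 * Read path: walk the request's pages, look up the block extent covering
 * each one, and batch contiguous pages into bios.  Holes (NONE_DATA, or
 * INVALID_DATA ranges that have not been initialized yet) are zero-filled
 * without touching the device, unless a copy-on-write source extent
 * exists to read from instead.
 */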
static enum pnfs_try_status
bl_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_header *header = hdr;
	int i, hole;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL, *cow_read = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par;
	loff_t f_offset = hdr->args.offset;
	size_t bytes_left = hdr->args.count;
	unsigned int pg_offset, pg_len;
	struct page **pages = hdr->args.pages;
	int pg_index = hdr->args.pgbase >> PAGE_CACHE_SHIFT;
	const bool is_dio = (header->dreq != NULL);
	struct blk_plug plug;

	dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
		hdr->page_array.npages, f_offset,
		(unsigned int)hdr->args.count);

	par = alloc_parallel(hdr);
	if (!par)
		goto use_mds;
	par->pnfs_callback = bl_end_par_io_read;
	/* At this point, we can no longer jump to use_mds */

	blk_start_plug(&plug);

	isect = (sector_t) (f_offset >> SECTOR_SHIFT);
	/* Code assumes extents are page-aligned */
	for (i = pg_index; i < hdr->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bl_put_extent(cow_read);
			bio = bl_submit_bio(READ, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
					     isect, &cow_read);
			if (!be) {
				header->pnfs_error = -EIO;
				goto out;
			}
			extent_length = be->be_length -
				(isect - be->be_f_offset);
			if (cow_read) {
				sector_t cow_length = cow_read->be_length -
					(isect - cow_read->be_f_offset);
				extent_length = min(extent_length, cow_length);
			}
		}

		pg_offset = f_offset & ~PAGE_CACHE_MASK;
		if (is_dio) {
			if (pg_offset + bytes_left > PAGE_CACHE_SIZE)
				pg_len = PAGE_CACHE_SIZE - pg_offset;
			else
				pg_len = bytes_left;

			f_offset += pg_len;
			bytes_left -= pg_len;
			isect += (pg_offset >> SECTOR_SHIFT);
			extent_length -= (pg_offset >> SECTOR_SHIFT);
		} else {
			BUG_ON(pg_offset != 0);
			pg_len = PAGE_CACHE_SIZE;
		}

		hole = is_hole(be, isect);
		if (hole && !cow_read) {
			bio = bl_submit_bio(READ, bio);
			/* Fill hole w/ zeroes w/o accessing device */
			dprintk("%s Zeroing page for hole\n", __func__);
			zero_user_segment(pages[i], pg_offset, pg_len);
		} else {
			struct pnfs_block_extent *be_read;

			be_read = (hole && cow_read) ? cow_read : be;
			bio = do_add_page_to_bio(bio,
						 hdr->page_array.npages - i,
						 READ,
						 isect, pages[i], be_read,
						 bl_end_io_read, par,
						 pg_offset, pg_len);
			if (IS_ERR(bio)) {
				header->pnfs_error = PTR_ERR(bio);
				bio = NULL;
				goto out;
			}
		}
		isect += (pg_len >> SECTOR_SHIFT);
		extent_length -= (pg_len >> SECTOR_SHIFT);
	}
	if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
		hdr->res.eof = 1;
		hdr->res.count = header->inode->i_size - hdr->args.offset;
	} else {
		hdr->res.count = (isect << SECTOR_SHIFT) - hdr->args.offset;
	}
out:
	bl_put_extent(be);
	bl_put_extent(cow_read);
	bl_submit_bio(READ, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;

 use_mds:
	dprintk("Giving up and using normal NFS\n");
	return PNFS_NOT_ATTEMPTED;
}

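/*
 * Round the just-written byte range out to page-aligned boundaries and,
 * for every INVALID_DATA extent it touches, move one of the short extents
 * reserved at write time onto the commit list so the range is reported in
 * the next LAYOUTCOMMIT.
 */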
static void mark_extents_written(struct pnfs_block_layout *bl,
				 __u64 offset, __u32 count)
{
	sector_t isect, end;
	struct pnfs_block_extent *be;
	struct pnfs_block_short_extent *se;

	dprintk("%s(%llu, %u)\n", __func__, offset, count);
	if (count == 0)
		return;
	isect = (offset & (long)(PAGE_CACHE_MASK)) >> SECTOR_SHIFT;
	end = (offset + count + PAGE_CACHE_SIZE - 1) & (long)(PAGE_CACHE_MASK);
	end >>= SECTOR_SHIFT;
	while (isect < end) {
		sector_t len;
		be = bl_find_get_extent(bl, isect, NULL);
		BUG_ON(!be); /* FIXME */
		len = min(end, be->be_f_offset + be->be_length) - isect;
		if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
			se = bl_pop_one_short_extent(be->be_inval);
			BUG_ON(!se);
			bl_mark_for_commit(be, isect, len, se);
		}
		isect += len;
		bl_put_extent(be);
	}
}

static void bl_end_io_write(struct bio *bio, int err)
{
	struct parallel_io *par = bio->bi_private;
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct nfs_pgio_header *header = par->data;

	if (!uptodate) {
		if (!header->pnfs_error)
			header->pnfs_error = -EIO;
		pnfs_set_lo_fail(header->lseg);
	}
	bio_put(bio);
	put_parallel(par);
}

/* Scheduled from bl_end_par_io_write; marks sectors as written and
 * extends the commit list.
 */
static void bl_write_cleanup(struct work_struct *work)
{
	struct rpc_task *task;
	struct nfs_pgio_header *hdr;
	dprintk("%s enter\n", __func__);
	task = container_of(work, struct rpc_task, u.tk_work);
	hdr = container_of(task, struct nfs_pgio_header, task);
	if (likely(!hdr->pnfs_error)) {
		/* Marks for LAYOUTCOMMIT */
		mark_extents_written(BLK_LSEG2EXT(hdr->lseg),
				     hdr->args.offset, hdr->args.count);
	}
	pnfs_ld_write_done(hdr);
}

/* Called when last of bios associated with a bl_write_pagelist call finishes */
static void bl_end_par_io_write(void *data, int num_se)
{
	struct nfs_pgio_header *hdr = data;

	if (unlikely(hdr->pnfs_error)) {
		bl_free_short_extents(&BLK_LSEG2EXT(hdr->lseg)->bl_inval,
					num_se);
	}

	hdr->task.tk_status = hdr->pnfs_error;
	hdr->verf.committed = NFS_FILE_SYNC;
	INIT_WORK(&hdr->task.u.tk_work, bl_write_cleanup);
	schedule_work(&hdr->task.u.tk_work);
}

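/*
 * Write path: requests are always written out as whole, page-aligned
 * pages.  For each INVALID_DATA extent used, a short extent is reserved
 * up front (counted in bse_count) so that the completion path can queue
 * the range for LAYOUTCOMMIT without having to allocate memory.
 */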
static enum pnfs_try_status
bl_write_pagelist(struct nfs_pgio_header *header, int sync)
{
	int i, ret;
	struct bio *bio = NULL;
	struct pnfs_block_extent *be = NULL;
	sector_t isect, extent_length = 0;
	struct parallel_io *par = NULL;
	loff_t offset = header->args.offset;
	size_t count = header->args.count;
	struct page **pages = header->args.pages;
	int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
	struct blk_plug plug;

	dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);

	/* At this point, header->page_array is a (sequential) list of nfs_pages.
	 * We want to write each, and if there is an error set pnfs_error
	 * to have it redone using nfs.
	 */
	par = alloc_parallel(header);
	if (!par)
		goto out_mds;
	par->pnfs_callback = bl_end_par_io_write;
	/* At this point, have to be more careful with error handling */

	blk_start_plug(&plug);

	/* we always write out the whole page */
	offset = offset & (loff_t)PAGE_CACHE_MASK;
	isect = offset >> SECTOR_SHIFT;

	for (i = pg_index; i < header->page_array.npages; i++) {
		if (extent_length <= 0) {
			/* We've used up the previous extent */
			bl_put_extent(be);
			bio = bl_submit_bio(WRITE, bio);
			/* Get the next one */
			be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
					     isect, NULL);
			if (!be || !is_writable(be, isect)) {
				header->pnfs_error = -EINVAL;
				goto out;
			}
			if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
				if (likely(!bl_push_one_short_extent(
								be->be_inval)))
					par->bse_count++;
				else {
					header->pnfs_error = -ENOMEM;
					goto out;
				}
			}
			extent_length = be->be_length -
			    (isect - be->be_f_offset);
		}

		BUG_ON(offset & ~PAGE_CACHE_MASK);

		if (be->be_state == PNFS_BLOCK_INVALID_DATA &&
		    !bl_is_sector_init(be->be_inval, isect)) {
			ret = bl_mark_sectors_init(be->be_inval, isect,
						       PAGE_CACHE_SECTORS);
			if (unlikely(ret)) {
				dprintk("%s bl_mark_sectors_init fail %d\n",
					__func__, ret);
				header->pnfs_error = ret;
				goto out;
			}
		}

		bio = do_add_page_to_bio(bio, header->page_array.npages - i,
					 WRITE, isect, pages[i], be,
					 bl_end_io_write, par,
					 0, PAGE_CACHE_SIZE);
		if (IS_ERR(bio)) {
			header->pnfs_error = PTR_ERR(bio);
			bio = NULL;
			goto out;
		}
		offset += PAGE_CACHE_SIZE;
		count -= PAGE_CACHE_SIZE;
		isect += PAGE_CACHE_SECTORS;
		extent_length -= PAGE_CACHE_SECTORS;
	}

	header->res.count = header->args.count;
out:
	bl_put_extent(be);
	bl_submit_bio(WRITE, bio);
	blk_finish_plug(&plug);
	put_parallel(par);
	return PNFS_ATTEMPTED;
out_mds:
	return PNFS_NOT_ATTEMPTED;
}

/* FIXME - range ignored */
static void
release_extents(struct pnfs_block_layout *bl, struct pnfs_layout_range *range)
{
	int i;
	struct pnfs_block_extent *be;

	spin_lock(&bl->bl_ext_lock);
	for (i = 0; i < EXTENT_LISTS; i++) {
		while (!list_empty(&bl->bl_extents[i])) {
			be = list_first_entry(&bl->bl_extents[i],
					      struct pnfs_block_extent,
					      be_node);
			list_del(&be->be_node);
			bl_put_extent(be);
		}
	}
	spin_unlock(&bl->bl_ext_lock);
}

static void
release_inval_marks(struct pnfs_inval_markings *marks)
{
	struct pnfs_inval_tracking *pos, *temp;
	struct pnfs_block_short_extent *se, *stemp;

	list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
		list_del(&pos->it_link);
		kfree(pos);
	}

	list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
		list_del(&se->bse_node);
		kfree(se);
	}
	return;
}

static void bl_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct pnfs_block_layout *bl = BLK_LO2EXT(lo);

	dprintk("%s enter\n", __func__);
	release_extents(bl, NULL);
	release_inval_marks(&bl->bl_inval);
	kfree(bl);
}

static struct pnfs_layout_hdr *bl_alloc_layout_hdr(struct inode *inode,
						   gfp_t gfp_flags)
{
	struct pnfs_block_layout *bl;

	dprintk("%s enter\n", __func__);
	bl = kzalloc(sizeof(*bl), gfp_flags);
	if (!bl)
		return NULL;
	spin_lock_init(&bl->bl_ext_lock);
	INIT_LIST_HEAD(&bl->bl_extents[0]);
	INIT_LIST_HEAD(&bl->bl_extents[1]);
	INIT_LIST_HEAD(&bl->bl_commit);
	INIT_LIST_HEAD(&bl->bl_committing);
	bl->bl_count = 0;
	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> SECTOR_SHIFT;
	BL_INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
	return &bl->bl_layout;
}

static void bl_free_lseg(struct pnfs_layout_segment *lseg)
{
	dprintk("%s enter\n", __func__);
	kfree(lseg);
}

/* We pretty much ignore lseg, and store all data layout wide, so we
 * can correctly merge.
 */
static struct pnfs_layout_segment *bl_alloc_lseg(struct pnfs_layout_hdr *lo,
						 struct nfs4_layoutget_res *lgr,
						 gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg;
	int status;

	dprintk("%s enter\n", __func__);
	lseg = kzalloc(sizeof(*lseg), gfp_flags);
	if (!lseg)
		return ERR_PTR(-ENOMEM);
	status = nfs4_blk_process_layoutget(lo, lgr, gfp_flags);
	if (status) {
		/* We don't want to call the full-blown bl_free_lseg,
		 * since on error extents were not touched.
		 */
		kfree(lseg);
		return ERR_PTR(status);
	}
	return lseg;
}

static void
bl_encode_layoutcommit(struct pnfs_layout_hdr *lo, struct xdr_stream *xdr,
		       const struct nfs4_layoutcommit_args *arg)
{
	dprintk("%s enter\n", __func__);
	encode_pnfs_block_layoutupdate(BLK_LO2EXT(lo), xdr, arg);
}

static void
bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
{
	struct pnfs_layout_hdr *lo = NFS_I(lcdata->args.inode)->layout;

	dprintk("%s enter\n", __func__);
	clean_pnfs_block_layoutupdate(BLK_LO2EXT(lo), &lcdata->args, lcdata->res.status);
}

static void free_blk_mountid(struct block_mount_id *mid)
{
	if (mid) {
		struct pnfs_block_dev *dev, *tmp;

		/* No need to take bm_lock as we are last user freeing bm_devlist */
		list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
			list_del(&dev->bm_node);
			bl_free_block_dev(dev);
		}
		kfree(mid);
	}
}

/* This is mostly copied from the filelayout_get_device_info function.
 * It seems much of this should be at the generic pnfs level.
 */
static struct pnfs_block_dev *
nfs4_blk_get_deviceinfo(struct nfs_server *server, const struct nfs_fh *fh,
			struct nfs4_deviceid *d_id)
{
	struct pnfs_device *dev;
	struct pnfs_block_dev *rv;
	u32 max_resp_sz;
	int max_pages;
	struct page **pages = NULL;
	int i, rc;

	/*
	 * Use the session max response size as the basis for setting
	 * GETDEVICEINFO's maxcount
	 */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = nfs_page_array_len(0, max_resp_sz);
	dprintk("%s max_resp_sz %u max_pages %d\n",
		__func__, max_resp_sz, max_pages);

	dev = kmalloc(sizeof(*dev), GFP_NOFS);
	if (!dev) {
		dprintk("%s kmalloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	pages = kcalloc(max_pages, sizeof(struct page *), GFP_NOFS);
	if (pages == NULL) {
		kfree(dev);
		return ERR_PTR(-ENOMEM);
	}
	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(GFP_NOFS);
		if (!pages[i]) {
			rv = ERR_PTR(-ENOMEM);
			goto out_free;
		}
	}

	memcpy(&dev->dev_id, d_id, sizeof(*d_id));
	dev->layout_type = LAYOUT_BLOCK_VOLUME;
	dev->pages = pages;
	dev->pgbase = 0;
	dev->pglen = PAGE_SIZE * max_pages;
	dev->mincount = 0;
	dev->maxcount = max_resp_sz - nfs41_maxgetdevinfo_overhead;

	dprintk("%s: dev_id: %s\n", __func__, dev->dev_id.data);
	rc = nfs4_proc_getdeviceinfo(server, dev, NULL);
	dprintk("%s getdevice info returns %d\n", __func__, rc);
	if (rc) {
		rv = ERR_PTR(rc);
		goto out_free;
	}

	rv = nfs4_blk_decode_device(server, dev);
 out_free:
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);
	kfree(dev);
	return rv;
}

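/*
 * Mount-time setup: the server must advertise a block size that is
 * non-zero and no larger than PAGE_SIZE.  The device list is then walked
 * with GETDEVICELIST, each device is fetched and decoded via
 * GETDEVICEINFO, and the results are collected on the per-server
 * block_mount_id device list.
 */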
static int
bl_set_layoutdriver(struct nfs_server *server, const struct nfs_fh *fh)
{
	struct block_mount_id *b_mt_id = NULL;
	struct pnfs_devicelist *dlist = NULL;
	struct pnfs_block_dev *bdev;
	LIST_HEAD(block_disklist);
	int status, i;

	dprintk("%s enter\n", __func__);

	if (server->pnfs_blksize == 0) {
		dprintk("%s Server did not return blksize\n", __func__);
		return -EINVAL;
	}
	if (server->pnfs_blksize > PAGE_SIZE) {
		printk(KERN_ERR "%s: pNFS blksize %d not supported.\n",
			__func__, server->pnfs_blksize);
		return -EINVAL;
	}

	b_mt_id = kzalloc(sizeof(struct block_mount_id), GFP_NOFS);
	if (!b_mt_id) {
		status = -ENOMEM;
		goto out_error;
	}
	/* Initialize nfs4 block layout mount id */
	spin_lock_init(&b_mt_id->bm_lock);
	INIT_LIST_HEAD(&b_mt_id->bm_devlist);

	dlist = kmalloc(sizeof(struct pnfs_devicelist), GFP_NOFS);
	if (!dlist) {
		status = -ENOMEM;
		goto out_error;
	}
	dlist->eof = 0;
	while (!dlist->eof) {
		status = nfs4_proc_getdevicelist(server, fh, dlist);
		if (status)
			goto out_error;
		dprintk("%s GETDEVICELIST numdevs=%i, eof=%i\n",
			__func__, dlist->num_devs, dlist->eof);
		for (i = 0; i < dlist->num_devs; i++) {
			bdev = nfs4_blk_get_deviceinfo(server, fh,
						       &dlist->dev_id[i]);
			if (IS_ERR(bdev)) {
				status = PTR_ERR(bdev);
				goto out_error;
			}
			spin_lock(&b_mt_id->bm_lock);
			list_add(&bdev->bm_node, &b_mt_id->bm_devlist);
			spin_unlock(&b_mt_id->bm_lock);
		}
	}
	dprintk("%s SUCCESS\n", __func__);
	server->pnfs_ld_data = b_mt_id;

 out_return:
	kfree(dlist);
	return status;

 out_error:
	free_blk_mountid(b_mt_id);
	goto out_return;
}

static int
bl_clear_layoutdriver(struct nfs_server *server)
{
	struct block_mount_id *b_mt_id = server->pnfs_ld_data;

	dprintk("%s enter\n", __func__);
	free_blk_mountid(b_mt_id);
	dprintk("%s RETURNS\n", __func__);
	return 0;
}

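/*
 * Alignment policy: buffered requests are always accepted; direct I/O
 * requests must be sector-aligned for reads and page-aligned for writes,
 * otherwise the pageio descriptor is reset so the request goes through
 * the MDS instead of the block layout.
 */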
static bool
is_aligned_req(struct nfs_pageio_descriptor *pgio,
		struct nfs_page *req, unsigned int alignment)
{
	/*
	 * Always accept buffered writes, higher layers take care of the
	 * right alignment.
	 */
	if (pgio->pg_dreq == NULL)
		return true;

	if (!IS_ALIGNED(req->wb_offset, alignment))
		return false;

	if (IS_ALIGNED(req->wb_bytes, alignment))
		return true;

	if (req_offset(req) + req->wb_bytes == i_size_read(pgio->pg_inode)) {
		/*
		 * If the write goes up to the inode size, just write
		 * the full page.  Data past the inode size is
		 * guaranteed to be zeroed by the higher level client
		 * code, and this behaviour is mandated by RFC 5663
		 * section 2.3.2.
		 */
		return true;
	}

	return false;
}

static void
bl_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE)) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	pnfs_generic_pg_init_read(pgio, req);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, SECTOR_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

/*
 * Return the number of contiguous bytes for a given inode
 * starting at page frame idx.
 */
static u64 pnfs_num_cont_bytes(struct inode *inode, pgoff_t idx)
{
	struct address_space *mapping = inode->i_mapping;
	pgoff_t end;

	/* Optimize common case that writes from 0 to end of file */
	end = DIV_ROUND_UP(i_size_read(inode), PAGE_CACHE_SIZE);
	if (end != NFS_I(inode)->npages) {
		rcu_read_lock();
		end = page_cache_next_hole(mapping, idx + 1, ULONG_MAX);
		rcu_read_unlock();
	}

	if (!end)
		return i_size_read(inode) - (idx << PAGE_CACHE_SHIFT);
	else
		return (end - idx) << PAGE_CACHE_SHIFT;
}

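/*
 * For buffered writes the layout range is sized from the number of
 * contiguous dirty bytes estimated by pnfs_num_cont_bytes(); for direct
 * writes it is sized from the bytes remaining in the direct request.
 */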
static void
bl_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 wb_size;

	if (!is_aligned_req(pgio, req, PAGE_SIZE)) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		wb_size = pnfs_num_cont_bytes(pgio->pg_inode,
					      req->wb_index);
	else
		wb_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pnfs_generic_pg_init_write(pgio, req, wb_size);
}

/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
bl_pg_test_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		 struct nfs_page *req)
{
	if (!is_aligned_req(pgio, req, PAGE_SIZE))
		return 0;
	return pnfs_generic_pg_test(pgio, prev, req);
}

static const struct nfs_pageio_ops bl_pg_read_ops = {
	.pg_init = bl_pg_init_read,
	.pg_test = bl_pg_test_read,
	.pg_doio = pnfs_generic_pg_readpages,
};

static const struct nfs_pageio_ops bl_pg_write_ops = {
	.pg_init = bl_pg_init_write,
	.pg_test = bl_pg_test_write,
	.pg_doio = pnfs_generic_pg_writepages,
};

static struct pnfs_layoutdriver_type blocklayout_type = {
	.id				= LAYOUT_BLOCK_VOLUME,
	.name				= "LAYOUT_BLOCK_VOLUME",
	.owner				= THIS_MODULE,
	.flags				= PNFS_READ_WHOLE_PAGE,
	.read_pagelist			= bl_read_pagelist,
	.write_pagelist			= bl_write_pagelist,
	.alloc_layout_hdr		= bl_alloc_layout_hdr,
	.free_layout_hdr		= bl_free_layout_hdr,
	.alloc_lseg			= bl_alloc_lseg,
	.free_lseg			= bl_free_lseg,
	.encode_layoutcommit		= bl_encode_layoutcommit,
	.cleanup_layoutcommit		= bl_cleanup_layoutcommit,
	.set_layoutdriver		= bl_set_layoutdriver,
	.clear_layoutdriver		= bl_clear_layoutdriver,
	.pg_read_ops			= &bl_pg_read_ops,
	.pg_write_ops			= &bl_pg_write_ops,
};

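/*
 * Device name resolution is done over an rpc_pipefs pipe ("blocklayout"
 * under the NFS pipe directory): the kernel issues an upcall that a
 * userspace daemon (blkmapd) answers through bl_pipe_downcall.  The pipe
 * data is per network namespace, and the notifier below registers or
 * removes the pipe dentry as rpc_pipefs superblocks are mounted and
 * unmounted.
 */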
static const struct rpc_pipe_ops bl_upcall_ops = {
	.upcall		= rpc_pipe_generic_upcall,
	.downcall	= bl_pipe_downcall,
	.destroy_msg	= bl_pipe_destroy_msg,
};

static struct dentry *nfs4blocklayout_register_sb(struct super_block *sb,
					    struct rpc_pipe *pipe)
{
	struct dentry *dir, *dentry;

	dir = rpc_d_lookup_sb(sb, NFS_PIPE_DIRNAME);
	if (dir == NULL)
		return ERR_PTR(-ENOENT);
	dentry = rpc_mkpipe_dentry(dir, "blocklayout", NULL, pipe);
	dput(dir);
	return dentry;
}

static void nfs4blocklayout_unregister_sb(struct super_block *sb,
					  struct rpc_pipe *pipe)
{
	if (pipe->dentry)
		rpc_unlink(pipe->dentry);
}

static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			   void *ptr)
{
	struct super_block *sb = ptr;
	struct net *net = sb->s_fs_info;
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return 0;

	if (nn->bl_device_pipe == NULL) {
		module_put(THIS_MODULE);
		return 0;
	}

	switch (event) {
	case RPC_PIPEFS_MOUNT:
		dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);
		if (IS_ERR(dentry)) {
			ret = PTR_ERR(dentry);
			break;
		}
		nn->bl_device_pipe->dentry = dentry;
		break;
	case RPC_PIPEFS_UMOUNT:
		if (nn->bl_device_pipe->dentry)
			nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);
		break;
	default:
		ret = -ENOTSUPP;
		break;
	}
	module_put(THIS_MODULE);
	return ret;
}

static struct notifier_block nfs4blocklayout_block = {
	.notifier_call = rpc_pipefs_event,
};

static struct dentry *nfs4blocklayout_register_net(struct net *net,
						   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;
	struct dentry *dentry;

	pipefs_sb = rpc_get_sb_net(net);
	if (!pipefs_sb)
		return NULL;
	dentry = nfs4blocklayout_register_sb(pipefs_sb, pipe);
	rpc_put_sb_net(net);
	return dentry;
}

static void nfs4blocklayout_unregister_net(struct net *net,
					   struct rpc_pipe *pipe)
{
	struct super_block *pipefs_sb;

	pipefs_sb = rpc_get_sb_net(net);
	if (pipefs_sb) {
		nfs4blocklayout_unregister_sb(pipefs_sb, pipe);
		rpc_put_sb_net(net);
	}
}

static int nfs4blocklayout_net_init(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);
	struct dentry *dentry;

	init_waitqueue_head(&nn->bl_wq);
	nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
	if (IS_ERR(nn->bl_device_pipe))
		return PTR_ERR(nn->bl_device_pipe);
	dentry = nfs4blocklayout_register_net(net, nn->bl_device_pipe);
	if (IS_ERR(dentry)) {
		rpc_destroy_pipe_data(nn->bl_device_pipe);
		return PTR_ERR(dentry);
	}
	nn->bl_device_pipe->dentry = dentry;
	return 0;
}

static void nfs4blocklayout_net_exit(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);

	nfs4blocklayout_unregister_net(net, nn->bl_device_pipe);
	rpc_destroy_pipe_data(nn->bl_device_pipe);
	nn->bl_device_pipe = NULL;
}

static struct pernet_operations nfs4blocklayout_net_ops = {
	.init = nfs4blocklayout_net_init,
	.exit = nfs4blocklayout_net_exit,
};

static int __init nfs4blocklayout_init(void)
{
	int ret;

	dprintk("%s: NFSv4 Block Layout Driver Registering...\n", __func__);

	ret = pnfs_register_layoutdriver(&blocklayout_type);
	if (ret)
		goto out;

	ret = rpc_pipefs_notifier_register(&nfs4blocklayout_block);
	if (ret)
		goto out_remove;
	ret = register_pernet_subsys(&nfs4blocklayout_net_ops);
	if (ret)
		goto out_notifier;
out:
	return ret;

out_notifier:
	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
out_remove:
	pnfs_unregister_layoutdriver(&blocklayout_type);
	return ret;
}

static void __exit nfs4blocklayout_exit(void)
{
	dprintk("%s: NFSv4 Block Layout Driver Unregistering...\n",
	       __func__);

	rpc_pipefs_notifier_unregister(&nfs4blocklayout_block);
	unregister_pernet_subsys(&nfs4blocklayout_net_ops);
	pnfs_unregister_layoutdriver(&blocklayout_type);
}

MODULE_ALIAS("nfs-layouttype4-3");

module_init(nfs4blocklayout_init);
module_exit(nfs4blocklayout_exit);