/*
 *  Module for the pnfs nfs4 file layout driver.
 *  Defines all I/O and Policy interface operations, plus code
 *  to register itself with the pNFS client.
 *
 *  Copyright (c) 2002
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include <linux/backing-dev.h>

#include <linux/sunrpc/metrics.h>

#include "../nfs4session.h"
#include "../internal.h"
#include "../delegation.h"
#include "filelayout.h"
#include "../nfs4trace.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS_LD

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dean Hildebrand <dhildebz@umich.edu>");
MODULE_DESCRIPTION("The NFSv4 file layout driver");

#define FILELAYOUT_POLL_RETRY_MAX     (15*HZ)

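/*
 * A STRIPE_DENSE layout packs only this data server's stripe units into
 * its file, so the file offset must be collapsed: drop the pattern
 * offset, map each full stripe width onto a single stripe unit, and keep
 * the remainder within the current stripe unit.
 */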
static loff_t
filelayout_get_dense_offset(struct nfs4_filelayout_segment *flseg,
			    loff_t offset)
{
	u32 stripe_width = flseg->stripe_unit * flseg->dsaddr->stripe_count;
	u64 stripe_no;
	u32 rem;

	offset -= flseg->pattern_offset;
	stripe_no = div_u64(offset, stripe_width);
	div_u64_rem(offset, flseg->stripe_unit, &rem);

	return stripe_no * flseg->stripe_unit + rem;
}

/* This function is used by the layout driver to calculate the
 * offset of the file on the dserver based on whether the
 * layout type is STRIPE_DENSE or STRIPE_SPARSE
 */
static loff_t
filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
{
	struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);

	switch (flseg->stripe_type) {
	case STRIPE_SPARSE:
		return offset;

	case STRIPE_DENSE:
		return filelayout_get_dense_offset(flseg, offset);
	}

	BUG();
}

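/* Flag the header for redo and resend the failed WRITE through the MDS. */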
static void filelayout_reset_write(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_write_done_resend_to_mds(hdr);
	}
}

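/* As above, but resend the failed READ through the MDS. */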
static void filelayout_reset_read(struct nfs_pgio_header *hdr)
{
	struct rpc_task *task = &hdr->task;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		dprintk("%s Reset task %5u for i/o through MDS "
			"(req %s/%llu, %u bytes @ offset %llu)\n", __func__,
			hdr->task.tk_pid,
			hdr->inode->i_sb->s_id,
			(unsigned long long)NFS_FILEID(hdr->inode),
			hdr->args.count,
			(unsigned long long)hdr->args.offset);

		task->tk_status = pnfs_read_done_resend_to_mds(hdr);
	}
}

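/*
 * Sort out an error returned by a DS or MDS RPC.  Returns 0 when no
 * further handling is needed, -EAGAIN when the call should be retried
 * (possibly after state, session or lease recovery), and
 * -NFS4ERR_RESET_TO_MDS when the caller must redirect the I/O through
 * the MDS.
 */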
static int filelayout_async_handle_error(struct rpc_task *task,
					 struct nfs4_state *state,
					 struct nfs_client *clp,
					 struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo = lseg->pls_layout;
	struct inode *inode = lo->plh_inode;
	struct nfs_server *mds_server = NFS_SERVER(inode);
	struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
	struct nfs_client *mds_client = mds_server->nfs_client;
	struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;

	if (task->tk_status >= 0)
		return 0;

	switch (task->tk_status) {
	/* MDS state errors */
	case -NFS4ERR_DELEG_REVOKED:
	case -NFS4ERR_ADMIN_REVOKED:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_OPENMODE:
		if (state == NULL)
			break;
		if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
			goto out_bad_stateid;
		goto wait_on_recovery;
	case -NFS4ERR_EXPIRED:
		if (state != NULL) {
			if (nfs4_schedule_stateid_recovery(mds_server, state) < 0)
				goto out_bad_stateid;
		}
		nfs4_schedule_lease_recovery(mds_client);
		goto wait_on_recovery;
	/* DS session errors */
	case -NFS4ERR_BADSESSION:
	case -NFS4ERR_BADSLOT:
	case -NFS4ERR_BAD_HIGH_SLOT:
	case -NFS4ERR_DEADSESSION:
	case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
	case -NFS4ERR_SEQ_FALSE_RETRY:
	case -NFS4ERR_SEQ_MISORDERED:
		dprintk("%s ERROR %d, Reset session. Exchangeid "
			"flags 0x%x\n", __func__, task->tk_status,
			clp->cl_exchange_flags);
		nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
		break;
	case -NFS4ERR_DELAY:
	case -NFS4ERR_GRACE:
		rpc_delay(task, FILELAYOUT_POLL_RETRY_MAX);
		break;
	case -NFS4ERR_RETRY_UNCACHED_REP:
		break;
	/* Invalidate Layout errors */
	case -NFS4ERR_PNFS_NO_LAYOUT:
	case -ESTALE:           /* mapped NFS4ERR_STALE */
	case -EBADHANDLE:       /* mapped NFS4ERR_BADHANDLE */
	case -EISDIR:           /* mapped NFS4ERR_ISDIR */
	case -NFS4ERR_FHEXPIRED:
	case -NFS4ERR_WRONG_TYPE:
		dprintk("%s Invalid layout error %d\n", __func__,
			task->tk_status);
		/*
		 * Destroy layout so new i/o will get a new layout.
		 * Layout will not be destroyed until all current lseg
		 * references are put. Mark layout as invalid to resend failed
		 * i/o and all i/o waiting on the slot table to the MDS until
		 * layout is destroyed and a new valid layout is obtained.
		 */
		pnfs_destroy_layout(NFS_I(inode));
		rpc_wake_up(&tbl->slot_tbl_waitq);
		goto reset;
	/* RPC connection errors */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EIO:
	case -ETIMEDOUT:
	case -EPIPE:
		dprintk("%s DS connection error %d\n", __func__,
			task->tk_status);
		nfs4_mark_deviceid_unavailable(devid);
		pnfs_error_mark_layout_for_return(inode, lseg);
		rpc_wake_up(&tbl->slot_tbl_waitq);
		/* fall through */
	default:
		pnfs_set_lo_fail(lseg);
reset:
		dprintk("%s Retry through MDS. Error %d\n", __func__,
			task->tk_status);
		return -NFS4ERR_RESET_TO_MDS;
	}
out:
	task->tk_status = 0;
	return -EAGAIN;
out_bad_stateid:
	task->tk_status = -EIO;
	return 0;
wait_on_recovery:
	rpc_sleep_on(&mds_client->cl_rpcwaitq, task, NULL);
	if (test_bit(NFS4CLNT_MANAGER_RUNNING, &mds_client->cl_state) == 0)
		rpc_wake_up_queued_task(&mds_client->cl_rpcwaitq, task);
	goto out;
}

/* NFS_PROTO call done callback routines */

static int filelayout_read_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_read(hdr, task->tk_status);
	err = filelayout_async_handle_error(task, hdr->args.context->state,
					    hdr->ds_clp, hdr->lseg);

	switch (err) {
	case -NFS4ERR_RESET_TO_MDS:
		filelayout_reset_read(hdr);
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	return 0;
}

/*
 * We reference the rpc_cred of the first WRITE that triggers the need for
 * a LAYOUTCOMMIT, and use it to send the layoutcommit compound.
 * rfc5661 is not clear about which credential should be used.
 */
static void
filelayout_set_layoutcommit(struct nfs_pgio_header *hdr)
{
	loff_t end_offs = 0;

	if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds ||
	    hdr->res.verf->committed == NFS_FILE_SYNC)
		return;
	if (hdr->res.verf->committed == NFS_DATA_SYNC)
		end_offs = hdr->mds_offset + (loff_t)hdr->res.count;

	/* Note: if the write is unstable, don't set end_offs until commit */
	pnfs_set_layoutcommit(hdr->inode, hdr->lseg, end_offs);
	dprintk("%s inode %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
		(unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
}

bool
filelayout_test_devid_unavailable(struct nfs4_deviceid_node *node)
{
	return filelayout_test_devid_invalid(node) ||
		nfs4_test_deviceid_unavailable(node);
}

static bool
filelayout_reset_to_mds(struct pnfs_layout_segment *lseg)
{
	struct nfs4_deviceid_node *node = FILELAYOUT_DEVID_NODE(lseg);

	return filelayout_test_devid_unavailable(node);
}

/*
 * Call ops for the async read/write cases
 * In the case of dense layouts, the offset needs to be reset to its
 * original value.
 */
static void filelayout_read_prepare(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return;
	}
	if (filelayout_reset_to_mds(hdr->lseg)) {
		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
		filelayout_reset_read(hdr);
		rpc_exit(task, 0);
		return;
	}
	hdr->pgio_done_cb = filelayout_read_done_cb;

	if (nfs4_setup_sequence(hdr->ds_clp,
			&hdr->args.seq_args,
			&hdr->res.seq_res,
			task))
		return;
	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_READ) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void filelayout_read_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs41_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, data);
}

static void filelayout_read_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics);
}

static int filelayout_write_done_cb(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	int err;

	trace_nfs4_pnfs_write(hdr, task->tk_status);
	err = filelayout_async_handle_error(task, hdr->args.context->state,
					    hdr->ds_clp, hdr->lseg);

	switch (err) {
	case -NFS4ERR_RESET_TO_MDS:
		filelayout_reset_write(hdr);
		return task->tk_status;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	filelayout_set_layoutcommit(hdr);

	/* zero out the fattr */
	hdr->fattr.valid = 0;
	if (task->tk_status >= 0)
		nfs_writeback_update_inode(hdr);

	return 0;
}

static int filelayout_commit_done_cb(struct rpc_task *task,
				     struct nfs_commit_data *data)
{
	int err;

	trace_nfs4_pnfs_commit_ds(data, task->tk_status);
	err = filelayout_async_handle_error(task, NULL, data->ds_clp,
					    data->lseg);

	switch (err) {
	case -NFS4ERR_RESET_TO_MDS:
		pnfs_generic_prepare_to_resend_writes(data);
		return -EAGAIN;
	case -EAGAIN:
		rpc_restart_call_prepare(task);
		return -EAGAIN;
	}

	pnfs_set_layoutcommit(data->inode, data->lseg, data->lwb);

	return 0;
}

static void filelayout_write_prepare(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (unlikely(test_bit(NFS_CONTEXT_BAD, &hdr->args.context->flags))) {
		rpc_exit(task, -EIO);
		return;
	}
	if (filelayout_reset_to_mds(hdr->lseg)) {
		dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
		filelayout_reset_write(hdr);
		rpc_exit(task, 0);
		return;
	}
	if (nfs4_setup_sequence(hdr->ds_clp,
			&hdr->args.seq_args,
			&hdr->res.seq_res,
			task))
		return;
	if (nfs4_set_rw_stateid(&hdr->args.stateid, hdr->args.context,
			hdr->args.lock_context, FMODE_WRITE) == -EIO)
		rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}

static void filelayout_write_call_done(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags) &&
	    task->tk_status == 0) {
		nfs41_sequence_done(task, &hdr->res.seq_res);
		return;
	}

	/* Note this may cause RPC to be resent */
	hdr->mds_ops->rpc_call_done(task, data);
}

static void filelayout_write_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_pgio_header *hdr = data;

	rpc_count_iostats(task, NFS_SERVER(hdr->inode)->client->cl_metrics);
}

static void filelayout_commit_prepare(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *wdata = data;

	nfs4_setup_sequence(wdata->ds_clp,
			&wdata->args.seq_args,
			&wdata->res.seq_res,
			task);
}

static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
{
	struct nfs_commit_data *cdata = data;

	rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics);
}

static const struct rpc_call_ops filelayout_read_call_ops = {
	.rpc_call_prepare = filelayout_read_prepare,
	.rpc_call_done = filelayout_read_call_done,
	.rpc_count_stats = filelayout_read_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops filelayout_write_call_ops = {
	.rpc_call_prepare = filelayout_write_prepare,
	.rpc_call_done = filelayout_write_call_done,
	.rpc_count_stats = filelayout_write_count_stats,
	.rpc_release = pnfs_generic_rw_release,
};

static const struct rpc_call_ops filelayout_commit_call_ops = {
	.rpc_call_prepare = filelayout_commit_prepare,
	.rpc_call_done = pnfs_generic_write_commit_done,
	.rpc_count_stats = filelayout_commit_count_stats,
	.rpc_release = pnfs_generic_commit_release,
};

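/*
 * Issue an asynchronous READ to the data server covering this byte range.
 * Returns PNFS_NOT_ATTEMPTED when no usable DS connection can be set up,
 * so the caller falls back to I/O through the MDS.
 */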
static enum pnfs_try_status
filelayout_read_pagelist(struct nfs_pgio_header *hdr)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	loff_t offset = hdr->args.offset;
	u32 j, idx;
	struct nfs_fh *fh;

	dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
		__func__, hdr->inode->i_ino,
		hdr->args.pgbase, (size_t)hdr->args.count, offset);

	/* Retrieve the correct rpc_client for the byte range */
	j = nfs4_fl_calc_j_index(lseg, offset);
	idx = nfs4_fl_calc_ds_index(lseg, j);
	ds = nfs4_fl_prepare_ds(lseg, idx);
	if (!ds)
		return PNFS_NOT_ATTEMPTED;

	ds_clnt = nfs4_find_or_create_ds_client(ds->ds_clp, hdr->inode);
	if (IS_ERR(ds_clnt))
		return PNFS_NOT_ATTEMPTED;

	dprintk("%s USE DS: %s cl_count %d\n", __func__,
		ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));

	/* No multipath support. Use first DS */
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_fl_select_ds_fh(lseg, j);
	if (fh)
		hdr->args.fh = fh;

	hdr->args.offset = filelayout_get_dserver_offset(lseg, offset);
	hdr->mds_offset = offset;

	/* Perform an asynchronous read to ds */
	nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
			  NFS_PROTO(hdr->inode), &filelayout_read_call_ops,
			  0, RPC_TASK_SOFTCONN);
	return PNFS_ATTEMPTED;
}

/* Perform async writes. */
static enum pnfs_try_status
filelayout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
{
	struct pnfs_layout_segment *lseg = hdr->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	loff_t offset = hdr->args.offset;
	u32 j, idx;
	struct nfs_fh *fh;

	/* Retrieve the correct rpc_client for the byte range */
	j = nfs4_fl_calc_j_index(lseg, offset);
	idx = nfs4_fl_calc_ds_index(lseg, j);
	ds = nfs4_fl_prepare_ds(lseg, idx);
	if (!ds)
		return PNFS_NOT_ATTEMPTED;

	ds_clnt = nfs4_find_or_create_ds_client(ds->ds_clp, hdr->inode);
	if (IS_ERR(ds_clnt))
		return PNFS_NOT_ATTEMPTED;

	dprintk("%s ino %lu sync %d req %zu@%llu DS: %s cl_count %d\n",
		__func__, hdr->inode->i_ino, sync, (size_t) hdr->args.count,
		offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));

	hdr->pgio_done_cb = filelayout_write_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	hdr->ds_clp = ds->ds_clp;
	hdr->ds_commit_idx = idx;
	fh = nfs4_fl_select_ds_fh(lseg, j);
	if (fh)
		hdr->args.fh = fh;
	hdr->args.offset = filelayout_get_dserver_offset(lseg, offset);

	/* Perform an asynchronous write */
	nfs_initiate_pgio(ds_clnt, hdr, hdr->cred,
			  NFS_PROTO(hdr->inode), &filelayout_write_call_ops,
			  sync, RPC_TASK_SOFTCONN);
	return PNFS_ATTEMPTED;
}

static int
filelayout_check_deviceid(struct pnfs_layout_hdr *lo,
			  struct nfs4_filelayout_segment *fl,
			  gfp_t gfp_flags)
{
	struct nfs4_deviceid_node *d;
	struct nfs4_file_layout_dsaddr *dsaddr;
	int status = -EINVAL;

	/* find and reference the deviceid */
	d = nfs4_find_get_deviceid(NFS_SERVER(lo->plh_inode), &fl->deviceid,
			lo->plh_lc_cred, gfp_flags);
	if (d == NULL)
		goto out;

	dsaddr = container_of(d, struct nfs4_file_layout_dsaddr, id_node);
	/* Found deviceid is unavailable */
	if (filelayout_test_devid_unavailable(&dsaddr->id_node))
		goto out_put;

	fl->dsaddr = dsaddr;

	if (fl->first_stripe_index >= dsaddr->stripe_count) {
		dprintk("%s Bad first_stripe_index %u\n",
				__func__, fl->first_stripe_index);
		goto out_put;
	}

	if ((fl->stripe_type == STRIPE_SPARSE &&
	    fl->num_fh > 1 && fl->num_fh != dsaddr->ds_num) ||
	    (fl->stripe_type == STRIPE_DENSE &&
	    fl->num_fh != dsaddr->stripe_count)) {
		dprintk("%s num_fh %u not valid for given packing\n",
			__func__, fl->num_fh);
		goto out_put;
	}
	status = 0;
out:
	return status;
out_put:
	nfs4_fl_put_deviceid(dsaddr);
	goto out;
}

/*
 * filelayout_check_layout()
 *
 * Make sure layout segment parameters are sane WRT the device.
 * At this point no generic layer initialization of the lseg has occurred,
 * and nothing has been added to the layout_hdr cache.
 *
 */
static int
filelayout_check_layout(struct pnfs_layout_hdr *lo,
			struct nfs4_filelayout_segment *fl,
			struct nfs4_layoutget_res *lgr,
			gfp_t gfp_flags)
{
	int status = -EINVAL;

	dprintk("--> %s\n", __func__);

	/* FIXME: remove this check when layout segment support is added */
	if (lgr->range.offset != 0 ||
	    lgr->range.length != NFS4_MAX_UINT64) {
		dprintk("%s Only whole file layouts supported. Use MDS i/o\n",
			__func__);
		goto out;
	}

	if (fl->pattern_offset > lgr->range.offset) {
		dprintk("%s pattern_offset %lld too large\n",
				__func__, fl->pattern_offset);
		goto out;
	}

	if (!fl->stripe_unit) {
		dprintk("%s Invalid stripe unit (%u)\n",
			__func__, fl->stripe_unit);
		goto out;
	}

	status = 0;
out:
	dprintk("--> %s returns %d\n", __func__, status);
	return status;
}

static void _filelayout_free_lseg(struct nfs4_filelayout_segment *fl)
{
	int i;

	if (fl->fh_array) {
		for (i = 0; i < fl->num_fh; i++) {
			if (!fl->fh_array[i])
				break;
			kfree(fl->fh_array[i]);
		}
		kfree(fl->fh_array);
	}
	kfree(fl);
}

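/*
 * Decode an nfsv4_1_file_layout4 returned by LAYOUTGET: the deviceid, the
 * nfl_util word (stripe unit, dense/sparse packing, commit-through-MDS
 * flag), first_stripe_index, pattern_offset and the list of data-server
 * filehandles.
 */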
static int
filelayout_decode_layout(struct pnfs_layout_hdr *flo,
			 struct nfs4_filelayout_segment *fl,
			 struct nfs4_layoutget_res *lgr,
			 gfp_t gfp_flags)
{
	struct xdr_stream stream;
	struct xdr_buf buf;
	struct page *scratch;
	__be32 *p;
	uint32_t nfl_util;
	int i;

	dprintk("%s: set_layout_map Begin\n", __func__);

	scratch = alloc_page(gfp_flags);
	if (!scratch)
		return -ENOMEM;

	xdr_init_decode_pages(&stream, &buf, lgr->layoutp->pages, lgr->layoutp->len);
	xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE);

	/* 20 = ufl_util (4), first_stripe_index (4), pattern_offset (8),
	 * num_fh (4) */
	p = xdr_inline_decode(&stream, NFS4_DEVICEID4_SIZE + 20);
	if (unlikely(!p))
		goto out_err;

	memcpy(&fl->deviceid, p, sizeof(fl->deviceid));
	p += XDR_QUADLEN(NFS4_DEVICEID4_SIZE);
	nfs4_print_deviceid(&fl->deviceid);

	nfl_util = be32_to_cpup(p++);
	if (nfl_util & NFL4_UFLG_COMMIT_THRU_MDS)
		fl->commit_through_mds = 1;
	if (nfl_util & NFL4_UFLG_DENSE)
		fl->stripe_type = STRIPE_DENSE;
	else
		fl->stripe_type = STRIPE_SPARSE;
	fl->stripe_unit = nfl_util & ~NFL4_UFLG_MASK;

	fl->first_stripe_index = be32_to_cpup(p++);
	p = xdr_decode_hyper(p, &fl->pattern_offset);
	fl->num_fh = be32_to_cpup(p++);

	dprintk("%s: nfl_util 0x%X num_fh %u fsi %u po %llu\n",
		__func__, nfl_util, fl->num_fh, fl->first_stripe_index,
		fl->pattern_offset);

	/* Note that a zero value for num_fh is legal for STRIPE_SPARSE.
	 * Further checking is done in filelayout_check_layout */
	if (fl->num_fh >
	    max(NFS4_PNFS_MAX_STRIPE_CNT, NFS4_PNFS_MAX_MULTI_CNT))
		goto out_err;

	if (fl->num_fh > 0) {
		fl->fh_array = kcalloc(fl->num_fh, sizeof(fl->fh_array[0]),
				       gfp_flags);
		if (!fl->fh_array)
			goto out_err;
	}

	for (i = 0; i < fl->num_fh; i++) {
		/* Do we want to use a mempool here? */
		fl->fh_array[i] = kmalloc(sizeof(struct nfs_fh), gfp_flags);
		if (!fl->fh_array[i])
			goto out_err;

		p = xdr_inline_decode(&stream, 4);
		if (unlikely(!p))
			goto out_err;
		fl->fh_array[i]->size = be32_to_cpup(p++);
		if (sizeof(struct nfs_fh) < fl->fh_array[i]->size) {
			printk(KERN_ERR "NFS: Too big fh %d received %d\n",
			       i, fl->fh_array[i]->size);
			goto out_err;
		}

		p = xdr_inline_decode(&stream, fl->fh_array[i]->size);
		if (unlikely(!p))
			goto out_err;
		memcpy(fl->fh_array[i]->data, p, fl->fh_array[i]->size);
		dprintk("DEBUG: %s: fh len %d\n", __func__,
			fl->fh_array[i]->size);
	}

	__free_page(scratch);
	return 0;

out_err:
	__free_page(scratch);
	return -EIO;
}

static void
filelayout_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);

	dprintk("--> %s\n", __func__);
	nfs4_fl_put_deviceid(fl->dsaddr);
	/* This assumes a single RW lseg */
	if (lseg->pls_range.iomode == IOMODE_RW) {
		struct nfs4_filelayout *flo;

		flo = FILELAYOUT_FROM_HDR(lseg->pls_layout);
		flo->commit_info.nbuckets = 0;
		kfree(flo->commit_info.buckets);
		flo->commit_info.buckets = NULL;
	}
	_filelayout_free_lseg(fl);
}

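/*
 * Size the per-DS commit buckets for this layout segment and, under the
 * inode lock, splice any requests already queued on the old (smaller)
 * bucket array onto the new one.
 */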
static int
filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg,
			     struct nfs_commit_info *cinfo,
			     gfp_t gfp_flags)
{
	struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
	struct pnfs_commit_bucket *buckets;
	int size, i;

	if (fl->commit_through_mds)
		return 0;

	size = (fl->stripe_type == STRIPE_SPARSE) ?
		fl->dsaddr->ds_num : fl->dsaddr->stripe_count;

	if (cinfo->ds->nbuckets >= size) {
		/* This assumes there is only one IOMODE_RW lseg.  What
		 * we really want to do is have a layout_hdr level
		 * dictionary of <multipath_list4, fh> keys, each
		 * associated with a struct list_head, populated by calls
		 * to filelayout_write_pagelist().
		 * */
		return 0;
	}

	buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
			  gfp_flags);
	if (!buckets)
		return -ENOMEM;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&buckets[i].written);
		INIT_LIST_HEAD(&buckets[i].committing);
		/* mark direct verifier as unset */
		buckets[i].direct_verf.committed = NFS_INVALID_STABLE_HOW;
	}

	spin_lock(&cinfo->inode->i_lock);
	if (cinfo->ds->nbuckets >= size)
		goto out;
	for (i = 0; i < cinfo->ds->nbuckets; i++) {
		list_splice(&cinfo->ds->buckets[i].written,
			    &buckets[i].written);
		list_splice(&cinfo->ds->buckets[i].committing,
			    &buckets[i].committing);
		buckets[i].direct_verf.committed =
			cinfo->ds->buckets[i].direct_verf.committed;
		buckets[i].wlseg = cinfo->ds->buckets[i].wlseg;
		buckets[i].clseg = cinfo->ds->buckets[i].clseg;
	}
	swap(cinfo->ds->buckets, buckets);
	cinfo->ds->nbuckets = size;
out:
	spin_unlock(&cinfo->inode->i_lock);
	kfree(buckets);
	return 0;
}

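/* Allocate, decode and sanity-check a new file layout segment. */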
static struct pnfs_layout_segment *
filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
		      struct nfs4_layoutget_res *lgr,
		      gfp_t gfp_flags)
{
	struct nfs4_filelayout_segment *fl;
	int rc;

	dprintk("--> %s\n", __func__);
	fl = kzalloc(sizeof(*fl), gfp_flags);
	if (!fl)
		return NULL;

	rc = filelayout_decode_layout(layoutid, fl, lgr, gfp_flags);
	if (rc != 0 || filelayout_check_layout(layoutid, fl, lgr, gfp_flags)) {
		_filelayout_free_lseg(fl);
		return NULL;
	}
	return &fl->generic_hdr;
}

/*
 * filelayout_pg_test(). Called by nfs_can_coalesce_requests()
 *
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
static size_t
filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		   struct nfs_page *req)
{
	unsigned int size;
	u64 p_stripe, r_stripe;
	u32 stripe_offset;
	u64 segment_offset = pgio->pg_lseg->pls_range.offset;
	u32 stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;

	/* calls nfs_generic_pg_test */
	size = pnfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/* see if req and prev are in the same stripe */
	if (prev) {
		p_stripe = (u64)req_offset(prev) - segment_offset;
		r_stripe = (u64)req_offset(req) - segment_offset;
		do_div(p_stripe, stripe_unit);
		do_div(r_stripe, stripe_unit);

		if (p_stripe != r_stripe)
			return 0;
	}

	/* calculate remaining bytes in the current stripe */
	div_u64_rem((u64)req_offset(req) - segment_offset,
			stripe_unit,
			&stripe_offset);
	WARN_ON_ONCE(stripe_offset > stripe_unit);
	if (stripe_offset >= stripe_unit)
		return 0;
	return min(stripe_unit - (unsigned int)stripe_offset, size);
}

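/*
 * Like pnfs_update_layout(), but also looks up and validates the deviceid
 * referenced by the returned file layout segment.  Returns an ERR_PTR on
 * failure so that callers fall back to I/O through the MDS.
 */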
static struct pnfs_layout_segment *
fl_pnfs_update_layout(struct inode *ino,
		      struct nfs_open_context *ctx,
		      loff_t pos,
		      u64 count,
		      enum pnfs_iomode iomode,
		      bool strict_iomode,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_segment *lseg = NULL;
	struct pnfs_layout_hdr *lo;
	struct nfs4_filelayout_segment *fl;
	int status;

	lseg = pnfs_update_layout(ino, ctx, pos, count, iomode, strict_iomode,
				  gfp_flags);
	if (!lseg)
		lseg = ERR_PTR(-ENOMEM);
	if (IS_ERR(lseg))
		goto out;

	lo = NFS_I(ino)->layout;
	fl = FILELAYOUT_LSEG(lseg);

	status = filelayout_check_deviceid(lo, fl, gfp_flags);
	if (status)
		lseg = ERR_PTR(status);
out:
	if (IS_ERR(lseg))
		pnfs_put_lseg(lseg);
	return lseg;
}

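/*
 * .pg_init for reads: attach a whole-file IOMODE_READ layout segment to
 * the descriptor, or leave pg_lseg NULL so the I/O falls back to the MDS.
 */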
static void
filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
			struct nfs_page *req)
{
	pnfs_generic_pg_check_layout(pgio);
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
						      req->wb_context,
						      0,
						      NFS4_MAX_UINT64,
						      IOMODE_READ,
						      false,
						      GFP_KERNEL);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}

static void
filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
			 struct nfs_page *req)
{
	struct nfs_commit_info cinfo;
	int status;

	pnfs_generic_pg_check_layout(pgio);
	if (!pgio->pg_lseg) {
		pgio->pg_lseg = fl_pnfs_update_layout(pgio->pg_inode,
						      req->wb_context,
						      0,
						      NFS4_MAX_UINT64,
						      IOMODE_RW,
						      false,
						      GFP_NOFS);
		if (IS_ERR(pgio->pg_lseg)) {
			pgio->pg_error = PTR_ERR(pgio->pg_lseg);
			pgio->pg_lseg = NULL;
			return;
		}
	}

	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		goto out_mds;
	nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
	status = filelayout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
	if (status < 0) {
		pnfs_put_lseg(pgio->pg_lseg);
		pgio->pg_lseg = NULL;
		goto out_mds;
	}
	return;
out_mds:
	nfs_pageio_reset_write_mds(pgio);
}

static const struct nfs_pageio_ops filelayout_pg_read_ops = {
	.pg_init = filelayout_pg_init_read,
	.pg_test = filelayout_pg_test,
	.pg_doio = pnfs_generic_pg_readpages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

static const struct nfs_pageio_ops filelayout_pg_write_ops = {
	.pg_init = filelayout_pg_init_write,
	.pg_test = filelayout_pg_test,
	.pg_doio = pnfs_generic_pg_writepages,
	.pg_cleanup = pnfs_generic_pg_cleanup,
};

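/*
 * Commit buckets are indexed by data server for sparse layouts and by
 * stripe index for dense layouts.
 */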
static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
{
	if (fl->stripe_type == STRIPE_SPARSE)
		return nfs4_fl_calc_ds_index(&fl->generic_hdr, j);
	else
		return j;
}

static void
filelayout_mark_request_commit(struct nfs_page *req,
			       struct pnfs_layout_segment *lseg,
			       struct nfs_commit_info *cinfo,
			       u32 ds_commit_idx)
{
	struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
	u32 i, j;

	if (fl->commit_through_mds) {
		nfs_request_add_commit_list(req, cinfo);
	} else {
		/* Note that we are calling nfs4_fl_calc_j_index on each page
		 * that ends up being committed to a data server.  An attractive
		 * alternative is to add a field to nfs_write_data and nfs_page
		 * to store the value calculated in filelayout_write_pagelist
		 * and just use that here.
		 */
		j = nfs4_fl_calc_j_index(lseg, req_offset(req));
		i = select_bucket_index(fl, j);
		pnfs_layout_mark_request_commit(req, lseg, cinfo, i);
	}
}

static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);

	if (flseg->stripe_type == STRIPE_SPARSE)
		return i;
	else
		return nfs4_fl_calc_ds_index(lseg, i);
}

static struct nfs_fh *
select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
{
	struct nfs4_filelayout_segment *flseg = FILELAYOUT_LSEG(lseg);

	if (flseg->stripe_type == STRIPE_SPARSE) {
		if (flseg->num_fh == 1)
			i = 0;
		else if (flseg->num_fh == 0)
			/* Use the MDS OPEN fh set in nfs_read_rpcsetup */
			return NULL;
	}
	return flseg->fh_array[i];
}

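/*
 * Send COMMIT to the data server selected by the commit bucket index; on
 * any setup failure the queued writes are resent through the MDS instead.
 */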
static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
{
	struct pnfs_layout_segment *lseg = data->lseg;
	struct nfs4_pnfs_ds *ds;
	struct rpc_clnt *ds_clnt;
	u32 idx;
	struct nfs_fh *fh;

	idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
	ds = nfs4_fl_prepare_ds(lseg, idx);
	if (!ds)
		goto out_err;

	ds_clnt = nfs4_find_or_create_ds_client(ds->ds_clp, data->inode);
	if (IS_ERR(ds_clnt))
		goto out_err;

	dprintk("%s ino %lu, how %d cl_count %d\n", __func__,
		data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count));
	data->commit_done_cb = filelayout_commit_done_cb;
	atomic_inc(&ds->ds_clp->cl_count);
	data->ds_clp = ds->ds_clp;
	fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
	if (fh)
		data->args.fh = fh;
	return nfs_initiate_commit(ds_clnt, data, NFS_PROTO(data->inode),
				   &filelayout_commit_call_ops, how,
				   RPC_TASK_SOFTCONN);
out_err:
	pnfs_generic_prepare_to_resend_writes(data);
	pnfs_generic_commit_release(data);
	return -EAGAIN;
}

/* filelayout_search_commit_reqs - Search lists in @cinfo for the head request
 *				   for @page
 * @cinfo - commit info for current inode
 * @page - page to search for matching head request
 *
 * Returns the head request if one is found, otherwise returns NULL.
 */
static struct nfs_page *
filelayout_search_commit_reqs(struct nfs_commit_info *cinfo, struct page *page)
{
	struct nfs_page *freq, *t;
	struct pnfs_commit_bucket *b;
	int i;

	/* Linearly search the commit lists for each bucket until a matching
	 * request is found */
	for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
		list_for_each_entry_safe(freq, t, &b->written, wb_list) {
			if (freq->wb_page == page)
				return freq->wb_head;
		}
		list_for_each_entry_safe(freq, t, &b->committing, wb_list) {
			if (freq->wb_page == page)
				return freq->wb_head;
		}
	}

	return NULL;
}

static int
filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
			   int how, struct nfs_commit_info *cinfo)
{
	return pnfs_generic_commit_pagelist(inode, mds_pages, how, cinfo,
					    filelayout_initiate_commit);
}

static struct nfs4_deviceid_node *
filelayout_alloc_deviceid_node(struct nfs_server *server,
		struct pnfs_device *pdev, gfp_t gfp_flags)
{
	struct nfs4_file_layout_dsaddr *dsaddr;

	dsaddr = nfs4_fl_alloc_deviceid_node(server, pdev, gfp_flags);
	if (!dsaddr)
		return NULL;
	return &dsaddr->id_node;
}

static void
filelayout_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node));
}

static struct pnfs_layout_hdr *
filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
{
	struct nfs4_filelayout *flo;

	flo = kzalloc(sizeof(*flo), gfp_flags);
	return flo != NULL ? &flo->generic_hdr : NULL;
}

static void
filelayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	kfree(FILELAYOUT_FROM_HDR(lo));
}

static struct pnfs_ds_commit_info *
filelayout_get_ds_info(struct inode *inode)
{
	struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;

	if (layout == NULL)
		return NULL;
	else
		return &FILELAYOUT_FROM_HDR(layout)->commit_info;
}

static struct pnfs_layoutdriver_type filelayout_type = {
	.id			= LAYOUT_NFSV4_1_FILES,
	.name			= "LAYOUT_NFSV4_1_FILES",
	.owner			= THIS_MODULE,
	.alloc_layout_hdr	= filelayout_alloc_layout_hdr,
	.free_layout_hdr	= filelayout_free_layout_hdr,
	.alloc_lseg		= filelayout_alloc_lseg,
	.free_lseg		= filelayout_free_lseg,
	.pg_read_ops		= &filelayout_pg_read_ops,
	.pg_write_ops		= &filelayout_pg_write_ops,
	.get_ds_info		= &filelayout_get_ds_info,
	.mark_request_commit	= filelayout_mark_request_commit,
	.clear_request_commit	= pnfs_generic_clear_request_commit,
	.scan_commit_lists	= pnfs_generic_scan_commit_lists,
	.recover_commit_reqs	= pnfs_generic_recover_commit_reqs,
	.search_commit_reqs	= filelayout_search_commit_reqs,
	.commit_pagelist	= filelayout_commit_pagelist,
	.read_pagelist		= filelayout_read_pagelist,
	.write_pagelist		= filelayout_write_pagelist,
	.alloc_deviceid_node	= filelayout_alloc_deviceid_node,
	.free_deviceid_node	= filelayout_free_deviceid_node,
	.sync			= pnfs_nfs_generic_sync,
};

static int __init nfs4filelayout_init(void)
{
	printk(KERN_INFO "%s: NFSv4 File Layout Driver Registering...\n",
	       __func__);
	return pnfs_register_layoutdriver(&filelayout_type);
}

static void __exit nfs4filelayout_exit(void)
{
	printk(KERN_INFO "%s: NFSv4 File Layout Driver Unregistering...\n",
	       __func__);
	pnfs_unregister_layoutdriver(&filelayout_type);
}

MODULE_ALIAS("nfs-layouttype4-1");

module_init(nfs4filelayout_init);
module_exit(nfs4filelayout_exit);