write.c 20.3 KB
Newer Older
1 2 3 4 5 6 7 8 9 10
/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
11

A
Alexey Dobriyan 已提交
12
#include <linux/backing-dev.h>
13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * mark a page as having been made dirty and thus needing writeback
 */
int afs_set_page_dirty(struct page *page)
{
	_enter("");
	/* No buffer heads in AFS; the generic nobuffers helper does all the
	 * tagging/accounting we need. */
	return __set_page_dirty_nobuffers(page);
}

/*
 * partly or wholly fill a page that's under preparation for writing
 *
 * Fetch [pos, pos + len) from the server into @page so that a partial write
 * doesn't destroy the parts of the page the caller isn't going to overwrite.
 * Returns 0 or a negative error; -ENOENT from the server marks the vnode
 * deleted and is converted to -ESTALE.
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	/* Single-page read request: one page pointer is allocated inline
	 * after the struct and addressed via req->array. */
	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);		/* ref held by req; dropped by afs_put_read() */

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			/* File vanished server-side: poison the vnode so
			 * future operations fail fast. */
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 *
 * The dirty region of a page is recorded in page->private as
 * (to << AFS_PRIV_SHIFT) | from.  A new write is merged into an existing
 * dirty region where possible; if the two regions are disjoint (and the file
 * isn't being filled locally), the old region is flushed out first so the
 * record only ever describes one contiguous span.
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);
	unsigned t, to = from + len;
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	/* A partial write to a non-uptodate page needs the rest of the page
	 * pre-filled from the server first. */
	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;
		t = priv >> AFS_PRIV_SHIFT;
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		/* Can't merge with a region already under writeback. */
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* If the file is being filled locally, allow inter-write
		 * spaces to be merged into writes.  If it's not, only write
		 * back what the user gives us.
		 */
		if (!test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags) &&
		    (to < f || from > t))
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	/* Record the merged dirty span back in page->private. */
	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	/* write_one_page() unlocked the page; retake the lock and retry the
	 * merge from scratch. */
	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 *
 * Updates i_size if the write extended the file, fills in any part of a
 * non-uptodate page that the copy didn't cover, marks the page dirty and
 * releases the lock/ref taken by afs_write_begin().
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	/* Unlocked check first so the common non-extending write skips the
	 * lock; recheck under wb_lock before actually extending. */
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 *
 * Used when a store fails unrecoverably: each page loses its uptodate flag,
 * gets its error flag set, has writeback ended and is then removed from the
 * pagecache.
 */
static void afs_kill_pages(struct address_space *mapping,
			   pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("kill %lx-%lx", first, last);

		/* Process the range a pagevec-sized batch at a time. */
		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
			SetPageError(page);
			end_page_writeback(page);
			/* Advance the cursor past this page for the next
			 * batch. */
			if (page->index >= first)
				first = page->index + 1;
			lock_page(page);
			generic_error_remove_page(mapping, page);
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 *
 * Used when a store fails with a retryable error: each page is handed back
 * to the writeback machinery as dirty and its writeback state is ended so a
 * later pass can try again.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		/* Process the range a pagevec-sized batch at a time. */
		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			/* Advance the cursor past this page for the next
			 * batch. */
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

304 305 306
/*
 * write to a file
 *
 * Store pages first..last back to the server, where @offset is the start
 * offset within the first page and @to the end offset within the last page.
 * Each cached writeback key is tried in turn; keys that fail with a
 * permission-type error cause the search to resume from the next key.
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		/* Remember the first validation error only. */
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	/* NOTE(review): wbk here either is NULL or points at a key this
	 * function no longer holds a counted reference on (the retry path
	 * below already put its ref) — this put looks like it may over-release;
	 * verify against afs_put_wb_key()'s contract. */
	afs_put_wb_key(wbk);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = vnode->cb_break + vnode->cb_s_break;
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	switch (ret) {
	case 0:
		/* Account the number of bytes successfully stored. */
		afs_stat_v(vnode, n_stores);
		atomic_long_add((last * PAGE_SIZE + to) -
				(first * PAGE_SIZE + offset),
				&afs_v2net(vnode)->n_store_bytes);
		break;
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		/* Permission failure with this key: drop it and resume the
		 * search from the next key in the list. */
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}

388
/*
389 390
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
391
 */
392 393 394 395
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
396
{
D
David Howells 已提交
397
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
398
	struct page *pages[8], *page;
399 400
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
401 402 403 404 405 406 407 408 409
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

410 411 412 413 414
	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
415
	start = primary_page->index;
416 417 418
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
D
David Howells 已提交
419 420
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);
421 422

	WARN_ON(offset == to);
D
David Howells 已提交
423 424 425
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);
426

427 428
	if (start >= final_page ||
	    (to < PAGE_SIZE && !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)))
429
		goto no_more;
430

431 432 433
	start++;
	do {
		_debug("more %lx [%lx]", start, count);
434
		n = final_page - start + 1;
435 436
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
437
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
438 439 440 441
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
442 443 444
			do {
				put_page(pages[--n]);
			} while (n > 0);
445 446 447 448 449
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
			page = pages[loop];
450 451 452
			if (to != PAGE_SIZE &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags))
				break;
453
			if (page->index > final_page)
454
				break;
N
Nick Piggin 已提交
455
			if (!trylock_page(page))
456
				break;
457
			if (!PageDirty(page) || PageWriteback(page)) {
458 459 460
				unlock_page(page);
				break;
			}
461 462 463 464

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
465 466
			if (f != 0 &&
			    !test_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags)) {
467 468 469
				unlock_page(page);
				break;
			}
470 471
			to = t;

D
David Howells 已提交
472 473 474
			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

475 476 477 478 479 480 481 482 483 484 485 486 487 488 489
			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
490
	} while (start <= final_page && count < 65536);
491 492

no_more:
493 494 495 496 497 498
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

499 500 501 502 503
	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

504 505 506
	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
507
		ret = count;
508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

555 556
	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
557 558 559 560 561 562 563 564 565 566 567 568 569 570
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 *
 * Walk the dirty-tagged pages in [index, end], writing each run back via
 * afs_write_back_from_locked_page().  On return, *_next holds the index at
 * which a later pass should resume.
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		/* Find the next dirty page at or after index; this also
		 * advances index past it. */
		n = find_get_pages_range_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/*
		 * at this point we hold neither the i_pages lock nor the
		 * page lock: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled
		 * back from swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		/* Recheck under the page lock that it's still ours and still
		 * dirty. */
		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			/* For synchronous writeback, wait for the in-flight
			 * writeback rather than skipping the page. */
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
649
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
650 651 652 653
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
654
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
655 656 657 658
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
659 660
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
661 662 663 664 665 666 667 668 669 670 671 672 673
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 *
 * Clear the dirty-region record in page->private and end writeback on every
 * page in the stored range, then prune any now-unused writeback keys.
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		/* Process the range a pagevec-sized batch at a time. */
		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to an AFS file
 */
A
Al Viro 已提交
711
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
712
{
A
Al Viro 已提交
713
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
714
	ssize_t result;
A
Al Viro 已提交
715
	size_t count = iov_iter_count(from);
716

A
Al Viro 已提交
717 718
	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);
719 720 721 722 723 724 725 726 727 728

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

A
Al Viro 已提交
729
	result = generic_file_write_iter(iocb, from);
730 731 732 733 734 735 736 737 738 739

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
740
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
741
{
742 743
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
744

745 746
	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
747 748
	       datasync);

749
	return file_write_and_wait_range(file, start, end);
750
}
D
David Howells 已提交
751 752 753 754 755

/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
756
int afs_page_mkwrite(struct vm_fault *vmf)
D
David Howells 已提交
757
{
758 759 760 761
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;
D
David Howells 已提交
762 763

	_enter("{{%x:%u}},{%lx}",
764
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);
D
David Howells 已提交
765

766
	sb_start_pagefault(inode->i_sb);
D
David Howells 已提交
767

768 769 770
	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
D
David Howells 已提交
771
#ifdef CONFIG_AFS_FSCACHE
772
	fscache_wait_on_page_write(vnode->cache, vmf->page);
D
David Howells 已提交
773 774
#endif

775 776 777 778 779 780 781 782 783 784 785 786 787 788 789
	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
D
David Howells 已提交
790 791
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
792 793 794 795 796
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
D
David Howells 已提交
797
}
798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848

/*
 * Prune the keys cached for writeback.  Takes vnode->wb_lock itself, so the
 * caller must NOT already hold it.  (The previous comment claimed the caller
 * must hold the lock, contradicting the spin_lock() below.)
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	/* Only prune when no pages are dirty or under writeback, i.e. when no
	 * store operation could still need one of these keys. */
	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	/* Drop the final references outside the lock. */
	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 *
 * If the page is dirty, store its dirty region back to the server, then
 * clear the page's private dirty record and detach it from fscache.
 * Returns 0 or a negative error from the store.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		/* Default to the whole page, narrowed to the recorded dirty
		 * region if one is present. */
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		/* afs_store_data() takes (first, last, offset, to): f is the
		 * start offset and t the end offset.  The arguments were
		 * previously passed as (t, f), i.e. swapped relative to every
		 * other caller, describing a backwards byte range. */
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}