/* handling of writes to regular files and writing back to the server
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/backing-dev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * Mark a page dirty, flagging it for later writeback to the server.
 */
int afs_set_page_dirty(struct page *page)
{
	int ret;

	_enter("");
	ret = __set_page_dirty_nobuffers(page);
	return ret;
}

/*
 * partly or wholly fill a page that's under preparation for writing
 *
 * Fetches @len bytes at file position @pos from the server into @page using
 * credentials @key.  Returns 0 on success or a negative errno; -ENOENT from
 * the server is converted to -ESTALE and the vnode is marked deleted.
 */
static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
			 loff_t pos, unsigned int len, struct page *page)
{
	struct afs_read *req;
	int ret;

	_enter(",,%llu", (unsigned long long)pos);

	/* Single-page read request: allocate the afs_read with a one-slot
	 * trailing page-pointer array.
	 */
	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
		      GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->pos = pos;
	req->len = len;
	req->nr_pages = 1;
	req->pages = req->array;
	req->pages[0] = page;
	get_page(page);		/* ref held by the request; dropped when it is put */

	ret = afs_fetch_data(vnode, key, req);
	afs_put_read(req);
	if (ret < 0) {
		if (ret == -ENOENT) {
			/* The file vanished on the server: report staleness
			 * rather than absence to the caller.
			 */
			_debug("got NOENT from server"
			       " - marking file deleted and stale");
			set_bit(AFS_VNODE_DELETED, &vnode->flags);
			ret = -ESTALE;
		}
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * prepare to perform part of a write to a page
 *
 * Locates/creates the page covering @pos, reads it from the server if the
 * write doesn't cover the whole page, and records the dirtied byte range
 * [from, to) in page->private.  If a previous, non-mergeable dirty range is
 * already recorded, that range is flushed to the server first.
 */
int afs_write_begin(struct file *file, struct address_space *mapping,
		    loff_t pos, unsigned len, unsigned flags,
		    struct page **pagep, void **fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct page *page;
	struct key *key = afs_file_key(file);
	unsigned long priv;
	unsigned f, from = pos & (PAGE_SIZE - 1);	/* write start within page */
	unsigned t, to = from + len;			/* write end within page */
	pgoff_t index = pos >> PAGE_SHIFT;
	int ret;

	_enter("{%x:%u},{%lx},%u,%u",
	       vnode->fid.vid, vnode->fid.vnode, index, from, to);

	/* We want to store information about how much of a page is altered in
	 * page->private.
	 */
	BUILD_BUG_ON(PAGE_SIZE > 32768 && sizeof(page->private) < 8);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;

	/* A partial-page write to a non-uptodate page needs the rest of the
	 * page filled from the server first.
	 */
	if (!PageUptodate(page) && len != PAGE_SIZE) {
		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			_leave(" = %d [prep]", ret);
			return ret;
		}
		SetPageUptodate(page);
	}

	/* page won't leak in error case: it eventually gets cleaned off LRU */
	*pagep = page;

try_again:
	/* See if this page is already partially written in a way that we can
	 * merge the new write with.
	 */
	t = f = 0;
	if (PagePrivate(page)) {
		priv = page_private(page);
		f = priv & AFS_PRIV_MAX;	/* recorded dirty start */
		t = priv >> AFS_PRIV_SHIFT;	/* recorded dirty end */
		ASSERTCMP(f, <=, t);
	}

	if (f != t) {
		/* A dirty range is already recorded. */
		if (PageWriteback(page)) {
			trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
					     page->index, priv);
			goto flush_conflicting_write;
		}
		/* Only merge if the new range touches or overlaps the old. */
		if (to < f || from > t)
			goto flush_conflicting_write;
		if (from < f)
			f = from;
		if (to > t)
			t = to;
	} else {
		f = from;
		t = to;
	}

	/* Encode the merged dirty range back into page->private. */
	priv = (unsigned long)t << AFS_PRIV_SHIFT;
	priv |= f;
	trace_afs_page_dirty(vnode, tracepoint_string("begin"),
			     page->index, priv);
	SetPagePrivate(page);
	set_page_private(page, priv);
	_leave(" = 0");
	return 0;

	/* The previous write and this write aren't adjacent or overlapping, so
	 * flush the page out.
	 */
flush_conflicting_write:
	_debug("flush conflict");
	ret = write_one_page(page);	/* unlocks the page */
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	ret = lock_page_killable(page);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}
	goto try_again;
}

/*
 * finalise part of a write to a page
 *
 * Extends i_size if the write went past it (rechecked under wb_lock to
 * serialise against concurrent extenders), fills in any uncopied portion of
 * a non-uptodate page from the server, and marks the page dirty.  Returns
 * the number of bytes accepted, or a negative errno.
 */
int afs_write_end(struct file *file, struct address_space *mapping,
		  loff_t pos, unsigned len, unsigned copied,
		  struct page *page, void *fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
	struct key *key = afs_file_key(file);
	loff_t i_size, maybe_i_size;
	int ret;

	_enter("{%x:%u},{%lx}",
	       vnode->fid.vid, vnode->fid.vnode, page->index);

	maybe_i_size = pos + copied;

	/* Cheap unlocked check first; confirm under wb_lock before writing. */
	i_size = i_size_read(&vnode->vfs_inode);
	if (maybe_i_size > i_size) {
		spin_lock(&vnode->wb_lock);
		i_size = i_size_read(&vnode->vfs_inode);
		if (maybe_i_size > i_size)
			i_size_write(&vnode->vfs_inode, maybe_i_size);
		spin_unlock(&vnode->wb_lock);
	}

	if (!PageUptodate(page)) {
		if (copied < len) {
			/* Try and load any missing data from the server.  The
			 * unmarshalling routine will take care of clearing any
			 * bits that are beyond the EOF.
			 */
			ret = afs_fill_page(vnode, key, pos + copied,
					    len - copied, page);
			if (ret < 0)
				goto out;
		}
		SetPageUptodate(page);
	}

	set_page_dirty(page);
	if (PageDirty(page))
		_debug("dirtied");
	ret = copied;

out:
	unlock_page(page);
	put_page(page);
	return ret;
}

/*
 * kill all the pages in the given range
 */
221
static void afs_kill_pages(struct address_space *mapping,
222 223
			   pgoff_t first, pgoff_t last)
{
224
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
225 226 227 228 229 230
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

231
	pagevec_init(&pv);
232 233 234 235 236 237 238

	do {
		_debug("kill %lx-%lx", first, last);

		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
239
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
240 241 242
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
D
David Howells 已提交
243 244
			struct page *page = pv.pages[loop];
			ClearPageUptodate(page);
245 246
			SetPageError(page);
			end_page_writeback(page);
D
David Howells 已提交
247 248
			if (page->index >= first)
				first = page->index + 1;
249 250
			lock_page(page);
			generic_error_remove_page(mapping, page);
251 252 253
		}

		__pagevec_release(&pv);
254
	} while (first <= last);
255 256 257 258 259

	_leave("");
}

/*
 * Redirty all the pages in a given range.
 *
 * Used when a store failed with a retryable error: each page in [first,
 * last] is put back on the dirty list for the writeback control and has its
 * writeback state completed so it can be retried later.
 */
static void afs_redirty_pages(struct writeback_control *wbc,
			      struct address_space *mapping,
			      pgoff_t first, pgoff_t last)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct pagevec pv;
	unsigned count, loop;

	_enter("{%x:%u},%lx-%lx",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("redirty %lx-%lx", first, last);

		/* Process at most one pagevec's worth per iteration. */
		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(mapping, first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			struct page *page = pv.pages[loop];

			redirty_page_for_writepage(wbc, page);
			end_page_writeback(page);
			/* Advance past this page for the next batch. */
			if (page->index >= first)
				first = page->index + 1;
		}

		__pagevec_release(&pv);
	} while (first <= last);

	_leave("");
}

/*
 * write to a file
 *
 * Store the data covering pages [first, last] to the server, starting at
 * byte @offset within the first page and ending at byte @to within the last
 * page.  Each key cached on the vnode's writeback-key list is tried in turn
 * until one is accepted; returns 0 on success, -ENOKEY if no key worked, or
 * another negative errno from the operation.
 */
static int afs_store_data(struct address_space *mapping,
			  pgoff_t first, pgoff_t last,
			  unsigned offset, unsigned to)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_fs_cursor fc;
	struct afs_wb_key *wbk = NULL;
	struct list_head *p;
	int ret = -ENOKEY, ret2;

	_enter("%s{%x:%u.%u},%lx,%lx,%x,%x",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       first, last, offset, to);

	spin_lock(&vnode->wb_lock);
	p = vnode->wb_keys.next;

	/* Iterate through the list looking for a valid key to use. */
try_next_key:
	while (p != &vnode->wb_keys) {
		wbk = list_entry(p, struct afs_wb_key, vnode_link);
		_debug("wbk %u", key_serial(wbk->key));
		ret2 = key_validate(wbk->key);
		if (ret2 == 0)
			goto found_key;
		/* Remember the first failure reason for the caller. */
		if (ret == -ENOKEY)
			ret = ret2;
		p = p->next;
	}

	spin_unlock(&vnode->wb_lock);
	afs_put_wb_key(wbk);
	_leave(" = %d [no keys]", ret);
	return ret;

found_key:
	/* Pin the key before dropping the lock so it can't be freed. */
	refcount_inc(&wbk->usage);
	spin_unlock(&vnode->wb_lock);

	_debug("USE WB KEY %u", key_serial(wbk->key));

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, wbk->key)) {
		/* Rotate through the fileservers until one accepts the op. */
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = vnode->cb_break + vnode->cb_s_break;
			afs_fs_store_data(&fc, mapping, first, last, offset, to);
		}

		afs_check_for_remote_deletion(&fc, fc.vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break);
		ret = afs_end_vnode_operation(&fc);
	}

	/* Permission/key failures: move on to the next cached key. */
	switch (ret) {
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		_debug("next");
		spin_lock(&vnode->wb_lock);
		p = wbk->vnode_link.next;
		afs_put_wb_key(wbk);
		goto try_next_key;
	}

	afs_put_wb_key(wbk);
	_leave(" = %d", ret);
	return ret;
}

377
/*
378 379
 * Synchronously write back the locked page and any subsequent non-locked dirty
 * pages.
380
 */
381 382 383 384
static int afs_write_back_from_locked_page(struct address_space *mapping,
					   struct writeback_control *wbc,
					   struct page *primary_page,
					   pgoff_t final_page)
385
{
D
David Howells 已提交
386
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
387
	struct page *pages[8], *page;
388 389
	unsigned long count, priv;
	unsigned n, offset, to, f, t;
390 391 392 393 394 395 396 397 398
	pgoff_t start, first, last;
	int loop, ret;

	_enter(",%lx", primary_page->index);

	count = 1;
	if (test_set_page_writeback(primary_page))
		BUG();

399 400 401 402 403
	/* Find all consecutive lockable dirty pages that have contiguous
	 * written regions, stopping when we find a page that is not
	 * immediately lockable, is not dirty or is missing, or we reach the
	 * end of the range.
	 */
404
	start = primary_page->index;
405 406 407
	priv = page_private(primary_page);
	offset = priv & AFS_PRIV_MAX;
	to = priv >> AFS_PRIV_SHIFT;
D
David Howells 已提交
408 409
	trace_afs_page_dirty(vnode, tracepoint_string("store"),
			     primary_page->index, priv);
410 411

	WARN_ON(offset == to);
D
David Howells 已提交
412 413 414
	if (offset == to)
		trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
				     primary_page->index, priv);
415 416

	if (start >= final_page || to < PAGE_SIZE)
417
		goto no_more;
418

419 420 421
	start++;
	do {
		_debug("more %lx [%lx]", start, count);
422
		n = final_page - start + 1;
423 424
		if (n > ARRAY_SIZE(pages))
			n = ARRAY_SIZE(pages);
425
		n = find_get_pages_contig(mapping, start, ARRAY_SIZE(pages), pages);
426 427 428 429
		_debug("fgpc %u", n);
		if (n == 0)
			goto no_more;
		if (pages[0]->index != start) {
430 431 432
			do {
				put_page(pages[--n]);
			} while (n > 0);
433 434 435 436
			goto no_more;
		}

		for (loop = 0; loop < n; loop++) {
437 438
			if (to != PAGE_SIZE)
				break;
439
			page = pages[loop];
440
			if (page->index > final_page)
441
				break;
N
Nick Piggin 已提交
442
			if (!trylock_page(page))
443
				break;
444
			if (!PageDirty(page) || PageWriteback(page)) {
445 446 447
				unlock_page(page);
				break;
			}
448 449 450 451 452

			priv = page_private(page);
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
			if (f != 0) {
453 454 455
				unlock_page(page);
				break;
			}
456 457
			to = t;

D
David Howells 已提交
458 459 460
			trace_afs_page_dirty(vnode, tracepoint_string("store+"),
					     page->index, priv);

461 462 463 464 465 466 467 468 469 470 471 472 473 474 475
			if (!clear_page_dirty_for_io(page))
				BUG();
			if (test_set_page_writeback(page))
				BUG();
			unlock_page(page);
			put_page(page);
		}
		count += loop;
		if (loop < n) {
			for (; loop < n; loop++)
				put_page(pages[loop]);
			goto no_more;
		}

		start += loop;
476
	} while (start <= final_page && count < 65536);
477 478

no_more:
479 480 481 482 483 484
	/* We now have a contiguous set of dirty pages, each with writeback
	 * set; the first page is still locked at this point, but all the rest
	 * have been unlocked.
	 */
	unlock_page(primary_page);

485 486 487 488 489
	first = primary_page->index;
	last = first + count - 1;

	_debug("write back %lx[%u..] to %lx[..%u]", first, offset, last, to);

490 491 492
	ret = afs_store_data(mapping, first, last, offset, to);
	switch (ret) {
	case 0:
493
		ret = count;
494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524
		break;

	default:
		pr_notice("kAFS: Unexpected error from FS.StoreData %d\n", ret);
		/* Fall through */
	case -EACCES:
	case -EPERM:
	case -ENOKEY:
	case -EKEYEXPIRED:
	case -EKEYREJECTED:
	case -EKEYREVOKED:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, ret);
		break;

	case -EDQUOT:
	case -ENOSPC:
		afs_redirty_pages(wbc, mapping, first, last);
		mapping_set_error(mapping, -ENOSPC);
		break;

	case -EROFS:
	case -EIO:
	case -EREMOTEIO:
	case -EFBIG:
	case -ENOENT:
	case -ENOMEDIUM:
	case -ENXIO:
		afs_kill_pages(mapping, first, last);
		mapping_set_error(mapping, ret);
		break;
525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * write a page back to the server
 * - the caller locked the page for us
 */
int afs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	_enter("{%lx},", page->index);

541 542
	ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
					      wbc->range_end >> PAGE_SHIFT);
543 544 545 546 547 548 549 550 551 552 553 554 555 556
	if (ret < 0) {
		_leave(" = %d", ret);
		return 0;
	}

	wbc->nr_to_write -= ret;

	_leave(" = 0");
	return 0;
}

/*
 * write a region of pages back to the server
 *
 * Scans [index, end] for dirty pages and writes each run back via
 * afs_write_back_from_locked_page().  On return *_next holds the index to
 * resume from.  Returns 0 or a negative errno.
 */
static int afs_writepages_region(struct address_space *mapping,
				 struct writeback_control *wbc,
				 pgoff_t index, pgoff_t end, pgoff_t *_next)
{
	struct page *page;
	int ret, n;

	_enter(",,%lx,%lx,", index, end);

	do {
		/* Find the next dirty page; index is advanced for us. */
		n = find_get_pages_range_tag(mapping, &index, end,
					PAGECACHE_TAG_DIRTY, 1, &page);
		if (!n)
			break;

		_debug("wback %lx", page->index);

		/* at this point we hold neither mapping->tree_lock nor lock on
		 * the page itself: the page may be truncated or invalidated
		 * (changing page->mapping to NULL), or even swizzled back from
		 * swapper_space to tmpfs file mapping
		 */
		ret = lock_page_killable(page);
		if (ret < 0) {
			put_page(page);
			_leave(" = %d", ret);
			return ret;
		}

		/* Raced with truncation or another writer cleaning it. */
		if (page->mapping != mapping || !PageDirty(page)) {
			unlock_page(page);
			put_page(page);
			continue;
		}

		if (PageWriteback(page)) {
			unlock_page(page);
			/* For synchronous writeback, wait for the in-flight
			 * write before moving on.
			 */
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			put_page(page);
			continue;
		}

		if (!clear_page_dirty_for_io(page))
			BUG();
		/* Writes back this page plus any contiguous dirty followers;
		 * returns the number of pages written.
		 */
		ret = afs_write_back_from_locked_page(mapping, wbc, page, end);
		put_page(page);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		wbc->nr_to_write -= ret;

		cond_resched();
	} while (index < end && wbc->nr_to_write > 0);

	*_next = index;
	_leave(" = 0 [%lx]", *_next);
	return 0;
}

/*
 * write some of the pending data back to the server
 */
int afs_writepages(struct address_space *mapping,
		   struct writeback_control *wbc)
{
	pgoff_t start, end, next;
	int ret;

	_enter("");

	if (wbc->range_cyclic) {
		start = mapping->writeback_index;
		end = -1;
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
634
		if (start > 0 && wbc->nr_to_write > 0 && ret == 0)
635 636 637 638
			ret = afs_writepages_region(mapping, wbc, 0, start,
						    &next);
		mapping->writeback_index = next;
	} else if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
639
		end = (pgoff_t)(LLONG_MAX >> PAGE_SHIFT);
640 641 642 643
		ret = afs_writepages_region(mapping, wbc, 0, end, &next);
		if (wbc->nr_to_write > 0)
			mapping->writeback_index = next;
	} else {
644 645
		start = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
646 647 648 649 650 651 652 653 654 655 656 657 658
		ret = afs_writepages_region(mapping, wbc, start, end, &next);
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * completion of write to server
 *
 * Called when the store RPC for pages [call->first, call->last] succeeded:
 * clears each page's private dirty-range record and its writeback flag, then
 * prunes any now-unused writeback keys.
 */
void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
{
	struct pagevec pv;
	unsigned long priv;
	unsigned count, loop;
	pgoff_t first = call->first, last = call->last;

	_enter("{%x:%u},{%lx-%lx}",
	       vnode->fid.vid, vnode->fid.vnode, first, last);

	pagevec_init(&pv);

	do {
		_debug("done %lx-%lx", first, last);

		/* Process at most one pagevec's worth per iteration. */
		count = last - first + 1;
		if (count > PAGEVEC_SIZE)
			count = PAGEVEC_SIZE;
		pv.nr = find_get_pages_contig(vnode->vfs_inode.i_mapping,
					      first, count, pv.pages);
		ASSERTCMP(pv.nr, ==, count);

		for (loop = 0; loop < count; loop++) {
			priv = page_private(pv.pages[loop]);
			trace_afs_page_dirty(vnode, tracepoint_string("clear"),
					     pv.pages[loop]->index, priv);
			set_page_private(pv.pages[loop], 0);
			end_page_writeback(pv.pages[loop]);
		}
		first += count;
		__pagevec_release(&pv);
	} while (first <= last);

	afs_prune_wb_keys(vnode);
	_leave("");
}

/*
 * write to an AFS file
 */
A
Al Viro 已提交
696
ssize_t afs_file_write(struct kiocb *iocb, struct iov_iter *from)
697
{
A
Al Viro 已提交
698
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
699
	ssize_t result;
A
Al Viro 已提交
700
	size_t count = iov_iter_count(from);
701

A
Al Viro 已提交
702 703
	_enter("{%x.%u},{%zu},",
	       vnode->fid.vid, vnode->fid.vnode, count);
704 705 706 707 708 709 710 711 712 713

	if (IS_SWAPFILE(&vnode->vfs_inode)) {
		printk(KERN_INFO
		       "AFS: Attempt to write to active swap file!\n");
		return -EBUSY;
	}

	if (!count)
		return 0;

A
Al Viro 已提交
714
	result = generic_file_write_iter(iocb, from);
715 716 717 718 719 720 721 722 723 724

	_leave(" = %zd", result);
	return result;
}

/*
 * flush any dirty pages for this process, and check for write errors.
 * - the return status from this call provides a reliable indication of
 *   whether any write errors occurred for this process.
 */
725
int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
726
{
727 728
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
729

730 731
	_enter("{%x:%u},{n=%pD},%d",
	       vnode->fid.vid, vnode->fid.vnode, file,
732 733
	       datasync);

734
	return file_write_and_wait_range(file, start, end);
735
}
D
David Howells 已提交
736

737 738 739 740 741 742 743 744 745 746 747 748 749 750
/*
 * Flush out all outstanding writes on a file opened for writing when it is
 * closed.
 */
int afs_flush(struct file *file, fl_owner_t id)
{
	_enter("");

	if ((file->f_mode & FMODE_WRITE) == 0)
		return 0;

	return vfs_fsync(file, 0);
}

D
David Howells 已提交
751 752 753 754
/*
 * notification that a previously read-only page is about to become writable
 * - if it returns an error, the caller will deliver a bus error signal
 */
755
int afs_page_mkwrite(struct vm_fault *vmf)
D
David Howells 已提交
756
{
757 758 759 760
	struct file *file = vmf->vma->vm_file;
	struct inode *inode = file_inode(file);
	struct afs_vnode *vnode = AFS_FS_I(inode);
	unsigned long priv;
D
David Howells 已提交
761 762

	_enter("{{%x:%u}},{%lx}",
763
	       vnode->fid.vid, vnode->fid.vnode, vmf->page->index);
D
David Howells 已提交
764

765
	sb_start_pagefault(inode->i_sb);
D
David Howells 已提交
766

767 768 769
	/* Wait for the page to be written to the cache before we allow it to
	 * be modified.  We then assume the entire page will need writing back.
	 */
D
David Howells 已提交
770
#ifdef CONFIG_AFS_FSCACHE
771
	fscache_wait_on_page_write(vnode->cache, vmf->page);
D
David Howells 已提交
772 773
#endif

774 775 776 777 778 779 780 781 782 783 784 785 786 787 788
	if (PageWriteback(vmf->page) &&
	    wait_on_page_bit_killable(vmf->page, PG_writeback) < 0)
		return VM_FAULT_RETRY;

	if (lock_page_killable(vmf->page) < 0)
		return VM_FAULT_RETRY;

	/* We mustn't change page->private until writeback is complete as that
	 * details the portion of the page we need to write back and we might
	 * need to redirty the page if there's a problem.
	 */
	wait_on_page_writeback(vmf->page);

	priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
	priv |= 0; /* From */
D
David Howells 已提交
789 790
	trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
			     vmf->page->index, priv);
791 792 793 794 795
	SetPagePrivate(vmf->page);
	set_page_private(vmf->page, priv);

	sb_end_pagefault(inode->i_sb);
	return VM_FAULT_LOCKED;
D
David Howells 已提交
796
}
797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847

/*
 * Prune the keys cached for writeback.  Takes vnode->wb_lock itself, so the
 * caller must NOT hold it.  (The previous comment claiming the caller must
 * hold wb_lock contradicted the spin_lock() below.)
 */
void afs_prune_wb_keys(struct afs_vnode *vnode)
{
	LIST_HEAD(graveyard);
	struct afs_wb_key *wbk, *tmp;

	/* Discard unused keys */
	spin_lock(&vnode->wb_lock);

	/* Only prune when no dirty or in-flight pages could still need a key. */
	if (!mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_WRITEBACK) &&
	    !mapping_tagged(&vnode->vfs_inode.i_data, PAGECACHE_TAG_DIRTY)) {
		list_for_each_entry_safe(wbk, tmp, &vnode->wb_keys, vnode_link) {
			/* usage == 1 means only the list holds a reference. */
			if (refcount_read(&wbk->usage) == 1)
				list_move(&wbk->vnode_link, &graveyard);
		}
	}

	spin_unlock(&vnode->wb_lock);

	/* Drop the final references outside the lock. */
	while (!list_empty(&graveyard)) {
		wbk = list_entry(graveyard.next, struct afs_wb_key, vnode_link);
		list_del(&wbk->vnode_link);
		afs_put_wb_key(wbk);
	}
}

/*
 * Clean up a page during invalidation.
 *
 * If the page is dirty, its recorded dirty byte range is stored to the
 * server before the private state is cleared and any fscache attachment is
 * released.  Returns 0 or a negative errno from the store.
 */
int afs_launder_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	unsigned long priv;
	unsigned int f, t;
	int ret = 0;

	_enter("{%lx}", page->index);

	priv = page_private(page);
	if (clear_page_dirty_for_io(page)) {
		/* Default to the whole page if no range was recorded. */
		f = 0;
		t = PAGE_SIZE;
		if (PagePrivate(page)) {
			f = priv & AFS_PRIV_MAX;
			t = priv >> AFS_PRIV_SHIFT;
		}

		trace_afs_page_dirty(vnode, tracepoint_string("launder"),
				     page->index, priv);
		/* Fix: afs_store_data() takes (offset, to); the arguments
		 * were previously passed as (t, f), inverting the range, at
		 * odds with the call in afs_write_back_from_locked_page().
		 */
		ret = afs_store_data(mapping, page->index, page->index, f, t);
	}

	trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
			     page->index, priv);
	set_page_private(page, 0);
	ClearPagePrivate(page);

#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page)) {
		fscache_wait_on_page_write(vnode->cache, page);
		fscache_uncache_page(vnode->cache, page);
	}
#endif
	return ret;
}