/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include "internal.h"

static void clear_exceptional_entry(struct address_space *mapping,
				    pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (radix_tree_delete_item(&mapping->page_tree, index, entry) == entry)
		mapping->nrshadows--;
	spin_unlock_irq(&mapping->tree_lock);
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
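
/*
 * Illustrative sketch (not part of this file): a block-based filesystem
 * typically wires up ->invalidatepage in its address_space_operations,
 * either pointing it straight at block_invalidatepage() or wrapping it.
 * The foo_invalidatepage/foo_aops names below are hypothetical.
 *
 *	static void foo_invalidatepage(struct page *page, unsigned int offset,
 *				       unsigned int length)
 *	{
 *		block_invalidatepage(page, offset, length);
 *	}
 *
 *	static const struct address_space_operations foo_aops = {
 *		.invalidatepage	= foo_invalidatepage,
 *	};
 */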

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bale out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
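
/*
 * Illustrative sketch (not part of this file): memory-failure handling
 * reaches the helper above through the address_space_operations
 * ->error_remove_page hook, which a filesystem can point directly at
 * generic_error_remove_page().  The foo_aops name is hypothetical.
 *
 *	static const struct address_space_operations foo_aops = {
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */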

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts range to invalidate
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrshadows;
	unsigned long nrpages;

	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrshadows first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrshadows = mapping->nrshadows;

	if (nrpages || nrshadows) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);
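
/*
 * Illustrative sketch (not part of this file): a simple filesystem's
 * ->evict_inode() handler would typically use the helper above right
 * before clearing the inode.  foo_evict_inode is a hypothetical name.
 *
 *	static void foo_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *	}
 */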

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages, if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	/*
	 * Note: this function may get called on a shmem/tmpfs mapping:
	 * pagevec_lookup() might then return 0 prematurely (because it
	 * got a gangful of swap entries); but it's hardly worth worrying
	 * about - it can rarely have anything to free from such a mapping
	 * (most pages are dirty), and already skips over any difficulties.
	 */

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
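
/*
 * Illustrative sketch (not part of this file): dropping the clean cache of
 * a whole mapping, in the style of the POSIX_FADV_DONTNEED/drop_caches
 * callers of the helper above.  foo_drop_cache is a hypothetical name.
 *
 *	static void foo_drop_cache(struct inode *inode)
 *	{
 *		invalidate_mapping_pages(inode->i_mapping, 0, -1);
 *	}
 */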

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
J
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode_mutex held and before all filesystem specific
 * block truncation has been performed.
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	i_size_write(inode, newsize);
713
	truncate_pagecache(inode, newsize);
714 715 716
}
EXPORT_SYMBOL(truncate_setsize);

717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
739 740
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
741 742 743 744 745 746 747 748 749 750 751 752 753
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
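
/*
 * Illustrative sketch (not part of this file): a hole-punching path, e.g.
 * a filesystem's fallocate(FALLOC_FL_PUNCH_HOLE) implementation, would
 * call the helper above for the byte range being punched before freeing
 * the blocks.  The offset/len variables are assumed to come from the
 * caller.
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 */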