/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

static void clear_exceptional_entry(struct address_space *mapping,
				    pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;
	radix_tree_replace_slot(slot, NULL);
	mapping->nrshadows--;
	if (!node)
		goto unlock;
	workingset_node_shadows_dec(node);
	/*
	 * Don't track node without shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already untracked.
	 * The list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_shadows(node) &&
	    !list_empty(&node->private_list))
		list_lru_del(&workingset_shadow_nodes, &node->private_list);
	__radix_tree_delete_node(&mapping->page_tree, node);
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
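
/*
 * Illustrative sketch (not part of this file): a filesystem with per-page
 * private data would hook ->invalidatepage in its address_space_operations
 * so that do_invalidatepage() reaches it; the helper name is hypothetical:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.invalidatepage	= foo_invalidatepage,
 *	};
 *
 * Filesystems without such metadata can leave the hook NULL and, with
 * CONFIG_BLOCK, fall back to block_invalidatepage() as above.
 */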

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
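
/*
 * Illustrative sketch: filesystems usually wire this function up directly
 * in their address_space_operations so the hwpoison code can punch out
 * corrupted pagecache pages; the aops name here is hypothetical:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */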

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * properly page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrshadows == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
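
/*
 * Usage sketch (illustrative): a hole punch would first drop the cached
 * pages for the hole and then free the on-disk blocks, e.g.:
 *
 *	truncate_inode_pages_range(inode->i_mapping, start, end);
 *
 * followed by filesystem-specific deallocation of the blocks backing
 * [start, end].  In-tree callers normally use the
 * truncate_pagecache_range() wrapper below, which also unmaps the range
 * from user pagetables first.
 */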

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrshadows;
	unsigned long nrpages;

	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrshadows first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrshadows = mapping->nrshadows;

	if (nrpages || nrshadows) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);
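
/*
 * Illustrative sketch of a minimal .evict_inode implementation (the
 * function name is hypothetical):
 *
 *	static void foo_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *	}
 */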

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
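
/*
 * Usage sketch (illustrative): to drop as much clean, unmapped pagecache
 * as possible for a whole inode, as the drop_caches code does:
 *
 *	unsigned long nr = invalidate_mapping_pages(mapping, 0, -1);
 *
 * The return value is best-effort only; dirty, locked, mapped and
 * writeback pages are silently skipped.
 */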

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irq(&mapping->tree_lock);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				clear_exceptional_entry(mapping, index, page);
				continue;
			}

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
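
/*
 * Usage sketch (illustrative): direct I/O write paths invalidate the
 * written byte range so stale cached pages cannot be seen afterwards;
 * 'pos' and 'count' are hypothetical variables for the bytes written:
 *
 *	err = invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_CACHE_SHIFT,
 *			(pos + count - 1) >> PAGE_CACHE_SHIFT);
 *
 * A -EBUSY return means at least one page could not be invalidated.
 */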

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
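
/*
 * Usage sketch (illustrative, helper name hypothetical): a filesystem
 * shrinking a file updates i_size first, then trims the pagecache, then
 * frees the blocks:
 *
 *	i_size_write(inode, newsize);
 *	truncate_pagecache(inode, newsize);
 *	foo_free_blocks(inode, newsize);
 *
 * Most filesystems get the first two steps via truncate_setsize() below.
 */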

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
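
/*
 * Illustrative sketch of a typical caller (a hypothetical filesystem's
 * setattr method):
 *
 *	static int foo_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int error = inode_change_ok(inode, attr);
 *
 *		if (error)
 *			return error;
 *		if ((attr->ia_valid & ATTR_SIZE) &&
 *		    attr->ia_size != i_size_read(inode))
 *			truncate_setsize(inode, attr->ia_size);
 *		setattr_copy(inode, attr);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */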

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = 1 << inode->i_blkbits;
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_CACHE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
		return;

	index = from >> PAGE_CACHE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
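
/*
 * Usage sketch (illustrative): an extending write that bypasses
 * truncate_setsize() calls this after updating i_size, still under
 * i_mutex; 'oldsize', 'pos' and 'copied' are hypothetical variables:
 *
 *	oldsize = inode->i_size;
 *	i_size_write(inode, pos + copied);
 *	pagecache_isize_extended(inode, oldsize, pos + copied);
 */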

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
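
/*
 * Usage sketch (illustrative): a block-aligned hole punch removes the
 * cached pages before freeing the on-disk extent; 'offset' and 'len' are
 * hypothetical, already rounded by the caller:
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *
 * followed by filesystem-specific deallocation of the punched blocks.
 */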