/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	spin_lock_irq(&mapping->tree_lock);
	/*
	 * Regular page slots are stabilized by the page lock even
	 * without the tree itself locked.  These unlocked entries
	 * need verification under the tree lock.
	 */
	if (!__radix_tree_lookup(&mapping->page_tree, index, &node, &slot))
		goto unlock;
	if (*slot != entry)
		goto unlock;
	__radix_tree_replace(&mapping->page_tree, node, slot, NULL,
			     workingset_update_node, mapping);
	mapping->nrexceptional--;
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}

/*
 * Unconditionally remove exceptional entry. Usually called from truncate path.
 */
static void truncate_exceptional_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	if (dax_mapping(mapping)) {
		dax_delete_mapping_entry(mapping, index);
		return;
	}
	clear_shadow_entry(mapping, index, entry);
}

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
 * clean entries.
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
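
/*
 * Example (illustrative sketch, not part of this file): a block-based
 * filesystem can route invalidation through block_invalidatepage() by
 * pointing ->invalidatepage at it ("myfs" is a hypothetical name):
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.readpage	= myfs_readpage,
 *		.writepage	= myfs_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 *
 * Leaving ->invalidatepage NULL has the same effect when CONFIG_BLOCK
 * is enabled, as the fallback above shows.
 */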

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	loff_t holelen;
	VM_BUG_ON_PAGE(PageTail(page), page);

	holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_SHIFT,
				   holelen, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * properly page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always cover the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				truncate_exceptional_entry(mapping, index,
							   page);
				continue;
			}

			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_index(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (radix_tree_exceptional_entry(page)) {
				truncate_exceptional_entry(mapping, index,
							   page);
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
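
/*
 * Example (illustrative, not from this file): @lend is a byte offset
 * and is inclusive, so with 4K pages the call below removes exactly
 * the first two pages and leaves offset 8192 onwards untouched:
 *
 *	truncate_inode_pages_range(inode->i_mapping, 0, 8191);
 *
 * An @lend of -1 means truncate to end-of-file, which is what
 * truncate_inode_pages() below passes.
 */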

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrexceptional;
	unsigned long nrpages;

	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrexceptional first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		spin_lock_irq(&mapping->tree_lock);
		spin_unlock_irq(&mapping->tree_lock);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);
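
/*
 * Example (illustrative sketch): a filesystem's ->evict_inode is
 * expected to call this as the final truncate; "myfs_evict_inode" and
 * "myfs_free_inode" are hypothetical names.
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *		if (!inode->i_nlink)
 *			myfs_free_inode(inode);
 *	}
 */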

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				invalidate_exceptional_entry(mapping, index,
							     page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/* 'end' is in the middle of THP */
				if (index == round_down(end, HPAGE_PMD_NR))
					continue;
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest and try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
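
/*
 * Example (illustrative, not from this file): this is the kind of
 * best-effort invalidation used by POSIX_FADV_DONTNEED, roughly:
 *
 *	pgoff_t start = offset >> PAGE_SHIFT;
 *	pgoff_t end = (offset + len - 1) >> PAGE_SHIFT;
 *
 *	invalidate_mapping_pages(file->f_mapping, start, end);
 *
 * Dirty, locked, mapped or writeback pages simply stay resident; the
 * return value counts the pages that were actually dropped.
 */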

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irqsave(&mapping->tree_lock, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	spin_unlock_irqrestore(&mapping->tree_lock, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (radix_tree_exceptional_entry(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_SHIFT,
							 0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_SHIFT,
					   PAGE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
696
 * @mapping: the address_space
L
Linus Torvalds 已提交
697 698 699 700
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
701
 * Returns -EBUSY if any pages could not be invalidated.
L
Linus Torvalds 已提交
702 703 704 705 706 707
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
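
/*
 * Example (illustrative, not from this file): the direct I/O write path
 * uses the range variant to kick out now-stale cached pages after the
 * data has gone to disk, roughly:
 *
 *	written = mapping->a_ops->direct_IO(iocb, from);
 *	if (written > 0)
 *		invalidate_inode_pages2_range(mapping,
 *				pos >> PAGE_SHIFT,
 *				(pos + written - 1) >> PAGE_SHIFT);
 *
 * A -EBUSY return means at least one page could not be invalidated,
 * e.g. because it was redirtied in the meantime.
 */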

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
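
/*
 * Example (illustrative sketch): a typical ->setattr implementation;
 * "myfs_setattr" and "myfs_truncate_blocks" are hypothetical names.
 *
 *	static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int error;
 *
 *		error = setattr_prepare(dentry, attr);
 *		if (error)
 *			return error;
 *		if ((attr->ia_valid & ATTR_SIZE) &&
 *		    attr->ia_size != inode->i_size) {
 *			truncate_setsize(inode, attr->ia_size);
 *			myfs_truncate_blocks(inode, attr->ia_size);
 *		}
 *		setattr_copy(inode, attr);
 *		mark_inode_dirty(inode);
 *		return 0;
 *	}
 */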

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
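
/*
 * Example (illustrative, not from this file): truncate_setsize() above
 * is the common caller, but filesystems with block size < PAGE_SIZE
 * that update i_size directly in their extending write paths make the
 * same pair of calls themselves:
 *
 *	i_size_write(inode, new_size);
 *	pagecache_isize_extended(inode, old_size, new_size);
 */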

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
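
/*
 * Example (illustrative sketch): a hole-punching fallocate implementation
 * would typically call this before freeing the underlying blocks;
 * "myfs_punch_hole" and "myfs_free_blocks" are hypothetical names.
 *
 *	static long myfs_punch_hole(struct inode *inode, loff_t offset,
 *				    loff_t len)
 *	{
 *		inode_lock(inode);
 *		truncate_pagecache_range(inode, offset, offset + len - 1);
 *		myfs_free_blocks(inode, offset, len);
 *		inode_unlock(inode);
 *		return 0;
 *	}
 */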