/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/shmem_fs.h>
#include <linux/cleancache.h>
#include <linux/rmap.h>
#include "internal.h"

/*
 * Regular page slots are stabilized by the page lock even without the tree
 * itself locked.  These unlocked entries need verification under the tree
 * lock.
 */
static inline void __clear_shadow_entry(struct address_space *mapping,
				pgoff_t index, void *entry)
{
	XA_STATE(xas, &mapping->i_pages, index);

	xas_set_update(&xas, workingset_update_node);
	if (xas_load(&xas) != entry)
		return;
	xas_store(&xas, NULL);
	mapping->nrexceptional--;
}

static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
			       void *entry)
{
	xa_lock_irq(&mapping->i_pages);
	__clear_shadow_entry(mapping, index, entry);
	xa_unlock_irq(&mapping->i_pages);
}
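
/*
 * Background note (added summary, not in the original file): "exceptional"
 * entries are xarray value entries (xa_is_value() is true for them), such
 * as workingset shadow entries or DAX entries, stored in the page cache
 * in place of struct page pointers.
 */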

/*
 * Unconditionally remove exceptional entries. Usually called from truncate
 * path. Note that the pagevec may be altered by this function by removing
 * exceptional entries similar to what pagevec_remove_exceptionals does.
 */
static void truncate_exceptional_pvec_entries(struct address_space *mapping,
				struct pagevec *pvec, pgoff_t *indices,
				pgoff_t end)
{
	int i, j;
	bool dax, lock;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	for (j = 0; j < pagevec_count(pvec); j++)
		if (xa_is_value(pvec->pages[j]))
			break;

	if (j == pagevec_count(pvec))
		return;

	dax = dax_mapping(mapping);
	lock = !dax && indices[j] < end;
	if (lock)
		xa_lock_irq(&mapping->i_pages);

	for (i = j; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		pgoff_t index = indices[i];

		if (!xa_is_value(page)) {
			pvec->pages[j++] = page;
			continue;
		}

		if (index >= end)
			continue;

		if (unlikely(dax)) {
			dax_delete_mapping_entry(mapping, index);
			continue;
		}

		__clear_shadow_entry(mapping, index, page);
	}

	if (lock)
		xa_unlock_irq(&mapping->i_pages);
	pvec->nr = j;
}

/*
 * Invalidate exceptional entry if easily possible. This handles exceptional
 * entries for invalidate_inode_pages().
 */
static int invalidate_exceptional_entry(struct address_space *mapping,
					pgoff_t index, void *entry)
{
	/* Handled by shmem itself, or for DAX we do nothing. */
	if (shmem_mapping(mapping) || dax_mapping(mapping))
		return 1;
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point.  Because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
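
/*
 * Illustrative sketch (hypothetical filesystem, not from this file): a
 * filesystem with private buffer state supplies the hook through its
 * address_space_operations, which is where do_invalidatepage() looks:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.invalidatepage	= myfs_invalidatepage,
 *	};
 */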

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bale out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_cleanup_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
		unmap_mapping_pages(mapping, page->index, nr, false);
	}

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_SIZE);

	/*
	 * Some filesystems seem to re-dirty the page even after
	 * the VM has canceled the dirty bit (eg ext3 journaling).
	 * Hence dirty accounting check is placed after invalidation.
	 */
	cancel_dirty_page(page);
	ClearPageMappedToDisk(page);
}

/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);

	if (page->mapping != mapping)
		return -EIO;

	truncate_cleanup_page(mapping, page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
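
/*
 * Illustrative sketch (hypothetical filesystem, not from this file):
 * filesystems opt in to this hwpoison handling by pointing their
 * address_space_operations at the helper:
 *
 *	static const struct address_space_operations myfs_aops = {
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */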

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts range to invalidate
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * page aligned properly.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		indices[PAGEVEC_SIZE];
	pgoff_t		index;
	int		i;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_SHIFT;

	pagevec_init(&pvec);
	index = start;
	while (index < end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE),
			indices)) {
		/*
		 * Pagevec array has exceptional entries and we may also fail
		 * to lock some pages. So we store pages that can be deleted
		 * in a new pagevec.
		 */
		struct pagevec locked_pvec;

		pagevec_init(&locked_pvec);
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end)
				break;

			if (xa_is_value(page))
				continue;

			if (!trylock_page(page))
				continue;
			WARN_ON(page_to_index(page) != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			pagevec_add(&locked_pvec, page);
		}
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			truncate_cleanup_page(mapping, locked_pvec.pages[i]);
		delete_from_page_cache_batch(mapping, &locked_pvec);
		for (i = 0; i < pagevec_count(&locked_pvec); i++)
			unlock_page(locked_pvec.pages[i]);
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			put_page(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			put_page(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		goto out;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
			/* If all gone from start onwards, we're done */
			if (index == start)
				break;
			/* Otherwise restart to make sure all gone */
			index = start;
			continue;
		}
		if (index == start && indices[0] >= end) {
			/* All gone out of hole to be punched, we're done */
			pagevec_remove_exceptionals(&pvec);
			pagevec_release(&pvec);
			break;
		}

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index >= end) {
				/* Restart punch to make sure all gone */
				index = start - 1;
				break;
			}

			if (xa_is_value(page))
				continue;

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		truncate_exceptional_pvec_entries(mapping, &pvec, indices, end);
		pagevec_release(&pvec);
		index++;
	}

out:
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
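
/*
 * Illustrative sketch (hypothetical caller, not from this file): a
 * hole-punching filesystem drops the affected page cache before freeing
 * the underlying blocks, keeping cache and disk logically coherent:
 *
 *	truncate_inode_pages_range(inode->i_mapping, offset,
 *				   offset + len - 1);
 *	myfs_free_blocks(inode, offset, len);	(hypothetical helper)
 */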

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

/**
 * truncate_inode_pages_final - truncate *all* pages before inode dies
 * @mapping: mapping to truncate
 *
 * Called under (and serialized by) inode->i_mutex.
 *
 * Filesystems have to use this in the .evict_inode path to inform the
 * VM that this is the final truncate and the inode is going away.
 */
void truncate_inode_pages_final(struct address_space *mapping)
{
	unsigned long nrexceptional;
	unsigned long nrpages;

	/*
	 * Page reclaim can not participate in regular inode lifetime
	 * management (can't call iput()) and thus can race with the
	 * inode teardown.  Tell it when the address space is exiting,
	 * so that it does not install eviction information after the
	 * final truncate has begun.
	 */
	mapping_set_exiting(mapping);

	/*
	 * When reclaim installs eviction entries, it increases
	 * nrexceptional first, then decreases nrpages.  Make sure we see
	 * this in the right order or we might miss an entry.
	 */
	nrpages = mapping->nrpages;
	smp_rmb();
	nrexceptional = mapping->nrexceptional;

	if (nrpages || nrexceptional) {
		/*
		 * As truncation uses a lockless tree lookup, cycle
		 * the tree lock to make sure any ongoing tree
		 * modification that does not see AS_EXITING is
		 * completed before starting the final truncate.
		 */
		xa_lock_irq(&mapping->i_pages);
		xa_unlock_irq(&mapping->i_pages);

		truncate_inode_pages(mapping, 0);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_final);
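
/*
 * Illustrative sketch (hypothetical filesystem, not from this file):
 * the expected caller is the filesystem's .evict_inode implementation:
 *
 *	static void myfs_evict_inode(struct inode *inode)
 *	{
 *		truncate_inode_pages_final(&inode->i_data);
 *		clear_inode(inode);
 *	}
 */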

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec);
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (xa_is_value(page)) {
				invalidate_exceptional_entry(mapping, index,
							     page);
				continue;
			}

			if (!trylock_page(page))
				continue;

			WARN_ON(page_to_index(page) != index);

			/* Middle of THP: skip */
			if (PageTransTail(page)) {
				unlock_page(page);
				continue;
			} else if (PageTransHuge(page)) {
				index += HPAGE_PMD_NR - 1;
				i += HPAGE_PMD_NR - 1;
				/*
				 * 'end' is in the middle of THP. Don't
				 * invalidate the page as the part outside of
				 * 'end' could be still useful.
				 */
				if (index > end) {
					unlock_page(page);
					continue;
				}
			}

			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_file_page(page);
			count += ret;
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
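
/*
 * Illustrative sketch (hypothetical caller, not from this file): a
 * POSIX_FADV_DONTNEED-style caller converts a byte range to page
 * indices, since @start and @end are page offsets rather than bytes:
 *
 *	invalidate_mapping_pages(mapping, offset >> PAGE_SHIFT,
 *				 (offset + len - 1) >> PAGE_SHIFT);
 */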

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	unsigned long flags;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	xa_lock_irqsave(&mapping->i_pages, flags);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page, NULL);
	xa_unlock_irqrestore(&mapping->i_pages, flags);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	put_page(page);	/* pagecache ref */
	return 1;
failed:
	xa_unlock_irqrestore(&mapping->i_pages, flags);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	if (mapping->nrpages == 0 && mapping->nrexceptional == 0)
		goto out;

	pagevec_init(&pvec);
	index = start;
	while (index <= end && pagevec_lookup_entries(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
			indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = indices[i];
			if (index > end)
				break;

			if (xa_is_value(page)) {
				if (!invalidate_exceptional_entry2(mapping,
								   index, page))
					ret = -EBUSY;
				continue;
			}

			lock_page(page);
			WARN_ON(page_to_index(page) != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_pages(mapping, index,
						(1 + end - index), false);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_pages(mapping, index,
								1, false);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
	/*
	 * For DAX we invalidate page tables after invalidating page cache.  We
	 * could invalidate page tables while invalidating each entry however
	 * that would be expensive. And doing range unmapping before doesn't
	 * work as we have no cheap way to find whether page cache entry didn't
	 * get remapped later.
	 */
	if (dax_mapping(mapping)) {
		unmap_mapping_pages(mapping, start, end - start + 1, false);
	}
out:
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
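
/*
 * Illustrative sketch (hypothetical caller, not from this file): direct
 * I/O write paths use the pages2 variants so stale cached pages cannot
 * be seen afterwards, and treat -EBUSY as a cue to fall back:
 *
 *	err = invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,
 *			(pos + count - 1) >> PAGE_SHIFT);
 *	if (err)
 *		return err;	(or fall back to buffered I/O)
 */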

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with a lock serializing truncates and writes (generally
 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
 * specific block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = inode->i_size;

	i_size_write(inode, newsize);
	if (newsize > oldsize)
		pagecache_isize_extended(inode, oldsize, newsize);
	truncate_pagecache(inode, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
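
/*
 * Illustrative sketch (hypothetical setattr path, not from this file):
 *
 *	if (attr->ia_valid & ATTR_SIZE) {
 *		truncate_setsize(inode, attr->ia_size);
 *		myfs_truncate_blocks(inode, attr->ia_size);	(hypothetical)
 *	}
 */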

/**
 * pagecache_isize_extended - update pagecache after extension of i_size
 * @inode:	inode for which i_size was extended
 * @from:	original inode size
 * @to:		new inode size
 *
 * Handle extension of inode size either caused by extending truncate or by
 * write starting after current i_size. We mark the page straddling current
 * i_size RO so that page_mkwrite() is called on the nearest write access to
 * the page.  This way filesystem can be sure that page_mkwrite() is called on
 * the page before user writes to the page via mmap after the i_size has been
 * changed.
 *
 * The function must be called after i_size is updated so that page fault
 * coming after we unlock the page will already see the new i_size.
 * The function must be called while we still hold i_mutex - this not only
 * makes sure i_size is stable but also that userspace cannot observe new
 * i_size value before we are prepared to store mmap writes at new inode size.
 */
void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
{
	int bsize = i_blocksize(inode);
	loff_t rounded_from;
	struct page *page;
	pgoff_t index;

	WARN_ON(to > inode->i_size);

	if (from >= to || bsize == PAGE_SIZE)
		return;
	/* Page straddling @from will not have any hole block created? */
	rounded_from = round_up(from, bsize);
	if (to <= rounded_from || !(rounded_from & (PAGE_SIZE - 1)))
		return;

	index = from >> PAGE_SHIFT;
	page = find_lock_page(inode->i_mapping, index);
	/* Page not cached? Nothing to do */
	if (!page)
		return;
	/*
	 * See clear_page_dirty_for_io() for details why set_page_dirty()
	 * is needed.
	 */
	if (page_mkclean(page))
		set_page_dirty(page);
	unlock_page(page);
	put_page(page);
}
EXPORT_SYMBOL(pagecache_isize_extended);
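
/*
 * Illustrative sketch (hypothetical caller, not from this file): an
 * extending write updates i_size first, then lets this helper
 * write-protect the page straddling the old size, mirroring the order
 * used by truncate_setsize() above:
 *
 *	i_size_write(inode, new_size);
 *	pagecache_isize_extended(inode, old_size, new_size);
 */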

/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
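
/*
 * Illustrative sketch (hypothetical fallocate hole punch, not from this
 * file): remove the page cache over the hole before deallocating blocks:
 *
 *	truncate_pagecache_range(inode, offset, offset + len - 1);
 *	myfs_punch_blocks(inode, offset, len);	(hypothetical helper)
 */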