/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */


/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the byte offset within the page of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);
	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
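
/*
 * Sketch (illustrative, not part of this file): a block-backed filesystem
 * can usually leave ->invalidatepage unset in its address_space_operations
 * and fall back to block_invalidatepage() above.  A hypothetical aops
 * table wiring it up explicitly:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.readpage	= foo_readpage,
 *		.writepage	= foo_writepage,
 *		.invalidatepage	= block_invalidatepage,
 *	};
 *
 * foo_aops, foo_readpage and foo_writepage are made-up names.
 */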

/*
 * Zero the tail of a partially-truncated page and give the filesystem a
 * chance to drop any private buffers beyond the truncation point.
 */
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

/*
 * Forget that a page was dirty without writing it back, undoing the zone
 * dirty-page accounting and the task's cancelled-write accounting.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	/* If we're cancelling the page, it had better not be mapped any more */
	if (page_mapped(page)) {
		static unsigned int warncount;

		WARN_ON(++warncount < 5);
	}
		
	if (TestClearPageDirty(page) && account_size &&
			mapping_cap_account_dirty(page->mapping)) {
		dec_zone_page_state(page, NR_FILE_DIRTY);
		task_io_account_cancelled_write(account_size);
	}
}

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page-aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to minimise the amount of IO done against the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
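	/*
	 * First pass: nonblocking.  Skip any page that cannot be locked
	 * at once and any page under writeback; the second pass below
	 * picks them up.
	 */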
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
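	/*
	 * Second pass: blocking.  Wait for page locks and for writeback
	 * to complete, so that no page in the range survives.
	 */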
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
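
/*
 * Usage sketch (hypothetical caller, illustration only): a filesystem
 * shrinking an inode would typically update i_size and then empty the
 * page cache beyond the new end of file:
 *
 *	i_size_write(inode, new_size);
 *	truncate_inode_pages(inode->i_mapping, new_size);
 *
 * both under inode->i_mutex, as required above.
 */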

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
	}
	return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);
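
/*
 * Usage sketch (hypothetical caller): best-effort dropping of an inode's
 * clean, unused page cache, e.g. from an fadvise(POSIX_FADV_DONTNEED)
 * style path:
 *
 *	invalidate_mapping_pages(inode->i_mapping, 0, ~0UL);
 *
 * Dirty, locked, mapped and writeback pages are simply skipped.
 */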

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_list() has a temp ref on them, or because they're transiently sitting
 * in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	/*
	 * Recheck dirtiness under tree_lock: the page could have been
	 * redirtied after try_to_release_page() above, and a dirty page
	 * must not be silently discarded.
	 */
	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !ret && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;	/* index wrapped past end of file */
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			while (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			if (!invalidate_complete_page2(mapping, page))
				ret = -EIO;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	WARN_ON_ONCE(ret);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
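
/*
 * Usage sketch (hypothetical caller): an O_DIRECT write path would shoot
 * down any cached pages covering the byte range it is about to write:
 *
 *	pgoff_t first = pos >> PAGE_CACHE_SHIFT;
 *	pgoff_t last = (pos + count - 1) >> PAGE_CACHE_SHIFT;
 *	int err = invalidate_inode_pages2_range(mapping, first, last);
 *
 * pos, count and err are illustrative locals, not part of this API.
 */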

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);