/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

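/*
 * Map the sector range described by @dax on @bdev to a kernel virtual
 * address and pfn via bdev_direct_access(), holding a reference on the
 * request queue so the device cannot go away while the mapping is in
 * use.  dax_unmap_atomic() drops that reference again.
 */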
static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

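/*
 * Helpers decoding the flag bits of a DAX radix tree entry: PMD vs PTE
 * size, huge zero page (HZP) and empty (allocated but unmapped) entries.
 */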
static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

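/*
 * Read the PAGE_SIZE-aligned block containing sector @n from the DAX
 * device into a freshly allocated page.
 */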
struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked. The function must be called with
 * mapping->tree_lock held
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked. The function must be called with
 * mapping->tree_lock held
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked. The function must be called with
 * mapping->tree_lock held
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it. The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry or
 * put_locked_mapping_entry() once it has locked the entry and wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

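/*
 * Drop the lock we hold on a radix tree entry (or the page lock, if the
 * entry is a plain page) and wake up anybody waiting on that entry.
 */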
static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with a radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

/*
 * Find radix tree entry at given index. If it points to a page, return with
 * the page locked. If it points to the exceptional entry, return with the
 * radix tree entry locked. If the radix tree doesn't contain given index,
 * create empty exceptional entry for the index and return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (!radix_tree_exceptional_entry(entry) ||
			    dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (radix_tree_exceptional_entry(entry) &&
			    dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index, entry);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen an exceptional entry for this index, we should find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if easily possible. This handles DAX
 * entries for invalidate_inode_pages() so we evict the entry only if we can
 * do so without blocking.
 */
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = 0;
	void *entry, **slot;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry) ||
	    slot_locked(mapping, slot))
		goto out;
	if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	    radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	spin_unlock_irq(&mapping->tree_lock);
	if (ret)
		dax_wake_mapping_entry_waiter(mapping, index, entry, true);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void **entry,
			 struct vm_fault *vmf)
{
	struct page *page;
	int ret;

	/* Hole page already exists? Return it...  */
	if (!radix_tree_exceptional_entry(*entry)) {
		page = *entry;
		goto out;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
 out:
	vmf->page = page;
	ret = finish_fault(vmf);
	vmf->page = NULL;
	*entry = page;
	if (!ret) {
		/* Grab reference for PTE that is now referencing the page */
		get_page(page);
		return VM_FAULT_NOPAGE;
	}
	return ret;
}

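/*
 * Copy @size bytes from the DAX device at @sector into the page @to;
 * used to fill the COW page during a write fault on a private mapping.
 */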
static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
		struct page *to, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
		/* replacing huge zero page with PMD block mapping */
		unmap_mapping_range(mapping,
			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = __radix_tree_insert(page_tree, index,
				dax_radix_order(new_entry), new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need hole page anymore, it has been replaced with
		 * locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;
	bool changed;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);
		changed = false;
		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			changed = true;
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			changed = true;
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		if (changed)
			mmu_notifier_invalidate_page(vma->vm_mm, address);
	}
	i_mmap_unlock_read(mapping);
}

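/*
 * Flush a single dirty DAX radix tree entry: write-protect all mappings
 * of the pfn, flush the CPU caches for the whole entry and finally clear
 * the radix tree dirty tag so a later write fault re-dirties the entry.
 */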
static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	struct blk_dax_ctl dax;
	void *entry2, **slot;
	int ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || !radix_tree_exceptional_entry(entry2))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	dax.sector = dax_radix_sector(entry);
	dax.size = PAGE_SIZE << dax_radix_order(entry);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0) {
		put_locked_mapping_entry(mapping, index, entry);
		return ret;
	}

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
	wb_cache_pmem(dax.addr, dax.size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	put_locked_mapping_entry(mapping, index, entry);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

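/*
 * Map a single block at @sector into the faulting address: look up its
 * pfn, record the entry in the radix tree and install the PTE with
 * vm_insert_mixed().
 */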
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, sector_t sector, size_t size,
		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry)) {
		if (entry)
			put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	/*
	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
	 * the fault in either case.
	 */
	finish_mkwrite_fault(vmf);
	put_locked_mapping_entry(mapping, index, entry);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

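/*
 * Zero a sub-page range, either via blkdev_issue_zeroout() when the
 * range is aligned to the logical block size, or by clearing the pmem
 * mapping directly otherwise.
 */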
int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector		= sector,
		.size		= PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, 0);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

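/* Convert a position within the current iomap extent to a 512-byte sector. */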
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

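/*
 * The actor passed to iomap_apply() by dax_iomap_rw(): copy data between
 * the iov_iter and the direct pmem mapping for one extent, returning
 * zeroes for holes and unwritten extents on reads.
 */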
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		struct blk_dax_ctl dax = { 0 };
		ssize_t map_len;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		dax.sector = dax_iomap_sector(iomap, pos);
		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
		map_len = dax_map_atomic(iomap->bdev, &dax);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		dax.addr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
		else
			map_len = copy_to_iter(dax.addr, map_len, iter);
		dax_unmap_atomic(iomap->bdev, &dax);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The callers needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

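/* Translate a negative errno from the fault path into a VM_FAULT_* code. */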
static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

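/*
 * Handle a PTE-sized fault: map the file offset via ->iomap_begin() and
 * then either install a hole page, fill the COW page, or insert a PTE
 * that points directly at pmem.
 */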
static int dax_iomap_pte_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	/*
	 * Note that we don't bother to use iomap_apply here: DAX required
	 * the file system block size to be equal the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		return dax_fault_return(error);
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		vmf_ret = dax_fault_return(-EIO);	/* fs corruption? */
		goto finish_iomap;
	}

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
					vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_unlock_entry;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto unlock_entry;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vmf->vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, sector,
				PAGE_SIZE, &entry, vmf->vma, vmf);
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, &entry, vmf);
			goto unlock_entry;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_unlock_entry:
	vmf_ret = dax_fault_return(error) | major;
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
	return vmf_ret;
}

#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

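/*
 * Map a full PMD worth of pmem for a huge page fault.  We fall back to
 * PTEs if the block mapping is shorter than PMD_SIZE, not PMD-aligned,
 * or the pfn is not devmap-backed.
 */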
static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
		loff_t pos, void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct block_device *bdev = iomap->bdev;
	struct inode *inode = mapping->host;
	struct blk_dax_ctl dax = {
		.sector = dax_iomap_sector(iomap, pos),
		.size = PMD_SIZE,
	};
	long length = dax_map_atomic(bdev, &dax);
	void *ret = NULL;

	if (length < 0) /* dax_map_atomic() failed */
		goto fallback;
	if (length < PMD_SIZE)
		goto unmap_fallback;
	if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
		goto unmap_fallback;
	if (!pfn_t_devmap(dax.pfn))
		goto unmap_fallback;

	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	trace_dax_pmd_insert_mapping(inode, vmf, length, dax.pfn, ret);
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			dax.pfn, vmf->flags & FAULT_FLAG_WRITE);

 unmap_fallback:
	dax_unmap_atomic(bdev, &dax);
fallback:
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length,
			dax.pfn, ret);
	return VM_FAULT_FALLBACK;
}

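/*
 * Handle a read fault of a file hole with a PMD: map the shared huge
 * zero page and track it in the radix tree as an HZP entry.
 */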
static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
			RADIX_DAX_PMD | RADIX_DAX_HZP);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff > max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto fallback;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	/*
	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
	 * the tree, for instance), it will return -EEXIST and we just fall
	 * back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto finish_iomap;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			goto unlock_entry;
		result = dax_pmd_load_hole(vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff, entry);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);