/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

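/*
 * Helpers that test the type flags encoded in a DAX radix tree entry:
 * whether it covers a PMD or a PTE sized range, maps the zero page, or is
 * an empty placeholder.
 */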
static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_ZERO_PAGE;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_entry_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

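/*
 * Wake function for the DAX entry waitqueues: only wake waiters whose key
 * (mapping, entry_start) matches the entry that is being unlocked.
 */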
static int wake_exceptional_entry_func(wait_queue_entry_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

/*
 * Check whether the given slot is locked. The function must be called with
 * mapping->tree_lock held
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot as locked. The function must be called with
 * mapping->tree_lock held
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot as unlocked. The function must be called with
 * mapping->tree_lock held
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up an entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it. The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and wants to
 * unlock it again.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!entry ||
		    WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
		pgoff_t index)
{
	dax_unlock_mapping_entry(mapping, index);
}

/*
 * Called when we are done with radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!entry)
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

/*
 * Find radix tree entry at given index. If it points to an exceptional entry,
 * return it with the radix tree entry locked. If the radix tree doesn't
 * contain given index, create an empty exceptional entry for the index and
 * return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries within the 2MiB range that we are
 * requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (WARN_ON_ONCE(entry && !radix_tree_exceptional_entry(entry))) {
		entry = ERR_PTR(-EIO);
		goto out_unlock;
	}

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (!entry) {
			/*
			 * We needed to drop the page_tree lock while calling
			 * radix_tree_preload() and we didn't have an entry to
			 * lock.  See if another thread inserted an entry at
			 * our index during this time.
			 */
			entry = __radix_tree_lookup(&mapping->page_tree, index,
					NULL, &slot);
			if (entry) {
				radix_tree_preload_end();
				spin_unlock_irq(&mapping->tree_lock);
				goto restart;
			}
		}

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Our insertion of a DAX entry failed, most likely
			 * because we were inserting a PMD entry and it
			 * collided with a PTE sized entry at a different
			 * index in the PMD range.  We haven't inserted
			 * anything into the radix tree and have no waiters to
			 * wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry)))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

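/*
 * Copy a page worth of data from the DAX-mapped block at @sector into the
 * page @to.  Used by the PTE fault path to fill vmf->cow_page on
 * copy-on-write faults.
 */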
static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, struct page *to,
		unsigned long vaddr)
{
	void *vto, *kaddr;
	pgoff_t pgoff;
	pfn_t pfn;
	long rc;
	int id;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)kaddr, vaddr, to);
	kunmap_atomic(vto);
	dax_read_unlock(id);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
		/* we are replacing a zero page with block mapping */
		if (dax_is_pmd_entry(entry))
			unmap_mapping_range(mapping,
					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
					PMD_SIZE, 0);
		else /* pte entry */
			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
					PAGE_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
		entry = new_entry;
	}

	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);

	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

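/* Return the user virtual address at which @pgoff is mapped within @vma. */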
static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address, start, end;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);

		/*
		 * Note because we provide start/end to follow_pte_pmd it will
		 * call mmu_notifier_invalidate_range_start() on our behalf
		 * before taking any lock.
		 */
		if (follow_pte_pmd(vma->vm_mm, address, &start, &end, &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			mmu_notifier_invalidate_range(vma->vm_mm, start, end);
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		mmu_notifier_invalidate_range_end(vma->vm_mm, start, end);
	}
	i_mmap_unlock_read(mapping);
}

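/*
 * Flush a single dirty radix tree entry (PTE or PMD sized) to media:
 * write-protect and clean the page table entries mapping it, flush the
 * cache lines, and finally clear the dirty tag in the radix tree.
 */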
static int dax_writeback_one(struct block_device *bdev,
		struct dax_device *dax_dev, struct address_space *mapping,
		pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	void *entry2, **slot, *kaddr;
	long ret = 0, id;
	sector_t sector;
	pgoff_t pgoff;
	size_t size;
	pfn_t pfn;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || WARN_ON_ONCE(!radix_tree_exceptional_entry(entry2)))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	sector = dax_radix_sector(entry);
	size = PAGE_SIZE << dax_radix_order(entry);

	id = dax_read_lock();
	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (ret)
		goto dax_unlock;

	/*
	 * dax_direct_access() may sleep, so cannot hold tree_lock over
	 * its invocation.
	 */
	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
	if (ret < 0)
		goto dax_unlock;

	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
		ret = -EIO;
		goto dax_unlock;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
	dax_flush(dax_dev, pgoff, kaddr, size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
 dax_unlock:
	dax_read_unlock(id);
	put_locked_mapping_entry(mapping, index);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct dax_device *dax_dev;
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
	if (!dax_dev)
		return -EIO;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	trace_dax_writeback_range(inode, start_index, end_index);

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, dax_dev, mapping,
					indices[i], pvec.pages[i]);
			if (ret < 0) {
				mapping_set_error(mapping, ret);
				goto out;
			}
		}
		start_index = indices[pvec.nr - 1] + 1;
	}
out:
	put_dax(dax_dev);
	trace_dax_writeback_range_done(inode, start_index, end_index);
	return (ret < 0 ? ret : 0);
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);

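/*
 * Insert a radix tree entry for the block at @sector and map it into the
 * page tables at the faulting address.  Used by the PTE fault path for
 * IOMAP_MAPPED extents.
 */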
static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, struct dax_device *dax_dev,
		sector_t sector, size_t size, void *entry,
		struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	void *ret, *kaddr;
	pgoff_t pgoff;
	int id, rc;
	pfn_t pfn;

	rc = bdev_dax_pgoff(bdev, sector, size, &pgoff);
	if (rc)
		return rc;

	id = dax_read_lock();
	rc = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (rc < 0) {
		dax_read_unlock(id);
		return rc;
	}
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);

	trace_dax_insert_mapping(mapping->host, vmf, ret);
	if (vmf->flags & FAULT_FLAG_WRITE)
		return vm_insert_mixed_mkwrite(vma, vaddr, pfn);
	else
		return vm_insert_mixed(vma, vaddr, pfn);
}

/*
 * The user has performed a load from a hole in the file.  Allocating a new
 * page in the file would cause excessive storage usage for workloads with
 * sparse files.  Instead we insert a read-only mapping of the 4k zero page.
 * If this page is ever written to we will re-fault and change the mapping to
 * point to real DAX storage instead.
 */
static int dax_load_hole(struct address_space *mapping, void *entry,
			 struct vm_fault *vmf)
{
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	int ret = VM_FAULT_NOPAGE;
	struct page *zero_page;
	void *entry2;

	zero_page = ZERO_PAGE(0);
	if (unlikely(!zero_page)) {
		ret = VM_FAULT_OOM;
		goto out;
	}

	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
			RADIX_DAX_ZERO_PAGE);
	if (IS_ERR(entry2)) {
		ret = VM_FAULT_SIGBUS;
		goto out;
	}

	vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
out:
	trace_dax_load_hole(inode, vmf, ret);
	return ret;
}

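/*
 * Check whether a partial-page zeroing request is aligned to the device's
 * logical block size, in which case __dax_zero_page_range() can hand it to
 * blkdev_issue_zeroout() instead of zeroing through the direct mapping.
 */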
static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev,
		struct dax_device *dax_dev, sector_t sector,
		unsigned int offset, unsigned int size)
{
	if (dax_range_is_aligned(bdev, offset, size)) {
		sector_t start_sector = sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				size >> 9, GFP_NOFS, 0);
	} else {
		pgoff_t pgoff;
		long rc, id;
		void *kaddr;
		pfn_t pfn;

		rc = bdev_dax_pgoff(bdev, sector, PAGE_SIZE, &pgoff);
		if (rc)
			return rc;

		id = dax_read_lock();
		rc = dax_direct_access(dax_dev, pgoff, 1, &kaddr,
				&pfn);
		if (rc < 0) {
			dax_read_unlock(id);
			return rc;
		}
		memset(kaddr + offset, 0, size);
		dax_flush(dax_dev, pgoff, kaddr + offset, size);
		dax_read_unlock(id);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

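/* Translate a file position into a 512-byte sector within the iomap extent. */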
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

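/*
 * Actor passed to iomap_apply() by dax_iomap_rw(): copies data between the
 * iov_iter and the directly mapped device memory for one iomap extent.
 */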
static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct block_device *bdev = iomap->bdev;
	struct dax_device *dax_dev = iomap->dax_dev;
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;
	int id;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if (iomap->flags & IOMAP_F_NEW) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	id = dax_read_lock();
	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		const size_t size = ALIGN(length + offset, PAGE_SIZE);
		const sector_t sector = dax_iomap_sector(iomap, pos);
		ssize_t map_len;
		pgoff_t pgoff;
		void *kaddr;
		pfn_t pfn;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
		if (ret)
			break;

		map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
				&kaddr, &pfn);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		map_len = PFN_PHYS(map_len);
		kaddr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = dax_copy_from_iter(dax_dev, pgoff, kaddr,
					map_len, iter);
		else
			map_len = copy_to_iter(kaddr, map_len, iter);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}
	dax_read_unlock(id);

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

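/* Convert a negative errno from the fault path into a VM_FAULT_* code. */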
static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

static int dax_iomap_pte_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	trace_dax_pte_fault(inode, vmf, vmf_ret);
	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode)) {
		vmf_ret = VM_FAULT_SIGBUS;
		goto out;
	}

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto out;
	}

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PMD fault that overlaps with
	 * the PTE we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (pmd_trans_huge(*vmf->pmd) || pmd_devmap(*vmf->pmd)) {
		vmf_ret = VM_FAULT_NOPAGE;
		goto unlock_entry;
	}

	/*
	 * Note that we don't bother to use iomap_apply here: DAX required
	 * the file system block size to be equal the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error) {
		vmf_ret = dax_fault_return(error);
		goto unlock_entry;
	}
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		error = -EIO;	/* fs corruption? */
		goto error_finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, iomap.dax_dev,
					sector, PAGE_SIZE, vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
			goto error_finish_iomap;

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
		goto finish_iomap;
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, iomap.dax_dev,
				sector, PAGE_SIZE, entry, vmf->vma, vmf);
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
			vmf_ret = dax_load_hole(mapping, entry, vmf);
			goto finish_iomap;
		}
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_finish_iomap:
	vmf_ret = dax_fault_return(error) | major;
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff);
 out:
	trace_dax_pte_fault_done(inode, vmf, vmf_ret);
	return vmf_ret;
}

#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)

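/*
 * Install a PMD-sized mapping of the device memory backing @pos.  Returns
 * VM_FAULT_FALLBACK if the mapped range is too short, not PMD-aligned, or
 * not devmap-capable, so that the caller can retry with PTEs.
 */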
static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
		loff_t pos, void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	const sector_t sector = dax_iomap_sector(iomap, pos);
	struct dax_device *dax_dev = iomap->dax_dev;
	struct block_device *bdev = iomap->bdev;
	struct inode *inode = mapping->host;
	const size_t size = PMD_SIZE;
	void *ret = NULL, *kaddr;
	long length = 0;
	pgoff_t pgoff;
	pfn_t pfn;
	int id;

	if (bdev_dax_pgoff(bdev, sector, size, &pgoff) != 0)
		goto fallback;

	id = dax_read_lock();
	length = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size), &kaddr, &pfn);
	if (length < 0)
		goto unlock_fallback;
	length = PFN_PHYS(length);

	if (length < size)
		goto unlock_fallback;
	if (pfn_t_to_pfn(pfn) & PG_PMD_COLOUR)
		goto unlock_fallback;
	if (!pfn_t_devmap(pfn))
		goto unlock_fallback;
	dax_read_unlock(id);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		goto fallback;

	trace_dax_pmd_insert_mapping(inode, vmf, length, pfn, ret);
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			pfn, vmf->flags & FAULT_FLAG_WRITE);

unlock_fallback:
	dax_read_unlock(id);
fallback:
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length, pfn, ret);
	return VM_FAULT_FALLBACK;
}

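/*
 * Handle a PMD-sized read fault over a hole by mapping the shared huge zero
 * page read-only; the PMD analogue of dax_load_hole().
 */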
static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void *entry)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE);
	if (IS_ERR(ret))
		goto fallback;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/*
	 * Make sure that the faulting address's PMD offset (color) matches
	 * the PMD offset from the start of the file.  This is necessary so
	 * that a PMD range in the page table overlaps exactly with a PMD
	 * range in the radix tree.
	 */
	if ((vmf->pgoff & PG_PMD_COLOUR) !=
	    ((vmf->address >> PAGE_SHIFT) & PG_PMD_COLOUR))
		goto fallback;

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff > max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * grab_mapping_entry() will make sure we get a 2MiB empty entry, a
	 * 2MiB zero page entry or a DAX PMD.  If it can't (because a 4k page
	 * is already in the tree, for instance), it will return -EEXIST and
	 * we just fall back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto fallback;

	/*
	 * It is possible, particularly with mixed reads & writes to private
	 * mappings, that we have raced with a PTE fault that overlaps with
	 * the PMD we need to set up.  If so just return and the fault will be
	 * retried.
	 */
	if (!pmd_none(*vmf->pmd) && !pmd_trans_huge(*vmf->pmd) &&
			!pmd_devmap(*vmf->pmd)) {
		result = 0;
		goto unlock_entry;
	}

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto unlock_entry;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vmf, &iomap, pos, entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			break;
		result = dax_pmd_load_hole(vmf, &iomap, entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 finish_iomap:
	if (ops->iomap_end) {
1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff);
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
#else
static int dax_iomap_pmd_fault(struct vm_fault *vmf,
			       const struct iomap_ops *ops)
{
	return VM_FAULT_FALLBACK;
}
#endif /* CONFIG_FS_DAX_PMD */

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in
 * their fault handler for DAX files. dax_iomap_fault() assumes the caller
 * has done all the necessary locking for page fault to proceed
 * successfully.
 */
int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
		    const struct iomap_ops *ops)
{
	switch (pe_size) {
	case PE_SIZE_PTE:
		return dax_iomap_pte_fault(vmf, ops);
	case PE_SIZE_PMD:
		return dax_iomap_pmd_fault(vmf, ops);
	default:
		return VM_FAULT_FALLBACK;
	}
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);