/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/dax.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/pagevec.h>
#include <linux/pmem.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>
#include <linux/pfn_t.h>
#include <linux/sizes.h>
#include <linux/mmu_notifier.h>
#include <linux/iomap.h>
#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/fs_dax.h>

/* We choose 4096 entries - same as per-zone page wait tables */
#define DAX_WAIT_TABLE_BITS 12
#define DAX_WAIT_TABLE_ENTRIES (1 << DAX_WAIT_TABLE_BITS)

static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];

static int __init init_dax_wait_table(void)
{
	int i;

	for (i = 0; i < DAX_WAIT_TABLE_ENTRIES; i++)
		init_waitqueue_head(wait_table + i);
	return 0;
}
fs_initcall(init_dax_wait_table);

static long dax_map_atomic(struct block_device *bdev, struct blk_dax_ctl *dax)
{
	struct request_queue *q = bdev->bd_queue;
	long rc = -EIO;

	dax->addr = ERR_PTR(-EIO);
	if (blk_queue_enter(q, true) != 0)
		return rc;

	rc = bdev_direct_access(bdev, dax);
	if (rc < 0) {
		dax->addr = ERR_PTR(rc);
		blk_queue_exit(q);
		return rc;
	}
	return rc;
}

static void dax_unmap_atomic(struct block_device *bdev,
		const struct blk_dax_ctl *dax)
{
	if (IS_ERR(dax->addr))
		return;
	blk_queue_exit(bdev->bd_queue);
}

static int dax_is_pmd_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_PMD;
}

static int dax_is_pte_entry(void *entry)
{
	return !((unsigned long)entry & RADIX_DAX_PMD);
}

static int dax_is_zero_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_HZP;
}

static int dax_is_empty_entry(void *entry)
{
	return (unsigned long)entry & RADIX_DAX_EMPTY;
}
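
/*
 * For reference: the dax_is_*() helpers above test flag bits that are packed
 * into the low bits of an exceptional radix tree entry.  A rough sketch of
 * the encoding (the authoritative definitions live in include/linux/dax.h,
 * so treat the exact layout here as illustrative rather than normative):
 *
 *	entry = RADIX_TREE_EXCEPTIONAL_ENTRY | RADIX_DAX_ENTRY_LOCK?
 *		| RADIX_DAX_PMD? | RADIX_DAX_HZP? | RADIX_DAX_EMPTY?
 *		| (sector << RADIX_DAX_SHIFT)
 *
 * so a single unsigned long records the backing sector, the entry size
 * (PTE vs PMD), whether it is a huge zero page or an empty placeholder,
 * and whether the entry is currently locked.
 */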

struct page *read_dax_sector(struct block_device *bdev, sector_t n)
{
	struct page *page = alloc_pages(GFP_KERNEL, 0);
	struct blk_dax_ctl dax = {
		.size = PAGE_SIZE,
		.sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
	};
	long rc;

	if (!page)
		return ERR_PTR(-ENOMEM);

	rc = dax_map_atomic(bdev, &dax);
	if (rc < 0)
		return ERR_PTR(rc);
	memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
	dax_unmap_atomic(bdev, &dax);
	return page;
}

/*
 * DAX radix tree locking
 */
struct exceptional_entry_key {
	struct address_space *mapping;
	pgoff_t entry_start;
};

struct wait_exceptional_entry_queue {
	wait_queue_t wait;
	struct exceptional_entry_key key;
};

static wait_queue_head_t *dax_entry_waitqueue(struct address_space *mapping,
		pgoff_t index, void *entry, struct exceptional_entry_key *key)
{
	unsigned long hash;

	/*
	 * If 'entry' is a PMD, align the 'index' that we use for the wait
	 * queue to the start of that PMD.  This ensures that all offsets in
	 * the range covered by the PMD map to the same bit lock.
	 */
	if (dax_is_pmd_entry(entry))
		index &= ~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1);

	key->mapping = mapping;
	key->entry_start = index;

	hash = hash_long((unsigned long)mapping ^ index, DAX_WAIT_TABLE_BITS);
	return wait_table + hash;
}

static int wake_exceptional_entry_func(wait_queue_t *wait, unsigned int mode,
				       int sync, void *keyp)
{
	struct exceptional_entry_key *key = keyp;
	struct wait_exceptional_entry_queue *ewait =
		container_of(wait, struct wait_exceptional_entry_queue, wait);

	if (key->mapping != ewait->key.mapping ||
	    key->entry_start != ewait->key.entry_start)
		return 0;
	return autoremove_wake_function(wait, mode, sync, NULL);
}

/*
 * Check whether the given slot is locked. The function must be called with
 * mapping->tree_lock held
 */
static inline int slot_locked(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);
	return entry & RADIX_DAX_ENTRY_LOCK;
}

/*
 * Mark the given slot is locked. The function must be called with
 * mapping->tree_lock held
 */
static inline void *lock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry |= RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Mark the given slot is unlocked. The function must be called with
 * mapping->tree_lock held
 */
static inline void *unlock_slot(struct address_space *mapping, void **slot)
{
	unsigned long entry = (unsigned long)
		radix_tree_deref_slot_protected(slot, &mapping->tree_lock);

	entry &= ~(unsigned long)RADIX_DAX_ENTRY_LOCK;
	radix_tree_replace_slot(&mapping->page_tree, slot, (void *)entry);
	return (void *)entry;
}

/*
 * Look up the entry in the radix tree, wait for it to become unlocked if it
 * is an exceptional entry, and return it. The caller must call
 * put_unlocked_mapping_entry() if it decides not to lock the entry, or
 * put_locked_mapping_entry() once it has locked the entry and wants to
 * unlock it.
 *
 * The function must be called with mapping->tree_lock held.
 */
static void *get_unlocked_mapping_entry(struct address_space *mapping,
					pgoff_t index, void ***slotp)
{
	void *entry, **slot;
	struct wait_exceptional_entry_queue ewait;
	wait_queue_head_t *wq;

	init_wait(&ewait.wait);
	ewait.wait.func = wake_exceptional_entry_func;

	for (;;) {
		entry = __radix_tree_lookup(&mapping->page_tree, index, NULL,
					  &slot);
		if (!entry || !radix_tree_exceptional_entry(entry) ||
		    !slot_locked(mapping, slot)) {
			if (slotp)
				*slotp = slot;
			return entry;
		}

		wq = dax_entry_waitqueue(mapping, index, entry, &ewait.key);
		prepare_to_wait_exclusive(wq, &ewait.wait,
					  TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mapping->tree_lock);
		schedule();
		finish_wait(wq, &ewait.wait);
		spin_lock_irq(&mapping->tree_lock);
	}
}

static void dax_unlock_mapping_entry(struct address_space *mapping,
				     pgoff_t index)
{
	void *entry, **slot;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(&mapping->page_tree, index, NULL, &slot);
	if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry) ||
			 !slot_locked(mapping, slot))) {
		spin_unlock_irq(&mapping->tree_lock);
		return;
	}
	unlock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

static void put_locked_mapping_entry(struct address_space *mapping,
				     pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry)) {
		unlock_page(entry);
		put_page(entry);
	} else {
		dax_unlock_mapping_entry(mapping, index);
	}
}

/*
 * Called when we are done with radix tree entry we looked up via
 * get_unlocked_mapping_entry() and which we didn't lock in the end.
 */
static void put_unlocked_mapping_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	if (!radix_tree_exceptional_entry(entry))
		return;

	/* We have to wake up next waiter for the radix tree entry lock */
	dax_wake_mapping_entry_waiter(mapping, index, entry, false);
}

/*
 * Find the radix tree entry at the given index. If it points to a page, return
 * with the page locked. If it points to an exceptional entry, return with the
 * radix tree entry locked. If the radix tree doesn't contain the given index,
 * create an empty exceptional entry for the index and return with it locked.
 *
 * When requesting an entry with size RADIX_DAX_PMD, grab_mapping_entry() will
 * either return that locked entry or will return an error.  This error will
 * happen if there are any 4k entries (either zero pages or DAX entries)
 * within the 2MiB range that we are requesting.
 *
 * We always favor 4k entries over 2MiB entries. There isn't a flow where we
 * evict 4k entries in order to 'upgrade' them to a 2MiB entry.  A 2MiB
 * insertion will fail if it finds any 4k entries already in the tree, and a
 * 4k insertion will cause an existing 2MiB entry to be unmapped and
 * downgraded to 4k entries.  This happens for both 2MiB huge zero pages as
 * well as 2MiB empty entries.
 *
 * The exception to this downgrade path is for 2MiB DAX PMD entries that have
 * real storage backing them.  We will leave these real 2MiB DAX entries in
 * the tree, and PTE writes will simply dirty the entire 2MiB DAX entry.
 *
 * Note: Unlike filemap_fault() we don't honor FAULT_FLAG_RETRY flags. For
 * persistent memory the benefit is doubtful. We can add that later if we can
 * show it helps.
 */
static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
		unsigned long size_flag)
{
	bool pmd_downgrade = false; /* splitting 2MiB entry into 4k entries? */
	void *entry, **slot;

restart:
	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);

	if (entry) {
		if (size_flag & RADIX_DAX_PMD) {
			if (!radix_tree_exceptional_entry(entry) ||
			    dax_is_pte_entry(entry)) {
				put_unlocked_mapping_entry(mapping, index,
						entry);
				entry = ERR_PTR(-EEXIST);
				goto out_unlock;
			}
		} else { /* trying to grab a PTE entry */
			if (radix_tree_exceptional_entry(entry) &&
			    dax_is_pmd_entry(entry) &&
			    (dax_is_zero_entry(entry) ||
			     dax_is_empty_entry(entry))) {
				pmd_downgrade = true;
			}
		}
	}

	/* No entry for given index? Make sure radix tree is big enough. */
	if (!entry || pmd_downgrade) {
		int err;

		if (pmd_downgrade) {
			/*
			 * Make sure 'entry' remains valid while we drop
			 * mapping->tree_lock.
			 */
			entry = lock_slot(mapping, slot);
		}

		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * Besides huge zero pages the only other thing that gets
		 * downgraded are empty entries which don't need to be
		 * unmapped.
		 */
		if (pmd_downgrade && dax_is_zero_entry(entry))
			unmap_mapping_range(mapping,
				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);

		err = radix_tree_preload(
				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
		if (err) {
			if (pmd_downgrade)
				put_locked_mapping_entry(mapping, index, entry);
			return ERR_PTR(err);
		}
		spin_lock_irq(&mapping->tree_lock);

		if (pmd_downgrade) {
			radix_tree_delete(&mapping->page_tree, index);
			mapping->nrexceptional--;
			dax_wake_mapping_entry_waiter(mapping, index, entry,
					true);
		}

		entry = dax_radix_locked_entry(0, size_flag | RADIX_DAX_EMPTY);

		err = __radix_tree_insert(&mapping->page_tree, index,
				dax_radix_order(entry), entry);
		radix_tree_preload_end();
		if (err) {
			spin_unlock_irq(&mapping->tree_lock);
			/*
			 * Someone already created the entry?  This is a
			 * normal failure when inserting PMDs in a range
			 * that already contains PTEs.  In that case we want
			 * to return -EEXIST immediately.
			 */
			if (err == -EEXIST && !(size_flag & RADIX_DAX_PMD))
				goto restart;
			/*
			 * Our insertion of a DAX PMD entry failed, most
			 * likely because it collided with a PTE sized entry
			 * at a different index in the PMD range.  We haven't
			 * inserted anything into the radix tree and have no
			 * waiters to wake.
			 */
			return ERR_PTR(err);
		}
		/* Good, we have inserted empty locked entry into the tree. */
		mapping->nrexceptional++;
		spin_unlock_irq(&mapping->tree_lock);
		return entry;
	}
	/* Normal page in radix tree? */
	if (!radix_tree_exceptional_entry(entry)) {
		struct page *page = entry;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		lock_page(page);
		/* Page got truncated? Retry... */
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			put_page(page);
			goto restart;
		}
		return page;
	}
	entry = lock_slot(mapping, slot);
 out_unlock:
	spin_unlock_irq(&mapping->tree_lock);
	return entry;
}

/*
 * We do not necessarily hold the mapping->tree_lock when we call this
 * function so it is possible that 'entry' is no longer a valid item in the
 * radix tree.  This is okay because all we really need to do is to find the
 * correct waitqueue where tasks might be waiting for that old 'entry' and
 * wake them.
 */
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
		pgoff_t index, void *entry, bool wake_all)
{
	struct exceptional_entry_key key;
	wait_queue_head_t *wq;

	wq = dax_entry_waitqueue(mapping, index, entry, &key);

	/*
	 * Checking for locked entry and prepare_to_wait_exclusive() happens
	 * under mapping->tree_lock, ditto for entry handling in our callers.
	 * So at this point all tasks that could have seen our entry locked
	 * must be in the waitqueue and the following check will see them.
	 */
	if (waitqueue_active(wq))
		__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
					  pgoff_t index, bool trunc)
{
	int ret = 0;
	void *entry;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, NULL);
	if (!entry || !radix_tree_exceptional_entry(entry))
		goto out;
	if (!trunc &&
	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	put_unlocked_mapping_entry(mapping, index, entry);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}
/*
 * Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
 * entry to get unlocked before deleting it.
 */
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = __dax_invalidate_mapping_entry(mapping, index, true);

	/*
	 * This gets called from truncate / punch_hole path. As such, the caller
	 * must hold locks protecting against concurrent modifications of the
	 * radix tree (usually fs-private i_mmap_sem for writing). Since the
	 * caller has seen exceptional entry for this index, we better find it
	 * at that index as well...
	 */
	WARN_ON_ONCE(!ret);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if easily possible. This handles DAX
 * entries for invalidate_inode_pages() so we evict the entry only if we can
 * do so without blocking.
 */
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
{
	int ret = 0;
	void *entry, **slot;
	struct radix_tree_root *page_tree = &mapping->page_tree;

	spin_lock_irq(&mapping->tree_lock);
	entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry) ||
	    slot_locked(mapping, slot))
		goto out;
	if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
	    radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto out;
	radix_tree_delete(page_tree, index);
	mapping->nrexceptional--;
	ret = 1;
out:
	spin_unlock_irq(&mapping->tree_lock);
	if (ret)
		dax_wake_mapping_entry_waiter(mapping, index, entry, true);
	return ret;
}

/*
 * Invalidate exceptional DAX entry if it is clean.
 */
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
				      pgoff_t index)
{
	return __dax_invalidate_mapping_entry(mapping, index, false);
}

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, void **entry,
			 struct vm_fault *vmf)
{
	struct page *page;
	int ret;

	/* Hole page already exists? Return it...  */
	if (!radix_tree_exceptional_entry(*entry)) {
		page = *entry;
		goto out;
	}

	/* This will replace locked radix tree entry with a hole page */
	page = find_or_create_page(mapping, vmf->pgoff,
				   vmf->gfp_mask | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
 out:
	vmf->page = page;
	ret = finish_fault(vmf);
	vmf->page = NULL;
	*entry = page;
	if (!ret) {
		/* Grab reference for PTE that is now referencing the page */
		get_page(page);
		return VM_FAULT_NOPAGE;
	}
	return ret;
}

static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,
		struct page *to, unsigned long vaddr)
{
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *vto;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	vto = kmap_atomic(to);
	copy_user_page(vto, (void __force *)dax.addr, vaddr, to);
	kunmap_atomic(vto);
	dax_unmap_atomic(bdev, &dax);
	return 0;
}

/*
 * By this point grab_mapping_entry() has ensured that we have a locked entry
 * of the appropriate size so we don't have to worry about downgrading PMDs to
 * PTEs.  If we happen to be trying to insert a PTE and there is a PMD
 * already in the tree, we will skip the insertion and just dirty the PMD as
 * appropriate.
 */
static void *dax_insert_mapping_entry(struct address_space *mapping,
				      struct vm_fault *vmf,
				      void *entry, sector_t sector,
				      unsigned long flags)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	int error = 0;
	bool hole_fill = false;
	void *new_entry;
	pgoff_t index = vmf->pgoff;

	if (vmf->flags & FAULT_FLAG_WRITE)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

	/* Replacing hole page with block mapping? */
	if (!radix_tree_exceptional_entry(entry)) {
		hole_fill = true;
		/*
		 * Unmap the page now before we remove it from page cache below.
		 * The page is locked so it cannot be faulted in again.
		 */
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
				    PAGE_SIZE, 0);
		error = radix_tree_preload(vmf->gfp_mask & ~__GFP_HIGHMEM);
		if (error)
			return ERR_PTR(error);
	} else if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_HZP)) {
		/* replacing huge zero page with PMD block mapping */
		unmap_mapping_range(mapping,
			(vmf->pgoff << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
	}

	spin_lock_irq(&mapping->tree_lock);
	new_entry = dax_radix_locked_entry(sector, flags);

	if (hole_fill) {
		__delete_from_page_cache(entry, NULL);
		/* Drop pagecache reference */
		put_page(entry);
		error = __radix_tree_insert(page_tree, index,
				dax_radix_order(new_entry), new_entry);
		if (error) {
			new_entry = ERR_PTR(error);
			goto unlock;
		}
		mapping->nrexceptional++;
	} else if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
		/*
		 * Only swap our new entry into the radix tree if the current
		 * entry is a zero page or an empty entry.  If a normal PTE or
		 * PMD entry is already in the tree, we leave it alone.  This
		 * means that if we are trying to insert a PTE and the
		 * existing entry is a PMD, we will just leave the PMD in the
		 * tree and dirty it if necessary.
		 */
		struct radix_tree_node *node;
		void **slot;
		void *ret;

		ret = __radix_tree_lookup(page_tree, index, &node, &slot);
		WARN_ON_ONCE(ret != entry);
		__radix_tree_replace(page_tree, node, slot,
				     new_entry, NULL, NULL);
	}
	if (vmf->flags & FAULT_FLAG_WRITE)
		radix_tree_tag_set(page_tree, index, PAGECACHE_TAG_DIRTY);
 unlock:
	spin_unlock_irq(&mapping->tree_lock);
	if (hole_fill) {
		radix_tree_preload_end();
		/*
		 * We don't need hole page anymore, it has been replaced with
		 * locked radix tree entry now.
		 */
		if (mapping->a_ops->freepage)
			mapping->a_ops->freepage(entry);
		unlock_page(entry);
		put_page(entry);
	}
	return new_entry;
}

static inline unsigned long
pgoff_address(pgoff_t pgoff, struct vm_area_struct *vma)
{
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma);
	return address;
}

/* Walk all mappings of a given index of a file and writeprotect them */
static void dax_mapping_entry_mkclean(struct address_space *mapping,
				      pgoff_t index, unsigned long pfn)
{
	struct vm_area_struct *vma;
	pte_t pte, *ptep = NULL;
	pmd_t *pmdp = NULL;
	spinlock_t *ptl;
	bool changed;

	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, index, index) {
		unsigned long address;

		cond_resched();

		if (!(vma->vm_flags & VM_SHARED))
			continue;

		address = pgoff_address(index, vma);
		changed = false;
		if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
			continue;

		if (pmdp) {
#ifdef CONFIG_FS_DAX_PMD
			pmd_t pmd;

			if (pfn != pmd_pfn(*pmdp))
				goto unlock_pmd;
			if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
				goto unlock_pmd;

			flush_cache_page(vma, address, pfn);
			pmd = pmdp_huge_clear_flush(vma, address, pmdp);
			pmd = pmd_wrprotect(pmd);
			pmd = pmd_mkclean(pmd);
			set_pmd_at(vma->vm_mm, address, pmdp, pmd);
			changed = true;
unlock_pmd:
			spin_unlock(ptl);
#endif
		} else {
			if (pfn != pte_pfn(*ptep))
				goto unlock_pte;
			if (!pte_dirty(*ptep) && !pte_write(*ptep))
				goto unlock_pte;

			flush_cache_page(vma, address, pfn);
			pte = ptep_clear_flush(vma, address, ptep);
			pte = pte_wrprotect(pte);
			pte = pte_mkclean(pte);
			set_pte_at(vma->vm_mm, address, ptep, pte);
			changed = true;
unlock_pte:
			pte_unmap_unlock(ptep, ptl);
		}

		if (changed)
			mmu_notifier_invalidate_page(vma->vm_mm, address);
	}
	i_mmap_unlock_read(mapping);
}

static int dax_writeback_one(struct block_device *bdev,
		struct address_space *mapping, pgoff_t index, void *entry)
{
	struct radix_tree_root *page_tree = &mapping->page_tree;
	struct blk_dax_ctl dax;
	void *entry2, **slot;
	int ret = 0;

	/*
	 * A page got tagged dirty in DAX mapping? Something is seriously
	 * wrong.
	 */
	if (WARN_ON(!radix_tree_exceptional_entry(entry)))
		return -EIO;

	spin_lock_irq(&mapping->tree_lock);
	entry2 = get_unlocked_mapping_entry(mapping, index, &slot);
	/* Entry got punched out / reallocated? */
	if (!entry2 || !radix_tree_exceptional_entry(entry2))
		goto put_unlocked;
	/*
	 * Entry got reallocated elsewhere? No need to writeback. We have to
	 * compare sectors as we must not bail out due to difference in lockbit
	 * or entry type.
	 */
	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
		goto put_unlocked;
	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
				dax_is_zero_entry(entry))) {
		ret = -EIO;
		goto put_unlocked;
	}

	/* Another fsync thread may have already written back this entry */
	if (!radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
		goto put_unlocked;
	/* Lock the entry to serialize with page faults */
	entry = lock_slot(mapping, slot);
	/*
	 * We can clear the tag now but we have to be careful so that concurrent
	 * dax_writeback_one() calls for the same index cannot finish before we
	 * actually flush the caches. This is achieved as the calls will look
	 * at the entry only under tree_lock and once they do that they will
	 * see the entry locked and wait for it to unlock.
	 */
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_TOWRITE);
	spin_unlock_irq(&mapping->tree_lock);

	/*
	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
	 * in the middle of a PMD, the 'index' we are given will be aligned to
	 * the start index of the PMD, as will the sector we pull from
	 * 'entry'.  This allows us to flush for PMD_SIZE and not have to
	 * worry about partial PMD writebacks.
	 */
	dax.sector = dax_radix_sector(entry);
	dax.size = PAGE_SIZE << dax_radix_order(entry);

	/*
	 * We cannot hold tree_lock while calling dax_map_atomic() because it
	 * eventually calls cond_resched().
	 */
	ret = dax_map_atomic(bdev, &dax);
	if (ret < 0) {
		put_locked_mapping_entry(mapping, index, entry);
		return ret;
	}

	if (WARN_ON_ONCE(ret < dax.size)) {
		ret = -EIO;
		goto unmap;
	}

	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(dax.pfn));
	wb_cache_pmem(dax.addr, dax.size);
	/*
	 * After we have flushed the cache, we can clear the dirty tag. There
	 * cannot be new dirty data in the pfn after the flush has completed as
	 * the pfn mappings are writeprotected and fault waits for mapping
	 * entry lock.
	 */
	spin_lock_irq(&mapping->tree_lock);
	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
	spin_unlock_irq(&mapping->tree_lock);
 unmap:
	dax_unmap_atomic(bdev, &dax);
	put_locked_mapping_entry(mapping, index, entry);
	return ret;

 put_unlocked:
	put_unlocked_mapping_entry(mapping, index, entry2);
	spin_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Flush the mapping to the persistent domain within the byte range of [start,
 * end]. This is required by data integrity operations to ensure file data is
 * on persistent storage prior to completion of the operation.
 */
int dax_writeback_mapping_range(struct address_space *mapping,
		struct block_device *bdev, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	pgoff_t start_index, end_index;
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	bool done = false;
	int i, ret = 0;

	if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
		return -EIO;

	if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
		return 0;

	start_index = wbc->range_start >> PAGE_SHIFT;
	end_index = wbc->range_end >> PAGE_SHIFT;

	tag_pages_for_writeback(mapping, start_index, end_index);

	pagevec_init(&pvec, 0);
	while (!done) {
		pvec.nr = find_get_entries_tag(mapping, start_index,
				PAGECACHE_TAG_TOWRITE, PAGEVEC_SIZE,
				pvec.pages, indices);

		if (pvec.nr == 0)
			break;

		for (i = 0; i < pvec.nr; i++) {
			if (indices[i] > end_index) {
				done = true;
				break;
			}

			ret = dax_writeback_one(bdev, mapping, indices[i],
					pvec.pages[i]);
			if (ret < 0)
				return ret;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(dax_writeback_mapping_range);
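
/*
 * A filesystem typically calls dax_writeback_mapping_range() from its
 * ->writepages() method for DAX inodes.  A minimal sketch; the wrapper name
 * "example_writepages" and the generic_writepages() fallback are chosen here
 * purely for illustration:
 *
 *	static int example_writepages(struct address_space *mapping,
 *				      struct writeback_control *wbc)
 *	{
 *		if (IS_DAX(mapping->host))
 *			return dax_writeback_mapping_range(mapping,
 *					mapping->host->i_sb->s_bdev, wbc);
 *		return generic_writepages(mapping, wbc);
 *	}
 */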

static int dax_insert_mapping(struct address_space *mapping,
		struct block_device *bdev, sector_t sector, size_t size,
		void **entryp, struct vm_area_struct *vma, struct vm_fault *vmf)
{
	unsigned long vaddr = vmf->address;
	struct blk_dax_ctl dax = {
		.sector = sector,
		.size = size,
	};
	void *ret;
	void *entry = *entryp;

	if (dax_map_atomic(bdev, &dax) < 0)
		return PTR_ERR(dax.addr);
	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, entry, dax.sector, 0);
	if (IS_ERR(ret))
		return PTR_ERR(ret);
	*entryp = ret;

	return vm_insert_mixed(vma, vaddr, dax.pfn);
}

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	void *entry, **slot;
	pgoff_t index = vmf->pgoff;

	spin_lock_irq(&mapping->tree_lock);
	entry = get_unlocked_mapping_entry(mapping, index, &slot);
	if (!entry || !radix_tree_exceptional_entry(entry)) {
		if (entry)
			put_unlocked_mapping_entry(mapping, index, entry);
		spin_unlock_irq(&mapping->tree_lock);
		return VM_FAULT_NOPAGE;
	}
	radix_tree_tag_set(&mapping->page_tree, index, PAGECACHE_TAG_DIRTY);
	entry = lock_slot(mapping, slot);
	spin_unlock_irq(&mapping->tree_lock);
	/*
	 * If we race with somebody updating the PTE and finish_mkwrite_fault()
	 * fails, we don't care. We need to return VM_FAULT_NOPAGE and retry
	 * the fault in either case.
	 */
	finish_mkwrite_fault(vmf);
	put_locked_mapping_entry(mapping, index, entry);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

static bool dax_range_is_aligned(struct block_device *bdev,
				 unsigned int offset, unsigned int length)
{
	unsigned short sector_size = bdev_logical_block_size(bdev);

	if (!IS_ALIGNED(offset, sector_size))
		return false;
	if (!IS_ALIGNED(length, sector_size))
		return false;

	return true;
}

int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
		unsigned int offset, unsigned int length)
{
	struct blk_dax_ctl dax = {
		.sector		= sector,
		.size		= PAGE_SIZE,
	};

	if (dax_range_is_aligned(bdev, offset, length)) {
		sector_t start_sector = dax.sector + (offset >> 9);

		return blkdev_issue_zeroout(bdev, start_sector,
				length >> 9, GFP_NOFS, true);
	} else {
		if (dax_map_atomic(bdev, &dax) < 0)
			return PTR_ERR(dax.addr);
		clear_pmem(dax.addr + offset, length);
		dax_unmap_atomic(bdev, &dax);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);

static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
	return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
}

static loff_t
dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
		struct iomap *iomap)
{
	struct iov_iter *iter = data;
	loff_t end = pos + length, done = 0;
	ssize_t ret = 0;

	if (iov_iter_rw(iter) == READ) {
		end = min(end, i_size_read(inode));
		if (pos >= end)
			return 0;

		if (iomap->type == IOMAP_HOLE || iomap->type == IOMAP_UNWRITTEN)
			return iov_iter_zero(min(length, end - pos), iter);
	}

	if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
		return -EIO;

	/*
	 * Write can allocate block for an area which has a hole page mapped
	 * into page tables. We have to tear down these mappings so that data
	 * written by write(2) is visible in mmap.
	 */
	if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
		invalidate_inode_pages2_range(inode->i_mapping,
					      pos >> PAGE_SHIFT,
					      (end - 1) >> PAGE_SHIFT);
	}

	while (pos < end) {
		unsigned offset = pos & (PAGE_SIZE - 1);
		struct blk_dax_ctl dax = { 0 };
		ssize_t map_len;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		dax.sector = dax_iomap_sector(iomap, pos);
		dax.size = (length + offset + PAGE_SIZE - 1) & PAGE_MASK;
		map_len = dax_map_atomic(iomap->bdev, &dax);
		if (map_len < 0) {
			ret = map_len;
			break;
		}

		dax.addr += offset;
		map_len -= offset;
		if (map_len > end - pos)
			map_len = end - pos;

		if (iov_iter_rw(iter) == WRITE)
			map_len = copy_from_iter_pmem(dax.addr, map_len, iter);
		else
			map_len = copy_to_iter(dax.addr, map_len, iter);
		dax_unmap_atomic(iomap->bdev, &dax);
		if (map_len <= 0) {
			ret = map_len ? map_len : -EFAULT;
			break;
		}

		pos += map_len;
		length -= map_len;
		done += map_len;
	}

	return done ? done : ret;
}

/**
 * dax_iomap_rw - Perform I/O to a DAX file
 * @iocb:	The control block for this I/O
 * @iter:	The addresses to do I/O from or to
 * @ops:	iomap ops passed from the file system
 *
 * This function performs read and write operations to directly mapped
 * persistent memory.  The caller needs to take care of read/write exclusion
 * and evicting any page cache pages in the region under I/O.
 */
ssize_t
dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
		const struct iomap_ops *ops)
{
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t pos = iocb->ki_pos, ret = 0, done = 0;
	unsigned flags = 0;

	if (iov_iter_rw(iter) == WRITE) {
		lockdep_assert_held_exclusive(&inode->i_rwsem);
		flags |= IOMAP_WRITE;
	} else {
		lockdep_assert_held(&inode->i_rwsem);
	}

	while (iov_iter_count(iter)) {
		ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
				iter, dax_iomap_actor);
		if (ret <= 0)
			break;
		pos += ret;
		done += ret;
	}

	iocb->ki_pos += done;
	return done ? done : ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);
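
/*
 * dax_iomap_rw() is intended to be called from a filesystem's ->read_iter()
 * and ->write_iter() methods once the inode lock is held, as the comment
 * above requires.  A minimal sketch of the read side ("example_iomap_ops"
 * stands in for the filesystem's own struct iomap_ops):
 *
 *	static ssize_t example_dax_read_iter(struct kiocb *iocb,
 *					     struct iov_iter *to)
 *	{
 *		struct inode *inode = file_inode(iocb->ki_filp);
 *		ssize_t ret;
 *
 *		inode_lock_shared(inode);
 *		ret = dax_iomap_rw(iocb, to, &example_iomap_ops);
 *		inode_unlock_shared(inode);
 *		return ret;
 *	}
 */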

static int dax_fault_return(int error)
{
	if (error == 0)
		return VM_FAULT_NOPAGE;
	if (error == -ENOMEM)
		return VM_FAULT_OOM;
	return VM_FAULT_SIGBUS;
}

/**
 * dax_iomap_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @ops: iomap ops passed from the file system
 *
 * When a page fault occurs, filesystems may call this helper in their fault
 * or mkwrite handler for DAX files. Assumes the caller has done all the
 * necessary locking for the page fault to proceed successfully.
 */
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			const struct iomap_ops *ops)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long vaddr = vmf->address;
	loff_t pos = (loff_t)vmf->pgoff << PAGE_SHIFT;
	sector_t sector;
	struct iomap iomap = { 0 };
	unsigned flags = IOMAP_FAULT;
	int error, major = 0;
	int vmf_ret = 0;
	void *entry;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is supposed
	 * to hold locks serializing us with truncate / punch hole so this is
	 * a reliable test.
	 */
	if (pos >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
		flags |= IOMAP_WRITE;

	/*
	 * Note that we don't bother to use iomap_apply here: DAX required
	 * the file system block size to be equal the page size, which means
	 * that we never have to deal with more than a single extent here.
	 */
	error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
	if (error)
		return dax_fault_return(error);
	if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
		vmf_ret = dax_fault_return(-EIO);	/* fs corruption? */
		goto finish_iomap;
	}

	entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
	if (IS_ERR(entry)) {
		vmf_ret = dax_fault_return(PTR_ERR(entry));
		goto finish_iomap;
	}

	sector = dax_iomap_sector(&iomap, pos);

	if (vmf->cow_page) {
		switch (iomap.type) {
		case IOMAP_HOLE:
		case IOMAP_UNWRITTEN:
			clear_user_highpage(vmf->cow_page, vaddr);
			break;
		case IOMAP_MAPPED:
			error = copy_user_dax(iomap.bdev, sector, PAGE_SIZE,
					vmf->cow_page, vaddr);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EIO;
			break;
		}

		if (error)
1195
			goto error_unlock_entry;
1196 1197 1198 1199 1200

		__SetPageUptodate(vmf->cow_page);
		vmf_ret = finish_fault(vmf);
		if (!vmf_ret)
			vmf_ret = VM_FAULT_DONE_COW;
1201
		goto unlock_entry;
1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212
	}

	switch (iomap.type) {
	case IOMAP_MAPPED:
		if (iomap.flags & IOMAP_F_NEW) {
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
		}
		error = dax_insert_mapping(mapping, iomap.bdev, sector,
				PAGE_SIZE, &entry, vma, vmf);
1213 1214 1215
		/* -EBUSY is fine, somebody else faulted on the same PTE */
		if (error == -EBUSY)
			error = 0;
1216 1217 1218
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
1219
		if (!(vmf->flags & FAULT_FLAG_WRITE)) {
1220
			vmf_ret = dax_load_hole(mapping, &entry, vmf);
1221
			goto unlock_entry;
1222
		}
1223 1224 1225 1226 1227 1228 1229
		/*FALLTHRU*/
	default:
		WARN_ON_ONCE(1);
		error = -EIO;
		break;
	}

 error_unlock_entry:
	vmf_ret = dax_fault_return(error) | major;
 unlock_entry:
	put_locked_mapping_entry(mapping, vmf->pgoff, entry);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PAGE_SIZE;

		if (vmf_ret & VM_FAULT_ERROR)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PTE we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
	}
	return vmf_ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);
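
/*
 * Filesystems hook dax_iomap_fault() into their vm_operations_struct, e.g.
 * as the ->fault handler of a DAX file's mapping.  A minimal sketch (the
 * wrapper name and "example_iomap_ops" are illustrative; real callers also
 * bracket write faults with sb_start_pagefault()/sb_end_pagefault()):
 *
 *	static int example_dax_fault(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		return dax_iomap_fault(vma, vmf, &example_iomap_ops);
 *	}
 */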

#ifdef CONFIG_FS_DAX_PMD
/*
 * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
 * more often than one might expect in the below functions.
 */
#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
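
/*
 * For example, with 4k pages and 2MiB PMDs, PMD_SIZE >> PAGE_SHIFT == 512,
 * so PG_PMD_COLOUR == 0x1ff: (pgoff & PG_PMD_COLOUR) is a page's offset
 * within its PMD, and a pfn must have (pfn & PG_PMD_COLOUR) == 0 to be
 * eligible for a PMD-sized mapping.
 */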

static int dax_pmd_insert_mapping(struct vm_fault *vmf, struct iomap *iomap,
		loff_t pos, void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct block_device *bdev = iomap->bdev;
	struct inode *inode = mapping->host;
	struct blk_dax_ctl dax = {
		.sector = dax_iomap_sector(iomap, pos),
		.size = PMD_SIZE,
	};
	long length = dax_map_atomic(bdev, &dax);
	void *ret = NULL;

	if (length < 0) /* dax_map_atomic() failed */
		goto fallback;
	if (length < PMD_SIZE)
		goto unmap_fallback;
	if (pfn_t_to_pfn(dax.pfn) & PG_PMD_COLOUR)
		goto unmap_fallback;
	if (!pfn_t_devmap(dax.pfn))
		goto unmap_fallback;

	dax_unmap_atomic(bdev, &dax);

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, dax.sector,
			RADIX_DAX_PMD);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	trace_dax_pmd_insert_mapping(inode, vmf, length, dax.pfn, ret);
	return vmf_insert_pfn_pmd(vmf->vma, vmf->address, vmf->pmd,
			dax.pfn, vmf->flags & FAULT_FLAG_WRITE);

 unmap_fallback:
	dax_unmap_atomic(bdev, &dax);
fallback:
	trace_dax_pmd_insert_mapping_fallback(inode, vmf, length,
			dax.pfn, ret);
	return VM_FAULT_FALLBACK;
}

static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
		void **entryp)
{
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	struct inode *inode = mapping->host;
	struct page *zero_page;
	void *ret = NULL;
	spinlock_t *ptl;
	pmd_t pmd_entry;

	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

	if (unlikely(!zero_page))
		goto fallback;

	ret = dax_insert_mapping_entry(mapping, vmf, *entryp, 0,
			RADIX_DAX_PMD | RADIX_DAX_HZP);
	if (IS_ERR(ret))
		goto fallback;
	*entryp = ret;

	ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
	if (!pmd_none(*(vmf->pmd))) {
		spin_unlock(ptl);
		goto fallback;
	}

	pmd_entry = mk_pmd(zero_page, vmf->vma->vm_page_prot);
	pmd_entry = pmd_mkhuge(pmd_entry);
	set_pmd_at(vmf->vma->vm_mm, pmd_addr, vmf->pmd, pmd_entry);
	spin_unlock(ptl);
	trace_dax_pmd_load_hole(inode, vmf, zero_page, ret);
	return VM_FAULT_NOPAGE;

fallback:
	trace_dax_pmd_load_hole_fallback(inode, vmf, zero_page, ret);
	return VM_FAULT_FALLBACK;
}

int dax_iomap_pmd_fault(struct vm_fault *vmf, const struct iomap_ops *ops)
{
	struct vm_area_struct *vma = vmf->vma;
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long pmd_addr = vmf->address & PMD_MASK;
	bool write = vmf->flags & FAULT_FLAG_WRITE;
	unsigned int iomap_flags = (write ? IOMAP_WRITE : 0) | IOMAP_FAULT;
	struct inode *inode = mapping->host;
	int result = VM_FAULT_FALLBACK;
	struct iomap iomap = { 0 };
	pgoff_t max_pgoff, pgoff;
	void *entry;
	loff_t pos;
	int error;

	/*
	 * Check whether offset isn't beyond end of file now. Caller is
	 * supposed to hold locks serializing us with truncate / punch hole so
	 * this is a reliable test.
	 */
	pgoff = linear_page_index(vma, pmd_addr);
	max_pgoff = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	trace_dax_pmd_fault(inode, vmf, max_pgoff, 0);

	/* Fall back to PTEs if we're going to COW */
	if (write && !(vma->vm_flags & VM_SHARED))
		goto fallback;

	/* If the PMD would extend outside the VMA */
	if (pmd_addr < vma->vm_start)
		goto fallback;
	if ((pmd_addr + PMD_SIZE) > vma->vm_end)
		goto fallback;

	if (pgoff > max_pgoff) {
		result = VM_FAULT_SIGBUS;
		goto out;
	}

	/* If the PMD would extend beyond the file size */
	if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
		goto fallback;

	/*
	 * Note that we don't use iomap_apply here.  We aren't doing I/O, only
	 * setting up a mapping, so really we're using iomap_begin() as a way
	 * to look up our filesystem block.
	 */
	pos = (loff_t)pgoff << PAGE_SHIFT;
	error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
	if (error)
		goto fallback;

	if (iomap.offset + iomap.length < pos + PMD_SIZE)
		goto finish_iomap;

	/*
	 * grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
	 * PMD or a HZP entry.  If it can't (because a 4k page is already in
	 * the tree, for instance), it will return -EEXIST and we just fall
	 * back to 4k entries.
	 */
	entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
	if (IS_ERR(entry))
		goto finish_iomap;

	switch (iomap.type) {
	case IOMAP_MAPPED:
		result = dax_pmd_insert_mapping(vmf, &iomap, pos, &entry);
		break;
	case IOMAP_UNWRITTEN:
	case IOMAP_HOLE:
		if (WARN_ON_ONCE(write))
			goto unlock_entry;
		result = dax_pmd_load_hole(vmf, &iomap, &entry);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

 unlock_entry:
	put_locked_mapping_entry(mapping, pgoff, entry);
 finish_iomap:
	if (ops->iomap_end) {
		int copied = PMD_SIZE;

		if (result == VM_FAULT_FALLBACK)
			copied = 0;
		/*
		 * The fault is done by now and there's no way back (other
		 * thread may be already happily using PMD we have installed).
		 * Just ignore error from ->iomap_end since we cannot do much
		 * with it.
		 */
		ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
				&iomap);
	}
 fallback:
	if (result == VM_FAULT_FALLBACK) {
		split_huge_pmd(vma, vmf->pmd, vmf->address);
		count_vm_event(THP_FAULT_FALLBACK);
	}
out:
	trace_dax_pmd_fault_done(inode, vmf, max_pgoff, result);
	return result;
}
EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
#endif /* CONFIG_FS_DAX_PMD */