/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>
#include <linux/sched/mm.h>

#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"

static kmem_zone_t *xfs_buf_zone;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)


static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped, but the code is clever
	 * enough to know it doesn't have to map a single page, so the check has
	 * to be both for b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * Bump the I/O in flight count on the buftarg if we haven't yet done so for
 * this buffer. The count is incremented once per buffer (per hold cycle)
 * because the corresponding decrement is deferred to buffer release. Buffers
 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
 * tracking adds unnecessary overhead. This is used for synchronization
 * purposes with unmount (see xfs_wait_buftarg()), so all we really need is a
 * count of in-flight buffers.
 *
 * Buffers that are never released (e.g., superblock, iclog buffers) must set
 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
 * never reaches zero and unmount hangs indefinitely.
 */
static inline void
xfs_buf_ioacct_inc(
	struct xfs_buf	*bp)
{
	if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
		return;

	ASSERT(bp->b_flags & XBF_ASYNC);
	bp->b_flags |= _XBF_IN_FLIGHT;
	percpu_counter_inc(&bp->b_target->bt_io_count);
}

/*
 * Clear the in-flight state on a buffer about to be released to the LRU or
 * freed and unaccount from the buftarg.
 */
static inline void
xfs_buf_ioacct_dec(
	struct xfs_buf	*bp)
{
	if (!(bp->b_flags & _XBF_IN_FLIGHT))
		return;

	bp->b_flags &= ~_XBF_IN_FLIGHT;
	percpu_counter_dec(&bp->b_target->bt_io_count);
}
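
/*
 * Illustrative pairing, not part of the original file: for an async buffer
 * the in-flight count is bumped exactly once at submission and dropped again
 * when the buffer is released or marked stale, so a typical cycle is:
 *
 *	xfs_buf_submit(bp);	// xfs_buf_ioacct_inc() bumps bt_io_count
 *	...IO completes...
 *	xfs_buf_rele(bp);	// xfs_buf_ioacct_dec() drops it again
 *
 * xfs_wait_buftarg() relies on bt_io_count draining to zero at unmount.
 */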

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	/*
	 * Once the buffer is marked stale and unlocked, a subsequent lookup
	 * could reset b_flags. There is no guarantee that the buffer is
	 * unaccounted (released to LRU) before that occurs. Drop in-flight
	 * status now to preserve accounting consistency.
	 */
	xfs_buf_ioacct_dec(bp);

	spin_lock(&bp->b_lock);
	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
}

static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				KM_NOFS);
	if (!bp->b_maps)
		return -ENOMEM;
	return 0;
}

/*
 *	Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	spin_lock_init(&bp->b_lock);
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error) {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(target->bt_mount, xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}

/*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 *	Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 *	Releases the specified buffer.
 *
 *	The modification state of any associated pages is left unchanged.
 *	The buffer must not be on any hash - use xfs_buf_rele instead for
 *	hashed and refcounted buffers
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		start, end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(size, KM_NOFS);
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
	page_count = end - start;
	error = _xfs_buf_get_pages(bp, page_count);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = -ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
					current->comm, current->pid,
					__func__, gfp_mask);

			XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	bp->b_flags &= ~_XBF_PAGES;
	return error;
}

/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
		int retried = 0;
		unsigned noio_flag;

		/*
		 * vm_map_ram() will allocate auxiliary structures (e.g.
		 * pagetables) with GFP_KERNEL, yet we are likely to be under
		 * GFP_NOFS context here. Hence we need to tell memory reclaim
		 * that we are in such a context via PF_MEMALLOC_NOIO to prevent
		 * memory reclaim re-entering the filesystem here and
		 * potentially deadlocking.
		 */
		noio_flag = memalloc_noio_save();
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);
		memalloc_noio_restore(noio_flag);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */
static int
_xfs_buf_obj_cmp(
	struct rhashtable_compare_arg	*arg,
	const void			*obj)
{
	const struct xfs_buf_map	*map = arg->key;
	const struct xfs_buf		*bp = obj;

	/*
	 * The key hashing in the lookup path depends on the key being the
	 * first element of the compare_arg, make sure to assert this.
	 */
	BUILD_BUG_ON(offsetof(struct xfs_buf_map, bm_bn) != 0);

	if (bp->b_bn != map->bm_bn)
		return 1;

	if (unlikely(bp->b_length != map->bm_len)) {
		/*
		 * found a block number match. If the range doesn't
		 * match, the only way this is allowed is if the buffer
		 * in the cache is stale and the transaction that made
		 * it stale has not yet committed. i.e. we are
		 * reallocating a busy extent. Skip this buffer and
		 * continue searching for an exact match.
		 */
		ASSERT(bp->b_flags & XBF_STALE);
		return 1;
	}
	return 0;
}

static const struct rhashtable_params xfs_buf_hash_params = {
	.min_size		= 32,	/* empty AGs have minimal footprint */
	.nelem_hint		= 16,
	.key_len		= sizeof(xfs_daddr_t),
	.key_offset		= offsetof(struct xfs_buf, b_bn),
	.head_offset		= offsetof(struct xfs_buf, b_rhash_head),
	.automatic_shrinking	= true,
	.obj_cmpfn		= _xfs_buf_obj_cmp,
};

int
xfs_buf_hash_init(
	struct xfs_perag	*pag)
{
	spin_lock_init(&pag->pag_buf_lock);
	return rhashtable_init(&pag->pag_buf_hash, &xfs_buf_hash_params);
}

void
xfs_buf_hash_destroy(
	struct xfs_perag	*pag)
{
	rhashtable_destroy(&pag->pag_buf_hash);
}

/*
 *	Look up, and create if absent, a lockable buffer for
 *	a given range of an inode.  The buffer is returned
 *	locked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	struct xfs_buftarg	*btp,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	struct xfs_perag	*pag;
	xfs_buf_t		*bp;
	struct xfs_buf_map	cmap = { .bm_bn = map[0].bm_bn };
	xfs_daddr_t		eofs;
	int			i;

	for (i = 0; i < nmaps; i++)
		cmap.bm_len += map[i].bm_len;

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(BBTOB(cmap.bm_len) < btp->bt_meta_sectorsize));
	ASSERT(!(BBTOB(cmap.bm_bn) & (xfs_off_t)btp->bt_meta_sectormask));

	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
	if (cmap.bm_bn < 0 || cmap.bm_bn >= eofs) {
		/*
		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
		 * but none of the higher level infrastructure supports
		 * returning a specific error on buffer lookup failures.
		 */
		xfs_alert(btp->bt_mount,
			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
			  __func__, cmap.bm_bn, eofs);
		WARN_ON(1);
		return NULL;
	}

	pag = xfs_perag_get(btp->bt_mount,
			    xfs_daddr_to_agno(btp->bt_mount, cmap.bm_bn));

	spin_lock(&pag->pag_buf_lock);
	bp = rhashtable_lookup_fast(&pag->pag_buf_hash, &cmap,
				    xfs_buf_hash_params);
	if (bp) {
		atomic_inc(&bp->b_hold);
		goto found;
	}

	/* No match found */
	if (new_bp) {
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		rhashtable_insert_fast(&pag->pag_buf_hash,
				       &new_bp->b_rhash_head,
				       xfs_buf_hash_params);
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		ASSERT(bp->b_iodone == NULL);
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
		bp->b_ops = NULL;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
	if (unlikely(!new_bp))
		return NULL;

	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
	if (!bp) {
		xfs_buf_free(new_bp);
		return NULL;
	}

	if (bp != new_bp)
		xfs_buf_free(new_bp);

found:
	if (!bp->b_addr) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			xfs_buf_relse(bp);
			return NULL;
		}
	}

	/*
	 * Clear b_error if this is a lookup from a caller that doesn't expect
	 * valid data to be found in the buffer.
	 */
	if (!(flags & XBF_READ))
		xfs_buf_ioerror(bp, 0);

	XFS_STATS_INC(target->bt_mount, xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;
}

STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	ASSERT(!(flags & XBF_WRITE));
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	if (flags & XBF_ASYNC) {
		xfs_buf_submit(bp);
		return 0;
	}
	return xfs_buf_submit_wait(bp);
}

xfs_buf_t *
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get_map(target, map, nmaps, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!(bp->b_flags & XBF_DONE)) {
			XFS_STATS_INC(target->bt_mount, xb_get_read);
			bp->b_ops = ops;
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			xfs_buf_relse(bp);
			return NULL;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;
}
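
/*
 * Illustrative sketch, not part of the original file: a synchronous read of a
 * single metadata extent through the map-based API above. The buftarg, block
 * number, length and verifier ops are placeholders a hypothetical caller
 * would supply.
 */
#if 0
static int
example_read_block(
	struct xfs_buftarg		*btp,
	xfs_daddr_t			blkno,
	int				numblks,
	const struct xfs_buf_ops	*ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	struct xfs_buf			*bp;

	bp = xfs_buf_read_map(btp, &map, 1, 0, ops);	/* locked on return */
	if (!bp)
		return -ENOMEM;
	if (bp->b_error) {
		int error = bp->b_error;

		xfs_buf_relse(bp);	/* unlock and drop our hold */
		return error;
	}
	/* ... inspect the verified contents via bp->b_addr ... */
	xfs_buf_relse(bp);
	return 0;
}
#endif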

/*
 *	If we are not low on memory then do the readahead in a deadlock
 *	safe manner.
 */
void
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	const struct xfs_buf_ops *ops)
{
	if (bdi_read_congested(target->bt_bdev->bd_bdi))
		return;

	xfs_buf_read_map(target, map, nmaps,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
int
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			numblks,
	int			flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	struct xfs_buf		*bp;

	*bpp = NULL;

	bp = xfs_buf_get_uncached(target, numblks, flags);
	if (!bp)
		return -ENOMEM;

	/* set up the buffer for a read IO */
	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;  /* always null for uncached buffers */
	bp->b_maps[0].bm_bn = daddr;
	bp->b_flags |= XBF_READ;
	bp->b_ops = ops;

	xfs_buf_submit_wait(bp);
	if (bp->b_error) {
		int	error = bp->b_error;
		xfs_buf_relse(bp);
		return error;
	}

	*bpp = bp;
	return 0;
}
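
/*
 * Illustrative usage, not part of the original file: on success the caller
 * receives a locked, referenced buffer through the out parameter, e.g.
 * (daddr, numblks and ops are placeholders):
 *
 *	struct xfs_buf	*bp;
 *
 *	error = xfs_buf_read_uncached(btp, daddr, numblks, 0, &bp, ops);
 *	if (error)
 *		return error;
 *	...use bp->b_addr...
 *	xfs_buf_relse(bp);
 */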

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			numblks)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_length = numblks;
	bp->b_io_length = numblks;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if (!is_vmalloc_addr(addr)) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_io_length = BTOBB(len);
	bp->b_length = BTOBB(buflen);

	return 0;
}

xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			numblks,
	int			flags)
{
	unsigned long		page_count;
	int			error, i;
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);

	/* flags might contain irrelevant bits, pass only what we care about */
	bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
	if (unlikely(bp == NULL))
		goto fail;

	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
	error = _xfs_buf_get_pages(bp, page_count);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, 0);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_free_maps(bp);
	kmem_zone_free(xfs_buf_zone, bp);
 fail:
	return NULL;
}

/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 * Release a hold on the specified buffer. If the hold count is 1, the buffer
 * is placed on the LRU or freed (depending on b_lru_ref).
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;
	bool			release;
	bool			freebuf = false;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		if (atomic_dec_and_test(&bp->b_hold)) {
			xfs_buf_ioacct_dec(bp);
			xfs_buf_free(bp);
		}
		return;
	}

	ASSERT(atomic_read(&bp->b_hold) > 0);

	release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
	spin_lock(&bp->b_lock);
	if (!release) {
		/*
		 * Drop the in-flight state if the buffer is already on the LRU
		 * and it holds the only reference. This is racy because we
		 * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
		 * ensures the decrement occurs only once per-buf.
		 */
		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
			xfs_buf_ioacct_dec(bp);
		goto out_unlock;
	}

	/* the last reference has been dropped ... */
	xfs_buf_ioacct_dec(bp);
	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
		/*
		 * If the buffer is added to the LRU take a new reference to the
		 * buffer for the LRU and clear the (now stale) dispose list
		 * state flag
		 */
		if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
			bp->b_state &= ~XFS_BSTATE_DISPOSE;
			atomic_inc(&bp->b_hold);
		}
		spin_unlock(&pag->pag_buf_lock);
	} else {
		/*
		 * most of the time buffers will already be removed from the
		 * LRU, so optimise that case by checking for the
		 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
		 * was on was the disposal list
		 */
		if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
			list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
		} else {
			ASSERT(list_empty(&bp->b_lru));
		}

		ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
		rhashtable_remove_fast(&pag->pag_buf_hash, &bp->b_rhash_head,
				       xfs_buf_hash_params);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
		freebuf = true;
	}

out_unlock:
	spin_unlock(&bp->b_lock);

	if (freebuf)
		xfs_buf_free(bp);
}


/*
 *	Lock a buffer object, if it is not already locked.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we are
 *	being asked to lock a buffer that has been reallocated. Because it is
 *	pinned, we know that the log has not been pushed to disk and hence it
 *	will still be locked.  Rather than continuing to have trylock attempts
 *	fail until someone else pushes the log, push it ourselves before
 *	returning.  This means that the xfsaild will not get stuck trying
 *	to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked) {
		XB_SET_OWNER(bp);
		trace_xfs_buf_trylock(bp, _RET_IP_);
	} else {
		trace_xfs_buf_trylock_fail(bp, _RET_IP_);
	}
	return locked;
}

/*
 *	Lock a buffer object.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we
 *	are being asked to lock a buffer that has been reallocated. Because
 *	it is pinned, we know that the log has not been pushed to disk and
 *	hence it will still be locked. Rather than sleeping until someone
 *	else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

void
xfs_buf_ioend(
	struct xfs_buf	*bp)
{
	bool		read = bp->b_flags & XBF_READ;

	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);

	/*
	 * Pull in IO completion errors now. We are guaranteed to be running
	 * single threaded, so we don't need the lock to read b_io_error.
	 */
	if (!bp->b_error && bp->b_io_error)
		xfs_buf_ioerror(bp, bp->b_io_error);

	/* Only validate buffers that were read without errors */
	if (read && !bp->b_error && bp->b_ops) {
		ASSERT(!bp->b_iodone);
		bp->b_ops->verify_read(bp);
	}

	if (!bp->b_error)
		bp->b_flags |= XBF_DONE;

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
	else
		complete(&bp->b_iowait);
}

static void
xfs_buf_ioend_work(
	struct work_struct	*work)
{
	struct xfs_buf		*bp =
		container_of(work, xfs_buf_t, b_ioend_work);

	xfs_buf_ioend(bp);
}

static void
xfs_buf_ioend_async(
	struct xfs_buf	*bp)
{
	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
	queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error <= 0 && error >= -1000);
	bp->b_error = error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
		(__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
}

int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
			 XBF_WRITE_FAIL | XBF_DONE);

	error = xfs_buf_submit_wait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}
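
/*
 * Illustrative sketch, not part of the original file: a synchronous rewrite
 * of a cached buffer. The caller must already hold the buffer locked, which
 * is the state xfs_buf_get_map()/xfs_buf_read_map() return it in.
 */
#if 0
static int
example_rewrite_block(
	struct xfs_buf	*bp)	/* locked buffer with valid contents */
{
	int		error;

	/* ... modify the contents via bp->b_addr ... */
	error = xfs_bwrite(bp);	/* waits for IO; shuts down the fs on error */
	xfs_buf_relse(bp);	/* unlock and drop our hold */
	return error;
}
#endif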

static void
xfs_buf_bio_end_io(
	struct bio		*bio)
{
	struct xfs_buf		*bp = (struct xfs_buf *)bio->bi_private;

	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
	if (bio->bi_error)
		cmpxchg(&bp->b_io_error, 0, bio->bi_error);

	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend_async(bp);
	bio_put(bio);
}

static void
xfs_buf_ioapply_map(
	struct xfs_buf	*bp,
	int		map,
	int		*buf_offset,
	int		*count,
	int		op,
	int		op_flags)
{
	int		page_index;
	int		total_nr_pages = bp->b_page_count;
	int		nr_pages;
	struct bio	*bio;
	sector_t	sector = bp->b_maps[map].bm_bn;
	int		size;
	int		offset;

	total_nr_pages = bp->b_page_count;

	/* skip the pages in the buffer before the start offset */
	page_index = 0;
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
	}

	/*
	 * Limit the IO size to the length of the current vector, and update the
	 * remaining IO count for the next time around.
	 */
	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*count -= size;
	*buf_offset += size;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = min(total_nr_pages, BIO_MAX_PAGES);

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;
	bio_set_op_attrs(bio, op, op_flags);

	for (; size && nr_pages; nr_pages--, page_index++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
				      offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += BTOBB(nbytes);
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_iter.bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(bio);
		if (size)
			goto next_chunk;
	} else {
		/*
		 * This is guaranteed not to be the last io reference count
		 * because the caller (xfs_buf_submit) holds a count itself.
		 */
		atomic_dec(&bp->b_io_remaining);
		xfs_buf_ioerror(bp, -EIO);
		bio_put(bio);
	}
}

STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
	int		op;
	int		op_flags = 0;
	int		offset;
	int		size;
	int		i;

	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	bp->b_error = 0;

	/*
	 * Initialize the I/O completion workqueue if we haven't yet or the
	 * submitter has not opted to specify a custom one.
	 */
	if (!bp->b_ioend_wq)
		bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;

	if (bp->b_flags & XBF_WRITE) {
		op = REQ_OP_WRITE;
		if (bp->b_flags & XBF_SYNCIO)
			op_flags = REQ_SYNC;
		if (bp->b_flags & XBF_FUA)
			op_flags |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			op_flags |= REQ_PREFLUSH;

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error and
		 * the IO should not be dispatched.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_target->bt_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
		} else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
			struct xfs_mount *mp = bp->b_target->bt_mount;

			/*
			 * non-crc filesystems don't attach verifiers during
			 * log recovery, so don't warn for such filesystems.
			 */
			if (xfs_sb_version_hascrc(&mp->m_sb)) {
				xfs_warn(mp,
					"%s: no ops on block 0x%llx/0x%x",
					__func__, bp->b_bn, bp->b_length);
				xfs_hex_dump(bp->b_addr, 64);
				dump_stack();
			}
		}
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		op = REQ_OP_READ;
		op_flags = REQ_RAHEAD;
	} else {
		op = REQ_OP_READ;
	}

	/* we only use the buffer cache for meta-data */
	op_flags |= REQ_META;

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * _xfs_buf_ioapply_vec() will modify them appropriately for each
	 * subsequent call.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_io_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
		xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
		if (bp->b_error)
			break;
		if (size <= 0)
			break;	/* all done */
	}
	blk_finish_plug(&plug);
}

/*
 * Asynchronous IO submission path. This transfers the buffer lock ownership and
 * the current reference to the IO. It is not safe to reference the buffer after
 * a call to this function unless the caller holds an additional reference
 * itself.
 */
void
xfs_buf_submit(
	struct xfs_buf	*bp)
{
	trace_xfs_buf_submit(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
	ASSERT(bp->b_flags & XBF_ASYNC);

	/* on shutdown we stale and complete the buffer immediately */
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
		return;
	}

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * The caller's reference is released during I/O completion.
	 * This occurs some time after the last b_io_remaining reference is
	 * released, so after we drop our I/O reference we have to have some
	 * other reference to ensure the buffer doesn't go away from underneath
	 * us. Take a direct reference to ensure we have safe access to the
	 * buffer until we are finished with it.
	 */
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	xfs_buf_ioacct_inc(bp);
	_xfs_buf_ioapply(bp);

	/*
	 * If _xfs_buf_ioapply failed, we can get back here with only the IO
	 * reference we took above. If we drop it to zero, run completion so
	 * that we don't return to the caller with completion still pending.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		if (bp->b_error)
			xfs_buf_ioend(bp);
		else
			xfs_buf_ioend_async(bp);
	}

	xfs_buf_rele(bp);
	/* Note: it is not safe to reference bp now we've dropped our ref */
}

/*
 * Synchronous buffer IO submission path, read or write.
 */
int
xfs_buf_submit_wait(
	struct xfs_buf	*bp)
{
	int		error;

	trace_xfs_buf_submit_wait(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));

	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		xfs_buf_ioerror(bp, -EIO);
		xfs_buf_stale(bp);
		bp->b_flags &= ~XBF_DONE;
		return -EIO;
	}

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * For synchronous IO, the IO does not inherit the submitter's reference
	 * count, nor the buffer lock. Hence we cannot release the reference we
	 * are about to take until we've waited for all IO completion to occur,
	 * including any xfs_buf_ioend_async() work that may be pending.
	 */
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);

	/*
	 * make sure we run completion synchronously if it raced with us and is
	 * already complete.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp);

	/* wait for completion before gathering the error from the buffer */
	trace_xfs_buf_iowait(bp, _RET_IP_);
	wait_for_completion(&bp->b_iowait);
	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	error = bp->b_error;

	/*
	 * all done now, we can release the hold that keeps the buffer
	 * referenced for the entire IO.
	 */
	xfs_buf_rele(bp);
	return error;
}

void *
xfs_buf_offset(
	struct xfs_buf		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_addr)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return page_address(page) + (offset & (PAGE_SIZE-1));
}

/*
 *	Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend;

	bend = boff + bsize;
	while (boff < bend) {
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
				      BBTOB(bp->b_io_length) - boff);

		ASSERT((csize + page_offset) <= PAGE_SIZE);

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + page_offset, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + page_offset, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + page_offset, data, csize);
		}

		boff += csize;
		data += csize;
	}
}
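
/*
 * Illustrative usage, not part of the original file: zeroing a byte range of
 * a buffer goes through the same routine with a NULL data pointer:
 *
 *	xfs_buf_iomove(bp, boff, bsize, NULL, XBRW_ZERO);
 *
 * which lets callers avoid touching each backing page directly.
 */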

/*
 *	Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
static enum lru_status
xfs_buftarg_wait_rele(
	struct list_head	*item,
	struct list_lru_one	*lru,
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

	if (atomic_read(&bp->b_hold) > 1) {
		/* need to wait, so skip it this pass */
		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
		return LRU_SKIP;
	}
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;

	/*
	 * clear the LRU reference count so the buffer doesn't get
	 * ignored in xfs_buf_rele().
	 */
	atomic_set(&bp->b_lru_ref, 0);
	bp->b_state |= XFS_BSTATE_DISPOSE;
	list_lru_isolate_move(lru, item, dispose);
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
}
1621 1622 1623 1624
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
1625 1626 1627
	LIST_HEAD(dispose);
	int loop = 0;

1628
	/*
1629 1630 1631 1632 1633 1634 1635 1636 1637 1638
	 * First wait on the buftarg I/O count for all in-flight buffers to be
	 * released. This is critical as new buffers do not make the LRU until
	 * they are released.
	 *
	 * Next, flush the buffer workqueue to ensure all completion processing
	 * has finished. Just waiting on buffer locks is not sufficient for
	 * async IO as the reference count held over IO is not released until
	 * after the buffer lock is dropped. Hence we need to ensure here that
	 * all reference counts have been dropped before we start walking the
	 * LRU list.
1639
	 */
1640 1641
	while (percpu_counter_sum(&btp->bt_io_count))
		delay(100);
1642
	flush_workqueue(btp->bt_mount->m_buf_workqueue);
1643

1644 1645
	/* loop until there is nothing left on the lru list. */
	while (list_lru_count(&btp->bt_lru)) {
1646
		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
1647 1648 1649 1650 1651 1652
			      &dispose, LONG_MAX);

		while (!list_empty(&dispose)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);
1653 1654
			if (bp->b_flags & XBF_WRITE_FAIL) {
				xfs_alert(btp->bt_mount,
1655
"Corruption Alert: Buffer at block 0x%llx had permanent write failures!",
1656
					(long long)bp->b_bn);
1657 1658
				xfs_alert(btp->bt_mount,
"Please run xfs_repair to determine the extent of the problem.");
1659
			}
1660 1661 1662 1663 1664
			xfs_buf_rele(bp);
		}
		if (loop++ != 0)
			delay(100);
	}
1665 1666 1667 1668 1669
}

static enum lru_status
xfs_buftarg_isolate(
	struct list_head	*item,
1670
	struct list_lru_one	*lru,
1671 1672 1673 1674 1675 1676
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

1677 1678 1679 1680 1681 1682
	/*
	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;
1683 1684 1685 1686 1687
	/*
	 * Decrement the b_lru_ref count unless the value is already
	 * zero. If the value is already zero, we need to reclaim the
	 * buffer, otherwise it gets another trip through the LRU.
	 */
1688 1689
	if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
		spin_unlock(&bp->b_lock);
1690
		return LRU_ROTATE;
1691
	}
1692

1693
	bp->b_state |= XFS_BSTATE_DISPOSE;
1694
	list_lru_isolate_move(lru, item, dispose);
1695
	spin_unlock(&bp->b_lock);
1696 1697 1698
	return LRU_REMOVED;
}

1699
static unsigned long
1700
xfs_buftarg_shrink_scan(
1701
	struct shrinker		*shrink,
1702
	struct shrink_control	*sc)
1703
{
1704 1705
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
1706
	LIST_HEAD(dispose);
1707
	unsigned long		freed;
1708

1709 1710
	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
				     xfs_buftarg_isolate, &dispose);
1711 1712

	while (!list_empty(&dispose)) {
1713
		struct xfs_buf *bp;
1714 1715 1716 1717 1718
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

1719 1720 1721
	return freed;
}

1722
static unsigned long
1723 1724 1725 1726 1727 1728
xfs_buftarg_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
1729
	return list_lru_shrink_count(&btp->bt_lru, sc);
1730 1731
}

L
void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);
	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
	percpu_counter_destroy(&btp->bt_io_count);
	list_lru_destroy(&btp->bt_lru);

	xfs_blkdev_issue_flush(btp);

	kmem_free(btp);
}
1747 1748
int
xfs_setsize_buftarg(
L
Linus Torvalds 已提交
1749
	xfs_buftarg_t		*btp,
1750
	unsigned int		sectorsize)
L
Linus Torvalds 已提交
1751
{
1752
	/* Set up metadata sector size info */
1753 1754
	btp->bt_meta_sectorsize = sectorsize;
	btp->bt_meta_sectormask = sectorsize - 1;
L
Linus Torvalds 已提交
1755

1756
	if (set_blocksize(btp->bt_bdev, sectorsize)) {
1757
		xfs_warn(btp->bt_mount,
1758 1759
			"Cannot set_blocksize to %u on device %pg",
			sectorsize, btp->bt_bdev);
D
Dave Chinner 已提交
1760
		return -EINVAL;
L
Linus Torvalds 已提交
1761 1762
	}

1763 1764 1765 1766
	/* Set up device logical sector size mask */
	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;

L
Linus Torvalds 已提交
1767 1768 1769 1770
	return 0;
}

/*
1771 1772 1773
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage.  Play safe.
1774
 */
L
Linus Torvalds 已提交
1775 1776 1777 1778 1779
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
1780
	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
L
Linus Torvalds 已提交
1781 1782 1783 1784
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);

	btp->bt_mount = mp;
	btp->bt_dev = bdev->bd_dev;
	btp->bt_bdev = bdev;

	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error_free;

	if (list_lru_init(&btp->bt_lru))
		goto error_free;

	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
		goto error_lru;

	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&btp->bt_shrinker);
	return btp;

	/* Unwind partially initialised state in reverse order. */
error_lru:
	list_lru_destroy(&btp->bt_lru);
error_free:
	kmem_free(btp);
	return NULL;
}
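
/*
 * Illustrative sketch, not part of the original file: mount-time setup
 * creates one buftarg per underlying block device, loosely modelled on
 * xfs_open_devices().  The function name is hypothetical and error
 * handling is reduced to the minimum.
 */
static int
example_setup_data_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*ddev)
{
	mp->m_ddev_targp = xfs_alloc_buftarg(mp, ddev);
	if (!mp->m_ddev_targp)
		return -ENOMEM;
	return 0;
}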

/*
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been.  Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization.  It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it already had
 * been on the buffer list.
 */
bool
xfs_buf_delwri_queue(
	struct xfs_buf		*bp,
	struct list_head	*list)
{
	ASSERT(xfs_buf_islocked(bp));
	ASSERT(!(bp->b_flags & XBF_READ));

	/*
	 * If the buffer is already marked delwri it is already queued up
	 * by someone else for immediate writeout.  Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
	}

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	/*
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list.  In a rare corner case it
	 * might get re-added to a delwri list after the synchronous writeout,
	 * in which case we just need to re-add the flag here.
	 */
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
	}

	return true;
}
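
/*
 * Illustrative sketch, not part of the original file: the expected calling
 * pattern for the queueing interface above.  Callers keep a private,
 * function-local list, queue locked buffers onto it, and later hand the
 * whole list to one of the submission functions below.  The function name
 * is hypothetical.
 */
static bool
example_queue_for_writeback(
	struct xfs_buf		*bp,
	struct list_head	*my_delwri_list)
{
	bool	queued;

	/* xfs_buf_delwri_queue() asserts that the buffer is locked. */
	xfs_buf_lock(bp);
	queued = xfs_buf_delwri_queue(bp, my_delwri_list);
	xfs_buf_unlock(bp);

	return queued;
}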

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}
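
/*
 * Illustrative sketch, not part of the original file: why xfs_buf_cmp()
 * cannot simply return the 64-bit difference.  Truncating xfs_daddr_t to
 * int can drop high bits or flip the sign, corrupting the sort order.
 * The function name is hypothetical.
 */
static int
example_buggy_buf_cmp(
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);

	/* WRONG: a difference of 0x100000000 truncates to 0 ("equal"). */
	return (int)(ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn);
}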

/*
 * Submit buffers for write.
 *
 * When we have a large buffer list, we do not want to hold all the buffers
 * locked while we block on the request queue waiting for IO dispatch. To
 * minimise lock hold times for lists which may contain thousands of objects,
 * we sort the buffer list before we walk it to lock and submit buffers, and
 * we plug and unplug around the submission loop so the block layer can merge
 * the IO we issue.
 */
static int
xfs_buf_delwri_submit_buffers(
	struct list_head	*buffer_list,
	struct list_head	*wait_list)
{
	struct xfs_buf		*bp, *n;
	LIST_HEAD		(submit_list);
	int			pinned = 0;
	struct blk_plug		plug;

	list_sort(NULL, buffer_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
		if (!wait_list) {
			if (xfs_buf_ispinned(bp)) {
				pinned++;
				continue;
			}
			if (!xfs_buf_trylock(bp))
				continue;
		} else {
			xfs_buf_lock(bp);
		}

		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime.  In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);
			xfs_buf_relse(bp);
			continue;
		}

		trace_xfs_buf_delwri_split(bp, _RET_IP_);

		/*
		 * We do all IO submission async. This means if we need
		 * to wait for IO completion we need to take an extra
		 * reference so the buffer is still valid on the other
		 * side. We need to move the buffer onto the wait list
		 * at this point so the caller can still access it.
		 */
		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
		bp->b_flags |= XBF_WRITE | XBF_ASYNC;
		if (wait_list) {
			xfs_buf_hold(bp);
			list_move_tail(&bp->b_list, wait_list);
		} else
			list_del_init(&bp->b_list);

		xfs_buf_submit(bp);
	}
	blk_finish_plug(&plug);

	return pinned;
}

/*
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers.  This interface
 * is only safely usable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing, as the @buffer_list is consumed in this
 * function.
 */
int
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
{
	return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
}

/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
	LIST_HEAD		(wait_list);
	int			error = 0, error2;
	struct xfs_buf		*bp;

	xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);

	/* Wait for IO to complete. */
	while (!list_empty(&wait_list)) {
		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

		list_del_init(&bp->b_list);

		/* locking the buffer will wait for async IO completion. */
		xfs_buf_lock(bp);
		error2 = bp->b_error;
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
	}

	return error;
}
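
/*
 * Illustrative sketch, not part of the original file: tying the delwri
 * interfaces together.  A caller batches dirty buffers on a local list
 * and then writes them all back synchronously.  The function name is
 * hypothetical.
 */
static int
example_flush_buffers(
	struct xfs_buf		**bps,
	int			nr)
{
	LIST_HEAD		(buffer_list);
	int			i;

	for (i = 0; i < nr; i++) {
		xfs_buf_lock(bps[i]);
		xfs_buf_delwri_queue(bps[i], &buffer_list);
		xfs_buf_unlock(bps[i]);
	}

	/* Sorts, submits and waits; returns the first I/O error seen. */
	return xfs_buf_delwri_submit(&buffer_list);
}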

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	return 0;

 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	kmem_zone_destroy(xfs_buf_zone);
}
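
/*
 * Illustrative sketch, not part of the original file: the expected pairing
 * of the two routines above, loosely modelled on init_xfs_fs() and
 * exit_xfs_fs() in xfs_super.c.  The function names are hypothetical and
 * error handling is reduced to the minimum.
 */
static int __init
example_module_init(void)
{
	int	error;

	error = xfs_buf_init();
	if (error)
		return error;

	/* ... register the filesystem, start workqueues, etc. ... */
	return 0;
}

static void __exit
example_module_exit(void)
{
	/* ... tear down in reverse order of initialisation ... */
	xfs_buf_terminate();
}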