/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trace.h"
#include "xfs_log.h"

static kmem_zone_t *xfs_buf_zone;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : GFP_NOFS) | __GFP_NOWARN)


static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * b_addr is null if the buffer is not mapped, but the code is clever
	 * enough to know it doesn't have to map a single page, so the check has
	 * to be both for b_addr and bp->b_page_count > 1.
	 */
	return bp->b_addr && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * Bump the I/O in flight count on the buftarg if we haven't yet done so for
 * this buffer. The count is incremented once per buffer (per hold cycle)
 * because the corresponding decrement is deferred to buffer release. Buffers
 * can undergo I/O multiple times in a hold-release cycle and per buffer I/O
 * tracking adds unnecessary overhead. This is used for synchronization purposes
 * with unmount (see xfs_wait_buftarg()), so all we really need is a count of
 * in-flight buffers.
 *
 * Buffers that are never released (e.g., superblock, iclog buffers) must set
 * the XBF_NO_IOACCT flag before I/O submission. Otherwise, the buftarg count
 * never reaches zero and unmount hangs indefinitely.
 */
static inline void
xfs_buf_ioacct_inc(
	struct xfs_buf	*bp)
{
	if (bp->b_flags & (XBF_NO_IOACCT|_XBF_IN_FLIGHT))
		return;

	ASSERT(bp->b_flags & XBF_ASYNC);
	bp->b_flags |= _XBF_IN_FLIGHT;
	percpu_counter_inc(&bp->b_target->bt_io_count);
}

/*
 * Clear the in-flight state on a buffer about to be released to the LRU or
 * freed and unaccount from the buftarg.
 */
static inline void
xfs_buf_ioacct_dec(
	struct xfs_buf	*bp)
{
	if (!(bp->b_flags & _XBF_IN_FLIGHT))
		return;

	bp->b_flags &= ~_XBF_IN_FLIGHT;
	percpu_counter_dec(&bp->b_target->bt_io_count);
}
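
/*
 * Illustrative sketch: the two helpers above are used as a pair around buffer
 * I/O.  xfs_buf_submit() calls xfs_buf_ioacct_inc() once per hold cycle before
 * dispatching I/O, and xfs_buf_rele()/xfs_buf_stale() call xfs_buf_ioacct_dec()
 * when the buffer is released or invalidated, so bt_io_count only tracks
 * buffers that still have I/O in flight:
 *
 *	xfs_buf_ioacct_inc(bp);		// counted against bt_io_count
 *	_xfs_buf_ioapply(bp);		// dispatch the bios
 *	...
 *	xfs_buf_rele(bp);		// ioacct_dec happens on release
 */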

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_STALE;

	/*
	 * Clear the delwri status so that a delwri queue walker will not
	 * flush this buffer to disk now that it is stale. The delwri queue has
	 * a reference to the buffer, so this is safe to do.
	 */
	bp->b_flags &= ~_XBF_DELWRI_Q;

	/*
	 * Once the buffer is marked stale and unlocked, a subsequent lookup
	 * could reset b_flags. There is no guarantee that the buffer is
	 * unaccounted (released to LRU) before that occurs. Drop in-flight
	 * status now to preserve accounting consistency.
	 */
	xfs_buf_ioacct_dec(bp);

	spin_lock(&bp->b_lock);
	atomic_set(&bp->b_lru_ref, 0);
	if (!(bp->b_state & XFS_BSTATE_DISPOSE) &&
	    (list_lru_del(&bp->b_target->bt_lru, &bp->b_lru)))
		atomic_dec(&bp->b_hold);

	ASSERT(atomic_read(&bp->b_hold) >= 1);
	spin_unlock(&bp->b_lock);
}

static int
xfs_buf_get_maps(
	struct xfs_buf		*bp,
	int			map_count)
{
	ASSERT(bp->b_maps == NULL);
	bp->b_map_count = map_count;

	if (map_count == 1) {
		bp->b_maps = &bp->__b_map;
		return 0;
	}

	bp->b_maps = kmem_zalloc(map_count * sizeof(struct xfs_buf_map),
				KM_NOFS);
	if (!bp->b_maps)
		return -ENOMEM;
	return 0;
}

/*
 *	Frees b_maps if it was allocated.
 */
static void
xfs_buf_free_maps(
	struct xfs_buf	*bp)
{
	if (bp->b_maps != &bp->__b_map) {
		kmem_free(bp->b_maps);
		bp->b_maps = NULL;
	}
}

struct xfs_buf *
_xfs_buf_alloc(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	int			error;
	int			i;

	bp = kmem_zone_zalloc(xfs_buf_zone, KM_NOFS);
	if (unlikely(!bp))
		return NULL;

	/*
	 * We don't want certain flags to appear in b_flags unless they are
	 * specifically set by later operations on the buffer.
	 */
	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);

	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	spin_lock_init(&bp->b_lock);
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_flags = flags;

	/*
	 * Set length and io_length to the same value initially.
	 * I/O routines should use io_length, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	error = xfs_buf_get_maps(bp, nmaps);
	if (error)  {
		kmem_zone_free(xfs_buf_zone, bp);
		return NULL;
	}

	bp->b_bn = map[0].bm_bn;
	bp->b_length = 0;
	for (i = 0; i < nmaps; i++) {
		bp->b_maps[i].bm_bn = map[i].bm_bn;
		bp->b_maps[i].bm_len = map[i].bm_len;
		bp->b_length += map[i].bm_len;
	}
	bp->b_io_length = bp->b_length;

	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(target->bt_mount, xb_create);
	trace_xfs_buf_init(bp, _RET_IP_);

	return bp;
}

/*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
						 page_count, KM_NOFS);
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 *	Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 *	Releases the specified buffer.
 *
 * 	The modification state of any associated pages is left unchanged.
 * 	The buffer must not be on any hash - use xfs_buf_rele instead for
 * 	hashed and refcounted buffers
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
C
Christoph Hellwig 已提交
307
	trace_xfs_buf_free(bp, _RET_IP_);
L
Linus Torvalds 已提交
308

309 310
	ASSERT(list_empty(&bp->b_lru));

311
	if (bp->b_flags & _XBF_PAGES) {
L
Linus Torvalds 已提交
312 313
		uint		i;

314
		if (xfs_buf_is_vmapped(bp))
A
Alex Elder 已提交
315 316
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);
L
Linus Torvalds 已提交
317

318 319 320
		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

321
			__free_page(page);
322
		}
323 324
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
325
	_xfs_buf_free_pages(bp);
326
	xfs_buf_free_maps(bp);
327
	kmem_zone_free(xfs_buf_zone, bp);
L
Linus Torvalds 已提交
328 329 330
}

/*
331
 * Allocates all the pages for the buffer in question and builds its page list.
L
Linus Torvalds 已提交
332 333
 */
STATIC int
334
xfs_buf_allocate_memory(
L
Linus Torvalds 已提交
335 336 337
	xfs_buf_t		*bp,
	uint			flags)
{
338
	size_t			size;
L
Linus Torvalds 已提交
339
	size_t			nbytes, offset;
340
	gfp_t			gfp_mask = xb_to_gfp(flags);
L
Linus Torvalds 已提交
341
	unsigned short		page_count, i;
D
Dave Chinner 已提交
342
	xfs_off_t		start, end;
L
Linus Torvalds 已提交
343 344
	int			error;

345 346 347 348 349
	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
D
Dave Chinner 已提交
350 351
	size = BBTOB(bp->b_length);
	if (size < PAGE_SIZE) {
D
Dave Chinner 已提交
352
		bp->b_addr = kmem_alloc(size, KM_NOFS);
353 354 355 356 357
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

D
Dave Chinner 已提交
358
		if (((unsigned long)(bp->b_addr + size - 1) & PAGE_MASK) !=
359 360 361 362 363 364 365 366 367 368
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
369
		bp->b_flags |= _XBF_KMEM;
370 371 372 373
		return 0;
	}

use_alloc_page:
374 375
	start = BBTOB(bp->b_maps[0].bm_bn) >> PAGE_SHIFT;
	end = (BBTOB(bp->b_maps[0].bm_bn + bp->b_length) + PAGE_SIZE - 1)
376
								>> PAGE_SHIFT;
D
Dave Chinner 已提交
377
	page_count = end - start;
378
	error = _xfs_buf_get_pages(bp, page_count);
L
Linus Torvalds 已提交
379 380 381
	if (unlikely(error))
		return error;

382
	offset = bp->b_offset;
383
	bp->b_flags |= _XBF_PAGES;
L
Linus Torvalds 已提交
384

385
	for (i = 0; i < bp->b_page_count; i++) {
L
Linus Torvalds 已提交
386 387
		struct page	*page;
		uint		retries = 0;
388 389
retry:
		page = alloc_page(gfp_mask);
L
Linus Torvalds 已提交
390
		if (unlikely(page == NULL)) {
391 392
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
D
Dave Chinner 已提交
393
				error = -ENOMEM;
394
				goto out_free_pages;
L
Linus Torvalds 已提交
395 396 397 398 399 400 401 402 403
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
404
				xfs_err(NULL,
405 406
		"%s(%u) possible memory allocation deadlock in %s (mode:0x%x)",
					current->comm, current->pid,
407
					__func__, gfp_mask);
L
Linus Torvalds 已提交
408

409
			XFS_STATS_INC(bp->b_target->bt_mount, xb_page_retries);
410
			congestion_wait(BLK_RW_ASYNC, HZ/50);
L
Linus Torvalds 已提交
411 412 413
			goto retry;
		}

414
		XFS_STATS_INC(bp->b_target->bt_mount, xb_page_found);
L
Linus Torvalds 已提交
415

416
		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
L
Linus Torvalds 已提交
417
		size -= nbytes;
418
		bp->b_pages[i] = page;
L
Linus Torvalds 已提交
419 420
		offset = 0;
	}
421
	return 0;
L
Linus Torvalds 已提交
422

423 424 425
out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
L
Linus Torvalds 已提交
426 427 428 429
	return error;
}

/*
L
Lucas De Marchi 已提交
430
 *	Map buffer into kernel address-space if necessary.
L
Linus Torvalds 已提交
431 432
 */
STATIC int
433
_xfs_buf_map_pages(
L
Linus Torvalds 已提交
434 435 436
	xfs_buf_t		*bp,
	uint			flags)
{
437
	ASSERT(bp->b_flags & _XBF_PAGES);
438
	if (bp->b_page_count == 1) {
439
		/* A single page buffer is always mappable */
440
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
441 442 443
	} else if (flags & XBF_UNMAPPED) {
		bp->b_addr = NULL;
	} else {
444
		int retried = 0;
445 446 447 448 449 450 451 452 453 454 455
		unsigned noio_flag;

		/*
		 * vm_map_ram() will allocate auxiliary structures (e.g.
		 * pagetables) with GFP_KERNEL, yet we are likely to be under
		 * GFP_NOFS context here. Hence we need to tell memory reclaim
		 * that we are in such a context via PF_MEMALLOC_NOIO to prevent
		 * memory reclaim re-entering the filesystem here and
		 * potentially deadlocking.
		 */
		noio_flag = memalloc_noio_save();
456 457 458 459 460 461 462
		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);
463
		memalloc_noio_restore(noio_flag);
464 465

		if (!bp->b_addr)
L
Linus Torvalds 已提交
466
			return -ENOMEM;
467
		bp->b_addr += bp->b_offset;
L
Linus Torvalds 已提交
468 469 470 471 472 473 474 475 476 477
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */

/*
478
 *	Look up, and creates if absent, a lockable buffer for
L
Linus Torvalds 已提交
479
 *	a given range of an inode.  The buffer is returned
480
 *	locked.	No I/O is implied by this call.
L
Linus Torvalds 已提交
481 482
 */
xfs_buf_t *
483
_xfs_buf_find(
484
	struct xfs_buftarg	*btp,
485 486
	struct xfs_buf_map	*map,
	int			nmaps,
487 488
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
L
Linus Torvalds 已提交
489
{
490 491 492 493
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;
494
	xfs_daddr_t		blkno = map[0].bm_bn;
495
	xfs_daddr_t		eofs;
496 497
	int			numblks = 0;
	int			i;
L
Linus Torvalds 已提交
498

499 500
	for (i = 0; i < nmaps; i++)
		numblks += map[i].bm_len;
L
Linus Torvalds 已提交
501 502

	/* Check for IOs smaller than the sector size / not sector aligned */
D
Dave Chinner 已提交
503
	ASSERT(!(BBTOB(numblks) < btp->bt_meta_sectorsize));
504
	ASSERT(!(BBTOB(blkno) & (xfs_off_t)btp->bt_meta_sectormask));
L
Linus Torvalds 已提交
505

506 507 508 509 510
	/*
	 * Corrupted block numbers can get through to here, unfortunately, so we
	 * have to check that the buffer falls within the filesystem bounds.
	 */
	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
511
	if (blkno < 0 || blkno >= eofs) {
512
		/*
D
Dave Chinner 已提交
513
		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
514 515 516 517 518 519
		 * but none of the higher level infrastructure supports
		 * returning a specific error on buffer lookup failures.
		 */
		xfs_alert(btp->bt_mount,
			  "%s: Block out of range: block 0x%llx, EOFS 0x%llx ",
			  __func__, blkno, eofs);
D
Dave Chinner 已提交
520
		WARN_ON(1);
521 522 523
		return NULL;
	}

524 525
	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
526
				xfs_daddr_to_agno(btp->bt_mount, blkno));
527 528 529 530 531 532 533 534 535 536

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

D
Dave Chinner 已提交
537
		if (blkno < bp->b_bn)
538
			rbp = &(*rbp)->rb_left;
D
Dave Chinner 已提交
539
		else if (blkno > bp->b_bn)
540 541 542
			rbp = &(*rbp)->rb_right;
		else {
			/*
D
Dave Chinner 已提交
543
			 * found a block number match. If the range doesn't
544 545 546 547 548 549
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
550
			if (bp->b_length != numblks) {
551 552 553 554
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
555
			atomic_inc(&bp->b_hold);
L
Linus Torvalds 已提交
556 557 558 559 560
			goto found;
		}
	}

	/* No match found */
561
	if (new_bp) {
562 563 564 565 566
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
L
Linus Torvalds 已提交
567
	} else {
568
		XFS_STATS_INC(btp->bt_mount, xb_miss_locked);
569 570
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
L
Linus Torvalds 已提交
571
	}
572
	return new_bp;
L
Linus Torvalds 已提交
573 574

found:
575 576
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);
L
Linus Torvalds 已提交
577

578 579
	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
580
			xfs_buf_rele(bp);
581
			XFS_STATS_INC(btp->bt_mount, xb_busy_locked);
582
			return NULL;
L
Linus Torvalds 已提交
583
		}
584
		xfs_buf_lock(bp);
585
		XFS_STATS_INC(btp->bt_mount, xb_get_locked_waited);
L
Linus Torvalds 已提交
586 587
	}

588 589 590 591 592
	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
593 594
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
D
Dave Chinner 已提交
595
		ASSERT(bp->b_iodone == NULL);
596
		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
597
		bp->b_ops = NULL;
598
	}
C
Christoph Hellwig 已提交
599 600

	trace_xfs_buf_find(bp, flags, _RET_IP_);
601
	XFS_STATS_INC(btp->bt_mount, xb_get_locked);
602
	return bp;
L
Linus Torvalds 已提交
603 604 605
}

/*
606 607 608
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
L
Linus Torvalds 已提交
609
 */
610
struct xfs_buf *
611 612 613 614
xfs_buf_get_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
615
	xfs_buf_flags_t		flags)
L
Linus Torvalds 已提交
616
{
617 618
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
619
	int			error = 0;
L
Linus Torvalds 已提交
620

621
	bp = _xfs_buf_find(target, map, nmaps, flags, NULL);
622 623 624
	if (likely(bp))
		goto found;

625
	new_bp = _xfs_buf_alloc(target, map, nmaps, flags);
626
	if (unlikely(!new_bp))
L
Linus Torvalds 已提交
627 628
		return NULL;

629 630
	error = xfs_buf_allocate_memory(new_bp, flags);
	if (error) {
631
		xfs_buf_free(new_bp);
632 633 634
		return NULL;
	}

635
	bp = _xfs_buf_find(target, map, nmaps, flags, new_bp);
636
	if (!bp) {
637
		xfs_buf_free(new_bp);
638 639 640
		return NULL;
	}

641 642
	if (bp != new_bp)
		xfs_buf_free(new_bp);
L
Linus Torvalds 已提交
643

644
found:
645
	if (!bp->b_addr) {
646
		error = _xfs_buf_map_pages(bp, flags);
L
Linus Torvalds 已提交
647
		if (unlikely(error)) {
648
			xfs_warn(target->bt_mount,
649
				"%s: failed to map pagesn", __func__);
D
Dave Chinner 已提交
650 651
			xfs_buf_relse(bp);
			return NULL;
L
Linus Torvalds 已提交
652 653 654
		}
	}

655 656 657 658 659 660 661
	/*
	 * Clear b_error if this is a lookup from a caller that doesn't expect
	 * valid data to be found in the buffer.
	 */
	if (!(flags & XBF_READ))
		xfs_buf_ioerror(bp, 0);

662
	XFS_STATS_INC(target->bt_mount, xb_get);
C
Christoph Hellwig 已提交
663
	trace_xfs_buf_get(bp, flags, _RET_IP_);
664
	return bp;
L
Linus Torvalds 已提交
665 666
}
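
/*
 * Usage sketch (illustrative, following the single-range wrappers in
 * xfs_buf.h): callers that only need one contiguous range normally build a
 * single map and call the _map variant, roughly:
 *
 *	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
 *	bp = xfs_buf_get_map(target, &map, 1, flags);
 *	if (bp) {
 *		...use bp->b_addr...
 *		xfs_buf_relse(bp);	// unlock and drop the reference
 *	}
 */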

C
Christoph Hellwig 已提交
667 668 669 670 671
STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
672
	ASSERT(!(flags & XBF_WRITE));
673
	ASSERT(bp->b_maps[0].bm_bn != XFS_BUF_DADDR_NULL);
C
Christoph Hellwig 已提交
674

675
	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_READ_AHEAD);
676
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
C
Christoph Hellwig 已提交
677

678 679
	if (flags & XBF_ASYNC) {
		xfs_buf_submit(bp);
680
		return 0;
681 682
	}
	return xfs_buf_submit_wait(bp);
C
Christoph Hellwig 已提交
683 684
}

L
Linus Torvalds 已提交
685
xfs_buf_t *
686 687 688 689
xfs_buf_read_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
	int			nmaps,
690
	xfs_buf_flags_t		flags,
691
	const struct xfs_buf_ops *ops)
L
Linus Torvalds 已提交
692
{
693
	struct xfs_buf		*bp;
694 695 696

	flags |= XBF_READ;

697
	bp = xfs_buf_get_map(target, map, nmaps, flags);
698
	if (bp) {
C
Christoph Hellwig 已提交
699 700
		trace_xfs_buf_read(bp, flags, _RET_IP_);

701
		if (!(bp->b_flags & XBF_DONE)) {
702
			XFS_STATS_INC(target->bt_mount, xb_get_read);
703
			bp->b_ops = ops;
C
Christoph Hellwig 已提交
704
			_xfs_buf_read(bp, flags);
705
		} else if (flags & XBF_ASYNC) {
L
Linus Torvalds 已提交
706 707 708 709
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
D
Dave Chinner 已提交
710 711
			xfs_buf_relse(bp);
			return NULL;
L
Linus Torvalds 已提交
712 713
		} else {
			/* We do not want read in the flags */
714
			bp->b_flags &= ~XBF_READ;
L
Linus Torvalds 已提交
715 716 717
		}
	}

718
	return bp;
L
Linus Torvalds 已提交
719 720 721
}
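
/*
 * Reader usage sketch (illustrative): metadata readers pass a verifier through
 * the ops argument so the contents are checked before the buffer is marked
 * done, e.g. roughly:
 *
 *	bp = xfs_buf_read_map(target, &map, 1, 0, &xfs_sb_buf_ops);
 *	if (bp && !bp->b_error)
 *		...contents already checked by ops->verify_read()...
 */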

/*
722 723
 *	If we are not low on memory then do the readahead in a deadlock
 *	safe manner.
L
Linus Torvalds 已提交
724 725
 */
void
726 727 728
xfs_buf_readahead_map(
	struct xfs_buftarg	*target,
	struct xfs_buf_map	*map,
729
	int			nmaps,
730
	const struct xfs_buf_ops *ops)
L
Linus Torvalds 已提交
731
{
732
	if (bdi_read_congested(target->bt_bdi))
L
Linus Torvalds 已提交
733 734
		return;

735
	xfs_buf_read_map(target, map, nmaps,
736
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD, ops);
L
Linus Torvalds 已提交
737 738
}

739 740 741 742
/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
743
int
744 745 746
xfs_buf_read_uncached(
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
747
	size_t			numblks,
748
	int			flags,
749
	struct xfs_buf		**bpp,
750
	const struct xfs_buf_ops *ops)
751
{
752
	struct xfs_buf		*bp;
753

754 755
	*bpp = NULL;

756
	bp = xfs_buf_get_uncached(target, numblks, flags);
757
	if (!bp)
758
		return -ENOMEM;
759 760

	/* set up the buffer for a read IO */
761
	ASSERT(bp->b_map_count == 1);
762
	bp->b_bn = XFS_BUF_DADDR_NULL;  /* always null for uncached buffers */
763
	bp->b_maps[0].bm_bn = daddr;
764
	bp->b_flags |= XBF_READ;
765
	bp->b_ops = ops;
766

767
	xfs_buf_submit_wait(bp);
768 769
	if (bp->b_error) {
		int	error = bp->b_error;
C
Christoph Hellwig 已提交
770
		xfs_buf_relse(bp);
771
		return error;
C
Christoph Hellwig 已提交
772
	}
773 774 775

	*bpp = bp;
	return 0;
L
Linus Torvalds 已提交
776 777
}

778 779 780 781 782 783 784
/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
785
	size_t			numblks)
786 787 788 789 790 791 792
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
793
	bp->b_length = numblks;
794
	bp->b_io_length = numblks;
795 796

	ASSERT(bp->b_map_count == 1);
797
	bp->b_bn = XFS_BUF_DADDR_NULL;
798 799
	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
	bp->b_maps[0].bm_len = bp->b_length;
800 801
}

L
Linus Torvalds 已提交
802 803 804 805
static inline struct page *
mem_to_page(
	void			*addr)
{
806
	if ((!is_vmalloc_addr(addr))) {
L
Linus Torvalds 已提交
807 808 809 810 811 812 813
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
814 815
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
L
Linus Torvalds 已提交
816 817 818 819 820
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
821 822 823
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
L
Linus Torvalds 已提交
824 825
	int			page_count;

826
	pageaddr = (unsigned long)mem & PAGE_MASK;
827
	offset = (unsigned long)mem - pageaddr;
828 829
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;
L
Linus Torvalds 已提交
830 831

	/* Free any previous set of page pointers */
832 833
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);
L
Linus Torvalds 已提交
834

835 836
	bp->b_pages = NULL;
	bp->b_addr = mem;
L
Linus Torvalds 已提交
837

838
	rval = _xfs_buf_get_pages(bp, page_count);
L
Linus Torvalds 已提交
839 840 841
	if (rval)
		return rval;

842
	bp->b_offset = offset;
843 844 845

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
846
		pageaddr += PAGE_SIZE;
L
Linus Torvalds 已提交
847 848
	}

849
	bp->b_io_length = BTOBB(len);
850
	bp->b_length = BTOBB(buflen);
L
Linus Torvalds 已提交
851 852 853 854 855

	return 0;
}

xfs_buf_t *
856 857
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
858
	size_t			numblks,
859
	int			flags)
L
Linus Torvalds 已提交
860
{
861
	unsigned long		page_count;
862
	int			error, i;
863 864
	struct xfs_buf		*bp;
	DEFINE_SINGLE_BUF_MAP(map, XFS_BUF_DADDR_NULL, numblks);
L
Linus Torvalds 已提交
865

866 867
	/* flags might contain irrelevant bits, pass only what we care about */
	bp = _xfs_buf_alloc(target, &map, 1, flags & XBF_NO_IOACCT);
L
Linus Torvalds 已提交
868 869 870
	if (unlikely(bp == NULL))
		goto fail;

871
	page_count = PAGE_ALIGN(numblks << BBSHIFT) >> PAGE_SHIFT;
872
	error = _xfs_buf_get_pages(bp, page_count);
873
	if (error)
L
Linus Torvalds 已提交
874 875
		goto fail_free_buf;

876
	for (i = 0; i < page_count; i++) {
877
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
878 879
		if (!bp->b_pages[i])
			goto fail_free_mem;
L
Linus Torvalds 已提交
880
	}
881
	bp->b_flags |= _XBF_PAGES;
L
Linus Torvalds 已提交
882

883
	error = _xfs_buf_map_pages(bp, 0);
884
	if (unlikely(error)) {
885
		xfs_warn(target->bt_mount,
886
			"%s: failed to map pages", __func__);
L
Linus Torvalds 已提交
887
		goto fail_free_mem;
888
	}
L
Linus Torvalds 已提交
889

890
	trace_xfs_buf_get_uncached(bp, _RET_IP_);
L
Linus Torvalds 已提交
891
	return bp;
892

L
Linus Torvalds 已提交
893
 fail_free_mem:
894 895
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
896
	_xfs_buf_free_pages(bp);
L
Linus Torvalds 已提交
897
 fail_free_buf:
898
	xfs_buf_free_maps(bp);
899
	kmem_zone_free(xfs_buf_zone, bp);
L
Linus Torvalds 已提交
900 901 902 903 904 905 906 907 908 909
 fail:
	return NULL;
}

/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
910 911
xfs_buf_hold(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
912
{
C
Christoph Hellwig 已提交
913
	trace_xfs_buf_hold(bp, _RET_IP_);
914
	atomic_inc(&bp->b_hold);
L
Linus Torvalds 已提交
915 916 917
}
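
/*
 * Hold/release sketch (illustrative): every xfs_buf_hold() must be balanced by
 * an xfs_buf_rele(), while a locked buffer is normally dropped with
 * xfs_buf_relse(), which unlocks it and then releases the hold:
 *
 *	xfs_buf_hold(bp);		// keep bp alive across async completion
 *	...
 *	xfs_buf_rele(bp);		// may move bp to the LRU or free it
 */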

/*
918 919
 * Release a hold on the specified buffer. If the hold count is 1, the buffer is
 * placed on LRU or freed (depending on b_lru_ref).
L
Linus Torvalds 已提交
920 921
 */
void
922 923
xfs_buf_rele(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
924
{
925
	struct xfs_perag	*pag = bp->b_pag;
926 927
	bool			release;
	bool			freebuf = false;
L
Linus Torvalds 已提交
928

C
Christoph Hellwig 已提交
929
	trace_xfs_buf_rele(bp, _RET_IP_);
L
Linus Torvalds 已提交
930

931
	if (!pag) {
932
		ASSERT(list_empty(&bp->b_lru));
933
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
934 935
		if (atomic_dec_and_test(&bp->b_hold)) {
			xfs_buf_ioacct_dec(bp);
936
			xfs_buf_free(bp);
937
		}
938 939 940
		return;
	}

941
	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
942

943
	ASSERT(atomic_read(&bp->b_hold) > 0);
944

945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969
	release = atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock);
	spin_lock(&bp->b_lock);
	if (!release) {
		/*
		 * Drop the in-flight state if the buffer is already on the LRU
		 * and it holds the only reference. This is racy because we
		 * haven't acquired the pag lock, but the use of _XBF_IN_FLIGHT
		 * ensures the decrement occurs only once per-buf.
		 */
		if ((atomic_read(&bp->b_hold) == 1) && !list_empty(&bp->b_lru))
			xfs_buf_ioacct_dec(bp);
		goto out_unlock;
	}

	/* the last reference has been dropped ... */
	xfs_buf_ioacct_dec(bp);
	if (!(bp->b_flags & XBF_STALE) && atomic_read(&bp->b_lru_ref)) {
		/*
		 * If the buffer is added to the LRU take a new reference to the
		 * buffer for the LRU and clear the (now stale) dispose list
		 * state flag
		 */
		if (list_lru_add(&bp->b_target->bt_lru, &bp->b_lru)) {
			bp->b_state &= ~XFS_BSTATE_DISPOSE;
			atomic_inc(&bp->b_hold);
L
Linus Torvalds 已提交
970
		}
971 972 973 974 975 976 977 978 979 980 981 982
		spin_unlock(&pag->pag_buf_lock);
	} else {
		/*
		 * most of the time buffers will already be removed from the
		 * LRU, so optimise that case by checking for the
		 * XFS_BSTATE_DISPOSE flag indicating the last list the buffer
		 * was on was the disposal list
		 */
		if (!(bp->b_state & XFS_BSTATE_DISPOSE)) {
			list_lru_del(&bp->b_target->bt_lru, &bp->b_lru);
		} else {
			ASSERT(list_empty(&bp->b_lru));
L
Linus Torvalds 已提交
983
		}
984 985 986 987 988 989

		ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
		rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
		freebuf = true;
L
Linus Torvalds 已提交
990
	}
991 992 993 994 995 996

out_unlock:
	spin_unlock(&bp->b_lock);

	if (freebuf)
		xfs_buf_free(bp);
L
Linus Torvalds 已提交
997 998 999 1000
}


/*
1001
 *	Lock a buffer object, if it is not already locked.
1002 1003 1004 1005 1006 1007 1008 1009
 *
 *	If we come across a stale, pinned, locked buffer, we know that we are
 *	being asked to lock a buffer that has been reallocated. Because it is
 *	pinned, we know that the log has not been pushed to disk and hence it
 *	will still be locked.  Rather than continuing to have trylock attempts
 *	fail until someone else pushes the log, push it ourselves before
 *	returning.  This means that the xfsaild will not get stuck trying
 *	to push on stale inode buffers.
L
Linus Torvalds 已提交
1010 1011
 */
int
1012 1013
xfs_buf_trylock(
	struct xfs_buf		*bp)
L
Linus Torvalds 已提交
1014 1015 1016
{
	int			locked;

1017
	locked = down_trylock(&bp->b_sema) == 0;
1018
	if (locked) {
1019
		XB_SET_OWNER(bp);
1020 1021 1022 1023
		trace_xfs_buf_trylock(bp, _RET_IP_);
	} else {
		trace_xfs_buf_trylock_fail(bp, _RET_IP_);
	}
1024
	return locked;
L
Linus Torvalds 已提交
1025 1026 1027
}

/*
1028
 *	Lock a buffer object.
1029 1030 1031 1032 1033 1034
 *
 *	If we come across a stale, pinned, locked buffer, we know that we
 *	are being asked to lock a buffer that has been reallocated. Because
 *	it is pinned, we know that the log has not been pushed to disk and
 *	hence it will still be locked. Rather than sleeping until someone
 *	else pushes the log, push it ourselves before trying to get the lock.
L
Linus Torvalds 已提交
1035
 */
1036 1037
void
xfs_buf_lock(
1038
	struct xfs_buf		*bp)
L
Linus Torvalds 已提交
1039
{
C
Christoph Hellwig 已提交
1040 1041
	trace_xfs_buf_lock(bp, _RET_IP_);

1042
	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
1043
		xfs_log_force(bp->b_target->bt_mount, 0);
1044 1045
	down(&bp->b_sema);
	XB_SET_OWNER(bp);
C
Christoph Hellwig 已提交
1046 1047

	trace_xfs_buf_lock_done(bp, _RET_IP_);
L
Linus Torvalds 已提交
1048 1049 1050
}

void
1051
xfs_buf_unlock(
1052
	struct xfs_buf		*bp)
L
Linus Torvalds 已提交
1053
{
1054 1055
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);
C
Christoph Hellwig 已提交
1056 1057

	trace_xfs_buf_unlock(bp, _RET_IP_);
L
Linus Torvalds 已提交
1058 1059
}

1060 1061 1062
STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
1063 1064 1065
{
	DECLARE_WAITQUEUE	(wait, current);

1066
	if (atomic_read(&bp->b_pin_count) == 0)
L
Linus Torvalds 已提交
1067 1068
		return;

1069
	add_wait_queue(&bp->b_waiters, &wait);
L
Linus Torvalds 已提交
1070 1071
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
1072
		if (atomic_read(&bp->b_pin_count) == 0)
L
Linus Torvalds 已提交
1073
			break;
J
Jens Axboe 已提交
1074
		io_schedule();
L
Linus Torvalds 已提交
1075
	}
1076
	remove_wait_queue(&bp->b_waiters, &wait);
L
Linus Torvalds 已提交
1077 1078 1079 1080 1081 1082 1083
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

1084 1085 1086
void
xfs_buf_ioend(
	struct xfs_buf	*bp)
L
Linus Torvalds 已提交
1087
{
1088 1089 1090
	bool		read = bp->b_flags & XBF_READ;

	trace_xfs_buf_iodone(bp, _RET_IP_);
1091 1092

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1093

1094 1095 1096 1097 1098 1099 1100
	/*
	 * Pull in IO completion errors now. We are guaranteed to be running
	 * single threaded, so we don't need the lock to read b_io_error.
	 */
	if (!bp->b_error && bp->b_io_error)
		xfs_buf_ioerror(bp, bp->b_io_error);

1101 1102 1103
	/* Only validate buffers that were read without errors */
	if (read && !bp->b_error && bp->b_ops) {
		ASSERT(!bp->b_iodone);
1104
		bp->b_ops->verify_read(bp);
1105 1106 1107 1108
	}

	if (!bp->b_error)
		bp->b_flags |= XBF_DONE;
L
Linus Torvalds 已提交
1109

1110
	if (bp->b_iodone)
1111 1112
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
L
Linus Torvalds 已提交
1113
		xfs_buf_relse(bp);
1114
	else
1115
		complete(&bp->b_iowait);
L
Linus Torvalds 已提交
1116 1117
}

1118 1119 1120
static void
xfs_buf_ioend_work(
	struct work_struct	*work)
L
Linus Torvalds 已提交
1121
{
1122
	struct xfs_buf		*bp =
1123
		container_of(work, xfs_buf_t, b_ioend_work);
C
Christoph Hellwig 已提交
1124

1125 1126
	xfs_buf_ioend(bp);
}
L
Linus Torvalds 已提交
1127

1128
static void
1129 1130 1131
xfs_buf_ioend_async(
	struct xfs_buf	*bp)
{
1132 1133
	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
	queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
L
Linus Torvalds 已提交
1134 1135 1136
}

void
1137 1138 1139
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
L
Linus Torvalds 已提交
1140
{
D
Dave Chinner 已提交
1141 1142
	ASSERT(error <= 0 && error >= -1000);
	bp->b_error = error;
C
Christoph Hellwig 已提交
1143
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
L
Linus Torvalds 已提交
1144 1145
}

1146 1147 1148 1149 1150 1151
void
xfs_buf_ioerror_alert(
	struct xfs_buf		*bp,
	const char		*func)
{
	xfs_alert(bp->b_target->bt_mount,
1152
"metadata I/O error: block 0x%llx (\"%s\") error %d numblks %d",
D
Dave Chinner 已提交
1153
		(__uint64_t)XFS_BUF_ADDR(bp), func, -bp->b_error, bp->b_length);
1154 1155
}

1156 1157 1158 1159 1160 1161 1162 1163 1164
int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	ASSERT(xfs_buf_islocked(bp));

	bp->b_flags |= XBF_WRITE;
D
Dave Chinner 已提交
1165 1166
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q |
			 XBF_WRITE_FAIL | XBF_DONE);
1167

1168
	error = xfs_buf_submit_wait(bp);
1169 1170 1171 1172 1173 1174 1175
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}
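
/*
 * Write path sketch (illustrative): xfs_bwrite() is the synchronous write
 * entry point; the caller must already hold the buffer lock and typically
 * releases the buffer afterwards:
 *
 *	error = xfs_bwrite(bp);		// waits for completion, may shut down fs
 *	xfs_buf_relse(bp);
 */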

1176
static void
1177
xfs_buf_bio_end_io(
1178
	struct bio		*bio)
L
Linus Torvalds 已提交
1179
{
1180
	struct xfs_buf		*bp = (struct xfs_buf *)bio->bi_private;
L
Linus Torvalds 已提交
1181

1182 1183 1184 1185
	/*
	 * don't overwrite existing errors - otherwise we can lose errors on
	 * buffers that require multiple bios to complete.
	 */
1186 1187
	if (bio->bi_error)
		cmpxchg(&bp->b_io_error, 0, bio->bi_error);
L
Linus Torvalds 已提交
1188

1189
	if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
1190 1191
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

1192 1193
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend_async(bp);
L
Linus Torvalds 已提交
1194 1195 1196
	bio_put(bio);
}

1197 1198 1199 1200 1201 1202
static void
xfs_buf_ioapply_map(
	struct xfs_buf	*bp,
	int		map,
	int		*buf_offset,
	int		*count,
M
Mike Christie 已提交
1203 1204
	int		op,
	int		op_flags)
L
Linus Torvalds 已提交
1205
{
1206 1207 1208 1209 1210 1211 1212
	int		page_index;
	int		total_nr_pages = bp->b_page_count;
	int		nr_pages;
	struct bio	*bio;
	sector_t	sector =  bp->b_maps[map].bm_bn;
	int		size;
	int		offset;
L
Linus Torvalds 已提交
1213

1214
	total_nr_pages = bp->b_page_count;
L
Linus Torvalds 已提交
1215

1216 1217 1218 1219 1220 1221
	/* skip the pages in the buffer before the start offset */
	page_index = 0;
	offset = *buf_offset;
	while (offset >= PAGE_SIZE) {
		page_index++;
		offset -= PAGE_SIZE;
1222 1223
	}

1224 1225 1226 1227 1228 1229 1230
	/*
	 * Limit the IO size to the length of the current vector, and update the
	 * remaining IO count for the next time around.
	 */
	size = min_t(int, BBTOB(bp->b_maps[map].bm_len), *count);
	*count -= size;
	*buf_offset += size;
1231

L
Linus Torvalds 已提交
1232
next_chunk:
1233
	atomic_inc(&bp->b_io_remaining);
1234
	nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
L
Linus Torvalds 已提交
1235 1236

	bio = bio_alloc(GFP_NOIO, nr_pages);
1237
	bio->bi_bdev = bp->b_target->bt_bdev;
1238
	bio->bi_iter.bi_sector = sector;
1239 1240
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;
M
Mike Christie 已提交
1241
	bio_set_op_attrs(bio, op, op_flags);
1242

1243
	for (; size && nr_pages; nr_pages--, page_index++) {
1244
		int	rbytes, nbytes = PAGE_SIZE - offset;
L
Linus Torvalds 已提交
1245 1246 1247 1248

		if (nbytes > size)
			nbytes = size;

1249 1250
		rbytes = bio_add_page(bio, bp->b_pages[page_index], nbytes,
				      offset);
1251
		if (rbytes < nbytes)
L
Linus Torvalds 已提交
1252 1253 1254
			break;

		offset = 0;
1255
		sector += BTOBB(nbytes);
L
Linus Torvalds 已提交
1256 1257 1258 1259
		size -= nbytes;
		total_nr_pages--;
	}

1260
	if (likely(bio->bi_iter.bi_size)) {
1261 1262 1263 1264
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
1265
		submit_bio(bio);
L
Linus Torvalds 已提交
1266 1267 1268
		if (size)
			goto next_chunk;
	} else {
1269 1270
		/*
		 * This is guaranteed not to be the last io reference count
1271
		 * because the caller (xfs_buf_submit) holds a count itself.
1272 1273
		 */
		atomic_dec(&bp->b_io_remaining);
D
Dave Chinner 已提交
1274
		xfs_buf_ioerror(bp, -EIO);
1275
		bio_put(bio);
L
Linus Torvalds 已提交
1276
	}
1277 1278 1279 1280 1281 1282 1283 1284

}

STATIC void
_xfs_buf_ioapply(
	struct xfs_buf	*bp)
{
	struct blk_plug	plug;
M
Mike Christie 已提交
1285 1286
	int		op;
	int		op_flags = 0;
1287 1288 1289 1290
	int		offset;
	int		size;
	int		i;

1291 1292 1293 1294 1295 1296
	/*
	 * Make sure we capture only current IO errors rather than stale errors
	 * left over from previous use of the buffer (e.g. failed readahead).
	 */
	bp->b_error = 0;

1297 1298 1299 1300 1301 1302 1303
	/*
	 * Initialize the I/O completion workqueue if we haven't yet or the
	 * submitter has not opted to specify a custom one.
	 */
	if (!bp->b_ioend_wq)
		bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;

1304
	if (bp->b_flags & XBF_WRITE) {
M
Mike Christie 已提交
1305
		op = REQ_OP_WRITE;
1306
		if (bp->b_flags & XBF_SYNCIO)
M
Mike Christie 已提交
1307
			op_flags = WRITE_SYNC;
1308
		if (bp->b_flags & XBF_FUA)
M
Mike Christie 已提交
1309
			op_flags |= REQ_FUA;
1310
		if (bp->b_flags & XBF_FLUSH)
1311
			op_flags |= REQ_PREFLUSH;
1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324

		/*
		 * Run the write verifier callback function if it exists. If
		 * this function fails it will mark the buffer with an error and
		 * the IO should not be dispatched.
		 */
		if (bp->b_ops) {
			bp->b_ops->verify_write(bp);
			if (bp->b_error) {
				xfs_force_shutdown(bp->b_target->bt_mount,
						   SHUTDOWN_CORRUPT_INCORE);
				return;
			}
1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338
		} else if (bp->b_bn != XFS_BUF_DADDR_NULL) {
			struct xfs_mount *mp = bp->b_target->bt_mount;

			/*
			 * non-crc filesystems don't attach verifiers during
			 * log recovery, so don't warn for such filesystems.
			 */
			if (xfs_sb_version_hascrc(&mp->m_sb)) {
				xfs_warn(mp,
					"%s: no ops on block 0x%llx/0x%x",
					__func__, bp->b_bn, bp->b_length);
				xfs_hex_dump(bp->b_addr, 64);
				dump_stack();
			}
1339
		}
1340
	} else if (bp->b_flags & XBF_READ_AHEAD) {
M
Mike Christie 已提交
1341 1342
		op = REQ_OP_READ;
		op_flags = REQ_RAHEAD;
1343
	} else {
M
Mike Christie 已提交
1344
		op = REQ_OP_READ;
1345 1346 1347
	}

	/* we only use the buffer cache for meta-data */
M
Mike Christie 已提交
1348
	op_flags |= REQ_META;
1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359

	/*
	 * Walk all the vectors issuing IO on them. Set up the initial offset
	 * into the buffer and the desired IO size before we start -
	 * _xfs_buf_ioapply_vec() will modify them appropriately for each
	 * subsequent call.
	 */
	offset = bp->b_offset;
	size = BBTOB(bp->b_io_length);
	blk_start_plug(&plug);
	for (i = 0; i < bp->b_map_count; i++) {
M
Mike Christie 已提交
1360
		xfs_buf_ioapply_map(bp, i, &offset, &size, op, op_flags);
1361 1362 1363 1364 1365 1366
		if (bp->b_error)
			break;
		if (size <= 0)
			break;	/* all done */
	}
	blk_finish_plug(&plug);
L
Linus Torvalds 已提交
1367 1368
}

1369 1370 1371 1372 1373 1374
/*
 * Asynchronous IO submission path. This transfers the buffer lock ownership and
 * the current reference to the IO. It is not safe to reference the buffer after
 * a call to this function unless the caller holds an additional reference
 * itself.
 */
1375
void
1376 1377
xfs_buf_submit(
	struct xfs_buf	*bp)
L
Linus Torvalds 已提交
1378
{
1379
	trace_xfs_buf_submit(bp, _RET_IP_);
L
Linus Torvalds 已提交
1380

1381
	ASSERT(!(bp->b_flags & _XBF_DELWRI_Q));
1382 1383 1384 1385 1386 1387 1388 1389 1390 1391
	ASSERT(bp->b_flags & XBF_ASYNC);

	/* on shutdown we stale and complete the buffer immediately */
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
		return;
	}
L
Linus Torvalds 已提交
1392

1393
	if (bp->b_flags & XBF_WRITE)
1394
		xfs_buf_wait_unpin(bp);
1395

1396 1397 1398
	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

1399
	/*
1400 1401 1402 1403 1404 1405
	 * The caller's reference is released during I/O completion.
	 * This occurs some time after the last b_io_remaining reference is
	 * released, so after we drop our Io reference we have to have some
	 * other reference to ensure the buffer doesn't go away from underneath
	 * us. Take a direct reference to ensure we have safe access to the
	 * buffer until we are finished with it.
1406
	 */
1407
	xfs_buf_hold(bp);
L
Linus Torvalds 已提交
1408

1409
	/*
1410 1411 1412
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
L
Linus Torvalds 已提交
1413
	 */
1414
	atomic_set(&bp->b_io_remaining, 1);
1415
	xfs_buf_ioacct_inc(bp);
1416
	_xfs_buf_ioapply(bp);
1417

1418
	/*
1419 1420 1421
	 * If _xfs_buf_ioapply failed, we can get back here with only the IO
	 * reference we took above. If we drop it to zero, run completion so
	 * that we don't return to the caller with completion still pending.
1422
	 */
1423
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
1424
		if (bp->b_error)
1425 1426 1427 1428
			xfs_buf_ioend(bp);
		else
			xfs_buf_ioend_async(bp);
	}
L
Linus Torvalds 已提交
1429

1430
	xfs_buf_rele(bp);
1431
	/* Note: it is not safe to reference bp now we've dropped our ref */
L
Linus Torvalds 已提交
1432 1433 1434
}
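
/*
 * Async submission sketch (illustrative): because xfs_buf_submit() gives its
 * lock and reference away to the I/O, a caller that still needs the buffer
 * afterwards must take its own hold first:
 *
 *	xfs_buf_hold(bp);
 *	bp->b_flags |= XBF_ASYNC;
 *	xfs_buf_submit(bp);
 *	...			// bp still valid here thanks to the extra hold
 *	xfs_buf_rele(bp);
 */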

/*
1435
 * Synchronous buffer IO submission path, read or write.
L
Linus Torvalds 已提交
1436 1437
 */
int
1438 1439
xfs_buf_submit_wait(
	struct xfs_buf	*bp)
L
Linus Torvalds 已提交
1440
{
1441
	int		error;
C
Christoph Hellwig 已提交
1442

1443 1444 1445
	trace_xfs_buf_submit_wait(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & (_XBF_DELWRI_Q | XBF_ASYNC)));
C
Christoph Hellwig 已提交
1446

1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		xfs_buf_ioerror(bp, -EIO);
		xfs_buf_stale(bp);
		bp->b_flags &= ~XBF_DONE;
		return -EIO;
	}

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);

	/* clear the internal error state to avoid spurious errors */
	bp->b_io_error = 0;

	/*
	 * For synchronous IO, the IO does not inherit the submitters reference
	 * count, nor the buffer lock. Hence we cannot release the reference we
	 * are about to take until we've waited for all IO completion to occur,
	 * including any xfs_buf_ioend_async() work that may be pending.
	 */
	xfs_buf_hold(bp);

	/*
	 * Set the count to 1 initially, this will stop an I/O completion
	 * callout which happens before we have started all the I/O from calling
	 * xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);

	/*
	 * make sure we run completion synchronously if it raced with us and is
	 * already complete.
	 */
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp);
C
Christoph Hellwig 已提交
1482

1483 1484 1485
	/* wait for completion before gathering the error from the buffer */
	trace_xfs_buf_iowait(bp, _RET_IP_);
	wait_for_completion(&bp->b_iowait);
C
Christoph Hellwig 已提交
1486
	trace_xfs_buf_iowait_done(bp, _RET_IP_);
1487 1488 1489 1490 1491 1492 1493 1494
	error = bp->b_error;

	/*
	 * all done now, we can release the hold that keeps the buffer
	 * referenced for the entire IO.
	 */
	xfs_buf_rele(bp);
	return error;
L
Linus Torvalds 已提交
1495 1496
}

1497
void *
1498
xfs_buf_offset(
1499
	struct xfs_buf		*bp,
L
Linus Torvalds 已提交
1500 1501 1502 1503
	size_t			offset)
{
	struct page		*page;

1504
	if (bp->b_addr)
1505
		return bp->b_addr + offset;
L
Linus Torvalds 已提交
1506

1507
	offset += bp->b_offset;
1508
	page = bp->b_pages[offset >> PAGE_SHIFT];
1509
	return page_address(page) + (offset & (PAGE_SIZE-1));
L
Linus Torvalds 已提交
1510 1511 1512 1513 1514 1515
}

/*
 *	Move data into or out of a buffer.
 */
void
1516 1517
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
L
Linus Torvalds 已提交
1518 1519
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
1520
	void			*data,	/* data address			*/
1521
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
L
Linus Torvalds 已提交
1522
{
D
Dave Chinner 已提交
1523
	size_t			bend;
L
Linus Torvalds 已提交
1524 1525 1526

	bend = boff + bsize;
	while (boff < bend) {
D
Dave Chinner 已提交
1527 1528 1529 1530 1531 1532 1533 1534
		struct page	*page;
		int		page_index, page_offset, csize;

		page_index = (boff + bp->b_offset) >> PAGE_SHIFT;
		page_offset = (boff + bp->b_offset) & ~PAGE_MASK;
		page = bp->b_pages[page_index];
		csize = min_t(size_t, PAGE_SIZE - page_offset,
				      BBTOB(bp->b_io_length) - boff);
L
Linus Torvalds 已提交
1535

D
Dave Chinner 已提交
1536
		ASSERT((csize + page_offset) <= PAGE_SIZE);
L
Linus Torvalds 已提交
1537 1538

		switch (mode) {
1539
		case XBRW_ZERO:
D
Dave Chinner 已提交
1540
			memset(page_address(page) + page_offset, 0, csize);
L
Linus Torvalds 已提交
1541
			break;
1542
		case XBRW_READ:
D
Dave Chinner 已提交
1543
			memcpy(data, page_address(page) + page_offset, csize);
L
Linus Torvalds 已提交
1544
			break;
1545
		case XBRW_WRITE:
D
Dave Chinner 已提交
1546
			memcpy(page_address(page) + page_offset, data, csize);
L
Linus Torvalds 已提交
1547 1548 1549 1550 1551 1552 1553 1554
		}

		boff += csize;
		data += csize;
	}
}
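
/*
 * Data copy sketch (illustrative; xfs_buf_zero() in xfs_buf.h is expected to
 * be a thin wrapper around this routine using the XBRW_ZERO mode):
 *
 *	xfs_buf_iomove(bp, boff, bsize, NULL, XBRW_ZERO);	// zero a range
 *	xfs_buf_iomove(bp, 0, len, data, XBRW_WRITE);		// copy data in
 */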

/*
1555
 *	Handling of buffer targets (buftargs).
L
Linus Torvalds 已提交
1556 1557 1558
 */

/*
1559 1560 1561
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
L
Linus Torvalds 已提交
1562
 */
1563 1564 1565
static enum lru_status
xfs_buftarg_wait_rele(
	struct list_head	*item,
1566
	struct list_lru_one	*lru,
1567 1568 1569
	spinlock_t		*lru_lock,
	void			*arg)

L
Linus Torvalds 已提交
1570
{
1571
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
1572
	struct list_head	*dispose = arg;
1573

1574
	if (atomic_read(&bp->b_hold) > 1) {
1575
		/* need to wait, so skip it this pass */
1576
		trace_xfs_buf_wait_buftarg(bp, _RET_IP_);
1577
		return LRU_SKIP;
L
Linus Torvalds 已提交
1578
	}
1579 1580
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;
1581

1582 1583 1584 1585 1586 1587
	/*
	 * clear the LRU reference count so the buffer doesn't get
	 * ignored in xfs_buf_rele().
	 */
	atomic_set(&bp->b_lru_ref, 0);
	bp->b_state |= XFS_BSTATE_DISPOSE;
1588
	list_lru_isolate_move(lru, item, dispose);
1589 1590
	spin_unlock(&bp->b_lock);
	return LRU_REMOVED;
L
Linus Torvalds 已提交
1591 1592
}

1593 1594 1595 1596
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
1597 1598 1599
	LIST_HEAD(dispose);
	int loop = 0;

1600
	/*
1601 1602 1603 1604 1605 1606 1607 1608 1609 1610
	 * First wait on the buftarg I/O count for all in-flight buffers to be
	 * released. This is critical as new buffers do not make the LRU until
	 * they are released.
	 *
	 * Next, flush the buffer workqueue to ensure all completion processing
	 * has finished. Just waiting on buffer locks is not sufficient for
	 * async IO as the reference count held over IO is not released until
	 * after the buffer lock is dropped. Hence we need to ensure here that
	 * all reference counts have been dropped before we start walking the
	 * LRU list.
1611
	 */
1612 1613
	while (percpu_counter_sum(&btp->bt_io_count))
		delay(100);
1614
	flush_workqueue(btp->bt_mount->m_buf_workqueue);
1615

1616 1617
	/* loop until there is nothing left on the lru list. */
	while (list_lru_count(&btp->bt_lru)) {
1618
		list_lru_walk(&btp->bt_lru, xfs_buftarg_wait_rele,
1619 1620 1621 1622 1623 1624
			      &dispose, LONG_MAX);

		while (!list_empty(&dispose)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
			list_del_init(&bp->b_lru);
1625 1626
			if (bp->b_flags & XBF_WRITE_FAIL) {
				xfs_alert(btp->bt_mount,
1627
"Corruption Alert: Buffer at block 0x%llx had permanent write failures!",
1628
					(long long)bp->b_bn);
1629 1630
				xfs_alert(btp->bt_mount,
"Please run xfs_repair to determine the extent of the problem.");
1631
			}
1632 1633 1634 1635 1636
			xfs_buf_rele(bp);
		}
		if (loop++ != 0)
			delay(100);
	}
1637 1638 1639 1640 1641
}

static enum lru_status
xfs_buftarg_isolate(
	struct list_head	*item,
1642
	struct list_lru_one	*lru,
1643 1644 1645 1646 1647 1648
	spinlock_t		*lru_lock,
	void			*arg)
{
	struct xfs_buf		*bp = container_of(item, struct xfs_buf, b_lru);
	struct list_head	*dispose = arg;

1649 1650 1651 1652 1653 1654
	/*
	 * we are inverting the lru lock/bp->b_lock here, so use a trylock.
	 * If we fail to get the lock, just skip it.
	 */
	if (!spin_trylock(&bp->b_lock))
		return LRU_SKIP;
1655 1656 1657 1658 1659
	/*
	 * Decrement the b_lru_ref count unless the value is already
	 * zero. If the value is already zero, we need to reclaim the
	 * buffer, otherwise it gets another trip through the LRU.
	 */
1660 1661
	if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
		spin_unlock(&bp->b_lock);
1662
		return LRU_ROTATE;
1663
	}
1664

1665
	bp->b_state |= XFS_BSTATE_DISPOSE;
1666
	list_lru_isolate_move(lru, item, dispose);
1667
	spin_unlock(&bp->b_lock);
1668 1669 1670
	return LRU_REMOVED;
}

1671
static unsigned long
1672
xfs_buftarg_shrink_scan(
1673
	struct shrinker		*shrink,
1674
	struct shrink_control	*sc)
1675
{
1676 1677
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
1678
	LIST_HEAD(dispose);
1679
	unsigned long		freed;
1680

1681 1682
	freed = list_lru_shrink_walk(&btp->bt_lru, sc,
				     xfs_buftarg_isolate, &dispose);
1683 1684

	while (!list_empty(&dispose)) {
1685
		struct xfs_buf *bp;
1686 1687 1688 1689 1690
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

1691 1692 1693
	return freed;
}

1694
static unsigned long
1695 1696 1697 1698 1699 1700
xfs_buftarg_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
1701
	return list_lru_shrink_count(&btp->bt_lru, sc);
1702 1703
}

L
Linus Torvalds 已提交
1704 1705
void
xfs_free_buftarg(
1706 1707
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
L
Linus Torvalds 已提交
1708
{
1709
	unregister_shrinker(&btp->bt_shrinker);
1710 1711
	ASSERT(percpu_counter_sum(&btp->bt_io_count) == 0);
	percpu_counter_destroy(&btp->bt_io_count);
G
Glauber Costa 已提交
1712
	list_lru_destroy(&btp->bt_lru);
1713

1714 1715
	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);
1716

1717
	kmem_free(btp);
L
Linus Torvalds 已提交
1718 1719
}

1720 1721
int
xfs_setsize_buftarg(
L
Linus Torvalds 已提交
1722
	xfs_buftarg_t		*btp,
1723
	unsigned int		sectorsize)
L
Linus Torvalds 已提交
1724
{
1725
	/* Set up metadata sector size info */
1726 1727
	btp->bt_meta_sectorsize = sectorsize;
	btp->bt_meta_sectormask = sectorsize - 1;
L
Linus Torvalds 已提交
1728

1729
	if (set_blocksize(btp->bt_bdev, sectorsize)) {
1730
		xfs_warn(btp->bt_mount,
1731 1732
			"Cannot set_blocksize to %u on device %pg",
			sectorsize, btp->bt_bdev);
D
Dave Chinner 已提交
1733
		return -EINVAL;
L
Linus Torvalds 已提交
1734 1735
	}

1736 1737 1738 1739
	/* Set up device logical sector size mask */
	btp->bt_logical_sectorsize = bdev_logical_block_size(btp->bt_bdev);
	btp->bt_logical_sectormask = bdev_logical_block_size(btp->bt_bdev) - 1;

L
Linus Torvalds 已提交
1740 1741 1742 1743
	return 0;
}

/*
1744 1745 1746
 * When allocating the initial buffer target we have not yet
 * read in the superblock, so don't know what sized sectors
 * are being used at this early stage.  Play safe.
1747
 */
L
Linus Torvalds 已提交
1748 1749 1750 1751 1752
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
1753
	return xfs_setsize_buftarg(btp, bdev_logical_block_size(bdev));
L
Linus Torvalds 已提交
1754 1755 1756 1757
}

xfs_buftarg_t *
xfs_alloc_buftarg(
1758
	struct xfs_mount	*mp,
1759
	struct block_device	*bdev)
L
Linus Torvalds 已提交
1760 1761 1762
{
	xfs_buftarg_t		*btp;

1763
	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP | KM_NOFS);
L
Linus Torvalds 已提交
1764

1765
	btp->bt_mount = mp;
1766 1767
	btp->bt_dev =  bdev->bd_dev;
	btp->bt_bdev = bdev;
1768 1769
	btp->bt_bdi = blk_get_backing_dev_info(bdev);

L
Linus Torvalds 已提交
1770 1771
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
1772 1773 1774 1775

	if (list_lru_init(&btp->bt_lru))
		goto error;

1776 1777 1778
	if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
		goto error;

1779 1780
	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
1781
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
1782
	btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
1783
	register_shrinker(&btp->bt_shrinker);
L
Linus Torvalds 已提交
1784 1785 1786
	return btp;

error:
1787
	kmem_free(btp);
L
Linus Torvalds 已提交
1788 1789 1790 1791
	return NULL;
}

/*
1792 1793 1794 1795 1796 1797 1798 1799 1800
 * Add a buffer to the delayed write list.
 *
 * This queues a buffer for writeout if it hasn't already been.  Note that
 * neither this routine nor the buffer list submission functions perform
 * any internal synchronization.  It is expected that the lists are thread-local
 * to the callers.
 *
 * Returns true if we queued up the buffer, or false if it already had
 * been on the buffer list.
L
Linus Torvalds 已提交
1801
 */
1802
bool
1803
xfs_buf_delwri_queue(
1804 1805
	struct xfs_buf		*bp,
	struct list_head	*list)
L
Linus Torvalds 已提交
1806
{
1807
	ASSERT(xfs_buf_islocked(bp));
1808
	ASSERT(!(bp->b_flags & XBF_READ));
L
Linus Torvalds 已提交
1809

1810 1811 1812 1813 1814 1815 1816 1817
	/*
	 * If the buffer is already marked delwri it already is queued up
	 * by someone else for immediate writeout.  Just ignore it in that
	 * case.
	 */
	if (bp->b_flags & _XBF_DELWRI_Q) {
		trace_xfs_buf_delwri_queued(bp, _RET_IP_);
		return false;
L
Linus Torvalds 已提交
1818 1819
	}

1820
	trace_xfs_buf_delwri_queue(bp, _RET_IP_);
1821 1822

	/*
1823 1824 1825 1826 1827 1828
	 * If a buffer gets written out synchronously or marked stale while it
	 * is on a delwri list we lazily remove it. To do this, the other party
	 * clears the  _XBF_DELWRI_Q flag but otherwise leaves the buffer alone.
	 * It remains referenced and on the list.  In a rare corner case it
	 * might get re-added to a delwri list after the synchronous writeout, in
	 * which case we just need to re-add the flag here.
1829
	 */
1830 1831 1832 1833
	bp->b_flags |= _XBF_DELWRI_Q;
	if (list_empty(&bp->b_list)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_list, list);
1834 1835
	}

1836
	return true;
1837 1838
}
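
/*
 * Delwri usage sketch (illustrative): callers keep a local list, queue buffers
 * onto it, and later push the whole list out in one go:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	if (xfs_buf_delwri_queue(bp, &buffer_list))
 *		...bp now holds an extra reference for the list...
 *	error = xfs_buf_delwri_submit(&buffer_list);	// or _submit_nowait()
 */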

1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853
/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

1854
	diff = ap->b_maps[0].bm_bn - bp->b_maps[0].bm_bn;
1855 1856 1857 1858 1859 1860 1861
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873
/*
 * submit buffers for write.
 *
 * When we have a large buffer list, we do not want to hold all the buffers
 * locked while we block on the request queue waiting for IO dispatch. To avoid
 * this problem, we lock and submit buffers in groups of 50, thereby minimising
 * the lock hold times for lists which may contain thousands of objects.
 *
 * To do this, we sort the buffer list before we walk the list to lock and
 * submit buffers, and we plug and unplug around each group of buffers we
 * submit.
 */
1874
static int
1875
xfs_buf_delwri_submit_buffers(
1876
	struct list_head	*buffer_list,
1877
	struct list_head	*wait_list)
L
Linus Torvalds 已提交
1878
{
1879
	struct xfs_buf		*bp, *n;
1880
	LIST_HEAD		(submit_list);
1881
	int			pinned = 0;
1882
	struct blk_plug		plug;
1883

1884
	list_sort(NULL, buffer_list, xfs_buf_cmp);
1885

1886
	blk_start_plug(&plug);
1887
	list_for_each_entry_safe(bp, n, buffer_list, b_list) {
1888
		if (!wait_list) {
1889 1890 1891 1892 1893 1894 1895 1896 1897
			if (xfs_buf_ispinned(bp)) {
				pinned++;
				continue;
			}
			if (!xfs_buf_trylock(bp))
				continue;
		} else {
			xfs_buf_lock(bp);
		}
1898

1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909
		/*
		 * Someone else might have written the buffer synchronously or
		 * marked it stale in the meantime.  In that case only the
		 * _XBF_DELWRI_Q flag got cleared, and we have to drop the
		 * reference and remove it from the list here.
		 */
		if (!(bp->b_flags & _XBF_DELWRI_Q)) {
			list_del_init(&bp->b_list);
			xfs_buf_relse(bp);
			continue;
		}
D
Dave Chinner 已提交
1910

1911
		trace_xfs_buf_delwri_split(bp, _RET_IP_);
1912

1913
		/*
1914 1915 1916 1917 1918
		 * We do all IO submission async. This means if we need
		 * to wait for IO completion we need to take an extra
		 * reference so the buffer is still valid on the other
		 * side. We need to move the buffer onto the io_list
		 * at this point so the caller can still access it.
1919
		 */
1920
		bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_WRITE_FAIL);
1921 1922
		bp->b_flags |= XBF_WRITE | XBF_ASYNC;
		if (wait_list) {
1923
			xfs_buf_hold(bp);
1924 1925
			list_move_tail(&bp->b_list, wait_list);
		} else
1926
			list_del_init(&bp->b_list);
D
Dave Chinner 已提交
1927

1928
		xfs_buf_submit(bp);
1929 1930
	}
	blk_finish_plug(&plug);
L
Linus Torvalds 已提交
1931

1932
	return pinned;
L
Linus Torvalds 已提交
1933 1934 1935
}

/*
1936 1937 1938 1939 1940 1941 1942
 * Write out a buffer list asynchronously.
 *
 * This will take the @buffer_list, write all non-locked and non-pinned buffers
 * out and not wait for I/O completion on any of the buffers.  This interface
 * is only safely useable for callers that can track I/O completion by higher
 * level means, e.g. AIL pushing as the @buffer_list is consumed in this
 * function.
L
Linus Torvalds 已提交
1943 1944
 */
int
1945 1946
xfs_buf_delwri_submit_nowait(
	struct list_head	*buffer_list)
L
Linus Torvalds 已提交
1947
{
1948
	return xfs_buf_delwri_submit_buffers(buffer_list, NULL);
1949
}
L
Linus Torvalds 已提交
1950

1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962
/*
 * Write out a buffer list synchronously.
 *
 * This will take the @buffer_list, write all buffers out and wait for I/O
 * completion on all of the buffers. @buffer_list is consumed by the function,
 * so callers must have some other way of tracking buffers if they require such
 * functionality.
 */
int
xfs_buf_delwri_submit(
	struct list_head	*buffer_list)
{
1963
	LIST_HEAD		(wait_list);
1964 1965
	int			error = 0, error2;
	struct xfs_buf		*bp;
L
Linus Torvalds 已提交
1966

1967
	xfs_buf_delwri_submit_buffers(buffer_list, &wait_list);
L
Linus Torvalds 已提交
1968

1969
	/* Wait for IO to complete. */
1970 1971
	while (!list_empty(&wait_list)) {
		bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
1972

1973
		list_del_init(&bp->b_list);
1974 1975 1976 1977

		/* locking the buffer will wait for async IO completion. */
		xfs_buf_lock(bp);
		error2 = bp->b_error;
1978 1979 1980
		xfs_buf_relse(bp);
		if (!error)
			error = error2;
L
Linus Torvalds 已提交
1981 1982
	}

1983
	return error;
L
Linus Torvalds 已提交
1984 1985
}

1986
int __init
1987
xfs_buf_init(void)
L
Linus Torvalds 已提交
1988
{
1989 1990
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
1991
	if (!xfs_buf_zone)
C
Christoph Hellwig 已提交
1992
		goto out;
1993

1994
	return 0;
L
Linus Torvalds 已提交
1995

C
Christoph Hellwig 已提交
1996
 out:
1997
	return -ENOMEM;
L
Linus Torvalds 已提交
1998 1999 2000
}

void
2001
xfs_buf_terminate(void)
L
Linus Torvalds 已提交
2002
{
2003
	kmem_zone_destroy(xfs_buf_zone);
L
Linus Torvalds 已提交
2004
}