/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
struct workqueue_struct *xfsconvertd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));

static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
	 * code is clever enough to know it doesn't have to map a single page,
	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
	 */
	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
STATIC void
xfs_buf_lru_add(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
		btp->bt_lru_nr++;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are not
 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is
 * there to optimise the shrinker removing the buffer from the LRU and
 * calling xfs_buf_free(). i.e. it removes an unnecessary round trip on the
 * bt_lru_lock.
 */
STATIC void
xfs_buf_lru_del(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	bp->b_flags |= XBF_STALE;
	xfs_buf_delwri_dequeue(bp);
	atomic_set(&(bp)->b_lru_ref, 0);
	if (!list_empty(&bp->b_lru)) {
		struct xfs_buftarg *btp = bp->b_target;

		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru)) {
			list_del_init(&bp->b_lru);
			btp->bt_lru_nr--;
			atomic_dec(&bp->b_hold);
		}
		spin_unlock(&btp->bt_lru_lock);
	}
	ASSERT(atomic_read(&bp->b_hold) >= 1);
}

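/*
 * Initialise a freshly allocated buffer: set up the hold and LRU reference
 * counts, the wait queue and list heads, and record the target and the file
 * range covered.  The disk address is left as XFS_BUF_DADDR_NULL until the
 * caller fills it in.
 */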
STATIC void
_xfs_buf_initialize(
	xfs_buf_t		*bp,
	xfs_buftarg_t		*target,
	xfs_off_t		range_base,
	size_t			range_length,
	xfs_buf_flags_t		flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);

	trace_xfs_buf_init(bp, _RET_IP_);
}

/*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 *	Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 *	Releases the specified buffer.
 *
 * 	The modification state of any associated pages is left unchanged.
 * 	The buffer must not be on any hash - use xfs_buf_rele instead for
 * 	hashed and refcounted buffers
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_deallocate(bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	if (bp->b_buffer_length < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
								PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}

/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		int retried = 0;

		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */

/*
 *	Looks up, and creates if absent, a lockable buffer for
 *	a given range of an inode.  The buffer is returned
 *	locked.	No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t		*btp,	/* block device target		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	xfs_off_t		range_base;
	size_t			range_length;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
				xfs_daddr_to_agno(btp->bt_mount, ioff));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (range_base < bp->b_file_offset)
			rbp = &(*rbp)->rb_left;
		else if (range_base > bp->b_file_offset)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block offset match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_buffer_length != range_length) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 * Assembles a buffer covering the specified range. The code is optimised for
 * cache hits, as metadata intensive workloads will see 3 orders of magnitude
 * more hits than misses.
 */
struct xfs_buf *
xfs_buf_get(
	xfs_buftarg_t		*target,/* target for buffer		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags)
{
	struct xfs_buf		*bp;
	struct xfs_buf		*new_bp;
	int			error = 0;

	bp = _xfs_buf_find(target, ioff, isize, flags, NULL);
	if (likely(bp))
		goto found;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	_xfs_buf_initialize(new_bp, target,
			    ioff << BBSHIFT, isize << BBSHIFT, flags);

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (!bp) {
		xfs_buf_deallocate(new_bp);
		return NULL;
	}

	if (bp == new_bp) {
		error = xfs_buf_allocate_memory(bp, flags);
		if (error)
			goto no_buffer;
	} else
		xfs_buf_deallocate(new_bp);

	/*
	 * Now we have a workable buffer, fill in the block number so
	 * that we can do IO on it.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

found:
	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);
	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;

no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

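/*
 * Issue the read I/O against a buffer returned by xfs_buf_get() and, unless
 * the read is asynchronous, wait for it to complete.
 */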
STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status;

	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	status = xfs_buf_iorequest(bp);
	if (status || bp->b_error || (flags & XBF_ASYNC))
		return status;
	return xfs_buf_iowait(bp);
}

L
Linus Torvalds 已提交
609
xfs_buf_t *
610
xfs_buf_read(
L
Linus Torvalds 已提交
611
	xfs_buftarg_t		*target,
612
	xfs_off_t		ioff,
L
Linus Torvalds 已提交
613
	size_t			isize,
614
	xfs_buf_flags_t		flags)
L
Linus Torvalds 已提交
615
{
616 617 618 619
	xfs_buf_t		*bp;

	flags |= XBF_READ;

620
	bp = xfs_buf_get(target, ioff, isize, flags);
621
	if (bp) {
C
Christoph Hellwig 已提交
622 623
		trace_xfs_buf_read(bp, flags, _RET_IP_);

624 625
		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
C
Christoph Hellwig 已提交
626
			_xfs_buf_read(bp, flags);
627
		} else if (flags & XBF_ASYNC) {
L
Linus Torvalds 已提交
628 629 630 631 632 633 634
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			/* We do not want read in the flags */
635
			bp->b_flags &= ~XBF_READ;
L
Linus Torvalds 已提交
636 637 638
		}
	}

639
	return bp;
L
Linus Torvalds 已提交
640 641

 no_buffer:
642 643 644
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
L
Linus Torvalds 已提交
645 646 647 648
	return NULL;
}

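/*
 * Typical call pattern for the lookup/read interfaces above -- an
 * illustrative sketch only, with a hypothetical target and block range
 * (both ioff and isize are expressed in 512 byte basic blocks, as the
 * BBSHIFT conversions in _xfs_buf_find() show):
 *
 *	bp = xfs_buf_read(target, ioff, isize, XBF_LOCK | XBF_MAPPED);
 *	if (bp) {
 *		... access the data through bp->b_addr or xfs_buf_offset() ...
 *		xfs_buf_relse(bp);
 *	}
 */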
/*
 *	If we are not low on memory then do the readahead in a deadlock
 *	safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read(target, ioff, isize,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			length,
	int			flags)
{
	xfs_buf_t		*bp;
	int			error;

	bp = xfs_buf_get_uncached(target, length, flags);
	if (!bp)
		return NULL;

	/* set up the buffer for a read IO */
	XFS_BUF_SET_ADDR(bp, daddr);
	XFS_BUF_READ(bp);

	xfsbdstrat(mp, bp);
	error = xfs_buf_iowait(bp);
	if (error || bp->b_error) {
		xfs_buf_relse(bp);
		return NULL;
	}
	return bp;
}

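/*
 * Allocate an uninitialised buffer of the given length that is not backed by
 * pages or associated with a disk address; memory is typically attached
 * afterwards via xfs_buf_associate_memory().
 */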
xfs_buf_t *
xfs_buf_get_empty(
	size_t			len,
	xfs_buftarg_t		*target)
{
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
}

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			len)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_file_offset = 0;
	bp->b_buffer_length = bp->b_count_desired = len;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_flags &= ~XBF_MAPPED;
}

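/*
 * Translate a kernel virtual address to its backing page, handling both
 * directly mapped and vmalloc'd addresses.
 */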
static inline struct page *
mem_to_page(
	void			*addr)
{
	if (!is_vmalloc_addr(addr)) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_count_desired = len;
	bp->b_buffer_length = buflen;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}

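/*
 * Allocate a buffer that is not inserted into the per-ag buffer cache: its
 * pages come straight from alloc_page() and the caller must set the disk
 * address before doing I/O on it (as xfs_buf_read_uncached() above does).
 */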
xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			len,
	int			flags)
{
	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int			error, i;
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages\n", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_deallocate(bp);
 fail:
	return NULL;
}

/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 *	Releases a hold on the specified buffer.  If the
 *	hold count is 1, calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		if (!(bp->b_flags & XBF_STALE) &&
			   atomic_read(&bp->b_lru_ref)) {
			xfs_buf_lru_add(bp);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			xfs_buf_lru_del(bp);
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}


/*
 *	Lock a buffer object, if it is not already locked.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we are
 *	being asked to lock a buffer that has been reallocated. Because it is
 *	pinned, we know that the log has not been pushed to disk and hence it
 *	will still be locked.  Rather than continuing to have trylock attempts
 *	fail until someone else pushes the log, push it ourselves before
 *	returning.  This means that the xfsaild will not get stuck trying
 *	to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);
	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
}

/*
 *	Lock a buffer object.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we
 *	are being asked to lock a buffer that has been reallocated. Because
 *	it is pinned, we know that the log has not been pushed to disk and
 *	hence it will still be locked. Rather than sleeping until someone
 *	else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

/*
 *	Releases the lock on the buffer object.
 *	If the buffer is marked delwri but is not queued, do so before we
 *	unlock the buffer as we need to set flags correctly.  We also need to
 *	take a reference for the delwri queue because the unlocker is going to
 *	drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

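/*
 * Buffer I/O completion: clear the I/O state flags and either run the iodone
 * callback (directly, or via the xfslogd workqueue if @schedule is set) or
 * wake up anyone sleeping in xfs_buf_iowait().
 */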
void
xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

int
xfs_bwrite(
	struct xfs_buf		*bp)
{
	int			error;

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ);

	xfs_buf_delwri_dequeue(bp);
	xfs_bdstrat_cb(bp);

	error = xfs_buf_iowait(bp);
	if (error) {
		xfs_force_shutdown(bp->b_target->bt_mount,
				   SHUTDOWN_META_IO_ERROR);
	}
	return error;
}

/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	xfs_buf_ioerror(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDONE(bp);
	xfs_buf_stale(bp);

	xfs_buf_ioend(bp, 0);

	return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = bp->b_flags;
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_DONE(bp);
	xfs_buf_stale(bp);
	bp->b_iodone = NULL;
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		xfs_buf_ioerror(bp, EIO);
		complete(&bp->b_iowait);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}


/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes through this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);
}

STATIC void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);
}

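/*
 * bio completion handler: record any I/O error, invalidate the kernel
 * mapping of a vmapped buffer after a successful read, and drop this bio's
 * reference to the pending I/O count.
 */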
STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	xfs_buf_ioerror(bp, -error);

	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}

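/*
 * Translate the buffer into one or more bios and submit them.  Each chunk
 * takes a reference on b_io_remaining so completion is only signalled once
 * all outstanding bios have finished.
 */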
STATIC void
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
{
	int			rw, map_i, total_nr_pages, nr_pages;
	struct bio		*bio;
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
			rw = WRITE_SYNC;
		else
			rw = WRITE;
		if (bp->b_flags & XBF_FUA)
			rw |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			rw |= REQ_FLUSH;
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		rw = READA;
	} else {
		rw = READ;
	}

	/* we only use the buffer cache for meta-data */
	rw |= REQ_META;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		xfs_buf_ioerror(bp, EIO);
		bio_put(bio);
	}
}

int
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & XBF_DELWRI));

	if (bp->b_flags & XBF_WRITE)
		xfs_buf_wait_unpin(bp);
	xfs_buf_hold(bp);

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}

/*
 *	Waits for I/O to complete on the buffer supplied.
 *	It returns immediately if no I/O is pending.
 *	It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iowait(bp, _RET_IP_);

	wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	return bp->b_error;
}

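/*
 * Return a pointer to the byte at the given offset within the buffer, either
 * directly through the virtual mapping or via the backing page.
 */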
xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_flags & XBF_MAPPED)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}

/*
 *	Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend, cpoff, csize;
	struct page		*page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

/*
 *	Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	struct xfs_buf		*bp;

restart:
	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
		if (atomic_read(&bp->b_hold) > 1) {
			spin_unlock(&btp->bt_lru_lock);
			delay(100);
			goto restart;
		}
		/*
		 * clear the LRU reference count so the buffer doesn't get
		 * ignored in xfs_buf_rele().
		 */
		atomic_set(&bp->b_lru_ref, 0);
		spin_unlock(&btp->bt_lru_lock);
		xfs_buf_rele(bp);
		spin_lock(&btp->bt_lru_lock);
	}
	spin_unlock(&btp->bt_lru_lock);
}

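/*
 * Shrinker callback: walk the buftarg LRU, aging buffers by decrementing
 * b_lru_ref and disposing of those that have already reached zero.
 */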
int
xfs_buftarg_shrink(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	struct xfs_buf		*bp;
	int nr_to_scan = sc->nr_to_scan;
	LIST_HEAD(dispose);

	if (!nr_to_scan)
		return btp->bt_lru_nr;

	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		if (nr_to_scan-- <= 0)
			break;

		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);

		/*
		 * Decrement the b_lru_ref count unless the value is already
		 * zero. If the value is already zero, we need to reclaim the
		 * buffer, otherwise it gets another trip through the LRU.
		 */
		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			continue;
		}

		/*
		 * remove the buffer from the LRU now to avoid needing another
		 * lock round trip inside xfs_buf_rele().
		 */
		list_move(&bp->b_lru, &dispose);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return btp->bt_lru_nr;
}

void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);

	xfs_flush_buftarg(btp, 1);
	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kthread_stop(btp->bt_task);
	kmem_free(btp);
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s\n",
			sectorsize, xfs_buf_target_name(btp));
		return EINVAL;
	}

	return 0;
}

/*
 *	When allocating the initial buffer target we have not yet
 *	read in the superblock, so we don't know what sized sectors
 *	are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

STATIC int
xfs_alloc_delwri_queue(
	xfs_buftarg_t		*btp,
	const char		*fsname)
{
	INIT_LIST_HEAD(&btp->bt_delwri_queue);
	spin_lock_init(&btp->bt_delwri_lock);
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
	if (IS_ERR(btp->bt_task))
		return PTR_ERR(btp->bt_task);
	return 0;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	int			external,
	const char		*fsname)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_mount = mp;
	btp->bt_dev =  bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);
	if (!btp->bt_bdi)
		goto error;

	INIT_LIST_HEAD(&btp->bt_lru);
	spin_lock_init(&btp->bt_lru_lock);
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_alloc_delwri_queue(btp, fsname))
		goto error;
	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}


/*
 *	Delayed write buffer handling
 */
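/*
 * Queue a buffer for delayed writeout, taking an extra hold for the queue
 * and waking xfsbufd if the queue was previously empty.  A buffer that is
 * already queued is simply moved to the tail.
 */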
void
xfs_buf_delwri_queue(
	xfs_buf_t		*bp)
{
	struct xfs_buftarg	*btp = bp->b_target;

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	ASSERT(!(bp->b_flags & XBF_READ));

	spin_lock(&btp->bt_delwri_lock);
	if (!list_empty(&bp->b_list)) {
		/* if already in the queue, move it to the tail */
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_move_tail(&bp->b_list, &btp->bt_delwri_queue);
	} else {
		/* start xfsbufd as it is about to have something to do */
		if (list_empty(&btp->bt_delwri_queue))
			wake_up_process(bp->b_target->bt_task);

		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_DELWRI | _XBF_DELWRI_Q | XBF_ASYNC;
		list_add_tail(&bp->b_list, &btp->bt_delwri_queue);
	}
	bp->b_queuetime = jiffies;
	spin_unlock(&btp->bt_delwri_lock);
}

void
xfs_buf_delwri_dequeue(
	xfs_buf_t		*bp)
{
	int			dequeued = 0;

	spin_lock(&bp->b_target->bt_delwri_lock);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(&bp->b_target->bt_delwri_lock);

	if (dequeued)
		xfs_buf_rele(bp);

	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}

/*
 * If a delwri buffer needs to be pushed before it has aged out, then promote
 * it to the head of the delwri queue so that it will be flushed on the next
 * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
 * than the age currently needed to flush the buffer. Hence the next time the
 * xfsbufd sees it is guaranteed to be considered old enough to flush.
 */
void
xfs_buf_delwri_promote(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;
	long		age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;

	ASSERT(bp->b_flags & XBF_DELWRI);
	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	/*
	 * Check the buffer age before locking the delayed write queue as we
	 * don't need to promote buffers that are already past the flush age.
	 */
	if (bp->b_queuetime < jiffies - age)
		return;
	bp->b_queuetime = jiffies - age;
1620 1621 1622
	spin_lock(&btp->bt_delwri_lock);
	list_move(&bp->b_list, &btp->bt_delwri_queue);
	spin_unlock(&btp->bt_delwri_lock);
1623 1624
}

L
STATIC void
xfs_buf_runall_queues(
{
	flush_workqueue(queue);
}

/*
 * Move as many buffers as specified to the supplied list,
 * indicating if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfs_buf_delwri_split(
	xfs_buftarg_t	*target,
	struct list_head *list,
	unsigned long	age)
{
	xfs_buf_t	*bp, *n;
	int		skipped = 0;
	int		force;

	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	INIT_LIST_HEAD(list);
	spin_lock(&target->bt_delwri_lock);
	list_for_each_entry_safe(bp, n, &target->bt_delwri_queue, b_list) {
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) {
			if (!force &&
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}

			bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q);
			bp->b_flags |= XBF_WRITE;
			list_move_tail(&bp->b_list, list);
			trace_xfs_buf_delwri_split(bp, _RET_IP_);
		} else
			skipped++;
	}

	spin_unlock(&target->bt_delwri_lock);
	return skipped;
}

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_bn - bp->b_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

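/*
 * The delayed write daemon: periodically pulls aged buffers off the delwri
 * queue, sorts them by block number and issues the writes, sleeping (or
 * entering the freezer) when there is nothing to do.
 */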
STATIC int
xfsbufd(
	void		*data)
{
	xfs_buftarg_t   *target = (xfs_buftarg_t *)data;

	current->flags |= PF_MEMALLOC;

	set_freezable();

	do {
		long	age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		long	tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
		struct list_head tmp;
		struct blk_plug plug;

		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		/* sleep for a long time if there is nothing to do. */
		if (list_empty(&target->bt_delwri_queue))
			tout = MAX_SCHEDULE_TIMEOUT;
		schedule_timeout_interruptible(tout);

		xfs_buf_delwri_split(target, &tmp, age);
		list_sort(NULL, &tmp, xfs_buf_cmp);

		blk_start_plug(&plug);
		while (!list_empty(&tmp)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&tmp, struct xfs_buf, b_list);
			list_del_init(&bp->b_list);
			xfs_bdstrat_cb(bp);
		}
		blk_finish_plug(&plug);
	} while (!kthread_should_stop());

	return 0;
}

/*
 *	Go through all incore buffers, and release buffers if they belong to
 *	the given device. This is used in filesystem error handling to
 *	preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t	*target,
	int		wait)
{
	xfs_buf_t	*bp;
	int		pincount = 0;
	LIST_HEAD(tmp_list);
	LIST_HEAD(wait_list);
	struct blk_plug plug;

	xfs_buf_runall_queues(xfsconvertd_workqueue);
	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	pincount = xfs_buf_delwri_split(target, &tmp_list, 0);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list.
	 * All I/O is issued async and then if we need to wait for completion
	 * we do that after issuing all the IO.
	 */
	list_sort(NULL, &tmp_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	while (!list_empty(&tmp_list)) {
		bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
		ASSERT(target == bp->b_target);
		list_del_init(&bp->b_list);
		if (wait) {
			bp->b_flags &= ~XBF_ASYNC;
			list_add(&bp->b_list, &wait_list);
		}
		xfs_bdstrat_cb(bp);
	}
	blk_finish_plug(&plug);

	if (wait) {
		/* Wait for IO to complete. */
		while (!list_empty(&wait_list)) {
			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

			list_del_init(&bp->b_list);
			xfs_buf_iowait(bp);
			xfs_buf_relse(bp);
		}
	}

	return pincount;
}

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1);
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfsconvertd_workqueue = alloc_workqueue("xfsconvertd",
						WQ_MEM_RECLAIM, 1);
	if (!xfsconvertd_workqueue)
		goto out_destroy_xfsdatad_workqueue;

	return 0;

 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	destroy_workqueue(xfsconvertd_workqueue);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}