/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
struct workqueue_struct *xfsconvertd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));

static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
	 * code is clever enough to know it doesn't have to map a single page,
	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
	 */
	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
STATIC void
xfs_buf_lru_add(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
		btp->bt_lru_nr++;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are no
 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is there
 * to optimise the shrinker removing the buffer from the LRU and calling
 * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
 * bt_lru_lock.
 */
STATIC void
xfs_buf_lru_del(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	bp->b_flags |= XBF_STALE;
	atomic_set(&(bp)->b_lru_ref, 0);
	if (!list_empty(&bp->b_lru)) {
		struct xfs_buftarg *btp = bp->b_target;

		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru)) {
			list_del_init(&bp->b_lru);
			btp->bt_lru_nr--;
			atomic_dec(&bp->b_hold);
		}
		spin_unlock(&btp->bt_lru_lock);
	}
	ASSERT(atomic_read(&bp->b_hold) >= 1);
}
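
/*
 * Initialise a newly allocated buffer for the given target and range.
 * The buffer starts off locked (b_sema is initialised held) and carries
 * a single hold reference.
 */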
STATIC void
_xfs_buf_initialize(
	xfs_buf_t		*bp,
	xfs_buftarg_t		*target,
	xfs_off_t		range_base,
	size_t			range_length,
	xfs_buf_flags_t		flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);

	trace_xfs_buf_init(bp, _RET_IP_);
}

/*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 *	Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 *	Releases the specified buffer.
 *
 * 	The modification state of any associated pages is left unchanged.
 * 	The buffer must not be on any hash - use xfs_buf_rele instead for
 * 	hashed and refcounted buffers
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_deallocate(bp);
}

/*
 * Allocates all the pages for buffer in question and builds its page list.
 */
STATIC int
xfs_buf_allocate_memory(
	xfs_buf_t		*bp,
	uint			flags)
{
	size_t			size = bp->b_count_desired;
	size_t			nbytes, offset;
	gfp_t			gfp_mask = xb_to_gfp(flags);
	unsigned short		page_count, i;
	xfs_off_t		end;
	int			error;

	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	if (bp->b_buffer_length < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
								PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
		return 0;
	}

use_alloc_page:
	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
	error = _xfs_buf_get_pages(bp, page_count, flags);
	if (unlikely(error))
		return error;

	offset = bp->b_offset;
	bp->b_flags |= _XBF_PAGES;

	for (i = 0; i < bp->b_page_count; i++) {
		struct page	*page;
		uint		retries = 0;
retry:
		page = alloc_page(gfp_mask);
		if (unlikely(page == NULL)) {
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
				error = ENOMEM;
				goto out_free_pages;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
					__func__, gfp_mask);

			XFS_STATS_INC(xb_page_retries);
			congestion_wait(BLK_RW_ASYNC, HZ/50);
			goto retry;
		}

		XFS_STATS_INC(xb_page_found);

		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
		size -= nbytes;
		bp->b_pages[i] = page;
		offset = 0;
	}
	return 0;

out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
	return error;
}

/*
 *	Map buffer into kernel address-space if necessary.
 */
STATIC int
_xfs_buf_map_pages(
	xfs_buf_t		*bp,
	uint			flags)
{
	ASSERT(bp->b_flags & _XBF_PAGES);
	if (bp->b_page_count == 1) {
		/* A single page buffer is always mappable */
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
		int retried = 0;

		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);

		if (!bp->b_addr)
			return -ENOMEM;
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */

/*
 *	Look up, and create if absent, a lockable buffer for
 *	a given range of an inode.  The buffer is returned
 *	locked.	 If other overlapping buffers exist, they are
 *	released before the new buffer is created and locked,
 *	which may imply that this call will block until those buffers
 *	are unlocked.  No I/O is implied by this call.
 */
xfs_buf_t *
_xfs_buf_find(
	xfs_buftarg_t		*btp,	/* block device target		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
{
	xfs_off_t		range_base;
	size_t			range_length;
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));

	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
				xfs_daddr_to_agno(btp->bt_mount, ioff));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (range_base < bp->b_file_offset)
			rbp = &(*rbp)->rb_left;
		else if (range_base > bp->b_file_offset)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block offset match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_buffer_length != range_length) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}

	/* No match found */
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
				range_length, flags);
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
	} else {
		XFS_STATS_INC(xb_miss_locked);
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
	}
	return new_bp;

found:
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);

	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
		}
		xfs_buf_lock(bp);
		XFS_STATS_INC(xb_get_locked_waited);
	}

	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
	}

	trace_xfs_buf_find(bp, flags, _RET_IP_);
	XFS_STATS_INC(xb_get_locked);
	return bp;
}

/*
 *	Assembles a buffer covering the specified range.
 *	Storage in memory for all portions of the buffer will be allocated,
 *	although backing storage may not be.
 */
xfs_buf_t *
xfs_buf_get(
	xfs_buftarg_t		*target,/* target for buffer		*/
	xfs_off_t		ioff,	/* starting offset of range	*/
	size_t			isize,	/* length of range		*/
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp, *new_bp;
	int			error = 0;

	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
		return NULL;

	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		error = xfs_buf_allocate_memory(bp, flags);
		if (error)
			goto no_buffer;
	} else {
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
			return NULL;
	}

	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
		if (unlikely(error)) {
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
			goto no_buffer;
		}
	}

	XFS_STATS_INC(xb_get);

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;

	trace_xfs_buf_get(bp, flags, _RET_IP_);
	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}

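/*
 * Internal helper: submit the read I/O for a buffer obtained from
 * xfs_buf_get(). Synchronous reads wait for completion here; async
 * reads return immediately and complete via the iodone path.
 */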
STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status;

	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);

	status = xfs_buf_iorequest(bp);
	if (status || bp->b_error || (flags & XBF_ASYNC))
		return status;
	return xfs_buf_iowait(bp);
}

xfs_buf_t *
xfs_buf_read(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize,
	xfs_buf_flags_t		flags)
{
	xfs_buf_t		*bp;

	flags |= XBF_READ;

	bp = xfs_buf_get(target, ioff, isize, flags);
	if (bp) {
		trace_xfs_buf_read(bp, flags, _RET_IP_);

		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
			_xfs_buf_read(bp, flags);
		} else if (flags & XBF_ASYNC) {
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			/* We do not want read in the flags */
			bp->b_flags &= ~XBF_READ;
		}
	}

	return bp;

 no_buffer:
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
	return NULL;
}
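
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *	bp = xfs_buf_read(target, blkno, numblks, XBF_LOCK);
 *	if (bp) {
 *		... access the contents via xfs_buf_offset(bp, 0) ...
 *		xfs_buf_relse(bp);
 *	}
 */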

/*
 *	If we are not low on memory then do the readahead in a deadlock
 *	safe manner.
 */
void
xfs_buf_readahead(
	xfs_buftarg_t		*target,
	xfs_off_t		ioff,
	size_t			isize)
{
	if (bdi_read_congested(target->bt_bdi))
		return;

	xfs_buf_read(target, ioff, isize,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
}

/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			length,
	int			flags)
{
	xfs_buf_t		*bp;
	int			error;

	bp = xfs_buf_get_uncached(target, length, flags);
	if (!bp)
		return NULL;

	/* set up the buffer for a read IO */
	XFS_BUF_SET_ADDR(bp, daddr);
	XFS_BUF_READ(bp);

	xfsbdstrat(mp, bp);
	error = xfs_buf_iowait(bp);
	if (error || bp->b_error) {
		xfs_buf_relse(bp);
		return NULL;
	}
	return bp;
}

xfs_buf_t *
xfs_buf_get_empty(
	size_t			len,
	xfs_buftarg_t		*target)
{
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
}

/*
 * Return a buffer allocated as an empty buffer and associated to external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			len)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_file_offset = 0;
	bp->b_buffer_length = bp->b_count_desired = len;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_flags &= ~XBF_MAPPED;
}

static inline struct page *
mem_to_page(
	void			*addr)
{
	if ((!is_vmalloc_addr(addr))) {
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

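/*
 * Associate caller-supplied memory with a buffer, building a page array
 * that maps it. The memory remains owned by the caller and is not freed
 * when the buffer is released.
 */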
int
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
	int			page_count;

	pageaddr = (unsigned long)mem & PAGE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;

	/* Free any previous set of page pointers */
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_addr = mem;

	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
	if (rval)
		return rval;

	bp->b_offset = offset;

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_SIZE;
	}

	bp->b_count_desired = len;
	bp->b_buffer_length = buflen;
	bp->b_flags |= XBF_MAPPED;

	return 0;
}

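/*
 * Allocate a buffer that is not inserted into the cache tree and has no
 * disk address until the caller assigns one (e.g. for uncached I/O via
 * xfs_buf_read_uncached()).
 */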
xfs_buf_t *
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
	size_t			len,
	int			flags)
{
	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int			error, i;
	xfs_buf_t		*bp;

	bp = xfs_buf_allocate(0);
	if (unlikely(bp == NULL))
		goto fail;
	_xfs_buf_initialize(bp, target, 0, len, 0);

	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
		goto fail_free_buf;

	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
		if (!bp->b_pages[i])
			goto fail_free_mem;
	}
	bp->b_flags |= _XBF_PAGES;

	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		xfs_warn(target->bt_mount,
			"%s: failed to map pages\n", __func__);
		goto fail_free_mem;
	}

	trace_xfs_buf_get_uncached(bp, _RET_IP_);
	return bp;

 fail_free_mem:
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
	_xfs_buf_free_pages(bp);
 fail_free_buf:
	xfs_buf_deallocate(bp);
 fail:
	return NULL;
}

/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
xfs_buf_hold(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_hold(bp, _RET_IP_);
	atomic_inc(&bp->b_hold);
}

/*
 *	Releases a hold on the specified buffer.  If the hold count is 1,
 *	calls xfs_buf_free.
 */
void
xfs_buf_rele(
	xfs_buf_t		*bp)
{
	struct xfs_perag	*pag = bp->b_pag;

	trace_xfs_buf_rele(bp, _RET_IP_);

	if (!pag) {
		ASSERT(list_empty(&bp->b_lru));
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));

	ASSERT(atomic_read(&bp->b_hold) > 0);
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
		if (!(bp->b_flags & XBF_STALE) &&
			   atomic_read(&bp->b_lru_ref)) {
			xfs_buf_lru_add(bp);
			spin_unlock(&pag->pag_buf_lock);
		} else {
			xfs_buf_lru_del(bp);
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
			xfs_buf_free(bp);
		}
	}
}


/*
 *	Lock a buffer object, if it is not already locked.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we are
 *	being asked to lock a buffer that has been reallocated. Because it is
 *	pinned, we know that the log has not been pushed to disk and hence it
 *	will still be locked.  Rather than continuing to have trylock attempts
 *	fail until someone else pushes the log, push it ourselves before
 *	returning.  This means that the xfsaild will not get stuck trying
 *	to push on stale inode buffers.
 */
int
xfs_buf_trylock(
	struct xfs_buf		*bp)
{
	int			locked;

	locked = down_trylock(&bp->b_sema) == 0;
	if (locked)
		XB_SET_OWNER(bp);
	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);

	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
}

/*
 *	Lock a buffer object.
 *
 *	If we come across a stale, pinned, locked buffer, we know that we
 *	are being asked to lock a buffer that has been reallocated. Because
 *	it is pinned, we know that the log has not been pushed to disk and
 *	hence it will still be locked. Rather than sleeping until someone
 *	else pushes the log, push it ourselves before trying to get the lock.
 */
void
xfs_buf_lock(
	struct xfs_buf		*bp)
{
	trace_xfs_buf_lock(bp, _RET_IP_);

	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);

	trace_xfs_buf_lock_done(bp, _RET_IP_);
}

/*
 *	Releases the lock on the buffer object.
 *	If the buffer is marked delwri but is not queued, do so before we
 *	unlock the buffer as we need to set flags correctly.  We also need to
 *	take a reference for the delwri queue because the unlocker is going to
 *	drop theirs and they don't know we just queued it.
 */
void
xfs_buf_unlock(
	struct xfs_buf		*bp)
{
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);
	}

	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);

	trace_xfs_buf_unlock(bp, _RET_IP_);
}

STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
{
	DECLARE_WAITQUEUE	(wait, current);

	if (atomic_read(&bp->b_pin_count) == 0)
		return;

	add_wait_queue(&bp->b_waiters, &wait);
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (atomic_read(&bp->b_pin_count) == 0)
			break;
		io_schedule();
	}
	remove_wait_queue(&bp->b_waiters, &wait);
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

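/*
 * I/O completion handling: the iodone callback either runs inline or,
 * when it must not run in the current (e.g. interrupt) context, is
 * deferred to the xfslogd workqueue.
 */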
STATIC void
xfs_buf_iodone_work(
	struct work_struct	*work)
{
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);

	if (bp->b_iodone)
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
		xfs_buf_relse(bp);
}

void
xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	trace_xfs_buf_iodone(bp, _RET_IP_);

	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;

	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
		if (schedule) {
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
		} else {
			xfs_buf_iodone_work(&bp->b_iodone_work);
		}
	} else {
		complete(&bp->b_iowait);
	}
}

void
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
{
	ASSERT(error >= 0 && error <= 0xffff);
	bp->b_error = (unsigned short)error;
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
}

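/*
 * Synchronous write of a buffer; a write failure shuts the filesystem
 * down. xfs_bdwrite() instead marks the buffer delayed-write and queues
 * it for xfsbufd to push out later.
 */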
int
xfs_bwrite(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	int			error;

	bp->b_flags |= XBF_WRITE;
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ);

	xfs_buf_delwri_dequeue(bp);
	xfs_bdstrat_cb(bp);

	error = xfs_buf_iowait(bp);
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
	xfs_buf_relse(bp);
	return error;
}

void
xfs_bdwrite(
	void			*mp,
	struct xfs_buf		*bp)
{
	trace_xfs_buf_bdwrite(bp, _RET_IP_);

	bp->b_flags &= ~XBF_READ;
	bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);

	xfs_buf_delwri_queue(bp, 1);
}

/*
 * Called when we want to stop a buffer from getting written or read.
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
	xfs_buf_ioerror(bp, EIO);

	/*
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_UNDONE(bp);
	XFS_BUF_STALE(bp);

	xfs_buf_ioend(bp, 0);

	return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
 * here ourselves, and avoiding the xfs_buf_ioend call.
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
	int64_t		fl = bp->b_flags;
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_DONE(bp);
	XFS_BUF_STALE(bp);
	bp->b_iodone = NULL;
	if (!(fl & XBF_ASYNC)) {
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
		xfs_buf_ioerror(bp, EIO);
		XFS_BUF_FINISH_IOWAIT(bp);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}


/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes thru this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);
}

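/*
 * Drop one count from b_io_remaining and run final I/O completion once
 * the last outstanding bio for the buffer has finished.
 */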
STATIC void
_xfs_buf_ioend(
	xfs_buf_t		*bp,
	int			schedule)
{
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
		xfs_buf_ioend(bp, schedule);
}

STATIC void
xfs_buf_bio_end_io(
	struct bio		*bio,
	int			error)
{
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;

	xfs_buf_ioerror(bp, -error);

	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

	_xfs_buf_ioend(bp, 1);
	bio_put(bio);
}

STATIC void
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
{
	int			rw, map_i, total_nr_pages, nr_pages;
	struct bio		*bio;
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;

	total_nr_pages = bp->b_page_count;
	map_i = 0;

	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
			rw = WRITE_SYNC;
		else
			rw = WRITE;
		if (bp->b_flags & XBF_FUA)
			rw |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			rw |= REQ_FLUSH;
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		rw = READA;
	} else {
		rw = READ;
	}

	/* we only use the buffer cache for meta-data */
	rw |= REQ_META;

next_chunk:
	atomic_inc(&bp->b_io_remaining);
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
	bio->bi_bdev = bp->b_target->bt_bdev;
	bio->bi_sector = sector;
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;

	for (; size && nr_pages; nr_pages--, map_i++) {
		int	rbytes, nbytes = PAGE_SIZE - offset;

		if (nbytes > size)
			nbytes = size;

		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_size)) {
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		xfs_buf_ioerror(bp, EIO);
		bio_put(bio);
	}
}

int
xfs_buf_iorequest(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iorequest(bp, _RET_IP_);

	if (bp->b_flags & XBF_DELWRI) {
		xfs_buf_delwri_queue(bp, 1);
		return 0;
	}

	if (bp->b_flags & XBF_WRITE) {
		xfs_buf_wait_unpin(bp);
	}

	xfs_buf_hold(bp);

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
	 * all the I/O from calling xfs_buf_ioend too early.
	 */
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);

	xfs_buf_rele(bp);
	return 0;
}

/*
 *	Waits for I/O to complete on the buffer supplied.
 *	It returns immediately if no I/O is pending.
 *	It returns the I/O error code, if any, or 0 if there was no error.
 */
int
xfs_buf_iowait(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_iowait(bp, _RET_IP_);

	wait_for_completion(&bp->b_iowait);

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
	return bp->b_error;
}

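/*
 *	Return a pointer to the data at the given byte offset in the
 *	buffer, mapping through the page array when the buffer is not
 *	virtually contiguous.
 */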
xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
	size_t			offset)
{
	struct page		*page;

	if (bp->b_flags & XBF_MAPPED)
		return bp->b_addr + offset;

	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
}

/*
 *	Move data into or out of a buffer.
 */
void
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	void			*data,	/* data address			*/
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
{
	size_t			bend, cpoff, csize;
	struct page		*page;

	bend = boff + bsize;
	while (boff < bend) {
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
		csize = min_t(size_t,
			      PAGE_SIZE-cpoff, bp->b_count_desired-boff);

		ASSERT(((csize + cpoff) <= PAGE_SIZE));

		switch (mode) {
		case XBRW_ZERO:
			memset(page_address(page) + cpoff, 0, csize);
			break;
		case XBRW_READ:
			memcpy(data, page_address(page) + cpoff, csize);
			break;
		case XBRW_WRITE:
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

/*
 *	Handling of buffer targets (buftargs).
 */

/*
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
 */
void
xfs_wait_buftarg(
	struct xfs_buftarg	*btp)
{
	struct xfs_buf		*bp;

restart:
	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
		if (atomic_read(&bp->b_hold) > 1) {
			spin_unlock(&btp->bt_lru_lock);
			delay(100);
			goto restart;
		}
		/*
		 * clear the LRU reference count so the buffer doesn't get
		 * ignored in xfs_buf_rele().
		 */
		atomic_set(&bp->b_lru_ref, 0);
		spin_unlock(&btp->bt_lru_lock);
		xfs_buf_rele(bp);
		spin_lock(&btp->bt_lru_lock);
	}
	spin_unlock(&btp->bt_lru_lock);
}

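/*
 * Memory shrinker callback: age buffers on the LRU by decrementing
 * b_lru_ref, and dispose of those whose reference has reached zero.
 */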
int
xfs_buftarg_shrink(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
	struct xfs_buf		*bp;
	int nr_to_scan = sc->nr_to_scan;
	LIST_HEAD(dispose);

	if (!nr_to_scan)
		return btp->bt_lru_nr;

	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		if (nr_to_scan-- <= 0)
			break;

		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);

		/*
		 * Decrement the b_lru_ref count unless the value is already
		 * zero. If the value is already zero, we need to reclaim the
		 * buffer, otherwise it gets another trip through the LRU.
		 */
		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			continue;
		}

		/*
		 * remove the buffer from the LRU now to avoid needing another
		 * lock round trip inside xfs_buf_rele().
		 */
		list_move(&bp->b_lru, &dispose);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return btp->bt_lru_nr;
}

void
xfs_free_buftarg(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
{
	unregister_shrinker(&btp->bt_shrinker);

	xfs_flush_buftarg(btp, 1);
	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);

	kthread_stop(btp->bt_task);
	kmem_free(btp);
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;

	if (set_blocksize(btp->bt_bdev, sectorsize)) {
		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s\n",
			sectorsize, xfs_buf_target_name(btp));
		return EINVAL;
	}

	return 0;
}

/*
 *	When allocating the initial buffer target we have not yet
 *	read in the superblock, so don't know what sized sectors
 *	are being used at this early stage.  Play safe.
 */
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

STATIC int
xfs_alloc_delwrite_queue(
	xfs_buftarg_t		*btp,
	const char		*fsname)
{
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
	spin_lock_init(&btp->bt_delwrite_lock);
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
	if (IS_ERR(btp->bt_task))
		return PTR_ERR(btp->bt_task);
	return 0;
}

xfs_buftarg_t *
xfs_alloc_buftarg(
	struct xfs_mount	*mp,
	struct block_device	*bdev,
	int			external,
	const char		*fsname)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

	btp->bt_mount = mp;
	btp->bt_dev =  bdev->bd_dev;
	btp->bt_bdev = bdev;
	btp->bt_bdi = blk_get_backing_dev_info(bdev);
	if (!btp->bt_bdi)
		goto error;

	INIT_LIST_HEAD(&btp->bt_lru);
	spin_lock_init(&btp->bt_lru_lock);
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_alloc_delwrite_queue(btp, fsname))
		goto error;
	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&btp->bt_shrinker);
	return btp;

error:
	kmem_free(btp);
	return NULL;
}


/*
 *	Delayed write buffer handling
 */
STATIC void
xfs_buf_delwri_queue(
	xfs_buf_t		*bp,
	int			unlock)
{
	struct list_head	*dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;

	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));

	spin_lock(dwlk);
	/* If already in the queue, dequeue and place at tail */
	if (!list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		if (unlock)
			atomic_dec(&bp->b_hold);
		list_del(&bp->b_list);
	}

	if (list_empty(dwq)) {
		/* start xfsbufd as it is about to have something to do */
		wake_up_process(bp->b_target->bt_task);
	}

	bp->b_flags |= _XBF_DELWRI_Q;
	list_add_tail(&bp->b_list, dwq);
	bp->b_queuetime = jiffies;
	spin_unlock(dwlk);

	if (unlock)
		xfs_buf_unlock(bp);
}

void
xfs_buf_delwri_dequeue(
	xfs_buf_t		*bp)
{
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
	int			dequeued = 0;

	spin_lock(dwlk);
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
		dequeued = 1;
	}
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
	spin_unlock(dwlk);

	if (dequeued)
		xfs_buf_rele(bp);

	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
}

/*
 * If a delwri buffer needs to be pushed before it has aged out, then promote
 * it to the head of the delwri queue so that it will be flushed on the next
 * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
 * than the age currently needed to flush the buffer. Hence the next time the
 * xfsbufd sees it is guaranteed to be considered old enough to flush.
 */
void
xfs_buf_delwri_promote(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;
	long		age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;

	ASSERT(bp->b_flags & XBF_DELWRI);
	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	/*
	 * Check the buffer age before locking the delayed write queue as we
	 * don't need to promote buffers that are already past the flush age.
	 */
	if (bp->b_queuetime < jiffies - age)
		return;
	bp->b_queuetime = jiffies - age;
	spin_lock(&btp->bt_delwrite_lock);
	list_move(&bp->b_list, &btp->bt_delwrite_queue);
	spin_unlock(&btp->bt_delwrite_lock);
}

STATIC void
xfs_buf_runall_queues(
	struct workqueue_struct	*queue)
{
	flush_workqueue(queue);
}

/*
 * Move as many buffers as specified to the supplied list
 * indicating if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfs_buf_delwri_split(
	xfs_buftarg_t	*target,
	struct list_head *list,
	unsigned long	age)
{
	xfs_buf_t	*bp, *n;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t	*dwlk = &target->bt_delwrite_lock;
	int		skipped = 0;
	int		force;

	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	INIT_LIST_HEAD(list);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) {
			if (!force &&
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}

			bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q);
			bp->b_flags |= XBF_WRITE;
			list_move_tail(&bp->b_list, list);
			trace_xfs_buf_delwri_split(bp, _RET_IP_);
		} else
			skipped++;
	}
	spin_unlock(dwlk);

	return skipped;
}

/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_bn - bp->b_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

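/*
 * The delayed write buffer daemon: wakes periodically (or when woken by
 * xfs_buf_delwri_queue()), pulls aged buffers off the delwri queue,
 * sorts them by disk address and issues the I/O under a block plug.
 */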
STATIC int
xfsbufd(
	void		*data)
{
	xfs_buftarg_t   *target = (xfs_buftarg_t *)data;

	current->flags |= PF_MEMALLOC;

	set_freezable();

	do {
		long	age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		long	tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
		struct list_head tmp;
		struct blk_plug plug;

		if (unlikely(freezing(current))) {
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
			refrigerator();
		} else {
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
		}

		/* sleep for a long time if there is nothing to do. */
		if (list_empty(&target->bt_delwrite_queue))
			tout = MAX_SCHEDULE_TIMEOUT;
		schedule_timeout_interruptible(tout);

		xfs_buf_delwri_split(target, &tmp, age);
		list_sort(NULL, &tmp, xfs_buf_cmp);

		blk_start_plug(&plug);
		while (!list_empty(&tmp)) {
			struct xfs_buf *bp;
			bp = list_first_entry(&tmp, struct xfs_buf, b_list);
			list_del_init(&bp->b_list);
			xfs_bdstrat_cb(bp);
		}
		blk_finish_plug(&plug);
	} while (!kthread_should_stop());

	return 0;
}

/*
 *	Go through all incore buffers, and release buffers if they belong to
 *	the given device. This is used in filesystem error handling to
 *	preserve the consistency of its metadata.
 */
int
xfs_flush_buftarg(
	xfs_buftarg_t	*target,
	int		wait)
{
	xfs_buf_t	*bp;
	int		pincount = 0;
	LIST_HEAD(tmp_list);
	LIST_HEAD(wait_list);
	struct blk_plug plug;

	xfs_buf_runall_queues(xfsconvertd_workqueue);
	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);

	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	pincount = xfs_buf_delwri_split(target, &tmp_list, 0);

	/*
	 * Dropped the delayed write list lock, now walk the temporary list.
	 * All I/O is issued async and then if we need to wait for completion
	 * we do that after issuing all the IO.
	 */
	list_sort(NULL, &tmp_list, xfs_buf_cmp);

	blk_start_plug(&plug);
	while (!list_empty(&tmp_list)) {
		bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
		ASSERT(target == bp->b_target);
		list_del_init(&bp->b_list);
		if (wait) {
			bp->b_flags &= ~XBF_ASYNC;
			list_add(&bp->b_list, &wait_list);
		}
		xfs_bdstrat_cb(bp);
	}
	blk_finish_plug(&plug);

	if (wait) {
		/* Wait for IO to complete. */
		while (!list_empty(&wait_list)) {
			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);

			list_del_init(&bp->b_list);
			xfs_buf_iowait(bp);
			xfs_buf_relse(bp);
		}
	}

	return pincount;
}

int __init
xfs_buf_init(void)
{
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
	if (!xfs_buf_zone)
		goto out;

	xfslogd_workqueue = alloc_workqueue("xfslogd",
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
	if (!xfslogd_workqueue)
		goto out_free_buf_zone;

	xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1);
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;

	xfsconvertd_workqueue = alloc_workqueue("xfsconvertd",
						WQ_MEM_RECLAIM, 1);
	if (!xfsconvertd_workqueue)
		goto out_destroy_xfsdatad_workqueue;

	return 0;

 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
	kmem_zone_destroy(xfs_buf_zone);
 out:
	return -ENOMEM;
}

void
xfs_buf_terminate(void)
{
	destroy_workqueue(xfsconvertd_workqueue);
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
	kmem_zone_destroy(xfs_buf_zone);
}

#ifdef CONFIG_KDB_MODULES
struct list_head *
xfs_get_buftarg_list(void)
{
	return &xfs_buftarg_list;
}
#endif