/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
#include <linux/kthread.h>
#include <linux/migrate.h>
#include <linux/backing-dev.h>
#include <linux/freezer.h>

#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_trace.h"

static kmem_zone_t *xfs_buf_zone;
STATIC int xfsbufd(void *);
STATIC void xfs_buf_delwri_queue(xfs_buf_t *);

static struct workqueue_struct *xfslogd_workqueue;
struct workqueue_struct *xfsdatad_workqueue;
struct workqueue_struct *xfsconvertd_workqueue;

#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
#else
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
#endif

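/*
 * Map xfs_buf allocation flags onto the gfp/kmem flags used for the
 * backing memory allocations.
 */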
#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)

#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)

#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));

static inline int
xfs_buf_is_vmapped(
	struct xfs_buf	*bp)
{
	/*
	 * Return true if the buffer is vmapped.
	 *
	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
	 * code is clever enough to know it doesn't have to map a single page,
	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
	 */
	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
}

static inline int
xfs_buf_vmap_len(
	struct xfs_buf	*bp)
{
	return (bp->b_page_count * PAGE_SIZE) - bp->b_offset;
}

/*
 * xfs_buf_lru_add - add a buffer to the LRU.
 *
 * The LRU takes a new reference to the buffer so that it will only be freed
 * once the shrinker takes the buffer off the LRU.
 */
STATIC void
xfs_buf_lru_add(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	spin_lock(&btp->bt_lru_lock);
	if (list_empty(&bp->b_lru)) {
		atomic_inc(&bp->b_hold);
		list_add_tail(&bp->b_lru, &btp->bt_lru);
		btp->bt_lru_nr++;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * xfs_buf_lru_del - remove a buffer from the LRU
 *
 * The unlocked check is safe here because it only occurs when there are not
 * b_lru_ref counts left on the buffer under the pag->pag_buf_lock. It is there
 * to optimise the shrinker removing the buffer from the LRU and calling
 * xfs_buf_free(). i.e. it removes an unnecessary round trip on the
 * bt_lru_lock.
 */
STATIC void
xfs_buf_lru_del(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;

	if (list_empty(&bp->b_lru))
		return;

	spin_lock(&btp->bt_lru_lock);
	if (!list_empty(&bp->b_lru)) {
		list_del_init(&bp->b_lru);
		btp->bt_lru_nr--;
	}
	spin_unlock(&btp->bt_lru_lock);
}

/*
 * When we mark a buffer stale, we remove the buffer from the LRU and clear the
 * b_lru_ref count so that the buffer is freed immediately when the buffer
 * reference count falls to zero. If the buffer is already on the LRU, we need
 * to remove the reference that LRU holds on the buffer.
 *
 * This prevents build-up of stale buffers on the LRU.
 */
void
xfs_buf_stale(
	struct xfs_buf	*bp)
{
	bp->b_flags |= XBF_STALE;
	atomic_set(&(bp)->b_lru_ref, 0);
	if (!list_empty(&bp->b_lru)) {
		struct xfs_buftarg *btp = bp->b_target;

		spin_lock(&btp->bt_lru_lock);
		if (!list_empty(&bp->b_lru)) {
			list_del_init(&bp->b_lru);
			btp->bt_lru_nr--;
			atomic_dec(&bp->b_hold);
		}
		spin_unlock(&btp->bt_lru_lock);
	}
	ASSERT(atomic_read(&bp->b_hold) >= 1);
}

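/*
 * Initialise a newly allocated buffer for the given target and disk range.
 */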
STATIC void
_xfs_buf_initialize(
	xfs_buf_t		*bp,
	xfs_buftarg_t		*target,
	xfs_off_t		range_base,
	size_t			range_length,
	xfs_buf_flags_t		flags)
{
	/*
	 * We don't want certain flags to appear in b_flags.
	 */
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
	atomic_set(&bp->b_lru_ref, 1);
	init_completion(&bp->b_iowait);
	INIT_LIST_HEAD(&bp->b_lru);
	INIT_LIST_HEAD(&bp->b_list);
	RB_CLEAR_NODE(&bp->b_rbnode);
	sema_init(&bp->b_sema, 0); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);

	trace_xfs_buf_init(bp, _RET_IP_);
}

/*
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
 */
STATIC int
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
	int			page_count,
	xfs_buf_flags_t		flags)
{
	/* Make sure that we have a page list */
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
		} else {
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
				return -ENOMEM;
		}
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
	}
	return 0;
}

/*
 *	Frees b_pages if it was allocated.
 */
STATIC void
_xfs_buf_free_pages(
	xfs_buf_t	*bp)
{
	if (bp->b_pages != bp->b_page_array) {
		kmem_free(bp->b_pages);
		bp->b_pages = NULL;
	}
}

/*
 *	Releases the specified buffer.
 *
 * 	The modification state of any associated pages is left unchanged.
 * 	The buffer must not be on any hash - use xfs_buf_rele instead for
 * 	hashed and refcounted buffers
 */
void
xfs_buf_free(
	xfs_buf_t		*bp)
{
	trace_xfs_buf_free(bp, _RET_IP_);

	ASSERT(list_empty(&bp->b_lru));

	if (bp->b_flags & _XBF_PAGES) {
		uint		i;

		if (xfs_buf_is_vmapped(bp))
			vm_unmap_ram(bp->b_addr - bp->b_offset,
					bp->b_page_count);

		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

			__free_page(page);
		}
	} else if (bp->b_flags & _XBF_KMEM)
		kmem_free(bp->b_addr);
	_xfs_buf_free_pages(bp);
	xfs_buf_deallocate(bp);
}

/*
 * Allocates all the pages for the buffer in question and builds its page list.
L
Linus Torvalds 已提交
285 286
 */
STATIC int
287
xfs_buf_allocate_memory(
L
Linus Torvalds 已提交
288 289 290
	xfs_buf_t		*bp,
	uint			flags)
{
291
	size_t			size = bp->b_count_desired;
L
Linus Torvalds 已提交
292
	size_t			nbytes, offset;
293
	gfp_t			gfp_mask = xb_to_gfp(flags);
L
Linus Torvalds 已提交
294
	unsigned short		page_count, i;
295
	xfs_off_t		end;
L
Linus Torvalds 已提交
296 297
	int			error;

298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326
	/*
	 * for buffers that are contained within a single page, just allocate
	 * the memory from the heap - there's no need for the complexity of
	 * page arrays to keep allocation down to order 0.
	 */
	if (bp->b_buffer_length < PAGE_SIZE) {
		bp->b_addr = kmem_alloc(bp->b_buffer_length, xb_to_km(flags));
		if (!bp->b_addr) {
			/* low memory - use alloc_page loop instead */
			goto use_alloc_page;
		}

		if (((unsigned long)(bp->b_addr + bp->b_buffer_length - 1) &
								PAGE_MASK) !=
		    ((unsigned long)bp->b_addr & PAGE_MASK)) {
			/* b_addr spans two pages - use alloc_page instead */
			kmem_free(bp->b_addr);
			bp->b_addr = NULL;
			goto use_alloc_page;
		}
		bp->b_offset = offset_in_page(bp->b_addr);
		bp->b_pages = bp->b_page_array;
		bp->b_pages[0] = virt_to_page(bp->b_addr);
		bp->b_page_count = 1;
		bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
		return 0;
	}

use_alloc_page:
327 328 329
	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
	error = _xfs_buf_get_pages(bp, page_count, flags);
L
Linus Torvalds 已提交
330 331 332
	if (unlikely(error))
		return error;

333
	offset = bp->b_offset;
334
	bp->b_flags |= _XBF_PAGES;
L
Linus Torvalds 已提交
335

336
	for (i = 0; i < bp->b_page_count; i++) {
L
Linus Torvalds 已提交
337 338
		struct page	*page;
		uint		retries = 0;
339 340
retry:
		page = alloc_page(gfp_mask);
L
Linus Torvalds 已提交
341
		if (unlikely(page == NULL)) {
342 343
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
344 345
				error = ENOMEM;
				goto out_free_pages;
L
Linus Torvalds 已提交
346 347 348 349 350 351 352 353 354
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
355 356
				xfs_err(NULL,
		"possible memory allocation deadlock in %s (mode:0x%x)",
357
					__func__, gfp_mask);
L
Linus Torvalds 已提交
358

359
			XFS_STATS_INC(xb_page_retries);
360
			congestion_wait(BLK_RW_ASYNC, HZ/50);
L
Linus Torvalds 已提交
361 362 363
			goto retry;
		}

364
		XFS_STATS_INC(xb_page_found);
L
Linus Torvalds 已提交
365

366
		nbytes = min_t(size_t, size, PAGE_SIZE - offset);
L
Linus Torvalds 已提交
367
		size -= nbytes;
368
		bp->b_pages[i] = page;
L
Linus Torvalds 已提交
369 370
		offset = 0;
	}
371
	return 0;
L
Linus Torvalds 已提交
372

373 374 375
out_free_pages:
	for (i = 0; i < bp->b_page_count; i++)
		__free_page(bp->b_pages[i]);
L
Linus Torvalds 已提交
376 377 378 379
	return error;
}

/*
L
Lucas De Marchi 已提交
380
 *	Map buffer into kernel address-space if necessary.
L
Linus Torvalds 已提交
381 382
 */
STATIC int
383
_xfs_buf_map_pages(
L
Linus Torvalds 已提交
384 385 386
	xfs_buf_t		*bp,
	uint			flags)
{
387
	ASSERT(bp->b_flags & _XBF_PAGES);
388
	if (bp->b_page_count == 1) {
389
		/* A single page buffer is always mappable */
390 391 392
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
393 394 395 396 397 398 399 400 401 402 403
		int retried = 0;

		do {
			bp->b_addr = vm_map_ram(bp->b_pages, bp->b_page_count,
						-1, PAGE_KERNEL);
			if (bp->b_addr)
				break;
			vm_unmap_aliases();
		} while (retried++ <= 1);

		if (!bp->b_addr)
L
Linus Torvalds 已提交
404
			return -ENOMEM;
405 406
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
L
Linus Torvalds 已提交
407 408 409 410 411 412 413 414 415 416
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */

/*
 *	Looks up, and creates if absent, a lockable buffer for
L
Linus Torvalds 已提交
418 419 420 421 422 423 424
 *	a given range of an inode.  The buffer is returned
 *	locked.	 If other overlapping buffers exist, they are
 *	released before the new buffer is created and locked,
 *	which may imply that this call will block until those buffers
 *	are unlocked.  No I/O is implied by this call.
 */
xfs_buf_t *
425
_xfs_buf_find(
L
Linus Torvalds 已提交
426
	xfs_buftarg_t		*btp,	/* block device target		*/
427
	xfs_off_t		ioff,	/* starting offset of range	*/
L
Linus Torvalds 已提交
428
	size_t			isize,	/* length of range		*/
429 430
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
L
Linus Torvalds 已提交
431
{
432
	xfs_off_t		range_base;
L
Linus Torvalds 已提交
433
	size_t			range_length;
434 435 436 437
	struct xfs_perag	*pag;
	struct rb_node		**rbp;
	struct rb_node		*parent;
	xfs_buf_t		*bp;
L
Linus Torvalds 已提交
438 439 440 441 442

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
443
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
444
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
L
Linus Torvalds 已提交
445

446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476
	/* get tree root */
	pag = xfs_perag_get(btp->bt_mount,
				xfs_daddr_to_agno(btp->bt_mount, ioff));

	/* walk tree */
	spin_lock(&pag->pag_buf_lock);
	rbp = &pag->pag_buf_tree.rb_node;
	parent = NULL;
	bp = NULL;
	while (*rbp) {
		parent = *rbp;
		bp = rb_entry(parent, struct xfs_buf, b_rbnode);

		if (range_base < bp->b_file_offset)
			rbp = &(*rbp)->rb_left;
		else if (range_base > bp->b_file_offset)
			rbp = &(*rbp)->rb_right;
		else {
			/*
			 * found a block offset match. If the range doesn't
			 * match, the only way this is allowed is if the buffer
			 * in the cache is stale and the transaction that made
			 * it stale has not yet committed. i.e. we are
			 * reallocating a busy extent. Skip this buffer and
			 * continue searching to the right for an exact match.
			 */
			if (bp->b_buffer_length != range_length) {
				ASSERT(bp->b_flags & XBF_STALE);
				rbp = &(*rbp)->rb_right;
				continue;
			}
477
			atomic_inc(&bp->b_hold);
L
Linus Torvalds 已提交
478 479 480 481 482
			goto found;
		}
	}

	/* No match found */
483 484
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
L
Linus Torvalds 已提交
485
				range_length, flags);
486 487 488 489 490
		rb_link_node(&new_bp->b_rbnode, parent, rbp);
		rb_insert_color(&new_bp->b_rbnode, &pag->pag_buf_tree);
		/* the buffer keeps the perag reference until it is freed */
		new_bp->b_pag = pag;
		spin_unlock(&pag->pag_buf_lock);
L
Linus Torvalds 已提交
491
	} else {
492
		XFS_STATS_INC(xb_miss_locked);
493 494
		spin_unlock(&pag->pag_buf_lock);
		xfs_perag_put(pag);
L
Linus Torvalds 已提交
495
	}
496
	return new_bp;
L
Linus Torvalds 已提交
497 498

found:
499 500
	spin_unlock(&pag->pag_buf_lock);
	xfs_perag_put(pag);
L
Linus Torvalds 已提交
501

502 503
	if (!xfs_buf_trylock(bp)) {
		if (flags & XBF_TRYLOCK) {
504 505 506
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
L
Linus Torvalds 已提交
507
		}
508 509
		xfs_buf_lock(bp);
		XFS_STATS_INC(xb_get_locked_waited);
L
Linus Torvalds 已提交
510 511
	}

512 513 514 515 516
	/*
	 * if the buffer is stale, clear all the external state associated with
	 * it. We need to keep flags such as how we allocated the buffer memory
	 * intact here.
	 */
517 518
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
519
		bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
520
	}
C
Christoph Hellwig 已提交
521 522

	trace_xfs_buf_find(bp, flags, _RET_IP_);
523 524
	XFS_STATS_INC(xb_get_locked);
	return bp;
L
Linus Torvalds 已提交
525 526 527
}

/*
528
 *	Assembles a buffer covering the specified range.
L
Linus Torvalds 已提交
529 530 531 532
 *	Storage in memory for all portions of the buffer will be allocated,
 *	although backing storage may not be.
 */
xfs_buf_t *
533
xfs_buf_get(
L
Linus Torvalds 已提交
534
	xfs_buftarg_t		*target,/* target for buffer		*/
535
	xfs_off_t		ioff,	/* starting offset of range	*/
L
Linus Torvalds 已提交
536
	size_t			isize,	/* length of range		*/
537
	xfs_buf_flags_t		flags)
L
Linus Torvalds 已提交
538
{
539
	xfs_buf_t		*bp, *new_bp;
540
	int			error = 0;
L
Linus Torvalds 已提交
541

542 543
	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
L
Linus Torvalds 已提交
544 545
		return NULL;

546 547
	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
548
		error = xfs_buf_allocate_memory(bp, flags);
L
Linus Torvalds 已提交
549 550 551
		if (error)
			goto no_buffer;
	} else {
552 553
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
L
Linus Torvalds 已提交
554 555 556
			return NULL;
	}

557 558
	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
L
Linus Torvalds 已提交
559
		if (unlikely(error)) {
560 561
			xfs_warn(target->bt_mount,
				"%s: failed to map pages\n", __func__);
L
Linus Torvalds 已提交
562 563 564 565
			goto no_buffer;
		}
	}

566
	XFS_STATS_INC(xb_get);
L
Linus Torvalds 已提交
567 568 569 570 571

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
572 573
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;
L
Linus Torvalds 已提交
574

C
Christoph Hellwig 已提交
575
	trace_xfs_buf_get(bp, flags, _RET_IP_);
576
	return bp;
L
Linus Torvalds 已提交
577 578

 no_buffer:
579 580 581
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
L
Linus Torvalds 已提交
582 583 584
	return NULL;
}

C
Christoph Hellwig 已提交
585 586 587 588 589 590 591 592 593 594
STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status;

	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

595 596
	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | XBF_READ_AHEAD);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | XBF_READ_AHEAD);
C
Christoph Hellwig 已提交
597 598

	status = xfs_buf_iorequest(bp);
599
	if (status || bp->b_error || (flags & XBF_ASYNC))
600 601
		return status;
	return xfs_buf_iowait(bp);
C
Christoph Hellwig 已提交
602 603
}

L
Linus Torvalds 已提交
604
xfs_buf_t *
605
xfs_buf_read(
L
Linus Torvalds 已提交
606
	xfs_buftarg_t		*target,
607
	xfs_off_t		ioff,
L
Linus Torvalds 已提交
608
	size_t			isize,
609
	xfs_buf_flags_t		flags)
L
Linus Torvalds 已提交
610
{
611 612 613 614
	xfs_buf_t		*bp;

	flags |= XBF_READ;

615
	bp = xfs_buf_get(target, ioff, isize, flags);
616
	if (bp) {
C
Christoph Hellwig 已提交
617 618
		trace_xfs_buf_read(bp, flags, _RET_IP_);

619 620
		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
C
Christoph Hellwig 已提交
621
			_xfs_buf_read(bp, flags);
622
		} else if (flags & XBF_ASYNC) {
L
Linus Torvalds 已提交
623 624 625 626 627 628 629
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			/* We do not want read in the flags */
630
			bp->b_flags &= ~XBF_READ;
L
Linus Torvalds 已提交
631 632 633
		}
	}

634
	return bp;
L
Linus Torvalds 已提交
635 636

 no_buffer:
637 638 639
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
L
Linus Torvalds 已提交
640 641 642 643
	return NULL;
}

/*
644 645
 *	If we are not low on memory then do the readahead in a deadlock
 *	safe manner.
L
Linus Torvalds 已提交
646 647
 */
void
648
xfs_buf_readahead(
L
Linus Torvalds 已提交
649
	xfs_buftarg_t		*target,
650
	xfs_off_t		ioff,
C
Christoph Hellwig 已提交
651
	size_t			isize)
L
Linus Torvalds 已提交
652
{
653
	if (bdi_read_congested(target->bt_bdi))
L
Linus Torvalds 已提交
654 655
		return;

C
Christoph Hellwig 已提交
656 657
	xfs_buf_read(target, ioff, isize,
		     XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD|XBF_DONT_BLOCK);
L
Linus Torvalds 已提交
658 659
}

660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683
/*
 * Read an uncached buffer from disk. Allocates and returns a locked
 * buffer containing the disk contents or nothing.
 */
struct xfs_buf *
xfs_buf_read_uncached(
	struct xfs_mount	*mp,
	struct xfs_buftarg	*target,
	xfs_daddr_t		daddr,
	size_t			length,
	int			flags)
{
	xfs_buf_t		*bp;
	int			error;

	bp = xfs_buf_get_uncached(target, length, flags);
	if (!bp)
		return NULL;

	/* set up the buffer for a read IO */
	XFS_BUF_SET_ADDR(bp, daddr);
	XFS_BUF_READ(bp);

	xfsbdstrat(mp, bp);
C
Christoph Hellwig 已提交
684
	error = xfs_buf_iowait(bp);
685 686 687 688 689
	if (error || bp->b_error) {
		xfs_buf_relse(bp);
		return NULL;
	}
	return bp;
L
Linus Torvalds 已提交
690 691 692
}

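/*
 * Allocate a buffer of the given length that is not backed by pages and is
 * not yet associated with a disk address.
 */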
xfs_buf_t *
693
xfs_buf_get_empty(
L
Linus Torvalds 已提交
694 695 696
	size_t			len,
	xfs_buftarg_t		*target)
{
697
	xfs_buf_t		*bp;
L
Linus Torvalds 已提交
698

699 700 701 702
	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
L
Linus Torvalds 已提交
703 704
}

705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725
/*
 * Return a buffer allocated as an empty buffer and associated with external
 * memory via xfs_buf_associate_memory() back to its empty state.
 */
void
xfs_buf_set_empty(
	struct xfs_buf		*bp,
	size_t			len)
{
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);

	bp->b_pages = NULL;
	bp->b_page_count = 0;
	bp->b_addr = NULL;
	bp->b_file_offset = 0;
	bp->b_buffer_length = bp->b_count_desired = len;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	bp->b_flags &= ~XBF_MAPPED;
}

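/*
 * Return the page backing a kernel address, whether it lives in the linear
 * mapping or in vmalloc space.
 */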
L
Linus Torvalds 已提交
726 727 728 729
static inline struct page *
mem_to_page(
	void			*addr)
{
730
	if (!is_vmalloc_addr(addr)) {
L
Linus Torvalds 已提交
731 732 733 734 735 736 737
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
738 739
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
L
Linus Torvalds 已提交
740 741 742 743 744
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
745 746 747
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
L
Linus Torvalds 已提交
748 749
	int			page_count;

750
	pageaddr = (unsigned long)mem & PAGE_MASK;
751
	offset = (unsigned long)mem - pageaddr;
752 753
	buflen = PAGE_ALIGN(len + offset);
	page_count = buflen >> PAGE_SHIFT;
L
Linus Torvalds 已提交
754 755

	/* Free any previous set of page pointers */
756 757
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);
L
Linus Torvalds 已提交
758

759 760
	bp->b_pages = NULL;
	bp->b_addr = mem;
L
Linus Torvalds 已提交
761

762
	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
L
Linus Torvalds 已提交
763 764 765
	if (rval)
		return rval;

766
	bp->b_offset = offset;
767 768 769

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
770
		pageaddr += PAGE_SIZE;
L
Linus Torvalds 已提交
771 772
	}

773 774
	bp->b_count_desired = len;
	bp->b_buffer_length = buflen;
775
	bp->b_flags |= XBF_MAPPED;
L
Linus Torvalds 已提交
776 777 778 779 780

	return 0;
}

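/*
 * Allocate a page-backed, mapped buffer that is not hashed into the buffer
 * cache and has no disk address, for uncached I/O.
 */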
xfs_buf_t *
781 782
xfs_buf_get_uncached(
	struct xfs_buftarg	*target,
L
Linus Torvalds 已提交
783
	size_t			len,
784
	int			flags)
L
Linus Torvalds 已提交
785
{
786 787
	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int			error, i;
L
Linus Torvalds 已提交
788 789
	xfs_buf_t		*bp;

790
	bp = xfs_buf_allocate(0);
L
Linus Torvalds 已提交
791 792
	if (unlikely(bp == NULL))
		goto fail;
793
	_xfs_buf_initialize(bp, target, 0, len, 0);
L
Linus Torvalds 已提交
794

795 796
	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
L
Linus Torvalds 已提交
797 798
		goto fail_free_buf;

799
	for (i = 0; i < page_count; i++) {
800
		bp->b_pages[i] = alloc_page(xb_to_gfp(flags));
801 802
		if (!bp->b_pages[i])
			goto fail_free_mem;
L
Linus Torvalds 已提交
803
	}
804
	bp->b_flags |= _XBF_PAGES;
L
Linus Torvalds 已提交
805

806 807
	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
808 809
		xfs_warn(target->bt_mount,
			"%s: failed to map pages\n", __func__);
L
Linus Torvalds 已提交
810
		goto fail_free_mem;
811
	}
L
Linus Torvalds 已提交
812

813
	trace_xfs_buf_get_uncached(bp, _RET_IP_);
L
Linus Torvalds 已提交
814
	return bp;
815

L
Linus Torvalds 已提交
816
 fail_free_mem:
817 818
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
819
	_xfs_buf_free_pages(bp);
L
Linus Torvalds 已提交
820
 fail_free_buf:
821
	xfs_buf_deallocate(bp);
L
Linus Torvalds 已提交
822 823 824 825 826 827 828 829 830 831
 fail:
	return NULL;
}

/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
832 833
xfs_buf_hold(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
834
{
C
Christoph Hellwig 已提交
835
	trace_xfs_buf_hold(bp, _RET_IP_);
836
	atomic_inc(&bp->b_hold);
L
Linus Torvalds 已提交
837 838 839
}

/*
 *	Releases a hold on the specified buffer.  If the
 *	hold count is 1, calls xfs_buf_free.
L
Linus Torvalds 已提交
842 843
 */
void
844 845
xfs_buf_rele(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
846
{
847
	struct xfs_perag	*pag = bp->b_pag;
L
Linus Torvalds 已提交
848

C
Christoph Hellwig 已提交
849
	trace_xfs_buf_rele(bp, _RET_IP_);
L
Linus Torvalds 已提交
850

851
	if (!pag) {
852
		ASSERT(list_empty(&bp->b_lru));
853
		ASSERT(RB_EMPTY_NODE(&bp->b_rbnode));
854 855 856 857 858
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

859
	ASSERT(!RB_EMPTY_NODE(&bp->b_rbnode));
860

861
	ASSERT(atomic_read(&bp->b_hold) > 0);
862
	if (atomic_dec_and_lock(&bp->b_hold, &pag->pag_buf_lock)) {
863
		if (!(bp->b_flags & XBF_STALE) &&
864 865 866
			   atomic_read(&bp->b_lru_ref)) {
			xfs_buf_lru_add(bp);
			spin_unlock(&pag->pag_buf_lock);
L
Linus Torvalds 已提交
867
		} else {
868
			xfs_buf_lru_del(bp);
869
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
870 871 872
			rb_erase(&bp->b_rbnode, &pag->pag_buf_tree);
			spin_unlock(&pag->pag_buf_lock);
			xfs_perag_put(pag);
873
			xfs_buf_free(bp);
L
Linus Torvalds 已提交
874 875 876 877 878 879
		}
	}
}


/*
880
 *	Lock a buffer object, if it is not already locked.
881 882 883 884 885 886 887 888
 *
 *	If we come across a stale, pinned, locked buffer, we know that we are
 *	being asked to lock a buffer that has been reallocated. Because it is
 *	pinned, we know that the log has not been pushed to disk and hence it
 *	will still be locked.  Rather than continuing to have trylock attempts
 *	fail until someone else pushes the log, push it ourselves before
 *	returning.  This means that the xfsaild will not get stuck trying
 *	to push on stale inode buffers.
L
Linus Torvalds 已提交
889 890
 */
int
891 892
xfs_buf_trylock(
	struct xfs_buf		*bp)
L
Linus Torvalds 已提交
893 894 895
{
	int			locked;

896
	locked = down_trylock(&bp->b_sema) == 0;
C
Christoph Hellwig 已提交
897
	if (locked)
898
		XB_SET_OWNER(bp);
899 900
	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
		xfs_log_force(bp->b_target->bt_mount, 0);
C
Christoph Hellwig 已提交
901

902 903
	trace_xfs_buf_trylock(bp, _RET_IP_);
	return locked;
L
Linus Torvalds 已提交
904 905 906
}

/*
907
 *	Lock a buffer object.
908 909 910 911 912 913
 *
 *	If we come across a stale, pinned, locked buffer, we know that we
 *	are being asked to lock a buffer that has been reallocated. Because
 *	it is pinned, we know that the log has not been pushed to disk and
 *	hence it will still be locked. Rather than sleeping until someone
 *	else pushes the log, push it ourselves before trying to get the lock.
L
Linus Torvalds 已提交
914
 */
915 916
void
xfs_buf_lock(
917
	struct xfs_buf		*bp)
L
Linus Torvalds 已提交
918
{
C
Christoph Hellwig 已提交
919 920
	trace_xfs_buf_lock(bp, _RET_IP_);

921
	if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
922
		xfs_log_force(bp->b_target->bt_mount, 0);
923 924
	down(&bp->b_sema);
	XB_SET_OWNER(bp);
C
Christoph Hellwig 已提交
925 926

	trace_xfs_buf_lock_done(bp, _RET_IP_);
L
Linus Torvalds 已提交
927 928 929
}

/*
930
 *	Releases the lock on the buffer object.
931
 *	If the buffer is marked delwri but is not queued, do so before we
932
 *	unlock the buffer as we need to set flags correctly.  We also need to
 *	take a reference for the delwri queue because the unlocker is going to
 *	drop theirs and they don't know we just queued it.
L
Linus Torvalds 已提交
935 936
 */
void
937
xfs_buf_unlock(
938
	struct xfs_buf		*bp)
L
Linus Torvalds 已提交
939
{
940
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI)
941
		xfs_buf_delwri_queue(bp);
942

943 944
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);
C
Christoph Hellwig 已提交
945 946

	trace_xfs_buf_unlock(bp, _RET_IP_);
L
Linus Torvalds 已提交
947 948
}

949 950 951
STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
952 953 954
{
	DECLARE_WAITQUEUE	(wait, current);

955
	if (atomic_read(&bp->b_pin_count) == 0)
L
Linus Torvalds 已提交
956 957
		return;

958
	add_wait_queue(&bp->b_waiters, &wait);
L
Linus Torvalds 已提交
959 960
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
961
		if (atomic_read(&bp->b_pin_count) == 0)
L
Linus Torvalds 已提交
962
			break;
J
Jens Axboe 已提交
963
		io_schedule();
L
Linus Torvalds 已提交
964
	}
965
	remove_wait_queue(&bp->b_waiters, &wait);
L
Linus Torvalds 已提交
966 967 968 969 970 971 972 973
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

STATIC void
974
xfs_buf_iodone_work(
D
David Howells 已提交
975
	struct work_struct	*work)
L
Linus Torvalds 已提交
976
{
D
David Howells 已提交
977 978
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);
L
Linus Torvalds 已提交
979

980
	if (bp->b_iodone)
981 982
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
L
Linus Torvalds 已提交
983 984 985 986
		xfs_buf_relse(bp);
}

void
987 988
xfs_buf_ioend(
	xfs_buf_t		*bp,
L
Linus Torvalds 已提交
989 990
	int			schedule)
{
C
Christoph Hellwig 已提交
991 992
	trace_xfs_buf_iodone(bp, _RET_IP_);

993
	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
994 995
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;
L
Linus Torvalds 已提交
996

997
	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
L
Linus Torvalds 已提交
998
		if (schedule) {
D
David Howells 已提交
999
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1000
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
L
Linus Torvalds 已提交
1001
		} else {
D
David Howells 已提交
1002
			xfs_buf_iodone_work(&bp->b_iodone_work);
L
Linus Torvalds 已提交
1003 1004
		}
	} else {
1005
		complete(&bp->b_iowait);
L
Linus Torvalds 已提交
1006 1007 1008 1009
	}
}

void
1010 1011 1012
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
L
Linus Torvalds 已提交
1013 1014
{
	ASSERT(error >= 0 && error <= 0xffff);
1015
	bp->b_error = (unsigned short)error;
C
Christoph Hellwig 已提交
1016
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
L
Linus Torvalds 已提交
1017 1018 1019
}

int
C
Christoph Hellwig 已提交
1020 1021
xfs_bwrite(
	struct xfs_mount	*mp,
C
Christoph Hellwig 已提交
1022
	struct xfs_buf		*bp)
L
Linus Torvalds 已提交
1023
{
1024
	int			error;
L
Linus Torvalds 已提交
1025

C
Christoph Hellwig 已提交
1026
	bp->b_flags |= XBF_WRITE;
1027
	bp->b_flags &= ~(XBF_ASYNC | XBF_READ);
L
Linus Torvalds 已提交
1028

C
Christoph Hellwig 已提交
1029
	xfs_buf_delwri_dequeue(bp);
1030
	xfs_bdstrat_cb(bp);
L
Linus Torvalds 已提交
1031

1032 1033 1034 1035
	error = xfs_buf_iowait(bp);
	if (error)
		xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
	xfs_buf_relse(bp);
C
Christoph Hellwig 已提交
1036
	return error;
C
Christoph Hellwig 已提交
1037
}
L
Linus Torvalds 已提交
1038

C
Christoph Hellwig 已提交
1039 1040 1041 1042 1043
void
xfs_bdwrite(
	void			*mp,
	struct xfs_buf		*bp)
{
C
Christoph Hellwig 已提交
1044
	trace_xfs_buf_bdwrite(bp, _RET_IP_);
L
Linus Torvalds 已提交
1045

1046
	xfs_buf_delwri_queue(bp);
1047
	xfs_buf_relse(bp);
L
Linus Torvalds 已提交
1048 1049
}

1050 1051
/*
 * Called when we want to stop a buffer from getting written or read.
C
Christoph Hellwig 已提交
1052
 * We attach the EIO error, muck with its flags, and call xfs_buf_ioend
1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065
 * so that the proper iodone callbacks get called.
 */
STATIC int
xfs_bioerror(
	xfs_buf_t *bp)
{
#ifdef XFSERRORDEBUG
	ASSERT(XFS_BUF_ISREAD(bp) || bp->b_iodone);
#endif

	/*
	 * No need to wait until the buffer is unpinned, we aren't flushing it.
	 */
1066
	xfs_buf_ioerror(bp, EIO);
1067 1068

	/*
C
Christoph Hellwig 已提交
1069
	 * We're calling xfs_buf_ioend, so delete XBF_DONE flag.
1070 1071 1072 1073 1074 1075
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_UNDONE(bp);
	XFS_BUF_STALE(bp);

C
Christoph Hellwig 已提交
1076
	xfs_buf_ioend(bp, 0);
1077 1078 1079 1080 1081 1082

	return EIO;
}

/*
 * Same as xfs_bioerror, except that we are releasing the buffer
C
Christoph Hellwig 已提交
1083
 * here ourselves, and avoiding the xfs_buf_ioend call.
1084 1085 1086 1087 1088 1089 1090
 * This is meant for userdata errors; metadata bufs come with
 * iodone functions attached, so that we can track down errors.
 */
STATIC int
xfs_bioerror_relse(
	struct xfs_buf	*bp)
{
1091
	int64_t		fl = bp->b_flags;
1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103
	/*
	 * No need to wait until the buffer is unpinned.
	 * We aren't flushing it.
	 *
	 * chunkhold expects B_DONE to be set, whether
	 * we actually finish the I/O or not. We don't want to
	 * change that interface.
	 */
	XFS_BUF_UNREAD(bp);
	XFS_BUF_UNDELAYWRITE(bp);
	XFS_BUF_DONE(bp);
	XFS_BUF_STALE(bp);
1104
	bp->b_iodone = NULL;
1105
	if (!(fl & XBF_ASYNC)) {
1106 1107 1108 1109 1110 1111
		/*
		 * Mark b_error and B_ERROR _both_.
		 * Lots of chunkcache code assumes that.
		 * There's no reason to mark error for
		 * ASYNC buffers.
		 */
1112
		xfs_buf_ioerror(bp, EIO);
1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131
		XFS_BUF_FINISH_IOWAIT(bp);
	} else {
		xfs_buf_relse(bp);
	}

	return EIO;
}


/*
 * All xfs metadata buffers except log state machine buffers
 * get this attached as their b_bdstrat callback function.
 * This is so that we can catch a buffer
 * after prematurely unpinning it to forcibly shutdown the filesystem.
 */
int
xfs_bdstrat_cb(
	struct xfs_buf	*bp)
{
1132
	if (XFS_FORCED_SHUTDOWN(bp->b_target->bt_mount)) {
1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		/*
		 * Metadata write that didn't get logged but
		 * written delayed anyway. These aren't associated
		 * with a transaction, and can be ignored.
		 */
		if (!bp->b_iodone && !XFS_BUF_ISREAD(bp))
			return xfs_bioerror_relse(bp);
		else
			return xfs_bioerror(bp);
	}

	xfs_buf_iorequest(bp);
	return 0;
}

/*
 * Wrapper around bdstrat so that we can stop data from going to disk in case
 * we are shutting down the filesystem.  Typically user data goes thru this
 * path; one of the exceptions is the superblock.
 */
void
xfsbdstrat(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	if (XFS_FORCED_SHUTDOWN(mp)) {
		trace_xfs_bdstrat_shut(bp, _RET_IP_);
		xfs_bioerror_relse(bp);
		return;
	}

	xfs_buf_iorequest(bp);
}

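/*
 * Drop an I/O reference and run completion processing once the last
 * outstanding I/O on the buffer has finished.
 */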
1168
STATIC void
1169 1170
_xfs_buf_ioend(
	xfs_buf_t		*bp,
L
Linus Torvalds 已提交
1171 1172
	int			schedule)
{
1173
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
1174
		xfs_buf_ioend(bp, schedule);
L
Linus Torvalds 已提交
1175 1176
}

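/*
 * bio completion handler: record any error, invalidate vmap aliases on
 * reads and drop this bio's I/O reference on the buffer.
 */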
A
Al Viro 已提交
1177
STATIC void
1178
xfs_buf_bio_end_io(
L
Linus Torvalds 已提交
1179 1180 1181
	struct bio		*bio,
	int			error)
{
1182
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
L
Linus Torvalds 已提交
1183

1184
	xfs_buf_ioerror(bp, -error);
L
Linus Torvalds 已提交
1185

1186 1187 1188
	if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
		invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));

1189
	_xfs_buf_ioend(bp, 1);
L
Linus Torvalds 已提交
1190 1191 1192 1193
	bio_put(bio);
}

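/*
 * Map the buffer's pages into one or more bios and submit them, chaining
 * completion through b_io_remaining.
 */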
STATIC void
1194 1195
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
1196
{
1197
	int			rw, map_i, total_nr_pages, nr_pages;
L
Linus Torvalds 已提交
1198
	struct bio		*bio;
1199 1200 1201
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;
L
Linus Torvalds 已提交
1202

1203
	total_nr_pages = bp->b_page_count;
L
Linus Torvalds 已提交
1204 1205
	map_i = 0;

1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216
	if (bp->b_flags & XBF_WRITE) {
		if (bp->b_flags & XBF_SYNCIO)
			rw = WRITE_SYNC;
		else
			rw = WRITE;
		if (bp->b_flags & XBF_FUA)
			rw |= REQ_FUA;
		if (bp->b_flags & XBF_FLUSH)
			rw |= REQ_FLUSH;
	} else if (bp->b_flags & XBF_READ_AHEAD) {
		rw = READA;
1217
	} else {
1218
		rw = READ;
1219 1220
	}

1221 1222 1223
	/* we only use the buffer cache for meta-data */
	rw |= REQ_META;

L
Linus Torvalds 已提交
1224
next_chunk:
1225
	atomic_inc(&bp->b_io_remaining);
L
Linus Torvalds 已提交
1226 1227 1228 1229 1230
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
1231
	bio->bi_bdev = bp->b_target->bt_bdev;
L
Linus Torvalds 已提交
1232
	bio->bi_sector = sector;
1233 1234
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;
L
Linus Torvalds 已提交
1235

1236

L
Linus Torvalds 已提交
1237
	for (; size && nr_pages; nr_pages--, map_i++) {
1238
		int	rbytes, nbytes = PAGE_SIZE - offset;
L
Linus Torvalds 已提交
1239 1240 1241 1242

		if (nbytes > size)
			nbytes = size;

1243 1244
		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
L
Linus Torvalds 已提交
1245 1246 1247 1248 1249 1250 1251 1252 1253
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

	if (likely(bio->bi_size)) {
1254 1255 1256 1257
		if (xfs_buf_is_vmapped(bp)) {
			flush_kernel_vmap_range(bp->b_addr,
						xfs_buf_vmap_len(bp));
		}
L
Linus Torvalds 已提交
1258 1259 1260 1261
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
1262
		xfs_buf_ioerror(bp, EIO);
1263
		bio_put(bio);
L
Linus Torvalds 已提交
1264 1265 1266 1267
	}
}

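/*
 * Start I/O on a buffer.  Writes wait for the buffer to be unpinned first;
 * the I/O itself is issued asynchronously by _xfs_buf_ioapply().
 */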
int
1268 1269
xfs_buf_iorequest(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
1270
{
C
Christoph Hellwig 已提交
1271
	trace_xfs_buf_iorequest(bp, _RET_IP_);
L
Linus Torvalds 已提交
1272

1273
	ASSERT(!(bp->b_flags & XBF_DELWRI));
L
Linus Torvalds 已提交
1274

1275
	if (bp->b_flags & XBF_WRITE)
1276 1277
		xfs_buf_wait_unpin(bp);
	xfs_buf_hold(bp);
L
Linus Torvalds 已提交
1278 1279 1280

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
1281
	 * all the I/O from calling xfs_buf_ioend too early.
L
Linus Torvalds 已提交
1282
	 */
1283 1284 1285
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);
L
Linus Torvalds 已提交
1286

1287
	xfs_buf_rele(bp);
L
Linus Torvalds 已提交
1288 1289 1290 1291
	return 0;
}

/*
1292 1293 1294
 *	Waits for I/O to complete on the buffer supplied.
 *	It returns immediately if no I/O is pending.
 *	It returns the I/O error code, if any, or 0 if there was no error.
L
Linus Torvalds 已提交
1295 1296
 */
int
1297 1298
xfs_buf_iowait(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
1299
{
C
Christoph Hellwig 已提交
1300 1301
	trace_xfs_buf_iowait(bp, _RET_IP_);

1302
	wait_for_completion(&bp->b_iowait);
C
Christoph Hellwig 已提交
1303 1304

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
1305
	return bp->b_error;
L
Linus Torvalds 已提交
1306 1307
}

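/*
 * Return the kernel address of the given offset within the buffer, resolving
 * through the page array when the buffer is not mapped.
 */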
1308 1309 1310
xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
L
Linus Torvalds 已提交
1311 1312 1313 1314
	size_t			offset)
{
	struct page		*page;

1315
	if (bp->b_flags & XBF_MAPPED)
1316
		return bp->b_addr + offset;
L
Linus Torvalds 已提交
1317

1318
	offset += bp->b_offset;
1319 1320
	page = bp->b_pages[offset >> PAGE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_SIZE-1));
L
Linus Torvalds 已提交
1321 1322 1323 1324 1325 1326
}

/*
 *	Move data into or out of a buffer.
 */
void
1327 1328
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
L
Linus Torvalds 已提交
1329 1330
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
1331
	void			*data,	/* data address			*/
1332
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
L
Linus Torvalds 已提交
1333 1334 1335 1336 1337 1338
{
	size_t			bend, cpoff, csize;
	struct page		*page;

	bend = boff + bsize;
	while (boff < bend) {
1339 1340
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
L
Linus Torvalds 已提交
1341
		csize = min_t(size_t,
1342
			      PAGE_SIZE-cpoff, bp->b_count_desired-boff);
L
Linus Torvalds 已提交
1343

1344
		ASSERT(((csize + cpoff) <= PAGE_SIZE));
L
Linus Torvalds 已提交
1345 1346

		switch (mode) {
1347
		case XBRW_ZERO:
L
Linus Torvalds 已提交
1348 1349
			memset(page_address(page) + cpoff, 0, csize);
			break;
1350
		case XBRW_READ:
L
Linus Torvalds 已提交
1351 1352
			memcpy(data, page_address(page) + cpoff, csize);
			break;
1353
		case XBRW_WRITE:
L
Linus Torvalds 已提交
1354 1355 1356 1357 1358 1359 1360 1361 1362
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

/*
1363
 *	Handling of buffer targets (buftargs).
L
Linus Torvalds 已提交
1364 1365 1366
 */

/*
1367 1368 1369
 * Wait for any bufs with callbacks that have been submitted but have not yet
 * returned. These buffers will have an elevated hold count, so wait on those
 * while freeing all the buffers only held by the LRU.
L
Linus Torvalds 已提交
1370 1371 1372
 */
void
xfs_wait_buftarg(
1373
	struct xfs_buftarg	*btp)
L
Linus Torvalds 已提交
1374
{
1375 1376 1377 1378 1379 1380 1381 1382
	struct xfs_buf		*bp;

restart:
	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);
		if (atomic_read(&bp->b_hold) > 1) {
			spin_unlock(&btp->bt_lru_lock);
D
Dave Chinner 已提交
1383
			delay(100);
1384
			goto restart;
L
Linus Torvalds 已提交
1385
		}
1386 1387 1388 1389 1390 1391 1392 1393
		/*
		 * clear the LRU reference count so the bufer doesn't get
		 * ignored in xfs_buf_rele().
		 */
		atomic_set(&bp->b_lru_ref, 0);
		spin_unlock(&btp->bt_lru_lock);
		xfs_buf_rele(bp);
		spin_lock(&btp->bt_lru_lock);
L
Linus Torvalds 已提交
1394
	}
1395
	spin_unlock(&btp->bt_lru_lock);
L
Linus Torvalds 已提交
1396 1397
}

1398 1399 1400
int
xfs_buftarg_shrink(
	struct shrinker		*shrink,
1401
	struct shrink_control	*sc)
1402
{
1403 1404
	struct xfs_buftarg	*btp = container_of(shrink,
					struct xfs_buftarg, bt_shrinker);
1405
	struct xfs_buf		*bp;
1406
	int nr_to_scan = sc->nr_to_scan;
1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434
	LIST_HEAD(dispose);

	if (!nr_to_scan)
		return btp->bt_lru_nr;

	spin_lock(&btp->bt_lru_lock);
	while (!list_empty(&btp->bt_lru)) {
		if (nr_to_scan-- <= 0)
			break;

		bp = list_first_entry(&btp->bt_lru, struct xfs_buf, b_lru);

		/*
		 * Decrement the b_lru_ref count unless the value is already
		 * zero. If the value is already zero, we need to reclaim the
		 * buffer, otherwise it gets another trip through the LRU.
		 */
		if (!atomic_add_unless(&bp->b_lru_ref, -1, 0)) {
			list_move_tail(&bp->b_lru, &btp->bt_lru);
			continue;
		}

		/*
		 * remove the buffer from the LRU now to avoid needing another
		 * lock round trip inside xfs_buf_rele().
		 */
		list_move(&bp->b_lru, &dispose);
		btp->bt_lru_nr--;
1435
	}
1436 1437 1438 1439 1440 1441 1442 1443 1444
	spin_unlock(&btp->bt_lru_lock);

	while (!list_empty(&dispose)) {
		bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
		list_del_init(&bp->b_lru);
		xfs_buf_rele(bp);
	}

	return btp->bt_lru_nr;
1445 1446
}

L
Linus Torvalds 已提交
1447 1448
void
xfs_free_buftarg(
1449 1450
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
L
Linus Torvalds 已提交
1451
{
1452 1453
	unregister_shrinker(&btp->bt_shrinker);

L
Linus Torvalds 已提交
1454
	xfs_flush_buftarg(btp, 1);
1455 1456
	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);
1457 1458

	kthread_stop(btp->bt_task);
1459
	kmem_free(btp);
L
Linus Torvalds 已提交
1460 1461 1462 1463 1464 1465 1466 1467 1468
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
1469 1470 1471
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;
L
Linus Torvalds 已提交
1472

1473
	if (set_blocksize(btp->bt_bdev, sectorsize)) {
1474 1475
		xfs_warn(btp->bt_mount,
			"Cannot set_blocksize to %u on device %s\n",
1476
			sectorsize, xfs_buf_target_name(btp));
L
Linus Torvalds 已提交
1477 1478 1479 1480 1481 1482 1483
		return EINVAL;
	}

	return 0;
}

/*
 *	When allocating the initial buffer target we have not yet
 *	read in the superblock, so don't know what sized sectors
 *	are being used at this early stage.  Play safe.
 */
L
Linus Torvalds 已提交
1488 1489 1490 1491 1492 1493
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
1494
			PAGE_SIZE, bdev_logical_block_size(bdev), 0);
L
Linus Torvalds 已提交
1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

1506 1507
STATIC int
xfs_alloc_delwrite_queue(
1508 1509
	xfs_buftarg_t		*btp,
	const char		*fsname)
1510 1511
{
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
E
Eric Sandeen 已提交
1512
	spin_lock_init(&btp->bt_delwrite_lock);
1513
	btp->bt_flags = 0;
1514
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd/%s", fsname);
1515 1516 1517
	if (IS_ERR(btp->bt_task))
		return PTR_ERR(btp->bt_task);
	return 0;
1518 1519
}

L
Linus Torvalds 已提交
1520 1521
xfs_buftarg_t *
xfs_alloc_buftarg(
1522
	struct xfs_mount	*mp,
L
Linus Torvalds 已提交
1523
	struct block_device	*bdev,
1524 1525
	int			external,
	const char		*fsname)
L
Linus Torvalds 已提交
1526 1527 1528 1529 1530
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

1531
	btp->bt_mount = mp;
1532 1533
	btp->bt_dev =  bdev->bd_dev;
	btp->bt_bdev = bdev;
1534 1535 1536 1537
	btp->bt_bdi = blk_get_backing_dev_info(bdev);
	if (!btp->bt_bdi)
		goto error;

1538 1539
	INIT_LIST_HEAD(&btp->bt_lru);
	spin_lock_init(&btp->bt_lru_lock);
L
Linus Torvalds 已提交
1540 1541
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
1542
	if (xfs_alloc_delwrite_queue(btp, fsname))
1543
		goto error;
1544 1545 1546
	btp->bt_shrinker.shrink = xfs_buftarg_shrink;
	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&btp->bt_shrinker);
L
Linus Torvalds 已提交
1547 1548 1549
	return btp;

error:
1550
	kmem_free(btp);
L
Linus Torvalds 已提交
1551 1552 1553 1554 1555
	return NULL;
}


/*
1556
 *	Delayed write buffer handling
L
Linus Torvalds 已提交
1557 1558
 */
STATIC void
1559
xfs_buf_delwri_queue(
1560
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
1561
{
1562 1563
	struct list_head	*dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
1564

C
Christoph Hellwig 已提交
1565 1566
	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

1567
	ASSERT(!(bp->b_flags & XBF_READ));
L
Linus Torvalds 已提交
1568

1569
	spin_lock(dwlk);
1570
	if (!list_empty(&bp->b_list)) {
1571
		/* if already in the queue, move it to the tail */
1572
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
1573 1574
		list_move_tail(&bp->b_list, dwq);
	} else {
D
Dave Chinner 已提交
1575
		/* start xfsbufd as it is about to have something to do */
1576 1577
		if (list_empty(dwq))
			wake_up_process(bp->b_target->bt_task);
D
Dave Chinner 已提交
1578

1579 1580 1581 1582
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_DELWRI | _XBF_DELWRI_Q | XBF_ASYNC;
		list_add_tail(&bp->b_list, dwq);
	}
1583
	bp->b_queuetime = jiffies;
1584
	spin_unlock(dwlk);
L
Linus Torvalds 已提交
1585 1586 1587
}

void
1588 1589
xfs_buf_delwri_dequeue(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
1590
{
1591
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
L
Linus Torvalds 已提交
1592 1593
	int			dequeued = 0;

1594
	spin_lock(dwlk);
1595 1596 1597
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
L
Linus Torvalds 已提交
1598 1599
		dequeued = 1;
	}
1600
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1601
	spin_unlock(dwlk);
L
Linus Torvalds 已提交
1602 1603

	if (dequeued)
1604
		xfs_buf_rele(bp);
L
Linus Torvalds 已提交
1605

C
Christoph Hellwig 已提交
1606
	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
L
Linus Torvalds 已提交
1607 1608
}

1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637
/*
 * If a delwri buffer needs to be pushed before it has aged out, then promote
 * it to the head of the delwri queue so that it will be flushed on the next
 * xfsbufd run. We do this by resetting the queuetime of the buffer to be older
 * than the age currently needed to flush the buffer. Hence the next time the
 * xfsbufd sees it is guaranteed to be considered old enough to flush.
 */
void
xfs_buf_delwri_promote(
	struct xfs_buf	*bp)
{
	struct xfs_buftarg *btp = bp->b_target;
	long		age = xfs_buf_age_centisecs * msecs_to_jiffies(10) + 1;

	ASSERT(bp->b_flags & XBF_DELWRI);
	ASSERT(bp->b_flags & _XBF_DELWRI_Q);

	/*
	 * Check the buffer age before locking the delayed write queue as we
	 * don't need to promote buffers that are already past the flush age.
	 */
	if (bp->b_queuetime < jiffies - age)
		return;
	bp->b_queuetime = jiffies - age;
	spin_lock(&btp->bt_delwrite_lock);
	list_move(&bp->b_list, &btp->bt_delwrite_queue);
	spin_unlock(&btp->bt_delwrite_lock);
}

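/*
 * Wait for all work queued on the given buffer I/O workqueue to complete.
 */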
L
Linus Torvalds 已提交
1638
STATIC void
1639
xfs_buf_runall_queues(
L
Linus Torvalds 已提交
1640 1641 1642 1643 1644
	struct workqueue_struct	*queue)
{
	flush_workqueue(queue);
}

1645 1646 1647 1648 1649 1650 1651 1652
/*
 * Move as many buffers as specified to the supplied list
 * indicating if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfs_buf_delwri_split(
	xfs_buftarg_t	*target,
	struct list_head *list,
1653
	unsigned long	age)
1654 1655 1656 1657 1658
{
	xfs_buf_t	*bp, *n;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t	*dwlk = &target->bt_delwrite_lock;
	int		skipped = 0;
1659
	int		force;
1660

1661
	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1662 1663 1664 1665 1666
	INIT_LIST_HEAD(list);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
		ASSERT(bp->b_flags & XBF_DELWRI);

1667
		if (!xfs_buf_ispinned(bp) && xfs_buf_trylock(bp)) {
1668
			if (!force &&
1669 1670 1671 1672 1673
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}

1674
			bp->b_flags &= ~(XBF_DELWRI | _XBF_DELWRI_Q);
1675 1676
			bp->b_flags |= XBF_WRITE;
			list_move_tail(&bp->b_list, list);
1677
			trace_xfs_buf_delwri_split(bp, _RET_IP_);
1678 1679 1680 1681 1682 1683 1684 1685 1686
		} else
			skipped++;
	}
	spin_unlock(dwlk);

	return skipped;

}

1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709
/*
 * Compare function is more complex than it needs to be because
 * the return value is only 32 bits and we are doing comparisons
 * on 64 bit values
 */
static int
xfs_buf_cmp(
	void		*priv,
	struct list_head *a,
	struct list_head *b)
{
	struct xfs_buf	*ap = container_of(a, struct xfs_buf, b_list);
	struct xfs_buf	*bp = container_of(b, struct xfs_buf, b_list);
	xfs_daddr_t		diff;

	diff = ap->b_bn - bp->b_bn;
	if (diff < 0)
		return -1;
	if (diff > 0)
		return 1;
	return 0;
}

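/*
 * Delayed write daemon: periodically push aged buffers on the delwri queue
 * out to disk in block order, honouring the freezer during suspend.
 */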
L
Linus Torvalds 已提交
1710
STATIC int
1711
xfsbufd(
1712
	void		*data)
L
Linus Torvalds 已提交
1713
{
1714
	xfs_buftarg_t   *target = (xfs_buftarg_t *)data;
L
Linus Torvalds 已提交
1715 1716 1717

	current->flags |= PF_MEMALLOC;

1718 1719
	set_freezable();

L
Linus Torvalds 已提交
1720
	do {
D
Dave Chinner 已提交
1721 1722
		long	age = xfs_buf_age_centisecs * msecs_to_jiffies(10);
		long	tout = xfs_buf_timer_centisecs * msecs_to_jiffies(10);
1723
		struct list_head tmp;
1724
		struct blk_plug plug;
D
Dave Chinner 已提交
1725

1726
		if (unlikely(freezing(current))) {
1727
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1728
			refrigerator();
1729
		} else {
1730
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1731
		}
L
Linus Torvalds 已提交
1732

D
Dave Chinner 已提交
1733 1734 1735 1736
		/* sleep for a long time if there is nothing to do. */
		if (list_empty(&target->bt_delwrite_queue))
			tout = MAX_SCHEDULE_TIMEOUT;
		schedule_timeout_interruptible(tout);
L
Linus Torvalds 已提交
1737

D
Dave Chinner 已提交
1738
		xfs_buf_delwri_split(target, &tmp, age);
1739
		list_sort(NULL, &tmp, xfs_buf_cmp);
1740 1741

		blk_start_plug(&plug);
L
Linus Torvalds 已提交
1742
		while (!list_empty(&tmp)) {
1743 1744
			struct xfs_buf *bp;
			bp = list_first_entry(&tmp, struct xfs_buf, b_list);
1745
			list_del_init(&bp->b_list);
1746
			xfs_bdstrat_cb(bp);
L
Linus Torvalds 已提交
1747
		}
1748
		blk_finish_plug(&plug);
1749
	} while (!kthread_should_stop());
L
Linus Torvalds 已提交
1750

1751
	return 0;
L
Linus Torvalds 已提交
1752 1753 1754
}

/*
1755 1756 1757
 *	Go through all incore buffers, and release buffers if they belong to
 *	the given device. This is used in filesystem error handling to
 *	preserve the consistency of its metadata.
L
Linus Torvalds 已提交
1758 1759 1760
 */
int
xfs_flush_buftarg(
1761 1762
	xfs_buftarg_t	*target,
	int		wait)
L
Linus Torvalds 已提交
1763
{
1764
	xfs_buf_t	*bp;
1765
	int		pincount = 0;
1766 1767
	LIST_HEAD(tmp_list);
	LIST_HEAD(wait_list);
1768
	struct blk_plug plug;
L
Linus Torvalds 已提交
1769

1770
	xfs_buf_runall_queues(xfsconvertd_workqueue);
1771 1772
	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);
L
Linus Torvalds 已提交
1773

1774
	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1775
	pincount = xfs_buf_delwri_split(target, &tmp_list, 0);
L
Linus Torvalds 已提交
1776 1777

	/*
1778 1779 1780
	 * Dropped the delayed write list lock, now walk the temporary list.
	 * All I/O is issued async and then if we need to wait for completion
	 * we do that after issuing all the IO.
L
Linus Torvalds 已提交
1781
	 */
1782
	list_sort(NULL, &tmp_list, xfs_buf_cmp);
1783 1784

	blk_start_plug(&plug);
1785 1786
	while (!list_empty(&tmp_list)) {
		bp = list_first_entry(&tmp_list, struct xfs_buf, b_list);
1787
		ASSERT(target == bp->b_target);
1788 1789
		list_del_init(&bp->b_list);
		if (wait) {
1790
			bp->b_flags &= ~XBF_ASYNC;
1791 1792
			list_add(&bp->b_list, &wait_list);
		}
1793
		xfs_bdstrat_cb(bp);
L
Linus Torvalds 已提交
1794
	}
1795
	blk_finish_plug(&plug);
L
Linus Torvalds 已提交
1796

1797
	if (wait) {
1798
		/* Wait for IO to complete. */
1799 1800
		while (!list_empty(&wait_list)) {
			bp = list_first_entry(&wait_list, struct xfs_buf, b_list);
1801

1802
			list_del_init(&bp->b_list);
C
Christoph Hellwig 已提交
1803
			xfs_buf_iowait(bp);
1804 1805
			xfs_buf_relse(bp);
		}
L
Linus Torvalds 已提交
1806 1807 1808 1809 1810
	}

	return pincount;
}

1811
int __init
1812
xfs_buf_init(void)
L
Linus Torvalds 已提交
1813
{
1814 1815
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
1816
	if (!xfs_buf_zone)
C
Christoph Hellwig 已提交
1817
		goto out;
1818

1819
	xfslogd_workqueue = alloc_workqueue("xfslogd",
1820
					WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
1821
	if (!xfslogd_workqueue)
1822
		goto out_free_buf_zone;
L
Linus Torvalds 已提交
1823

T
Tejun Heo 已提交
1824
	xfsdatad_workqueue = alloc_workqueue("xfsdatad", WQ_MEM_RECLAIM, 1);
1825 1826
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;
L
Linus Torvalds 已提交
1827

T
Tejun Heo 已提交
1828 1829
	xfsconvertd_workqueue = alloc_workqueue("xfsconvertd",
						WQ_MEM_RECLAIM, 1);
1830 1831 1832
	if (!xfsconvertd_workqueue)
		goto out_destroy_xfsdatad_workqueue;

1833
	return 0;
L
Linus Torvalds 已提交
1834

1835 1836
 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
1837 1838 1839
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
1840
	kmem_zone_destroy(xfs_buf_zone);
C
Christoph Hellwig 已提交
1841
 out:
1842
	return -ENOMEM;
L
Linus Torvalds 已提交
1843 1844 1845
}

void
1846
xfs_buf_terminate(void)
L
Linus Torvalds 已提交
1847
{
1848
	destroy_workqueue(xfsconvertd_workqueue);
1849 1850
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
1851
	kmem_zone_destroy(xfs_buf_zone);
L
Linus Torvalds 已提交
1852
}
1853 1854 1855 1856 1857 1858 1859 1860

#ifdef CONFIG_KDB_MODULES
struct list_head *
xfs_get_buftarg_list(void)
{
	return &xfs_buftarg_list;
}
#endif