xfs_buf.c 40.4 KB
Newer Older
L
Linus Torvalds 已提交
1
/*
2
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3
 * All Rights Reserved.
L
Linus Torvalds 已提交
4
 *
5 6
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
L
Linus Torvalds 已提交
7 8
 * published by the Free Software Foundation.
 *
9 10 11 12
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
L
Linus Torvalds 已提交
13
 *
14 15 16
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
L
Linus Torvalds 已提交
17
 */
18
#include "xfs.h"
L
Linus Torvalds 已提交
19 20 21 22 23 24 25 26 27 28 29 30 31
#include <linux/stddef.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/bio.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/blkdev.h>
#include <linux/hash.h>
32
#include <linux/kthread.h>
C
Christoph Lameter 已提交
33
#include <linux/migrate.h>
34
#include <linux/backing-dev.h>
35
#include <linux/freezer.h>
L
Linus Torvalds 已提交
36

37 38 39 40 41
#include "xfs_sb.h"
#include "xfs_inum.h"
#include "xfs_ag.h"
#include "xfs_dmapi.h"
#include "xfs_mount.h"
C
Christoph Hellwig 已提交
42
#include "xfs_trace.h"
43

44
static kmem_zone_t *xfs_buf_zone;
45
STATIC int xfsbufd(void *);
A
Al Viro 已提交
46
STATIC int xfsbufd_wakeup(int, gfp_t);
47
STATIC void xfs_buf_delwri_queue(xfs_buf_t *, int);
48 49 50 51
static struct shrinker xfs_buf_shake = {
	.shrink = xfsbufd_wakeup,
	.seeks = DEFAULT_SEEKS,
};
52

53
static struct workqueue_struct *xfslogd_workqueue;
54
struct workqueue_struct *xfsdatad_workqueue;
55
struct workqueue_struct *xfsconvertd_workqueue;
L
Linus Torvalds 已提交
56

57 58 59 60
#ifdef XFS_BUF_LOCK_TRACKING
# define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
# define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
# define XB_GET_OWNER(bp)	((bp)->b_last_holder)
L
Linus Torvalds 已提交
61
#else
62 63 64
# define XB_SET_OWNER(bp)	do { } while (0)
# define XB_CLEAR_OWNER(bp)	do { } while (0)
# define XB_GET_OWNER(bp)	do { } while (0)
L
Linus Torvalds 已提交
65 66
#endif

67 68 69
#define xb_to_gfp(flags) \
	((((flags) & XBF_READ_AHEAD) ? __GFP_NORETRY : \
	  ((flags) & XBF_DONT_BLOCK) ? GFP_NOFS : GFP_KERNEL) | __GFP_NOWARN)
L
Linus Torvalds 已提交
70

71 72
#define xb_to_km(flags) \
	 (((flags) & XBF_DONT_BLOCK) ? KM_NOFS : KM_SLEEP)
L
Linus Torvalds 已提交
73

74 75 76 77
#define xfs_buf_allocate(flags) \
	kmem_zone_alloc(xfs_buf_zone, xb_to_km(flags))
#define xfs_buf_deallocate(bp) \
	kmem_zone_free(xfs_buf_zone, (bp));
L
Linus Torvalds 已提交
78 79

/*
80
 *	Page Region interfaces.
L
Linus Torvalds 已提交
81
 *
82 83 84
 *	For pages in filesystems where the blocksize is smaller than the
 *	pagesize, we use the page->private field (long) to hold a bitmap
 * 	of uptodate regions within the page.
L
Linus Torvalds 已提交
85
 *
86
 *	Each such region is "bytes per page / bits per long" bytes long.
L
Linus Torvalds 已提交
87
 *
88 89 90
 *	NBPPR == number-of-bytes-per-page-region
 *	BTOPR == bytes-to-page-region (rounded up)
 *	BTOPRT == bytes-to-page-region-truncated (rounded down)
L
Linus Torvalds 已提交
91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124
 */
#if (BITS_PER_LONG == 32)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 5)	/* (32 == 1<<5) */
#elif (BITS_PER_LONG == 64)
#define PRSHIFT		(PAGE_CACHE_SHIFT - 6)	/* (64 == 1<<6) */
#else
#error BITS_PER_LONG must be 32 or 64
#endif
#define NBPPR		(PAGE_CACHE_SIZE/BITS_PER_LONG)
#define BTOPR(b)	(((unsigned int)(b) + (NBPPR - 1)) >> PRSHIFT)
#define BTOPRT(b)	(((unsigned int)(b) >> PRSHIFT))

STATIC unsigned long
page_region_mask(
	size_t		offset,
	size_t		length)
{
	unsigned long	mask;
	int		first, final;

	first = BTOPR(offset);
	final = BTOPRT(offset + length - 1);
	first = min(first, final);

	mask = ~0UL;
	mask <<= BITS_PER_LONG - (final - first);
	mask >>= BITS_PER_LONG - (final);

	ASSERT(offset + length <= PAGE_CACHE_SIZE);
	ASSERT((final - first) < BITS_PER_LONG && (final - first) >= 0);

	return mask;
}

125
STATIC void
L
Linus Torvalds 已提交
126 127 128 129 130
set_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
H
Hugh Dickins 已提交
131 132 133
	set_page_private(page,
		page_private(page) | page_region_mask(offset, length));
	if (page_private(page) == ~0UL)
L
Linus Torvalds 已提交
134 135 136
		SetPageUptodate(page);
}

137
STATIC int
L
Linus Torvalds 已提交
138 139 140 141 142 143 144
test_page_region(
	struct page	*page,
	size_t		offset,
	size_t		length)
{
	unsigned long	mask = page_region_mask(offset, length);

H
Hugh Dickins 已提交
145
	return (mask && (page_private(page) & mask) == mask);
L
Linus Torvalds 已提交
146 147
}

148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216
/*
 *	Mapping of multi-page buffers into contiguous virtual space
 */

typedef struct a_list {
	void		*vm_addr;
	struct a_list	*next;
} a_list_t;

static a_list_t		*as_free_head;
static int		as_list_len;
static DEFINE_SPINLOCK(as_lock);

/*
 *	Try to batch vunmaps because they are costly.
 */
STATIC void
free_address(
	void		*addr)
{
	a_list_t	*aentry;

#ifdef CONFIG_XEN
	/*
	 * Xen needs to be able to make sure it can get an exclusive
	 * RO mapping of pages it wants to turn into a pagetable.  If
	 * a newly allocated page is also still being vmap()ed by xfs,
	 * it will cause pagetable construction to fail.  This is a
	 * quick workaround to always eagerly unmap pages so that Xen
	 * is happy.
	 */
	vunmap(addr);
	return;
#endif

	aentry = kmalloc(sizeof(a_list_t), GFP_NOWAIT);
	if (likely(aentry)) {
		spin_lock(&as_lock);
		aentry->next = as_free_head;
		aentry->vm_addr = addr;
		as_free_head = aentry;
		as_list_len++;
		spin_unlock(&as_lock);
	} else {
		vunmap(addr);
	}
}

STATIC void
purge_addresses(void)
{
	a_list_t	*aentry, *old;

	if (as_free_head == NULL)
		return;

	spin_lock(&as_lock);
	aentry = as_free_head;
	as_free_head = NULL;
	as_list_len = 0;
	spin_unlock(&as_lock);

	while ((old = aentry) != NULL) {
		vunmap(aentry->vm_addr);
		aentry = aentry->next;
		kfree(old);
	}
}

L
Linus Torvalds 已提交
217
/*
218
 *	Internal xfs_buf_t object manipulation
L
Linus Torvalds 已提交
219 220 221
 */

STATIC void
222 223
_xfs_buf_initialize(
	xfs_buf_t		*bp,
L
Linus Torvalds 已提交
224
	xfs_buftarg_t		*target,
225
	xfs_off_t		range_base,
L
Linus Torvalds 已提交
226
	size_t			range_length,
227
	xfs_buf_flags_t		flags)
L
Linus Torvalds 已提交
228 229
{
	/*
230
	 * We don't want certain flags to appear in b_flags.
L
Linus Torvalds 已提交
231
	 */
232 233 234 235
	flags &= ~(XBF_LOCK|XBF_MAPPED|XBF_DONT_BLOCK|XBF_READ_AHEAD);

	memset(bp, 0, sizeof(xfs_buf_t));
	atomic_set(&bp->b_hold, 1);
236
	init_completion(&bp->b_iowait);
237 238 239 240 241 242
	INIT_LIST_HEAD(&bp->b_list);
	INIT_LIST_HEAD(&bp->b_hash_list);
	init_MUTEX_LOCKED(&bp->b_sema); /* held, no waiters */
	XB_SET_OWNER(bp);
	bp->b_target = target;
	bp->b_file_offset = range_base;
L
Linus Torvalds 已提交
243 244 245 246 247
	/*
	 * Set buffer_length and count_desired to the same value initially.
	 * I/O routines should use count_desired, which will be the same in
	 * most cases but may be reset (e.g. XFS recovery).
	 */
248 249 250 251 252 253 254
	bp->b_buffer_length = bp->b_count_desired = range_length;
	bp->b_flags = flags;
	bp->b_bn = XFS_BUF_DADDR_NULL;
	atomic_set(&bp->b_pin_count, 0);
	init_waitqueue_head(&bp->b_waiters);

	XFS_STATS_INC(xb_create);
C
Christoph Hellwig 已提交
255 256

	trace_xfs_buf_init(bp, _RET_IP_);
L
Linus Torvalds 已提交
257 258 259
}

/*
260 261
 *	Allocate a page array capable of holding a specified number
 *	of pages, and point the page buf at it.
L
Linus Torvalds 已提交
262 263
 */
STATIC int
264 265
_xfs_buf_get_pages(
	xfs_buf_t		*bp,
L
Linus Torvalds 已提交
266
	int			page_count,
267
	xfs_buf_flags_t		flags)
L
Linus Torvalds 已提交
268 269
{
	/* Make sure that we have a page list */
270 271 272 273 274
	if (bp->b_pages == NULL) {
		bp->b_offset = xfs_buf_poff(bp->b_file_offset);
		bp->b_page_count = page_count;
		if (page_count <= XB_PAGES) {
			bp->b_pages = bp->b_page_array;
L
Linus Torvalds 已提交
275
		} else {
276 277 278
			bp->b_pages = kmem_alloc(sizeof(struct page *) *
					page_count, xb_to_km(flags));
			if (bp->b_pages == NULL)
L
Linus Torvalds 已提交
279 280
				return -ENOMEM;
		}
281
		memset(bp->b_pages, 0, sizeof(struct page *) * page_count);
L
Linus Torvalds 已提交
282 283 284 285 286
	}
	return 0;
}

/*
287
 *	Frees b_pages if it was allocated.
L
Linus Torvalds 已提交
288 289
 */
STATIC void
290
_xfs_buf_free_pages(
L
Linus Torvalds 已提交
291 292
	xfs_buf_t	*bp)
{
293
	if (bp->b_pages != bp->b_page_array) {
294
		kmem_free(bp->b_pages);
295
		bp->b_pages = NULL;
L
Linus Torvalds 已提交
296 297 298 299 300 301 302
	}
}

/*
 *	Releases the specified buffer.
 *
 * 	The modification state of any associated pages is left unchanged.
303
 * 	The buffer most not be on any hash - use xfs_buf_rele instead for
L
Linus Torvalds 已提交
304 305 306
 * 	hashed and refcounted buffers
 */
void
307
xfs_buf_free(
L
Linus Torvalds 已提交
308 309
	xfs_buf_t		*bp)
{
C
Christoph Hellwig 已提交
310
	trace_xfs_buf_free(bp, _RET_IP_);
L
Linus Torvalds 已提交
311

312
	ASSERT(list_empty(&bp->b_hash_list));
L
Linus Torvalds 已提交
313

314
	if (bp->b_flags & (_XBF_PAGE_CACHE|_XBF_PAGES)) {
L
Linus Torvalds 已提交
315 316
		uint		i;

317
		if ((bp->b_flags & XBF_MAPPED) && (bp->b_page_count > 1))
318
			free_address(bp->b_addr - bp->b_offset);
L
Linus Torvalds 已提交
319

320 321 322
		for (i = 0; i < bp->b_page_count; i++) {
			struct page	*page = bp->b_pages[i];

323 324
			if (bp->b_flags & _XBF_PAGE_CACHE)
				ASSERT(!PagePrivate(page));
325 326
			page_cache_release(page);
		}
L
Linus Torvalds 已提交
327
	}
328
	_xfs_buf_free_pages(bp);
329
	xfs_buf_deallocate(bp);
L
Linus Torvalds 已提交
330 331 332 333 334 335
}

/*
 *	Finds all pages for buffer in question and builds it's page list.
 */
STATIC int
336
_xfs_buf_lookup_pages(
L
Linus Torvalds 已提交
337 338 339
	xfs_buf_t		*bp,
	uint			flags)
{
340 341 342
	struct address_space	*mapping = bp->b_target->bt_mapping;
	size_t			blocksize = bp->b_target->bt_bsize;
	size_t			size = bp->b_count_desired;
L
Linus Torvalds 已提交
343
	size_t			nbytes, offset;
344
	gfp_t			gfp_mask = xb_to_gfp(flags);
L
Linus Torvalds 已提交
345 346
	unsigned short		page_count, i;
	pgoff_t			first;
347
	xfs_off_t		end;
L
Linus Torvalds 已提交
348 349
	int			error;

350 351
	end = bp->b_file_offset + bp->b_buffer_length;
	page_count = xfs_buf_btoc(end) - xfs_buf_btoct(bp->b_file_offset);
L
Linus Torvalds 已提交
352

353
	error = _xfs_buf_get_pages(bp, page_count, flags);
L
Linus Torvalds 已提交
354 355
	if (unlikely(error))
		return error;
356
	bp->b_flags |= _XBF_PAGE_CACHE;
L
Linus Torvalds 已提交
357

358 359
	offset = bp->b_offset;
	first = bp->b_file_offset >> PAGE_CACHE_SHIFT;
L
Linus Torvalds 已提交
360

361
	for (i = 0; i < bp->b_page_count; i++) {
L
Linus Torvalds 已提交
362 363 364 365 366 367
		struct page	*page;
		uint		retries = 0;

	      retry:
		page = find_or_create_page(mapping, first + i, gfp_mask);
		if (unlikely(page == NULL)) {
368 369
			if (flags & XBF_READ_AHEAD) {
				bp->b_page_count = i;
370 371
				for (i = 0; i < bp->b_page_count; i++)
					unlock_page(bp->b_pages[i]);
L
Linus Torvalds 已提交
372 373 374 375 376 377 378 379 380 381 382 383 384
				return -ENOMEM;
			}

			/*
			 * This could deadlock.
			 *
			 * But until all the XFS lowlevel code is revamped to
			 * handle buffer allocation failures we can't do much.
			 */
			if (!(++retries % 100))
				printk(KERN_ERR
					"XFS: possible memory allocation "
					"deadlock in %s (mode:0x%x)\n",
385
					__func__, gfp_mask);
L
Linus Torvalds 已提交
386

387
			XFS_STATS_INC(xb_page_retries);
388
			xfsbufd_wakeup(0, gfp_mask);
389
			congestion_wait(BLK_RW_ASYNC, HZ/50);
L
Linus Torvalds 已提交
390 391 392
			goto retry;
		}

393
		XFS_STATS_INC(xb_page_found);
L
Linus Torvalds 已提交
394 395 396 397

		nbytes = min_t(size_t, size, PAGE_CACHE_SIZE - offset);
		size -= nbytes;

398
		ASSERT(!PagePrivate(page));
L
Linus Torvalds 已提交
399 400
		if (!PageUptodate(page)) {
			page_count--;
401 402 403 404
			if (blocksize >= PAGE_CACHE_SIZE) {
				if (flags & XBF_READ)
					bp->b_flags |= _XBF_PAGE_LOCKED;
			} else if (!PagePrivate(page)) {
L
Linus Torvalds 已提交
405 406 407 408 409
				if (test_page_region(page, offset, nbytes))
					page_count++;
			}
		}

410
		bp->b_pages[i] = page;
L
Linus Torvalds 已提交
411 412 413
		offset = 0;
	}

414 415 416 417 418
	if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
		for (i = 0; i < bp->b_page_count; i++)
			unlock_page(bp->b_pages[i]);
	}

419 420
	if (page_count == bp->b_page_count)
		bp->b_flags |= XBF_DONE;
L
Linus Torvalds 已提交
421 422 423 424 425 426 427 428

	return error;
}

/*
 *	Map buffer into kernel address-space if nessecary.
 */
STATIC int
429
_xfs_buf_map_pages(
L
Linus Torvalds 已提交
430 431 432 433
	xfs_buf_t		*bp,
	uint			flags)
{
	/* A single page buffer is always mappable */
434 435 436 437
	if (bp->b_page_count == 1) {
		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
	} else if (flags & XBF_MAPPED) {
438 439
		if (as_list_len > 64)
			purge_addresses();
440 441
		bp->b_addr = vmap(bp->b_pages, bp->b_page_count,
					VM_MAP, PAGE_KERNEL);
442
		if (unlikely(bp->b_addr == NULL))
L
Linus Torvalds 已提交
443
			return -ENOMEM;
444 445
		bp->b_addr += bp->b_offset;
		bp->b_flags |= XBF_MAPPED;
L
Linus Torvalds 已提交
446 447 448 449 450 451 452 453 454 455
	}

	return 0;
}

/*
 *	Finding and Reading Buffers
 */

/*
456
 *	Look up, and creates if absent, a lockable buffer for
L
Linus Torvalds 已提交
457 458 459 460 461 462 463
 *	a given range of an inode.  The buffer is returned
 *	locked.	 If other overlapping buffers exist, they are
 *	released before the new buffer is created and locked,
 *	which may imply that this call will block until those buffers
 *	are unlocked.  No I/O is implied by this call.
 */
xfs_buf_t *
464
_xfs_buf_find(
L
Linus Torvalds 已提交
465
	xfs_buftarg_t		*btp,	/* block device target		*/
466
	xfs_off_t		ioff,	/* starting offset of range	*/
L
Linus Torvalds 已提交
467
	size_t			isize,	/* length of range		*/
468 469
	xfs_buf_flags_t		flags,
	xfs_buf_t		*new_bp)
L
Linus Torvalds 已提交
470
{
471
	xfs_off_t		range_base;
L
Linus Torvalds 已提交
472 473
	size_t			range_length;
	xfs_bufhash_t		*hash;
474
	xfs_buf_t		*bp, *n;
L
Linus Torvalds 已提交
475 476 477 478 479

	range_base = (ioff << BBSHIFT);
	range_length = (isize << BBSHIFT);

	/* Check for IOs smaller than the sector size / not sector aligned */
480
	ASSERT(!(range_length < (1 << btp->bt_sshift)));
481
	ASSERT(!(range_base & (xfs_off_t)btp->bt_smask));
L
Linus Torvalds 已提交
482 483 484 485 486

	hash = &btp->bt_hash[hash_long((unsigned long)ioff, btp->bt_hashshift)];

	spin_lock(&hash->bh_lock);

487 488 489 490
	list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
		ASSERT(btp == bp->b_target);
		if (bp->b_file_offset == range_base &&
		    bp->b_buffer_length == range_length) {
L
Linus Torvalds 已提交
491
			/*
492
			 * If we look at something, bring it to the
L
Linus Torvalds 已提交
493 494
			 * front of the list for next time.
			 */
495 496
			atomic_inc(&bp->b_hold);
			list_move(&bp->b_hash_list, &hash->bh_list);
L
Linus Torvalds 已提交
497 498 499 500 501
			goto found;
		}
	}

	/* No match found */
502 503
	if (new_bp) {
		_xfs_buf_initialize(new_bp, btp, range_base,
L
Linus Torvalds 已提交
504
				range_length, flags);
505 506
		new_bp->b_hash = hash;
		list_add(&new_bp->b_hash_list, &hash->bh_list);
L
Linus Torvalds 已提交
507
	} else {
508
		XFS_STATS_INC(xb_miss_locked);
L
Linus Torvalds 已提交
509 510 511
	}

	spin_unlock(&hash->bh_lock);
512
	return new_bp;
L
Linus Torvalds 已提交
513 514 515 516 517 518 519 520

found:
	spin_unlock(&hash->bh_lock);

	/* Attempt to get the semaphore without sleeping,
	 * if this does not work then we need to drop the
	 * spinlock and do a hard attempt on the semaphore.
	 */
521 522
	if (down_trylock(&bp->b_sema)) {
		if (!(flags & XBF_TRYLOCK)) {
L
Linus Torvalds 已提交
523
			/* wait for buffer ownership */
524 525
			xfs_buf_lock(bp);
			XFS_STATS_INC(xb_get_locked_waited);
L
Linus Torvalds 已提交
526 527 528
		} else {
			/* We asked for a trylock and failed, no need
			 * to look at file offset and length here, we
529 530 531
			 * know that this buffer at least overlaps our
			 * buffer and is locked, therefore our buffer
			 * either does not exist, or is this buffer.
L
Linus Torvalds 已提交
532
			 */
533 534 535
			xfs_buf_rele(bp);
			XFS_STATS_INC(xb_busy_locked);
			return NULL;
L
Linus Torvalds 已提交
536 537 538
		}
	} else {
		/* trylock worked */
539
		XB_SET_OWNER(bp);
L
Linus Torvalds 已提交
540 541
	}

542 543 544
	if (bp->b_flags & XBF_STALE) {
		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
		bp->b_flags &= XBF_MAPPED;
545
	}
C
Christoph Hellwig 已提交
546 547

	trace_xfs_buf_find(bp, flags, _RET_IP_);
548 549
	XFS_STATS_INC(xb_get_locked);
	return bp;
L
Linus Torvalds 已提交
550 551 552
}

/*
553
 *	Assembles a buffer covering the specified range.
L
Linus Torvalds 已提交
554 555 556 557
 *	Storage in memory for all portions of the buffer will be allocated,
 *	although backing storage may not be.
 */
xfs_buf_t *
558
xfs_buf_get(
L
Linus Torvalds 已提交
559
	xfs_buftarg_t		*target,/* target for buffer		*/
560
	xfs_off_t		ioff,	/* starting offset of range	*/
L
Linus Torvalds 已提交
561
	size_t			isize,	/* length of range		*/
562
	xfs_buf_flags_t		flags)
L
Linus Torvalds 已提交
563
{
564
	xfs_buf_t		*bp, *new_bp;
L
Linus Torvalds 已提交
565 566
	int			error = 0, i;

567 568
	new_bp = xfs_buf_allocate(flags);
	if (unlikely(!new_bp))
L
Linus Torvalds 已提交
569 570
		return NULL;

571 572 573
	bp = _xfs_buf_find(target, ioff, isize, flags, new_bp);
	if (bp == new_bp) {
		error = _xfs_buf_lookup_pages(bp, flags);
L
Linus Torvalds 已提交
574 575 576
		if (error)
			goto no_buffer;
	} else {
577 578
		xfs_buf_deallocate(new_bp);
		if (unlikely(bp == NULL))
L
Linus Torvalds 已提交
579 580 581
			return NULL;
	}

582 583
	for (i = 0; i < bp->b_page_count; i++)
		mark_page_accessed(bp->b_pages[i]);
L
Linus Torvalds 已提交
584

585 586
	if (!(bp->b_flags & XBF_MAPPED)) {
		error = _xfs_buf_map_pages(bp, flags);
L
Linus Torvalds 已提交
587 588
		if (unlikely(error)) {
			printk(KERN_WARNING "%s: failed to map pages\n",
589
					__func__);
L
Linus Torvalds 已提交
590 591 592 593
			goto no_buffer;
		}
	}

594
	XFS_STATS_INC(xb_get);
L
Linus Torvalds 已提交
595 596 597 598 599

	/*
	 * Always fill in the block number now, the mapped cases can do
	 * their own overlay of this later.
	 */
600 601
	bp->b_bn = ioff;
	bp->b_count_desired = bp->b_buffer_length;
L
Linus Torvalds 已提交
602

C
Christoph Hellwig 已提交
603
	trace_xfs_buf_get(bp, flags, _RET_IP_);
604
	return bp;
L
Linus Torvalds 已提交
605 606

 no_buffer:
607 608 609
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
L
Linus Torvalds 已提交
610 611 612
	return NULL;
}

C
Christoph Hellwig 已提交
613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633
STATIC int
_xfs_buf_read(
	xfs_buf_t		*bp,
	xfs_buf_flags_t		flags)
{
	int			status;

	ASSERT(!(flags & (XBF_DELWRI|XBF_WRITE)));
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);

	bp->b_flags &= ~(XBF_WRITE | XBF_ASYNC | XBF_DELWRI | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);
	bp->b_flags |= flags & (XBF_READ | XBF_ASYNC | \
			XBF_READ_AHEAD | _XBF_RUN_QUEUES);

	status = xfs_buf_iorequest(bp);
	if (!status && !(flags & XBF_ASYNC))
		status = xfs_buf_iowait(bp);
	return status;
}

L
Linus Torvalds 已提交
634
xfs_buf_t *
635
xfs_buf_read(
L
Linus Torvalds 已提交
636
	xfs_buftarg_t		*target,
637
	xfs_off_t		ioff,
L
Linus Torvalds 已提交
638
	size_t			isize,
639
	xfs_buf_flags_t		flags)
L
Linus Torvalds 已提交
640
{
641 642 643 644
	xfs_buf_t		*bp;

	flags |= XBF_READ;

645
	bp = xfs_buf_get(target, ioff, isize, flags);
646
	if (bp) {
C
Christoph Hellwig 已提交
647 648
		trace_xfs_buf_read(bp, flags, _RET_IP_);

649 650
		if (!XFS_BUF_ISDONE(bp)) {
			XFS_STATS_INC(xb_get_read);
C
Christoph Hellwig 已提交
651
			_xfs_buf_read(bp, flags);
652
		} else if (flags & XBF_ASYNC) {
L
Linus Torvalds 已提交
653 654 655 656 657 658 659
			/*
			 * Read ahead call which is already satisfied,
			 * drop the buffer
			 */
			goto no_buffer;
		} else {
			/* We do not want read in the flags */
660
			bp->b_flags &= ~XBF_READ;
L
Linus Torvalds 已提交
661 662 663
		}
	}

664
	return bp;
L
Linus Torvalds 已提交
665 666

 no_buffer:
667 668 669
	if (flags & (XBF_LOCK | XBF_TRYLOCK))
		xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
L
Linus Torvalds 已提交
670 671 672 673
	return NULL;
}

/*
674 675
 *	If we are not low on memory then do the readahead in a deadlock
 *	safe manner.
L
Linus Torvalds 已提交
676 677
 */
void
678
xfs_buf_readahead(
L
Linus Torvalds 已提交
679
	xfs_buftarg_t		*target,
680
	xfs_off_t		ioff,
L
Linus Torvalds 已提交
681
	size_t			isize,
682
	xfs_buf_flags_t		flags)
L
Linus Torvalds 已提交
683 684 685
{
	struct backing_dev_info *bdi;

686
	bdi = target->bt_mapping->backing_dev_info;
L
Linus Torvalds 已提交
687 688 689
	if (bdi_read_congested(bdi))
		return;

690
	flags |= (XBF_TRYLOCK|XBF_ASYNC|XBF_READ_AHEAD);
691
	xfs_buf_read(target, ioff, isize, flags);
L
Linus Torvalds 已提交
692 693 694
}

xfs_buf_t *
695
xfs_buf_get_empty(
L
Linus Torvalds 已提交
696 697 698
	size_t			len,
	xfs_buftarg_t		*target)
{
699
	xfs_buf_t		*bp;
L
Linus Torvalds 已提交
700

701 702 703 704
	bp = xfs_buf_allocate(0);
	if (bp)
		_xfs_buf_initialize(bp, target, 0, len, 0);
	return bp;
L
Linus Torvalds 已提交
705 706 707 708 709 710
}

static inline struct page *
mem_to_page(
	void			*addr)
{
711
	if ((!is_vmalloc_addr(addr))) {
L
Linus Torvalds 已提交
712 713 714 715 716 717 718
		return virt_to_page(addr);
	} else {
		return vmalloc_to_page(addr);
	}
}

int
719 720
xfs_buf_associate_memory(
	xfs_buf_t		*bp,
L
Linus Torvalds 已提交
721 722 723 724 725
	void			*mem,
	size_t			len)
{
	int			rval;
	int			i = 0;
726 727 728
	unsigned long		pageaddr;
	unsigned long		offset;
	size_t			buflen;
L
Linus Torvalds 已提交
729 730
	int			page_count;

731 732 733 734
	pageaddr = (unsigned long)mem & PAGE_CACHE_MASK;
	offset = (unsigned long)mem - pageaddr;
	buflen = PAGE_CACHE_ALIGN(len + offset);
	page_count = buflen >> PAGE_CACHE_SHIFT;
L
Linus Torvalds 已提交
735 736

	/* Free any previous set of page pointers */
737 738
	if (bp->b_pages)
		_xfs_buf_free_pages(bp);
L
Linus Torvalds 已提交
739

740 741
	bp->b_pages = NULL;
	bp->b_addr = mem;
L
Linus Torvalds 已提交
742

743
	rval = _xfs_buf_get_pages(bp, page_count, XBF_DONT_BLOCK);
L
Linus Torvalds 已提交
744 745 746
	if (rval)
		return rval;

747
	bp->b_offset = offset;
748 749 750 751

	for (i = 0; i < bp->b_page_count; i++) {
		bp->b_pages[i] = mem_to_page((void *)pageaddr);
		pageaddr += PAGE_CACHE_SIZE;
L
Linus Torvalds 已提交
752 753
	}

754 755
	bp->b_count_desired = len;
	bp->b_buffer_length = buflen;
756
	bp->b_flags |= XBF_MAPPED;
757
	bp->b_flags &= ~_XBF_PAGE_LOCKED;
L
Linus Torvalds 已提交
758 759 760 761 762

	return 0;
}

xfs_buf_t *
763
xfs_buf_get_noaddr(
L
Linus Torvalds 已提交
764 765 766
	size_t			len,
	xfs_buftarg_t		*target)
{
767 768
	unsigned long		page_count = PAGE_ALIGN(len) >> PAGE_SHIFT;
	int			error, i;
L
Linus Torvalds 已提交
769 770
	xfs_buf_t		*bp;

771
	bp = xfs_buf_allocate(0);
L
Linus Torvalds 已提交
772 773
	if (unlikely(bp == NULL))
		goto fail;
774
	_xfs_buf_initialize(bp, target, 0, len, 0);
L
Linus Torvalds 已提交
775

776 777
	error = _xfs_buf_get_pages(bp, page_count, 0);
	if (error)
L
Linus Torvalds 已提交
778 779
		goto fail_free_buf;

780 781 782 783
	for (i = 0; i < page_count; i++) {
		bp->b_pages[i] = alloc_page(GFP_KERNEL);
		if (!bp->b_pages[i])
			goto fail_free_mem;
L
Linus Torvalds 已提交
784
	}
785
	bp->b_flags |= _XBF_PAGES;
L
Linus Torvalds 已提交
786

787 788 789
	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
	if (unlikely(error)) {
		printk(KERN_WARNING "%s: failed to map pages\n",
790
				__func__);
L
Linus Torvalds 已提交
791
		goto fail_free_mem;
792
	}
L
Linus Torvalds 已提交
793

794
	xfs_buf_unlock(bp);
L
Linus Torvalds 已提交
795

C
Christoph Hellwig 已提交
796
	trace_xfs_buf_get_noaddr(bp, _RET_IP_);
L
Linus Torvalds 已提交
797
	return bp;
798

L
Linus Torvalds 已提交
799
 fail_free_mem:
800 801
	while (--i >= 0)
		__free_page(bp->b_pages[i]);
802
	_xfs_buf_free_pages(bp);
L
Linus Torvalds 已提交
803
 fail_free_buf:
804
	xfs_buf_deallocate(bp);
L
Linus Torvalds 已提交
805 806 807 808 809 810 811 812 813 814
 fail:
	return NULL;
}

/*
 *	Increment reference count on buffer, to hold the buffer concurrently
 *	with another thread which may release (free) the buffer asynchronously.
 *	Must hold the buffer already to call this function.
 */
void
815 816
xfs_buf_hold(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
817
{
C
Christoph Hellwig 已提交
818
	trace_xfs_buf_hold(bp, _RET_IP_);
819
	atomic_inc(&bp->b_hold);
L
Linus Torvalds 已提交
820 821 822
}

/*
823 824
 *	Releases a hold on the specified buffer.  If the
 *	the hold count is 1, calls xfs_buf_free.
L
Linus Torvalds 已提交
825 826
 */
void
827 828
xfs_buf_rele(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
829
{
830
	xfs_bufhash_t		*hash = bp->b_hash;
L
Linus Torvalds 已提交
831

C
Christoph Hellwig 已提交
832
	trace_xfs_buf_rele(bp, _RET_IP_);
L
Linus Torvalds 已提交
833

834 835 836 837 838 839 840
	if (unlikely(!hash)) {
		ASSERT(!bp->b_relse);
		if (atomic_dec_and_test(&bp->b_hold))
			xfs_buf_free(bp);
		return;
	}

841
	ASSERT(atomic_read(&bp->b_hold) > 0);
842 843 844
	if (atomic_dec_and_lock(&bp->b_hold, &hash->bh_lock)) {
		if (bp->b_relse) {
			atomic_inc(&bp->b_hold);
L
Linus Torvalds 已提交
845
			spin_unlock(&hash->bh_lock);
846 847
			(*(bp->b_relse)) (bp);
		} else if (bp->b_flags & XBF_FS_MANAGED) {
L
Linus Torvalds 已提交
848 849
			spin_unlock(&hash->bh_lock);
		} else {
850 851
			ASSERT(!(bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)));
			list_del_init(&bp->b_hash_list);
L
Linus Torvalds 已提交
852
			spin_unlock(&hash->bh_lock);
853
			xfs_buf_free(bp);
L
Linus Torvalds 已提交
854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869
		}
	}
}


/*
 *	Mutual exclusion on buffers.  Locking model:
 *
 *	Buffers associated with inodes for which buffer locking
 *	is not enabled are not protected by semaphores, and are
 *	assumed to be exclusively owned by the caller.  There is a
 *	spinlock in the buffer, used by the caller when concurrent
 *	access is possible.
 */

/*
870 871 872 873
 *	Locks a buffer object, if it is not already locked.
 *	Note that this in no way locks the underlying pages, so it is only
 *	useful for synchronizing concurrent use of buffer objects, not for
 *	synchronizing independent access to the underlying pages.
L
Linus Torvalds 已提交
874 875
 */
int
876 877
xfs_buf_cond_lock(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
878 879 880
{
	int			locked;

881
	locked = down_trylock(&bp->b_sema) == 0;
C
Christoph Hellwig 已提交
882
	if (locked)
883
		XB_SET_OWNER(bp);
C
Christoph Hellwig 已提交
884 885

	trace_xfs_buf_cond_lock(bp, _RET_IP_);
886
	return locked ? 0 : -EBUSY;
L
Linus Torvalds 已提交
887 888 889
}

int
890 891
xfs_buf_lock_value(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
892
{
893
	return bp->b_sema.count;
L
Linus Torvalds 已提交
894 895 896
}

/*
897 898 899 900
 *	Locks a buffer object.
 *	Note that this in no way locks the underlying pages, so it is only
 *	useful for synchronizing concurrent use of buffer objects, not for
 *	synchronizing independent access to the underlying pages.
L
Linus Torvalds 已提交
901
 */
902 903 904
void
xfs_buf_lock(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
905
{
C
Christoph Hellwig 已提交
906 907
	trace_xfs_buf_lock(bp, _RET_IP_);

908 909 910 911
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
	down(&bp->b_sema);
	XB_SET_OWNER(bp);
C
Christoph Hellwig 已提交
912 913

	trace_xfs_buf_lock_done(bp, _RET_IP_);
L
Linus Torvalds 已提交
914 915 916
}

/*
917
 *	Releases the lock on the buffer object.
918
 *	If the buffer is marked delwri but is not queued, do so before we
919
 *	unlock the buffer as we need to set flags correctly.  We also need to
920 921
 *	take a reference for the delwri queue because the unlocker is going to
 *	drop their's and they don't know we just queued it.
L
Linus Torvalds 已提交
922 923
 */
void
924 925
xfs_buf_unlock(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
926
{
927 928 929 930
	if ((bp->b_flags & (XBF_DELWRI|_XBF_DELWRI_Q)) == XBF_DELWRI) {
		atomic_inc(&bp->b_hold);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_delwri_queue(bp, 0);
931 932
	}

933 934
	XB_CLEAR_OWNER(bp);
	up(&bp->b_sema);
C
Christoph Hellwig 已提交
935 936

	trace_xfs_buf_unlock(bp, _RET_IP_);
L
Linus Torvalds 已提交
937 938 939 940 941
}


/*
 *	Pinning Buffer Storage in Memory
942
 *	Ensure that no attempt to force a buffer to disk will succeed.
L
Linus Torvalds 已提交
943 944
 */
void
945 946
xfs_buf_pin(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
947
{
C
Christoph Hellwig 已提交
948
	trace_xfs_buf_pin(bp, _RET_IP_);
949
	atomic_inc(&bp->b_pin_count);
L
Linus Torvalds 已提交
950 951 952
}

void
953 954
xfs_buf_unpin(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
955
{
C
Christoph Hellwig 已提交
956 957
	trace_xfs_buf_unpin(bp, _RET_IP_);

958 959
	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);
L
Linus Torvalds 已提交
960 961 962
}

int
963 964
xfs_buf_ispin(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
965
{
966
	return atomic_read(&bp->b_pin_count);
L
Linus Torvalds 已提交
967 968
}

969 970 971
STATIC void
xfs_buf_wait_unpin(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
972 973 974
{
	DECLARE_WAITQUEUE	(wait, current);

975
	if (atomic_read(&bp->b_pin_count) == 0)
L
Linus Torvalds 已提交
976 977
		return;

978
	add_wait_queue(&bp->b_waiters, &wait);
L
Linus Torvalds 已提交
979 980
	for (;;) {
		set_current_state(TASK_UNINTERRUPTIBLE);
981
		if (atomic_read(&bp->b_pin_count) == 0)
L
Linus Torvalds 已提交
982
			break;
983 984
		if (atomic_read(&bp->b_io_remaining))
			blk_run_address_space(bp->b_target->bt_mapping);
L
Linus Torvalds 已提交
985 986
		schedule();
	}
987
	remove_wait_queue(&bp->b_waiters, &wait);
L
Linus Torvalds 已提交
988 989 990 991 992 993 994 995
	set_current_state(TASK_RUNNING);
}

/*
 *	Buffer Utility Routines
 */

STATIC void
996
xfs_buf_iodone_work(
D
David Howells 已提交
997
	struct work_struct	*work)
L
Linus Torvalds 已提交
998
{
D
David Howells 已提交
999 1000
	xfs_buf_t		*bp =
		container_of(work, xfs_buf_t, b_iodone_work);
L
Linus Torvalds 已提交
1001

1002 1003 1004 1005
	/*
	 * We can get an EOPNOTSUPP to ordered writes.  Here we clear the
	 * ordered flag and reissue them.  Because we can't tell the higher
	 * layers directly that they should not issue ordered I/O anymore, they
1006
	 * need to check if the _XFS_BARRIER_FAILED flag was set during I/O completion.
1007 1008 1009
	 */
	if ((bp->b_error == EOPNOTSUPP) &&
	    (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
C
Christoph Hellwig 已提交
1010
		trace_xfs_buf_ordered_retry(bp, _RET_IP_);
1011
		bp->b_flags &= ~XBF_ORDERED;
1012
		bp->b_flags |= _XFS_BARRIER_FAILED;
1013 1014
		xfs_buf_iorequest(bp);
	} else if (bp->b_iodone)
1015 1016
		(*(bp->b_iodone))(bp);
	else if (bp->b_flags & XBF_ASYNC)
L
Linus Torvalds 已提交
1017 1018 1019 1020
		xfs_buf_relse(bp);
}

void
1021 1022
xfs_buf_ioend(
	xfs_buf_t		*bp,
L
Linus Torvalds 已提交
1023 1024
	int			schedule)
{
C
Christoph Hellwig 已提交
1025 1026
	trace_xfs_buf_iodone(bp, _RET_IP_);

1027
	bp->b_flags &= ~(XBF_READ | XBF_WRITE | XBF_READ_AHEAD);
1028 1029
	if (bp->b_error == 0)
		bp->b_flags |= XBF_DONE;
L
Linus Torvalds 已提交
1030

1031
	if ((bp->b_iodone) || (bp->b_flags & XBF_ASYNC)) {
L
Linus Torvalds 已提交
1032
		if (schedule) {
D
David Howells 已提交
1033
			INIT_WORK(&bp->b_iodone_work, xfs_buf_iodone_work);
1034
			queue_work(xfslogd_workqueue, &bp->b_iodone_work);
L
Linus Torvalds 已提交
1035
		} else {
D
David Howells 已提交
1036
			xfs_buf_iodone_work(&bp->b_iodone_work);
L
Linus Torvalds 已提交
1037 1038
		}
	} else {
1039
		complete(&bp->b_iowait);
L
Linus Torvalds 已提交
1040 1041 1042 1043
	}
}

void
1044 1045 1046
xfs_buf_ioerror(
	xfs_buf_t		*bp,
	int			error)
L
Linus Torvalds 已提交
1047 1048
{
	ASSERT(error >= 0 && error <= 0xffff);
1049
	bp->b_error = (unsigned short)error;
C
Christoph Hellwig 已提交
1050
	trace_xfs_buf_ioerror(bp, error, _RET_IP_);
L
Linus Torvalds 已提交
1051 1052 1053
}

int
C
Christoph Hellwig 已提交
1054 1055 1056
xfs_bawrite(
	void			*mp,
	struct xfs_buf		*bp)
L
Linus Torvalds 已提交
1057
{
C
Christoph Hellwig 已提交
1058
	trace_xfs_buf_bawrite(bp, _RET_IP_);
L
Linus Torvalds 已提交
1059

C
Christoph Hellwig 已提交
1060
	ASSERT(bp->b_bn != XFS_BUF_DADDR_NULL);
L
Linus Torvalds 已提交
1061

C
Christoph Hellwig 已提交
1062
	xfs_buf_delwri_dequeue(bp);
L
Linus Torvalds 已提交
1063

C
Christoph Hellwig 已提交
1064 1065
	bp->b_flags &= ~(XBF_READ | XBF_DELWRI | XBF_READ_AHEAD);
	bp->b_flags |= (XBF_WRITE | XBF_ASYNC | _XBF_RUN_QUEUES);
L
Linus Torvalds 已提交
1066

1067
	bp->b_mount = mp;
C
Christoph Hellwig 已提交
1068 1069 1070
	bp->b_strat = xfs_bdstrat_cb;
	return xfs_bdstrat_cb(bp);
}
L
Linus Torvalds 已提交
1071

C
Christoph Hellwig 已提交
1072 1073 1074 1075 1076
void
xfs_bdwrite(
	void			*mp,
	struct xfs_buf		*bp)
{
C
Christoph Hellwig 已提交
1077
	trace_xfs_buf_bdwrite(bp, _RET_IP_);
L
Linus Torvalds 已提交
1078

C
Christoph Hellwig 已提交
1079
	bp->b_strat = xfs_bdstrat_cb;
1080
	bp->b_mount = mp;
L
Linus Torvalds 已提交
1081

C
Christoph Hellwig 已提交
1082 1083 1084 1085
	bp->b_flags &= ~XBF_READ;
	bp->b_flags |= (XBF_DELWRI | XBF_ASYNC);

	xfs_buf_delwri_queue(bp, 1);
L
Linus Torvalds 已提交
1086 1087
}

1088
STATIC void
1089 1090
_xfs_buf_ioend(
	xfs_buf_t		*bp,
L
Linus Torvalds 已提交
1091 1092
	int			schedule)
{
1093 1094
	if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
		bp->b_flags &= ~_XBF_PAGE_LOCKED;
1095
		xfs_buf_ioend(bp, schedule);
1096
	}
L
Linus Torvalds 已提交
1097 1098
}

A
Al Viro 已提交
1099
STATIC void
1100
xfs_buf_bio_end_io(
L
Linus Torvalds 已提交
1101 1102 1103
	struct bio		*bio,
	int			error)
{
1104 1105
	xfs_buf_t		*bp = (xfs_buf_t *)bio->bi_private;
	unsigned int		blocksize = bp->b_target->bt_bsize;
1106
	struct bio_vec		*bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
L
Linus Torvalds 已提交
1107

1108
	xfs_buf_ioerror(bp, -error);
L
Linus Torvalds 已提交
1109

1110
	do {
L
Linus Torvalds 已提交
1111 1112
		struct page	*page = bvec->bv_page;

1113
		ASSERT(!PagePrivate(page));
1114 1115
		if (unlikely(bp->b_error)) {
			if (bp->b_flags & XBF_READ)
1116
				ClearPageUptodate(page);
1117
		} else if (blocksize >= PAGE_CACHE_SIZE) {
L
Linus Torvalds 已提交
1118 1119
			SetPageUptodate(page);
		} else if (!PagePrivate(page) &&
1120
				(bp->b_flags & _XBF_PAGE_CACHE)) {
L
Linus Torvalds 已提交
1121 1122 1123
			set_page_region(page, bvec->bv_offset, bvec->bv_len);
		}

1124 1125
		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);
1126 1127 1128

		if (bp->b_flags & _XBF_PAGE_LOCKED)
			unlock_page(page);
1129
	} while (bvec >= bio->bi_io_vec);
L
Linus Torvalds 已提交
1130

1131
	_xfs_buf_ioend(bp, 1);
L
Linus Torvalds 已提交
1132 1133 1134 1135
	bio_put(bio);
}

STATIC void
1136 1137
_xfs_buf_ioapply(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
1138
{
1139
	int			rw, map_i, total_nr_pages, nr_pages;
L
Linus Torvalds 已提交
1140
	struct bio		*bio;
1141 1142 1143 1144
	int			offset = bp->b_offset;
	int			size = bp->b_count_desired;
	sector_t		sector = bp->b_bn;
	unsigned int		blocksize = bp->b_target->bt_bsize;
L
Linus Torvalds 已提交
1145

1146
	total_nr_pages = bp->b_page_count;
L
Linus Torvalds 已提交
1147 1148
	map_i = 0;

1149 1150
	if (bp->b_flags & XBF_ORDERED) {
		ASSERT(!(bp->b_flags & XBF_READ));
1151
		rw = WRITE_BARRIER;
1152
	} else if (bp->b_flags & XBF_LOG_BUFFER) {
1153 1154 1155
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_SYNC : READ_SYNC;
1156 1157 1158 1159
	} else if (bp->b_flags & _XBF_RUN_QUEUES) {
		ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
		bp->b_flags &= ~_XBF_RUN_QUEUES;
		rw = (bp->b_flags & XBF_WRITE) ? WRITE_META : READ_META;
1160 1161 1162
	} else {
		rw = (bp->b_flags & XBF_WRITE) ? WRITE :
		     (bp->b_flags & XBF_READ_AHEAD) ? READA : READ;
1163 1164
	}

1165
	/* Special code path for reading a sub page size buffer in --
L
Linus Torvalds 已提交
1166 1167
	 * we populate up the whole page, and hence the other metadata
	 * in the same page.  This optimization is only valid when the
1168
	 * filesystem block size is not smaller than the page size.
L
Linus Torvalds 已提交
1169
	 */
1170
	if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
1171 1172
	    ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
	      (XBF_READ|_XBF_PAGE_LOCKED)) &&
1173
	    (blocksize >= PAGE_CACHE_SIZE)) {
L
Linus Torvalds 已提交
1174 1175
		bio = bio_alloc(GFP_NOIO, 1);

1176
		bio->bi_bdev = bp->b_target->bt_bdev;
L
Linus Torvalds 已提交
1177
		bio->bi_sector = sector - (offset >> BBSHIFT);
1178 1179
		bio->bi_end_io = xfs_buf_bio_end_io;
		bio->bi_private = bp;
L
Linus Torvalds 已提交
1180

1181
		bio_add_page(bio, bp->b_pages[0], PAGE_CACHE_SIZE, 0);
L
Linus Torvalds 已提交
1182 1183
		size = 0;

1184
		atomic_inc(&bp->b_io_remaining);
L
Linus Torvalds 已提交
1185 1186 1187 1188 1189

		goto submit_io;
	}

next_chunk:
1190
	atomic_inc(&bp->b_io_remaining);
L
Linus Torvalds 已提交
1191 1192 1193 1194 1195
	nr_pages = BIO_MAX_SECTORS >> (PAGE_SHIFT - BBSHIFT);
	if (nr_pages > total_nr_pages)
		nr_pages = total_nr_pages;

	bio = bio_alloc(GFP_NOIO, nr_pages);
1196
	bio->bi_bdev = bp->b_target->bt_bdev;
L
Linus Torvalds 已提交
1197
	bio->bi_sector = sector;
1198 1199
	bio->bi_end_io = xfs_buf_bio_end_io;
	bio->bi_private = bp;
L
Linus Torvalds 已提交
1200 1201

	for (; size && nr_pages; nr_pages--, map_i++) {
1202
		int	rbytes, nbytes = PAGE_CACHE_SIZE - offset;
L
Linus Torvalds 已提交
1203 1204 1205 1206

		if (nbytes > size)
			nbytes = size;

1207 1208
		rbytes = bio_add_page(bio, bp->b_pages[map_i], nbytes, offset);
		if (rbytes < nbytes)
L
Linus Torvalds 已提交
1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223
			break;

		offset = 0;
		sector += nbytes >> BBSHIFT;
		size -= nbytes;
		total_nr_pages--;
	}

submit_io:
	if (likely(bio->bi_size)) {
		submit_bio(rw, bio);
		if (size)
			goto next_chunk;
	} else {
		bio_put(bio);
1224
		xfs_buf_ioerror(bp, EIO);
L
Linus Torvalds 已提交
1225 1226 1227 1228
	}
}

int
1229 1230
xfs_buf_iorequest(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
1231
{
C
Christoph Hellwig 已提交
1232
	trace_xfs_buf_iorequest(bp, _RET_IP_);
L
Linus Torvalds 已提交
1233

1234 1235
	if (bp->b_flags & XBF_DELWRI) {
		xfs_buf_delwri_queue(bp, 1);
L
Linus Torvalds 已提交
1236 1237 1238
		return 0;
	}

1239 1240
	if (bp->b_flags & XBF_WRITE) {
		xfs_buf_wait_unpin(bp);
L
Linus Torvalds 已提交
1241 1242
	}

1243
	xfs_buf_hold(bp);
L
Linus Torvalds 已提交
1244 1245 1246

	/* Set the count to 1 initially, this will stop an I/O
	 * completion callout which happens before we have started
1247
	 * all the I/O from calling xfs_buf_ioend too early.
L
Linus Torvalds 已提交
1248
	 */
1249 1250 1251
	atomic_set(&bp->b_io_remaining, 1);
	_xfs_buf_ioapply(bp);
	_xfs_buf_ioend(bp, 0);
L
Linus Torvalds 已提交
1252

1253
	xfs_buf_rele(bp);
L
Linus Torvalds 已提交
1254 1255 1256 1257
	return 0;
}

/*
1258 1259 1260
 *	Waits for I/O to complete on the buffer supplied.
 *	It returns immediately if no I/O is pending.
 *	It returns the I/O error code, if any, or 0 if there was no error.
L
Linus Torvalds 已提交
1261 1262
 */
int
1263 1264
xfs_buf_iowait(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
1265
{
C
Christoph Hellwig 已提交
1266 1267
	trace_xfs_buf_iowait(bp, _RET_IP_);

1268 1269
	if (atomic_read(&bp->b_io_remaining))
		blk_run_address_space(bp->b_target->bt_mapping);
1270
	wait_for_completion(&bp->b_iowait);
C
Christoph Hellwig 已提交
1271 1272

	trace_xfs_buf_iowait_done(bp, _RET_IP_);
1273
	return bp->b_error;
L
Linus Torvalds 已提交
1274 1275
}

1276 1277 1278
xfs_caddr_t
xfs_buf_offset(
	xfs_buf_t		*bp,
L
Linus Torvalds 已提交
1279 1280 1281 1282
	size_t			offset)
{
	struct page		*page;

1283 1284
	if (bp->b_flags & XBF_MAPPED)
		return XFS_BUF_PTR(bp) + offset;
L
Linus Torvalds 已提交
1285

1286 1287 1288
	offset += bp->b_offset;
	page = bp->b_pages[offset >> PAGE_CACHE_SHIFT];
	return (xfs_caddr_t)page_address(page) + (offset & (PAGE_CACHE_SIZE-1));
L
Linus Torvalds 已提交
1289 1290 1291 1292 1293 1294
}

/*
 *	Move data into or out of a buffer.
 */
void
1295 1296
xfs_buf_iomove(
	xfs_buf_t		*bp,	/* buffer to process		*/
L
Linus Torvalds 已提交
1297 1298 1299
	size_t			boff,	/* starting buffer offset	*/
	size_t			bsize,	/* length to copy		*/
	caddr_t			data,	/* data address			*/
1300
	xfs_buf_rw_t		mode)	/* read/write/zero flag		*/
L
Linus Torvalds 已提交
1301 1302 1303 1304 1305 1306
{
	size_t			bend, cpoff, csize;
	struct page		*page;

	bend = boff + bsize;
	while (boff < bend) {
1307 1308
		page = bp->b_pages[xfs_buf_btoct(boff + bp->b_offset)];
		cpoff = xfs_buf_poff(boff + bp->b_offset);
L
Linus Torvalds 已提交
1309
		csize = min_t(size_t,
1310
			      PAGE_CACHE_SIZE-cpoff, bp->b_count_desired-boff);
L
Linus Torvalds 已提交
1311 1312 1313 1314

		ASSERT(((csize + cpoff) <= PAGE_CACHE_SIZE));

		switch (mode) {
1315
		case XBRW_ZERO:
L
Linus Torvalds 已提交
1316 1317
			memset(page_address(page) + cpoff, 0, csize);
			break;
1318
		case XBRW_READ:
L
Linus Torvalds 已提交
1319 1320
			memcpy(data, page_address(page) + cpoff, csize);
			break;
1321
		case XBRW_WRITE:
L
Linus Torvalds 已提交
1322 1323 1324 1325 1326 1327 1328 1329 1330
			memcpy(page_address(page) + cpoff, data, csize);
		}

		boff += csize;
		data += csize;
	}
}

/*
1331
 *	Handling of buffer targets (buftargs).
L
Linus Torvalds 已提交
1332 1333 1334
 */

/*
1335 1336
 *	Wait for any bufs with callbacks that have been submitted but
 *	have not yet returned... walk the hash list for the target.
L
Linus Torvalds 已提交
1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349
 */
void
xfs_wait_buftarg(
	xfs_buftarg_t	*btp)
{
	xfs_buf_t	*bp, *n;
	xfs_bufhash_t	*hash;
	uint		i;

	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		hash = &btp->bt_hash[i];
again:
		spin_lock(&hash->bh_lock);
1350 1351 1352
		list_for_each_entry_safe(bp, n, &hash->bh_list, b_hash_list) {
			ASSERT(btp == bp->b_target);
			if (!(bp->b_flags & XBF_FS_MANAGED)) {
L
Linus Torvalds 已提交
1353
				spin_unlock(&hash->bh_lock);
1354 1355 1356 1357
				/*
				 * Catch superblock reference count leaks
				 * immediately
				 */
1358
				BUG_ON(bp->b_bn == 0);
L
Linus Torvalds 已提交
1359 1360 1361 1362 1363 1364 1365 1366 1367
				delay(100);
				goto again;
			}
		}
		spin_unlock(&hash->bh_lock);
	}
}

/*
1368 1369 1370
 *	Allocate buffer hash table for a given target.
 *	For devices containing metadata (i.e. not the log/realtime devices)
 *	we need to allocate a much larger hash table.
L
Linus Torvalds 已提交
1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381
 */
STATIC void
xfs_alloc_bufhash(
	xfs_buftarg_t		*btp,
	int			external)
{
	unsigned int		i;

	btp->bt_hashshift = external ? 3 : 8;	/* 8 or 256 buckets */
	btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
	btp->bt_hash = kmem_zalloc((1 << btp->bt_hashshift) *
1382
					sizeof(xfs_bufhash_t), KM_SLEEP | KM_LARGE);
L
Linus Torvalds 已提交
1383 1384 1385 1386 1387 1388 1389 1390 1391 1392
	for (i = 0; i < (1 << btp->bt_hashshift); i++) {
		spin_lock_init(&btp->bt_hash[i].bh_lock);
		INIT_LIST_HEAD(&btp->bt_hash[i].bh_list);
	}
}

STATIC void
xfs_free_bufhash(
	xfs_buftarg_t		*btp)
{
1393
	kmem_free(btp->bt_hash);
L
Linus Torvalds 已提交
1394 1395 1396
	btp->bt_hash = NULL;
}

1397
/*
1398
 *	buftarg list for delwrite queue processing
1399
 */
1400
static LIST_HEAD(xfs_buftarg_list);
1401
static DEFINE_SPINLOCK(xfs_buftarg_lock);
1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420

STATIC void
xfs_register_buftarg(
	xfs_buftarg_t           *btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_add(&btp->bt_list, &xfs_buftarg_list);
	spin_unlock(&xfs_buftarg_lock);
}

STATIC void
xfs_unregister_buftarg(
	xfs_buftarg_t           *btp)
{
	spin_lock(&xfs_buftarg_lock);
	list_del(&btp->bt_list);
	spin_unlock(&xfs_buftarg_lock);
}

L
Linus Torvalds 已提交
1421 1422
void
xfs_free_buftarg(
1423 1424
	struct xfs_mount	*mp,
	struct xfs_buftarg	*btp)
L
Linus Torvalds 已提交
1425 1426
{
	xfs_flush_buftarg(btp, 1);
1427 1428
	if (mp->m_flags & XFS_MOUNT_BARRIER)
		xfs_blkdev_issue_flush(btp);
L
Linus Torvalds 已提交
1429
	xfs_free_bufhash(btp);
1430
	iput(btp->bt_mapping->host);
1431

1432 1433 1434
	/* Unregister the buftarg first so that we don't get a
	 * wakeup finding a non-existent task
	 */
1435 1436 1437
	xfs_unregister_buftarg(btp);
	kthread_stop(btp->bt_task);

1438
	kmem_free(btp);
L
Linus Torvalds 已提交
1439 1440 1441 1442 1443 1444 1445 1446 1447
}

STATIC int
xfs_setsize_buftarg_flags(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize,
	int			verbose)
{
1448 1449 1450
	btp->bt_bsize = blocksize;
	btp->bt_sshift = ffs(sectorsize) - 1;
	btp->bt_smask = sectorsize - 1;
L
Linus Torvalds 已提交
1451

1452
	if (set_blocksize(btp->bt_bdev, sectorsize)) {
L
Linus Torvalds 已提交
1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471
		printk(KERN_WARNING
			"XFS: Cannot set_blocksize to %u on device %s\n",
			sectorsize, XFS_BUFTARG_NAME(btp));
		return EINVAL;
	}

	if (verbose &&
	    (PAGE_CACHE_SIZE / BITS_PER_LONG) > sectorsize) {
		printk(KERN_WARNING
			"XFS: %u byte sectors in use on device %s.  "
			"This is suboptimal; %u or greater is ideal.\n",
			sectorsize, XFS_BUFTARG_NAME(btp),
			(unsigned int)PAGE_CACHE_SIZE / BITS_PER_LONG);
	}

	return 0;
}

/*
1472 1473 1474 1475
 *	When allocating the initial buffer target we have not yet
 *	read in the superblock, so don't know what sized sectors
 *	are being used is at this early stage.  Play safe.
 */
L
Linus Torvalds 已提交
1476 1477 1478 1479 1480 1481
STATIC int
xfs_setsize_buftarg_early(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	return xfs_setsize_buftarg_flags(btp,
1482
			PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0);
L
Linus Torvalds 已提交
1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501
}

int
xfs_setsize_buftarg(
	xfs_buftarg_t		*btp,
	unsigned int		blocksize,
	unsigned int		sectorsize)
{
	return xfs_setsize_buftarg_flags(btp, blocksize, sectorsize, 1);
}

STATIC int
xfs_mapping_buftarg(
	xfs_buftarg_t		*btp,
	struct block_device	*bdev)
{
	struct backing_dev_info	*bdi;
	struct inode		*inode;
	struct address_space	*mapping;
1502
	static const struct address_space_operations mapping_aops = {
L
Linus Torvalds 已提交
1503
		.sync_page = block_sync_page,
1504
		.migratepage = fail_migrate_page,
L
Linus Torvalds 已提交
1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523
	};

	inode = new_inode(bdev->bd_inode->i_sb);
	if (!inode) {
		printk(KERN_WARNING
			"XFS: Cannot allocate mapping inode for device %s\n",
			XFS_BUFTARG_NAME(btp));
		return ENOMEM;
	}
	inode->i_mode = S_IFBLK;
	inode->i_bdev = bdev;
	inode->i_rdev = bdev->bd_dev;
	bdi = blk_get_backing_dev_info(bdev);
	if (!bdi)
		bdi = &default_backing_dev_info;
	mapping = &inode->i_data;
	mapping->a_ops = &mapping_aops;
	mapping->backing_dev_info = bdi;
	mapping_set_gfp_mask(mapping, GFP_NOFS);
1524
	btp->bt_mapping = mapping;
L
Linus Torvalds 已提交
1525 1526 1527
	return 0;
}

1528 1529 1530 1531 1532 1533 1534 1535
STATIC int
xfs_alloc_delwrite_queue(
	xfs_buftarg_t		*btp)
{
	int	error = 0;

	INIT_LIST_HEAD(&btp->bt_list);
	INIT_LIST_HEAD(&btp->bt_delwrite_queue);
E
Eric Sandeen 已提交
1536
	spin_lock_init(&btp->bt_delwrite_lock);
1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547
	btp->bt_flags = 0;
	btp->bt_task = kthread_run(xfsbufd, btp, "xfsbufd");
	if (IS_ERR(btp->bt_task)) {
		error = PTR_ERR(btp->bt_task);
		goto out_error;
	}
	xfs_register_buftarg(btp);
out_error:
	return error;
}

L
Linus Torvalds 已提交
1548 1549 1550 1551 1552 1553 1554 1555 1556
xfs_buftarg_t *
xfs_alloc_buftarg(
	struct block_device	*bdev,
	int			external)
{
	xfs_buftarg_t		*btp;

	btp = kmem_zalloc(sizeof(*btp), KM_SLEEP);

1557 1558
	btp->bt_dev =  bdev->bd_dev;
	btp->bt_bdev = bdev;
L
Linus Torvalds 已提交
1559 1560 1561 1562
	if (xfs_setsize_buftarg_early(btp, bdev))
		goto error;
	if (xfs_mapping_buftarg(btp, bdev))
		goto error;
1563 1564
	if (xfs_alloc_delwrite_queue(btp))
		goto error;
L
Linus Torvalds 已提交
1565 1566 1567 1568
	xfs_alloc_bufhash(btp, external);
	return btp;

error:
1569
	kmem_free(btp);
L
Linus Torvalds 已提交
1570 1571 1572 1573 1574
	return NULL;
}


/*
1575
 *	Delayed write buffer handling
L
Linus Torvalds 已提交
1576 1577
 */
STATIC void
1578 1579
xfs_buf_delwri_queue(
	xfs_buf_t		*bp,
L
Linus Torvalds 已提交
1580 1581
	int			unlock)
{
1582 1583
	struct list_head	*dwq = &bp->b_target->bt_delwrite_queue;
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
1584

C
Christoph Hellwig 已提交
1585 1586
	trace_xfs_buf_delwri_queue(bp, _RET_IP_);

1587
	ASSERT((bp->b_flags&(XBF_DELWRI|XBF_ASYNC)) == (XBF_DELWRI|XBF_ASYNC));
L
Linus Torvalds 已提交
1588

1589
	spin_lock(dwlk);
L
Linus Torvalds 已提交
1590
	/* If already in the queue, dequeue and place at tail */
1591 1592 1593 1594 1595
	if (!list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		if (unlock)
			atomic_dec(&bp->b_hold);
		list_del(&bp->b_list);
L
Linus Torvalds 已提交
1596 1597
	}

1598 1599 1600
	bp->b_flags |= _XBF_DELWRI_Q;
	list_add_tail(&bp->b_list, dwq);
	bp->b_queuetime = jiffies;
1601
	spin_unlock(dwlk);
L
Linus Torvalds 已提交
1602 1603

	if (unlock)
1604
		xfs_buf_unlock(bp);
L
Linus Torvalds 已提交
1605 1606 1607
}

void
1608 1609
xfs_buf_delwri_dequeue(
	xfs_buf_t		*bp)
L
Linus Torvalds 已提交
1610
{
1611
	spinlock_t		*dwlk = &bp->b_target->bt_delwrite_lock;
L
Linus Torvalds 已提交
1612 1613
	int			dequeued = 0;

1614
	spin_lock(dwlk);
1615 1616 1617
	if ((bp->b_flags & XBF_DELWRI) && !list_empty(&bp->b_list)) {
		ASSERT(bp->b_flags & _XBF_DELWRI_Q);
		list_del_init(&bp->b_list);
L
Linus Torvalds 已提交
1618 1619
		dequeued = 1;
	}
1620
	bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q);
1621
	spin_unlock(dwlk);
L
Linus Torvalds 已提交
1622 1623

	if (dequeued)
1624
		xfs_buf_rele(bp);
L
Linus Torvalds 已提交
1625

C
Christoph Hellwig 已提交
1626
	trace_xfs_buf_delwri_dequeue(bp, _RET_IP_);
L
Linus Torvalds 已提交
1627 1628 1629
}

STATIC void
1630
xfs_buf_runall_queues(
L
Linus Torvalds 已提交
1631 1632 1633 1634 1635 1636
	struct workqueue_struct	*queue)
{
	flush_workqueue(queue);
}

STATIC int
1637
xfsbufd_wakeup(
1638 1639
	int			priority,
	gfp_t			mask)
L
Linus Torvalds 已提交
1640
{
1641
	xfs_buftarg_t		*btp;
1642 1643

	spin_lock(&xfs_buftarg_lock);
1644
	list_for_each_entry(btp, &xfs_buftarg_list, bt_list) {
1645
		if (test_bit(XBT_FORCE_SLEEP, &btp->bt_flags))
1646
			continue;
1647
		set_bit(XBT_FORCE_FLUSH, &btp->bt_flags);
1648 1649 1650
		wake_up_process(btp->bt_task);
	}
	spin_unlock(&xfs_buftarg_lock);
L
Linus Torvalds 已提交
1651 1652 1653
	return 0;
}

1654 1655 1656 1657 1658 1659 1660 1661
/*
 * Move as many buffers as specified to the supplied list
 * idicating if we skipped any buffers to prevent deadlocks.
 */
STATIC int
xfs_buf_delwri_split(
	xfs_buftarg_t	*target,
	struct list_head *list,
1662
	unsigned long	age)
1663 1664 1665 1666 1667
{
	xfs_buf_t	*bp, *n;
	struct list_head *dwq = &target->bt_delwrite_queue;
	spinlock_t	*dwlk = &target->bt_delwrite_lock;
	int		skipped = 0;
1668
	int		force;
1669

1670
	force = test_and_clear_bit(XBT_FORCE_FLUSH, &target->bt_flags);
1671 1672 1673
	INIT_LIST_HEAD(list);
	spin_lock(dwlk);
	list_for_each_entry_safe(bp, n, dwq, b_list) {
C
Christoph Hellwig 已提交
1674
		trace_xfs_buf_delwri_split(bp, _RET_IP_);
1675 1676 1677
		ASSERT(bp->b_flags & XBF_DELWRI);

		if (!xfs_buf_ispin(bp) && !xfs_buf_cond_lock(bp)) {
1678
			if (!force &&
1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696
			    time_before(jiffies, bp->b_queuetime + age)) {
				xfs_buf_unlock(bp);
				break;
			}

			bp->b_flags &= ~(XBF_DELWRI|_XBF_DELWRI_Q|
					 _XBF_RUN_QUEUES);
			bp->b_flags |= XBF_WRITE;
			list_move_tail(&bp->b_list, list);
		} else
			skipped++;
	}
	spin_unlock(dwlk);

	return skipped;

}

L
Linus Torvalds 已提交
1697
STATIC int
1698
xfsbufd(
1699
	void		*data)
L
Linus Torvalds 已提交
1700
{
1701 1702 1703 1704
	struct list_head tmp;
	xfs_buftarg_t	*target = (xfs_buftarg_t *)data;
	int		count;
	xfs_buf_t	*bp;
L
Linus Torvalds 已提交
1705 1706 1707

	current->flags |= PF_MEMALLOC;

1708 1709
	set_freezable();

L
Linus Torvalds 已提交
1710
	do {
1711
		if (unlikely(freezing(current))) {
1712
			set_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1713
			refrigerator();
1714
		} else {
1715
			clear_bit(XBT_FORCE_SLEEP, &target->bt_flags);
1716
		}
L
Linus Torvalds 已提交
1717

1718 1719
		schedule_timeout_interruptible(
			xfs_buf_timer_centisecs * msecs_to_jiffies(10));
L
Linus Torvalds 已提交
1720

1721
		xfs_buf_delwri_split(target, &tmp,
1722
				xfs_buf_age_centisecs * msecs_to_jiffies(10));
L
Linus Torvalds 已提交
1723

1724
		count = 0;
L
Linus Torvalds 已提交
1725
		while (!list_empty(&tmp)) {
1726 1727
			bp = list_entry(tmp.next, xfs_buf_t, b_list);
			ASSERT(target == bp->b_target);
L
Linus Torvalds 已提交
1728

1729 1730
			list_del_init(&bp->b_list);
			xfs_buf_iostrategy(bp);
1731
			count++;
L
Linus Torvalds 已提交
1732 1733
		}

1734 1735
		if (as_list_len > 0)
			purge_addresses();
1736 1737
		if (count)
			blk_run_address_space(target->bt_mapping);
L
Linus Torvalds 已提交
1738

1739
	} while (!kthread_should_stop());
L
Linus Torvalds 已提交
1740

1741
	return 0;
L
Linus Torvalds 已提交
1742 1743 1744
}

/*
1745 1746 1747
 *	Go through all incore buffers, and release buffers if they belong to
 *	the given device. This is used in filesystem error handling to
 *	preserve the consistency of its metadata.
L
Linus Torvalds 已提交
1748 1749 1750
 */
int
xfs_flush_buftarg(
1751 1752
	xfs_buftarg_t	*target,
	int		wait)
L
Linus Torvalds 已提交
1753
{
1754 1755 1756
	struct list_head tmp;
	xfs_buf_t	*bp, *n;
	int		pincount = 0;
L
Linus Torvalds 已提交
1757

1758
	xfs_buf_runall_queues(xfsconvertd_workqueue);
1759 1760
	xfs_buf_runall_queues(xfsdatad_workqueue);
	xfs_buf_runall_queues(xfslogd_workqueue);
L
Linus Torvalds 已提交
1761

1762 1763
	set_bit(XBT_FORCE_FLUSH, &target->bt_flags);
	pincount = xfs_buf_delwri_split(target, &tmp, 0);
L
Linus Torvalds 已提交
1764 1765 1766 1767

	/*
	 * Dropped the delayed write list lock, now walk the temporary list
	 */
1768
	list_for_each_entry_safe(bp, n, &tmp, b_list) {
1769
		ASSERT(target == bp->b_target);
L
Linus Torvalds 已提交
1770
		if (wait)
1771
			bp->b_flags &= ~XBF_ASYNC;
L
Linus Torvalds 已提交
1772
		else
1773
			list_del_init(&bp->b_list);
L
Linus Torvalds 已提交
1774

1775
		xfs_buf_iostrategy(bp);
L
Linus Torvalds 已提交
1776 1777
	}

1778 1779 1780
	if (wait)
		blk_run_address_space(target->bt_mapping);

L
Linus Torvalds 已提交
1781 1782 1783 1784
	/*
	 * Remaining list items must be flushed before returning
	 */
	while (!list_empty(&tmp)) {
1785
		bp = list_entry(tmp.next, xfs_buf_t, b_list);
L
Linus Torvalds 已提交
1786

1787 1788 1789
		list_del_init(&bp->b_list);
		xfs_iowait(bp);
		xfs_buf_relse(bp);
L
Linus Torvalds 已提交
1790 1791 1792 1793 1794
	}

	return pincount;
}

1795
int __init
1796
xfs_buf_init(void)
L
Linus Torvalds 已提交
1797
{
1798 1799
	xfs_buf_zone = kmem_zone_init_flags(sizeof(xfs_buf_t), "xfs_buf",
						KM_ZONE_HWALIGN, NULL);
1800
	if (!xfs_buf_zone)
C
Christoph Hellwig 已提交
1801
		goto out;
1802

1803
	xfslogd_workqueue = create_workqueue("xfslogd");
1804
	if (!xfslogd_workqueue)
1805
		goto out_free_buf_zone;
L
Linus Torvalds 已提交
1806

1807
	xfsdatad_workqueue = create_workqueue("xfsdatad");
1808 1809
	if (!xfsdatad_workqueue)
		goto out_destroy_xfslogd_workqueue;
L
Linus Torvalds 已提交
1810

1811 1812 1813 1814
	xfsconvertd_workqueue = create_workqueue("xfsconvertd");
	if (!xfsconvertd_workqueue)
		goto out_destroy_xfsdatad_workqueue;

1815
	register_shrinker(&xfs_buf_shake);
1816
	return 0;
L
Linus Torvalds 已提交
1817

1818 1819
 out_destroy_xfsdatad_workqueue:
	destroy_workqueue(xfsdatad_workqueue);
1820 1821 1822
 out_destroy_xfslogd_workqueue:
	destroy_workqueue(xfslogd_workqueue);
 out_free_buf_zone:
1823
	kmem_zone_destroy(xfs_buf_zone);
C
Christoph Hellwig 已提交
1824
 out:
1825
	return -ENOMEM;
L
Linus Torvalds 已提交
1826 1827 1828
}

void
1829
xfs_buf_terminate(void)
L
Linus Torvalds 已提交
1830
{
1831
	unregister_shrinker(&xfs_buf_shake);
1832
	destroy_workqueue(xfsconvertd_workqueue);
1833 1834
	destroy_workqueue(xfsdatad_workqueue);
	destroy_workqueue(xfslogd_workqueue);
1835
	kmem_zone_destroy(xfs_buf_zone);
L
Linus Torvalds 已提交
1836
}
1837 1838 1839 1840 1841 1842 1843 1844

#ifdef CONFIG_KDB_MODULES
struct list_head *
xfs_get_buftarg_list(void)
{
	return &xfs_buftarg_list;
}
#endif