/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_priv.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_log.h"


kmem_zone_t	*xfs_buf_item_zone;

static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_buf_log_item, bli_item);
}

STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);

static inline int
xfs_buf_log_format_size(
	struct xfs_buf_log_format *blfp)
{
	return offsetof(struct xfs_buf_log_format, blf_data_map) +
			(blfp->blf_map_size * sizeof(blfp->blf_data_map[0]));
}
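
/*
 * Worked example (illustrative only, field sizes assumed): with 32-bit map
 * words and blf_map_size == 2, the logged format structure occupies
 * offsetof(struct xfs_buf_log_format, blf_data_map) + 2 * sizeof(__u32)
 * bytes, i.e. only as much of the dirty bitmap as this buffer actually uses.
 */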

/*
 * This returns the number of log iovecs needed to log the
 * given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure
 * and 1 for each stretch of non-contiguous chunks to be logged.
 * Contiguous chunks are logged in a single iovec.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing.
 */
STATIC void
xfs_buf_item_size_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_buf_log_format *blfp,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf		*bp = bip->bli_buf;
	int			next_bit;
	int			last_bit;

	last_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (last_bit == -1)
		return;

	/*
	 * initial count for a dirty buffer is 2 vectors - the format structure
	 * and the first dirty region.
	 */
	*nvecs += 2;
	*nbytes += xfs_buf_log_format_size(blfp) + XFS_BLF_CHUNK;

	while (last_bit != -1) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					last_bit + 1);
		/*
		 * If we run out of bits, leave the loop,
		 * else if we find a new set of bits bump the number of vecs,
		 * else keep scanning the current set of bits.
		 */
		if (next_bit == -1) {
			break;
		} else if (next_bit != last_bit + 1) {
			last_bit = next_bit;
			(*nvecs)++;
		} else if (xfs_buf_offset(bp, next_bit * XFS_BLF_CHUNK) !=
			   (xfs_buf_offset(bp, last_bit * XFS_BLF_CHUNK) +
			    XFS_BLF_CHUNK)) {
			last_bit = next_bit;
			(*nvecs)++;
		} else {
			last_bit++;
		}
		*nbytes += XFS_BLF_CHUNK;
	}
}
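
/*
 * Worked example (illustrative only): if a segment's dirty map has bits
 * {1, 2, 4, 5} set, the code above counts the format header plus the first
 * run (*nvecs += 2), then one more vector when it steps from bit 2 to the
 * discontiguous bit 4, for a total of 3 iovecs and 4 * XFS_BLF_CHUNK dirty
 * bytes.
 */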

/*
 * This returns the number of log iovecs needed to log the given buf log item.
 *
 * It calculates this as 1 iovec for the buf log format structure and 1 for each
 * stretch of non-contiguous chunks to be logged.  Contiguous chunks are logged
 * in a single iovec.
 *
 * Discontiguous buffers need a format structure per region that is being
 * logged. This makes the changes in the buffer appear to log recovery as though
 * they came from separate buffers, just like would occur if multiple buffers
 * were used instead of a single discontiguous buffer. This enables
 * discontiguous buffers to be in-memory constructs, completely transparent to
 * what ends up on disk.
 *
 * If the XFS_BLI_STALE flag has been set, then log nothing but the buf log
 * format structures.
 */
STATIC void
xfs_buf_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_size_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		*nvecs += bip->bli_format_count;
		for (i = 0; i < bip->bli_format_count; i++) {
			*nbytes += xfs_buf_log_format_size(&bip->bli_formats[i]);
		}
		return;
	}

	ASSERT(bip->bli_flags & XFS_BLI_LOGGED);

	if (bip->bli_flags & XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.
		 * It is not being included in the transaction
		 * commit, so no vectors are used at all.
		 */
		trace_xfs_buf_item_size_ordered(bip);
		*nvecs = XFS_LOG_VEC_ORDERED;
		return;
	}

	/*
	 * the vector count is based on the number of buffer vectors we have
	 * dirty bits in. This will only be greater than one when we have a
	 * compound buffer with more than one segment dirty. Hence for compound
	 * buffers we need to track which segment the dirty bits correspond to,
	 * and when we move from one segment to the next increment the vector
	 * count for the extra buf log format structure that will need to be
	 * written.
	 */
	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_size_segment(bip, &bip->bli_formats[i],
					  nvecs, nbytes);
	}
	trace_xfs_buf_item_size(bip);
}

static inline void
xfs_buf_item_copy_iovec(
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	struct xfs_buf		*bp,
	uint			offset,
	int			first_bit,
	uint			nbits)
{
	offset += first_bit * XFS_BLF_CHUNK;
	xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BCHUNK,
			xfs_buf_offset(bp, offset),
			nbits * XFS_BLF_CHUNK);
}

static inline bool
xfs_buf_item_straddle(
	struct xfs_buf		*bp,
	uint			offset,
	int			next_bit,
	int			last_bit)
{
	return xfs_buf_offset(bp, offset + (next_bit << XFS_BLF_SHIFT)) !=
		(xfs_buf_offset(bp, offset + (last_bit << XFS_BLF_SHIFT)) +
		 XFS_BLF_CHUNK);
}
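
/*
 * Illustration (assumed memory layout): even when next_bit == last_bit + 1,
 * the two chunks may sit on non-adjacent pages of an unmapped buffer, in
 * which case xfs_buf_offset() is not contiguous across them and the run has
 * to be split into separate copy regions.
 */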

static void
xfs_buf_item_format_segment(
	struct xfs_buf_log_item	*bip,
	struct xfs_log_vec	*lv,
	struct xfs_log_iovec	**vecp,
	uint			offset,
	struct xfs_buf_log_format *blfp)
{
	struct xfs_buf	*bp = bip->bli_buf;
	uint		base_size;
	int		first_bit;
	int		last_bit;
	int		next_bit;
	uint		nbits;

	/* copy the flags across from the base format item */
	blfp->blf_flags = bip->__bli_format.blf_flags;

	/*
	 * Base size is the actual size of the ondisk structure - it reflects
	 * the actual size of the dirty bitmap rather than the size of the in
	 * memory structure.
	 */
	base_size = xfs_buf_log_format_size(blfp);

	first_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size, 0);
	if (!(bip->bli_flags & XFS_BLI_STALE) && first_bit == -1) {
		/*
		 * If the map is not dirty in the transaction, mark
		 * the size as zero and do not advance the vector pointer.
		 */
		return;
	}

	blfp = xlog_copy_iovec(lv, vecp, XLOG_REG_TYPE_BFORMAT, blfp, base_size);
	blfp->blf_size = 1;

	if (bip->bli_flags & XFS_BLI_STALE) {
		/*
		 * The buffer is stale, so all we need to log
		 * is the buf log format structure with the
		 * cancel flag in it.
		 */
		trace_xfs_buf_item_format_stale(bip);
		ASSERT(blfp->blf_flags & XFS_BLF_CANCEL);
		return;
	}


	/*
	 * Fill in an iovec for each set of contiguous chunks.
	 */
	last_bit = first_bit;
	nbits = 1;
	for (;;) {
		/*
		 * This takes the bit number to start looking from and
		 * returns the next set bit from there.  It returns -1
		 * if there are no more bits set or the start bit is
		 * beyond the end of the bitmap.
		 */
		next_bit = xfs_next_bit(blfp->blf_data_map, blfp->blf_map_size,
					(uint)last_bit + 1);
		/*
		 * If we run out of bits fill in the last iovec and get out of
		 * the loop.  Else if we start a new set of bits then fill in
		 * the iovec for the series we were looking at and start
		 * counting the bits in the new one.  Else we're still in the
		 * same set of bits so just keep counting and scanning.
		 */
		if (next_bit == -1) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			break;
		} else if (next_bit != last_bit + 1 ||
		           xfs_buf_item_straddle(bp, offset, next_bit, last_bit)) {
			xfs_buf_item_copy_iovec(lv, vecp, bp, offset,
						first_bit, nbits);
			blfp->blf_size++;
			first_bit = next_bit;
			last_bit = next_bit;
			nbits = 1;
		} else {
			last_bit++;
			nbits++;
		}
	}
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given log buf item.  It fills the first entry with a buf log
 * format structure, and the rest point to contiguous chunks
 * within the buffer.
 */
STATIC void
xfs_buf_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	struct xfs_log_iovec	*vecp = NULL;
	uint			offset = 0;
	int			i;

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_STALE));
	ASSERT((bip->bli_flags & XFS_BLI_STALE) ||
	       (xfs_blft_from_flags(&bip->__bli_format) > XFS_BLFT_UNKNOWN_BUF
	        && xfs_blft_from_flags(&bip->__bli_format) < XFS_BLFT_MAX_BUF));


	/*
	 * If it is an inode buffer, transfer the in-memory state to the
	 * format flags and clear the in-memory state.
	 *
	 * For buffer based inode allocation, we do not transfer
	 * this state if the inode buffer allocation has not yet been committed
	 * to the log as setting the XFS_BLI_INODE_BUF flag will prevent
	 * correct replay of the inode allocation.
	 *
	 * For icreate item based inode allocation, the buffers aren't written
	 * to the journal during allocation, and hence we should always tag the
	 * buffer as an inode buffer so that the correct unlinked list replay
	 * occurs during recovery.
	 */
	if (bip->bli_flags & XFS_BLI_INODE_BUF) {
		if (xfs_sb_version_hascrc(&lip->li_mountp->m_sb) ||
		    !((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) &&
		      xfs_log_item_in_current_chkpt(lip)))
			bip->__bli_format.blf_flags |= XFS_BLF_INODE_BUF;
		bip->bli_flags &= ~XFS_BLI_INODE_BUF;
	}

	if ((bip->bli_flags & (XFS_BLI_ORDERED|XFS_BLI_STALE)) ==
							XFS_BLI_ORDERED) {
		/*
		 * The buffer has been logged just to order it.  It is not being
		 * included in the transaction commit, so don't format it.
		 */
		trace_xfs_buf_item_format_ordered(bip);
		return;
	}

	for (i = 0; i < bip->bli_format_count; i++) {
		xfs_buf_item_format_segment(bip, lv, &vecp, offset,
					    &bip->bli_formats[i]);
		offset += bp->b_maps[i].bm_len;
	}

	/*
	 * Check to make sure everything is consistent.
	 */
	trace_xfs_buf_item_format(bip);
}

/*
 * This is called to pin the buffer associated with the buf log item in memory
 * so it cannot be written out.
 *
 * We also always take a reference to the buffer log item here so that the bli
 * is held while the item is pinned in memory. This means that we can
 * unconditionally drop the reference count a transaction holds when the
 * transaction is completed.
 */
STATIC void
xfs_buf_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	ASSERT(atomic_read(&bip->bli_refcount) > 0);
	ASSERT((bip->bli_flags & XFS_BLI_LOGGED) ||
	       (bip->bli_flags & XFS_BLI_ORDERED) ||
	       (bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_pin(bip);

	atomic_inc(&bip->bli_refcount);
	atomic_inc(&bip->bli_buf->b_pin_count);
}

/*
 * This is called to unpin the buffer associated with the buf log
 * item which was previously pinned with a call to xfs_buf_item_pin().
 *
 * Also drop the reference to the buf item for the current transaction.
 * If the XFS_BLI_STALE flag is set and we are the last reference,
 * then free up the buf log item and unlock the buffer.
 *
 * If the remove flag is set we are called from uncommit in the
 * forced-shutdown path.  If that is true and the reference count on
 * the log item is going to drop to zero we need to free the item's
 * descriptor in the transaction.
 */
STATIC void
xfs_buf_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	xfs_buf_t	*bp = bip->bli_buf;
	struct xfs_ail	*ailp = lip->li_ailp;
	int		stale = bip->bli_flags & XFS_BLI_STALE;
	int		freed;

	ASSERT(bp->b_fspriv == bip);
	ASSERT(atomic_read(&bip->bli_refcount) > 0);

	trace_xfs_buf_item_unpin(bip);

	freed = atomic_dec_and_test(&bip->bli_refcount);

	if (atomic_dec_and_test(&bp->b_pin_count))
		wake_up_all(&bp->b_waiters);

	if (freed && stale) {
		ASSERT(bip->bli_flags & XFS_BLI_STALE);
		ASSERT(xfs_buf_islocked(bp));
		ASSERT(bp->b_flags & XBF_STALE);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);

		trace_xfs_buf_item_unpin_stale(bip);

		if (remove) {
			/*
			 * If we are in a transaction context, we have to
			 * remove the log item from the transaction as we are
			 * about to release our reference to the buffer.  If we
			 * don't, the unlock that occurs later in
			 * xfs_trans_uncommit() will try to reference the
			 * buffer which we no longer have a hold on.
			 */
			if (lip->li_desc)
				xfs_trans_del_item(lip);

			/*
			 * Since the transaction no longer refers to the buffer,
			 * the buffer should no longer refer to the transaction.
			 */
			bp->b_transp = NULL;
		}

		/*
		 * If we get called here because of an IO error, we may
		 * or may not have the item on the AIL. xfs_trans_ail_delete()
		 * will take care of that situation.
		 * xfs_trans_ail_delete() drops the AIL lock.
		 */
		if (bip->bli_flags & XFS_BLI_STALE_INODE) {
			xfs_buf_do_callbacks(bp);
			bp->b_fspriv = NULL;
			bp->b_iodone = NULL;
		} else {
			spin_lock(&ailp->xa_lock);
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
			ASSERT(bp->b_fspriv == NULL);
		}
		xfs_buf_relse(bp);
	} else if (freed && remove) {
		/*
		 * There are currently two references to the buffer - the active
		 * LRU reference and the buf log item. What we are about to do
		 * here - simulate a failed IO completion - requires 3
		 * references.
		 *
		 * The LRU reference is removed by the xfs_buf_stale() call. The
		 * buf item reference is removed by the xfs_buf_iodone()
		 * callback that is run by xfs_buf_do_callbacks() during ioend
		 * processing (via the bp->b_iodone callback), and then finally
		 * the ioend processing will drop the IO reference if the buffer
		 * is marked XBF_ASYNC.
		 *
		 * Hence we need to take an additional reference here so that IO
		 * completion processing doesn't free the buffer prematurely.
		 */
		xfs_buf_lock(bp);
		xfs_buf_hold(bp);
		bp->b_flags |= XBF_ASYNC;
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
		xfs_buf_stale(bp);
		xfs_buf_ioend(bp);
	}
}

/*
 * Buffer IO error rate limiting. Limit it to no more than 10 messages per 30
 * seconds so as to not spam logs too much on repeated detection of the same
 * buffer being bad.
 */

static DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);

STATIC uint
xfs_buf_item_push(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	uint			rval = XFS_ITEM_SUCCESS;

	if (xfs_buf_ispinned(bp))
		return XFS_ITEM_PINNED;
	if (!xfs_buf_trylock(bp)) {
		/*
		 * If we have just raced with a buffer being pinned and it has
		 * been marked stale, we could end up stalling until someone else
		 * issues a log force to unpin the stale buffer. Check for the
		 * race condition here so xfsaild recognizes the buffer is pinned
		 * and queues a log force to move it along.
		 */
		if (xfs_buf_ispinned(bp))
			return XFS_ITEM_PINNED;
		return XFS_ITEM_LOCKED;
	}

	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));

	trace_xfs_buf_item_push(bip);

	/* has a previous flush failed due to IO errors? */
	if ((bp->b_flags & XBF_WRITE_FAIL) &&
	    ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS: Failing async write")) {
		xfs_warn(bp->b_target->bt_mount,
"Failing async write on buffer block 0x%llx. Retrying async write.",
			 (long long)bp->b_bn);
	}

	if (!xfs_buf_delwri_queue(bp, buffer_list))
		rval = XFS_ITEM_FLUSHING;
	xfs_buf_unlock(bp);
	return rval;
}

/*
 * Release the buffer associated with the buf log item.  If there is no dirty
 * logged data associated with the buffer recorded in the buf log item, then
 * free the buf log item and remove the reference to it in the buffer.
 *
 * This call ignores the recursion count.  It is only called when the buffer
 * should REALLY be unlocked, regardless of the recursion count.
 *
 * We unconditionally drop the transaction's reference to the log item. If the
 * item was logged, then another reference was taken when it was pinned, so we
 * can safely drop the transaction reference now.  This also allows us to avoid
 * potential races with the unpin code freeing the bli by not referencing the
 * bli after we've dropped the reference count.
 *
 * If the XFS_BLI_HOLD flag is set in the buf log item, then free the log item
 * if necessary but do not unlock the buffer.  This is for support of
 * xfs_trans_bhold(). Make sure the XFS_BLI_HOLD field is cleared if we don't
 * free the item.
 */
STATIC void
xfs_buf_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);
	struct xfs_buf		*bp = bip->bli_buf;
	bool			clean;
	bool			aborted;
	int			flags;

	/* Clear the buffer's association with this transaction. */
	bp->b_transp = NULL;

	/*
	 * If this is a transaction abort, don't return early.  Instead, allow
	 * the brelse to happen.  Normally it would be done for stale
	 * (cancelled) buffers at unpin time, but we'll never go through the
	 * pin/unpin cycle if we abort inside commit.
	 */
	aborted = (lip->li_flags & XFS_LI_ABORTED) ? true : false;
	/*
	 * Before possibly freeing the buf item, copy the per-transaction state
	 * so we can reference it safely later after clearing it from the
	 * buffer log item.
	 */
	flags = bip->bli_flags;
	bip->bli_flags &= ~(XFS_BLI_LOGGED | XFS_BLI_HOLD | XFS_BLI_ORDERED);

	/*
	 * If the buf item is marked stale, then don't do anything.  We'll
	 * unlock the buffer and free the buf item when the buffer is unpinned
	 * for the last time.
	 */
	if (flags & XFS_BLI_STALE) {
		trace_xfs_buf_item_unlock_stale(bip);
		ASSERT(bip->__bli_format.blf_flags & XFS_BLF_CANCEL);
		if (!aborted) {
			atomic_dec(&bip->bli_refcount);
			return;
		}
	}

	trace_xfs_buf_item_unlock(bip);

	/*
	 * If the buf item isn't tracking any data, free it, otherwise drop the
	 * reference we hold to it. If we are aborting the transaction, this may
	 * be the only reference to the buf item, so we free it anyway
	 * regardless of whether it is dirty or not. A dirty abort implies a
	 * shutdown, anyway.
	 *
	 * Ordered buffers are dirty but may have no recorded changes, so ensure
	 * we only release clean items here.
	 */
	clean = (flags & XFS_BLI_DIRTY) ? false : true;
	if (clean) {
		int i;
		for (i = 0; i < bip->bli_format_count; i++) {
			if (!xfs_bitmap_empty(bip->bli_formats[i].blf_data_map,
				     bip->bli_formats[i].blf_map_size)) {
				clean = false;
				break;
			}
		}
	}

	/*
	 * Clean buffers, by definition, cannot be in the AIL. However, aborted
	 * buffers may be dirty and hence in the AIL. Therefore if we are
	 * aborting a buffer and we've just taken the last reference away, we
	 * have to check if it is in the AIL before freeing it. We need to free
	 * it in this case, because an aborted transaction has already shut the
	 * filesystem down and this is the last chance we will have to do so.
	 */
	if (atomic_dec_and_test(&bip->bli_refcount)) {
		if (clean)
			xfs_buf_item_relse(bp);
		else if (aborted) {
			ASSERT(XFS_FORCED_SHUTDOWN(lip->li_mountp));
			xfs_trans_ail_remove(lip, SHUTDOWN_LOG_IO_ERROR);
			xfs_buf_item_relse(bp);
		}
	}

	if (!(flags & XFS_BLI_HOLD))
		xfs_buf_relse(bp);
}

/*
 * This is called to find out where the oldest active copy of the
 * buf log item in the on disk log resides now that the last log
 * write of it completed at the given lsn.
 * We always re-log all the dirty data in a buffer, so usually the
 * latest copy in the on disk log is the only one that matters.  For
 * those cases we simply return the given lsn.
 *
 * The one exception to this is for buffers full of newly allocated
 * inodes.  These buffers are only relogged with the XFS_BLI_INODE_BUF
 * flag set, indicating that only the di_next_unlinked fields from the
 * inodes in the buffers will be replayed during recovery.  If the
 * original newly allocated inode images have not yet been flushed
 * when the buffer is so relogged, then we need to make sure that we
 * keep the old images in the 'active' portion of the log.  We do this
 * by returning the original lsn of that transaction here rather than
 * the current one.
 */
STATIC xfs_lsn_t
xfs_buf_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_buf_log_item	*bip = BUF_ITEM(lip);

	trace_xfs_buf_item_committed(bip);

	if ((bip->bli_flags & XFS_BLI_INODE_ALLOC_BUF) && lip->li_lsn != 0)
		return lip->li_lsn;
	return lsn;
}

STATIC void
xfs_buf_item_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
}

/*
 * This is the ops vector shared by all buf log items.
 */
static const struct xfs_item_ops xfs_buf_item_ops = {
	.iop_size	= xfs_buf_item_size,
	.iop_format	= xfs_buf_item_format,
	.iop_pin	= xfs_buf_item_pin,
	.iop_unpin	= xfs_buf_item_unpin,
	.iop_unlock	= xfs_buf_item_unlock,
	.iop_committed	= xfs_buf_item_committed,
	.iop_push	= xfs_buf_item_push,
	.iop_committing = xfs_buf_item_committing
};

STATIC int
xfs_buf_item_get_format(
	struct xfs_buf_log_item	*bip,
	int			count)
{
	ASSERT(bip->bli_formats == NULL);
	bip->bli_format_count = count;

	if (count == 1) {
		bip->bli_formats = &bip->__bli_format;
		return 0;
	}

	bip->bli_formats = kmem_zalloc(count * sizeof(struct xfs_buf_log_format),
				KM_SLEEP);
	if (!bip->bli_formats)
		return -ENOMEM;
	return 0;
}

STATIC void
xfs_buf_item_free_format(
	struct xfs_buf_log_item	*bip)
{
	if (bip->bli_formats != &bip->__bli_format) {
		kmem_free(bip->bli_formats);
		bip->bli_formats = NULL;
	}
}

/*
 * Allocate a new buf log item to go with the given buffer.
 * Set the buffer's b_fsprivate field to point to the new
 * buf log item.  If there are other item's attached to the
 * buffer (see xfs_buf_attach_iodone() below), then put the
 * buf log item at the front.
 */
int
xfs_buf_item_init(
	struct xfs_buf	*bp,
	struct xfs_mount *mp)
{
	struct xfs_log_item	*lip = bp->b_fspriv;
	struct xfs_buf_log_item	*bip;
	int			chunks;
	int			map_size;
	int			error;
	int			i;

	/*
	 * Check to see if there is already a buf log item for
	 * this buffer.  If there is, it is guaranteed to be
	 * the first.  If we do already have one, there is
	 * nothing to do here so return.
	 */
	ASSERT(bp->b_target->bt_mount == mp);
	if (lip != NULL && lip->li_type == XFS_LI_BUF)
		return 0;

	bip = kmem_zone_zalloc(xfs_buf_item_zone, KM_SLEEP);
	xfs_log_item_init(mp, &bip->bli_item, XFS_LI_BUF, &xfs_buf_item_ops);
	bip->bli_buf = bp;

	/*
	 * chunks is the number of XFS_BLF_CHUNK size pieces the buffer
	 * can be divided into. Make sure not to truncate any pieces.
	 * map_size is the size of the bitmap needed to describe the
	 * chunks of the buffer.
	 *
	 * Discontiguous buffer support follows the layout of the underlying
	 * buffer. This makes the implementation as simple as possible.
	 */
	error = xfs_buf_item_get_format(bip, bp->b_map_count);
	ASSERT(error == 0);
	if (error) {	/* to stop gcc throwing set-but-unused warnings */
		kmem_zone_free(xfs_buf_item_zone, bip);
		return error;
	}


	for (i = 0; i < bip->bli_format_count; i++) {
		chunks = DIV_ROUND_UP(BBTOB(bp->b_maps[i].bm_len),
				      XFS_BLF_CHUNK);
		map_size = DIV_ROUND_UP(chunks, NBWORD);

		bip->bli_formats[i].blf_type = XFS_LI_BUF;
		bip->bli_formats[i].blf_blkno = bp->b_maps[i].bm_bn;
		bip->bli_formats[i].blf_len = bp->b_maps[i].bm_len;
		bip->bli_formats[i].blf_map_size = map_size;
	}

	/*
	 * Put the buf item into the list of items attached to the
	 * buffer at the front.
	 */
	if (bp->b_fspriv)
		bip->bli_item.li_bio_list = bp->b_fspriv;
	bp->b_fspriv = bip;
	xfs_buf_hold(bp);
	return 0;
}
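
/*
 * Usage sketch (hedged, not a verbatim caller; the exact call site such as
 * _xfs_trans_bjoin() varies between kernel versions): joining a buffer to a
 * transaction is expected to look roughly like
 *
 *	error = xfs_buf_item_init(bp, tp->t_mountp);
 *	if (error)
 *		return error;
 *	bip = bp->b_fspriv;
 *	atomic_inc(&bip->bli_refcount);
 *	xfs_trans_add_item(tp, &bip->bli_item);
 */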


/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
static void
xfs_buf_item_log_segment(
	uint			first,
	uint			last,
	uint			*map)
{
	uint		first_bit;
	uint		last_bit;
	uint		bits_to_set;
	uint		bits_set;
	uint		word_num;
	uint		*wordp;
	uint		bit;
	uint		end_bit;
	uint		mask;

	/*
	 * Convert byte offsets to bit numbers.
	 */
	first_bit = first >> XFS_BLF_SHIFT;
	last_bit = last >> XFS_BLF_SHIFT;

	/*
	 * Calculate the total number of bits to be set.
	 */
	bits_to_set = last_bit - first_bit + 1;

	/*
	 * Get a pointer to the first word in the bitmap
	 * to set a bit in.
	 */
	word_num = first_bit >> BIT_TO_WORD_SHIFT;
	wordp = &map[word_num];

	/*
	 * Calculate the starting bit in the first word.
	 */
	bit = first_bit & (uint)(NBWORD - 1);

	/*
	 * First set any bits in the first word of our range.
	 * If it starts at bit 0 of the word, it will be
	 * set below rather than here.  That is what the variable
	 * bit tells us. The variable bits_set tracks the number
	 * of bits that have been set so far.  End_bit is the number
	 * of the last bit to be set in this word plus one.
	 */
	if (bit) {
		end_bit = MIN(bit + bits_to_set, (uint)NBWORD);
		mask = ((1 << (end_bit - bit)) - 1) << bit;
		*wordp |= mask;
		wordp++;
		bits_set = end_bit - bit;
	} else {
		bits_set = 0;
	}

	/*
	 * Now set bits a whole word at a time that are between
	 * first_bit and last_bit.
	 */
	while ((bits_to_set - bits_set) >= NBWORD) {
		*wordp |= 0xffffffff;
		bits_set += NBWORD;
		wordp++;
	}

	/*
	 * Finally, set any bits left to be set in one last partial word.
	 */
	end_bit = bits_to_set - bits_set;
	if (end_bit) {
		mask = (1 << end_bit) - 1;
		*wordp |= mask;
	}
}
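
/*
 * Worked example (values assumed for illustration, taking XFS_BLF_SHIFT == 7,
 * i.e. 128-byte chunks): logging bytes 256 through 600 yields first_bit = 2
 * and last_bit = 4, so the partial-word path above sets bits 2..4 of the
 * first map word with mask 0x1c.
 */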

/*
 * Mark bytes first through last inclusive as dirty in the buf
 * item's bitmap.
 */
void
xfs_buf_item_log(
	xfs_buf_log_item_t	*bip,
	uint			first,
	uint			last)
{
	int			i;
	uint			start;
	uint			end;
	struct xfs_buf		*bp = bip->bli_buf;

	/*
	 * walk each buffer segment and mark them dirty appropriately.
	 */
	start = 0;
	for (i = 0; i < bip->bli_format_count; i++) {
		if (start > last)
			break;
		end = start + BBTOB(bp->b_maps[i].bm_len);
		if (first > end) {
			start += BBTOB(bp->b_maps[i].bm_len);
			continue;
		}
		if (first < start)
			first = start;
		if (end > last)
			end = last;

		xfs_buf_item_log_segment(first, end,
					 &bip->bli_formats[i].blf_data_map[0]);

		start += bp->b_maps[i].bm_len;
	}
}
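
/*
 * Usage sketch (hedged, caller details may vary): transactions normally reach
 * this through xfs_trans_log_buf(tp, bp, first, last), e.g.
 *
 *	xfs_trans_log_buf(tp, bp, 0, 127);
 *
 * which, assuming 128-byte chunks, dirties only chunk bit 0 of the first
 * segment.
 */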


/*
 * Return 1 if the buffer has been logged or ordered in a transaction (at any
 * point, not just the current transaction) and 0 if not.
 */
uint
xfs_buf_item_dirty(
	xfs_buf_log_item_t	*bip)
{
	return (bip->bli_flags & XFS_BLI_DIRTY);
}

STATIC void
xfs_buf_item_free(
	xfs_buf_log_item_t	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the buf log item is no longer needed.  It should
 * free the buf log item associated with the given buffer and clear
 * the buffer's pointer to the buf log item.  If there are no more
 * items in the list, clear the b_iodone field of the buffer (see
 * xfs_buf_attach_iodone() below).
 */
void
xfs_buf_item_relse(
	xfs_buf_t	*bp)
{
	xfs_buf_log_item_t	*bip = bp->b_fspriv;

	trace_xfs_buf_item_relse(bp, _RET_IP_);
	ASSERT(!(bip->bli_item.li_flags & XFS_LI_IN_AIL));

	bp->b_fspriv = bip->bli_item.li_bio_list;
	if (bp->b_fspriv == NULL)
		bp->b_iodone = NULL;

	xfs_buf_rele(bp);
	xfs_buf_item_free(bip);
}


/*
 * Add the given log item with its callback to the list of callbacks
 * to be called when the buffer's I/O completes.  If it is not set
 * already, set the buffer's b_iodone() routine to be
 * xfs_buf_iodone_callbacks() and link the log item into the list of
 * items rooted at b_fsprivate.  Items are always added as the second
 * entry in the list if there is a first, because the buf item code
 * assumes that the buf log item is first.
 */
void
xfs_buf_attach_iodone(
	xfs_buf_t	*bp,
	void		(*cb)(xfs_buf_t *, xfs_log_item_t *),
	xfs_log_item_t	*lip)
{
	xfs_log_item_t	*head_lip;

	ASSERT(xfs_buf_islocked(bp));

	lip->li_cb = cb;
	head_lip = bp->b_fspriv;
	if (head_lip) {
		lip->li_bio_list = head_lip->li_bio_list;
		head_lip->li_bio_list = lip;
	} else {
		bp->b_fspriv = lip;
	}

	ASSERT(bp->b_iodone == NULL ||
	       bp->b_iodone == xfs_buf_iodone_callbacks);
	bp->b_iodone = xfs_buf_iodone_callbacks;
}
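
/*
 * Usage sketch (hedged, illustrative caller only): flush paths attach their
 * completion callbacks this way, e.g. the inode flush code does roughly
 *
 *	xfs_buf_attach_iodone(bp, xfs_iflush_done, &iip->ili_item);
 *
 * so that xfs_buf_iodone_callbacks() can run xfs_iflush_done() once the
 * buffer write completes.
 */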

/*
 * We can have many callbacks on a buffer. Running the callbacks individually
 * can cause a lot of contention on the AIL lock, so we allow for a single
 * callback to be able to scan the remaining lip->li_bio_list for other items
 * of the same type and callback to be processed in the first call.
 *
 * As a result, the loop walking the callback list below will also modify the
 * list. It removes the first item from the list and then runs the callback.
 * The loop then restarts from the new head of the list. This allows the
 * callback to scan and modify the list attached to the buffer and we don't
 * have to care about maintaining a next item pointer.
 */
STATIC void
xfs_buf_do_callbacks(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip;

	while ((lip = bp->b_fspriv) != NULL) {
		bp->b_fspriv = lip->li_bio_list;
		ASSERT(lip->li_cb != NULL);
		/*
		 * Clear the next pointer so we don't have any
		 * confusion if the item is added to another buf.
		 * Don't touch the log item after calling its
		 * callback, because it could have freed itself.
		 */
		lip->li_bio_list = NULL;
		lip->li_cb(bp, lip);
	}
}

static bool
xfs_buf_iodone_callback_error(
	struct xfs_buf		*bp)
{
	struct xfs_log_item	*lip = bp->b_fspriv;
	struct xfs_mount	*mp = lip->li_mountp;
	static ulong		lasttime;
	static xfs_buftarg_t	*lasttarg;
	struct xfs_error_cfg	*cfg;

	/*
	 * If we've already decided to shutdown the filesystem because of
	 * I/O errors, there's no point in giving this a retry.
	 */
	if (XFS_FORCED_SHUTDOWN(mp))
		goto out_stale;

	if (bp->b_target != lasttarg ||
	    time_after(jiffies, (lasttime + 5*HZ))) {
		lasttime = jiffies;
		xfs_buf_ioerror_alert(bp, __func__);
	}
	lasttarg = bp->b_target;

	/* synchronous writes will have callers process the error */
	if (!(bp->b_flags & XBF_ASYNC))
		goto out_stale;

	trace_xfs_buf_item_iodone_async(bp, _RET_IP_);
	ASSERT(bp->b_iodone != NULL);

	/*
	 * If the write was asynchronous then no one will be looking for the
	 * error.  If this is the first failure of this type, clear the error
	 * state and write the buffer out again. This means we always retry an
	 * async write failure at least once, but we also need to set the buffer
	 * up to behave correctly now for repeated failures.
	 */
	if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL)) ||
	     bp->b_last_error != bp->b_error) {
		bp->b_flags |= (XBF_WRITE | XBF_ASYNC |
			        XBF_DONE | XBF_WRITE_FAIL);
		bp->b_last_error = bp->b_error;
		bp->b_retries = 0;
		bp->b_first_retry_time = jiffies;

		xfs_buf_ioerror(bp, 0);
		xfs_buf_submit(bp);
		return true;
	}

	/*
	 * Repeated failure on an async write. Take action according to the
	 * error configuration we have been set up to use.
	 */
	cfg = xfs_error_get_cfg(mp, XFS_ERR_METADATA, bp->b_error);

	if (cfg->max_retries != XFS_ERR_RETRY_FOREVER &&
	    ++bp->b_retries > cfg->max_retries)
			goto permanent_error;
	if (cfg->retry_timeout &&
	    time_after(jiffies, cfg->retry_timeout + bp->b_first_retry_time))
			goto permanent_error;

	/* At unmount we may treat errors differently */
	if ((mp->m_flags & XFS_MOUNT_UNMOUNTING) && mp->m_fail_unmount)
		goto permanent_error;

	/* still a transient error, higher layers will retry */
	xfs_buf_ioerror(bp, 0);
	xfs_buf_relse(bp);
	return true;

	/*
	 * Permanent error - we need to trigger a shutdown if we haven't already
	 * to indicate that inconsistency will result from this action.
	 */
permanent_error:
	xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
out_stale:
	xfs_buf_stale(bp);
	bp->b_flags |= XBF_DONE;
	trace_xfs_buf_error_relse(bp, _RET_IP_);
	return false;
}

/*
 * This is the iodone() function for buffers which have had callbacks attached
 * to them by xfs_buf_attach_iodone(). We need to iterate the items on the
 * callback list, mark the buffer as having no more callbacks and then push the
 * buffer through IO completion processing.
 */
void
xfs_buf_iodone_callbacks(
	struct xfs_buf		*bp)
{
	/*
	 * If there is an error, process it. Some errors require us
	 * to run callbacks after failure processing is done so we
	 * detect that and take appropriate action.
	 */
	if (bp->b_error && xfs_buf_iodone_callback_error(bp))
		return;

	/*
	 * Successful IO or permanent error. Either way, we can clear the
	 * retry state here in preparation for the next error that may occur.
	 */
	bp->b_last_error = 0;
	bp->b_retries = 0;

	xfs_buf_do_callbacks(bp);
	bp->b_fspriv = NULL;
	bp->b_iodone = NULL;
	xfs_buf_ioend(bp);
}

/*
 * This is the iodone() function for buffers which have been
 * logged.  It is called when they are eventually flushed out.
 * It should remove the buf item from the AIL, and free the buf item.
 * It is called by xfs_buf_iodone_callbacks() above which will take
 * care of cleaning up the buffer itself.
 */
void
xfs_buf_iodone(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail		*ailp = lip->li_ailp;

	ASSERT(BUF_ITEM(lip)->bli_buf == bp);

	xfs_buf_rele(bp);

	/*
	 * If we are forcibly shutting down, this may well be
	 * off the AIL already. That's because we simulate the
	 * log-committed callbacks to unpin these buffers. Or we may never
	 * have put this item on the AIL because the transaction was
	 * aborted forcibly. xfs_trans_ail_delete() takes care of these.
	 *
	 * Either way, AIL is useless if we're forcing a shutdown.
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
	xfs_buf_item_free(BUF_ITEM(lip));
}