/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;

int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
}

/*
 * This function is called by ext4_evict_inode() to make sure there is
 * no more pending I/O completion work left to do.
 */
void ext4_ioend_shutdown(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
	/*
	 * We need to make sure the work structure is finished being
	 * used before we let the inode get destroyed.
	 */
	if (work_pending(&EXT4_I(inode)->i_unwritten_work))
		cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
}

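/*
 * Free an io_end once its last reference has been dropped.  This releases
 * the inode's outstanding io_end count (waking up ext4_ioend_shutdown() if
 * it reaches zero) and completes any direct I/O or AIO tied to this io_end.
 */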
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);

	if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
		wake_up_all(ext4_ioend_wq(io_end->inode));
	if (io_end->flag & EXT4_IO_END_DIRECT)
		inode_dio_done(io_end->inode);
	if (io_end->iocb)
		aio_complete(io_end->iocb, io_end->result, 0);
	kmem_cache_free(io_end_cachep, io_end);
}

static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;

	io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
	/* Wake up anyone waiting on unwritten extent conversion */
	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
		wake_up_all(ext4_ioend_wq(inode));
}

/* check a range of space and convert unwritten extents to written. */
static int ext4_end_io(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss!  "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}
	ext4_clear_io_unwritten_flag(io);
	ext4_release_io_end(io);
	return ret;
}

static void dump_completed_IO(struct inode *inode)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io, *io0, *io1;

	if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
		ext4_debug("inode %lu completed_io list is empty\n",
			   inode->i_ino);
		return;
	}

	ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
	list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
		cur = &io->list;
		before = cur->prev;
		io0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			    io, inode->i_ino, io0, io1);
	}
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct workqueue_struct *wq;
	unsigned long flags;

	BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	if (list_empty(&ei->i_completed_io_list))
		queue_work(wq, &ei->i_unwritten_work);
	list_add_tail(&io_end->list, &ei->i_completed_io_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

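/*
 * Pull all io_ends off the inode's completed_io list and convert their
 * unwritten extents to written ones; the first error encountered is
 * returned.
 */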
static int ext4_do_flush_completed_IO(struct inode *inode)
{
	ext4_io_end_t *io;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode);
	list_replace_init(&ei->i_completed_io_list, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io->list);

		err = ext4_end_io(io);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Work on completed AIO DIO I/O: convert unwritten extents to written
 * extents.
 */
void ext4_end_io_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_unwritten_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode);
}

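/*
 * Flush the inode's pending unwritten extent conversions and wait until all
 * conversions in flight have finished.  The caller must hold i_mutex unless
 * the inode is being freed.
 */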
int ext4_flush_unwritten_io(struct inode *inode)
{
	int ret;
	WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
		     !(inode->i_state & I_FREEING));
	ret = ext4_do_flush_completed_IO(inode);
	ext4_unwritten_wait(inode);
	return ret;
}

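/*
 * Allocate a new io_end with a single reference and account it in the
 * inode's count of outstanding io_ends.
 */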
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
	if (io) {
		atomic_inc(&EXT4_I(inode)->i_ioend_count);
		io->inode = inode;
		INIT_LIST_HEAD(&io->list);
		atomic_set(&io->count, 1);
	}
	return io;
}

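/*
 * io_end reference counting: ext4_put_io_end_defer() drops a reference and,
 * when the last one goes away, defers any unwritten extent conversion to the
 * workqueue via ext4_add_complete_io() (making it safe to call from interrupt
 * context).  ext4_put_io_end() instead performs the conversion synchronously.
 */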
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (atomic_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (atomic_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_extents(io_end->inode,
						io_end->offset, io_end->size);
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	atomic_inc(&io_end->count);
	return io_end;
}

/*
 * Print a buffer I/O error message compatible with fs/buffer.c.  This
 * provides compatibility with dmesg scrapers that look for a specific
 * buffer I/O error message.  We really need a unified error reporting
 * structure to userspace a la Digital Unix's uerf system, but it's
 * probably not going to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

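/*
 * bio completion handler for writeback: clear the async_write flag on all
 * buffers covered by this bio, end page writeback once no buffer of a page
 * remains under I/O, and drop the bio's reference on the io_end.
 */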
static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct inode *inode;
	int i;
	int blocksize;
	sector_t bi_sector = bio->bi_sector;

	BUG_ON(!io_end);
	inode = io_end->inode;
	blocksize = 1 << inode->i_blkbits;
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	for (i = 0; i < bio->bi_vcnt; i++) {
		struct bio_vec *bvec = &bio->bi_io_vec[i];
		struct page *page = bvec->bv_page;
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under BH_Uptodate_Lock
		 * to avoid races with other end io clearing async_write flags
		 */
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + blocksize > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (error)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		if (!under_io)
			end_page_writeback(page);
	}
	bio_put(bio);

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
	}

	ext4_put_io_end_defer(io_end);
}

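/* Submit the bio accumulated in the submission context, if there is one. */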
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
}

void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
	io->io_bio = NULL;
	io->io_end = NULL;
}

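/*
 * Start a new bio at the buffer's block and attach it to the submission
 * context; the bio takes its own reference on the current io_end.
 */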
static int io_submit_init_bio(struct ext4_io_submit *io,
			      struct buffer_head *bh)
{
	int nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
	if (!bio)
		return -ENOMEM;
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	return 0;
}

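/*
 * Add a buffer to the bio under construction.  If the buffer is not
 * physically contiguous with the bio, or the bio is already full, submit
 * the bio and start a new one.
 */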
static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct inode *inode,
			    struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	int ret;

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init_bio(io, bh);
		if (ret)
			return ret;
	}
	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	io_end = io->io_end;
	if (test_clear_buffer_uninit(bh))
		ext4_set_io_unwritten_flag(inode, io_end);
	io->io_next_block++;
	return 0;
}

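/*
 * Write out all dirty, mapped buffers of a locked page.  Buffers are first
 * marked async_write and only then submitted, so that end_page_writeback()
 * cannot run until the last buffer has been handed to the block layer.
 * Returns 0 or -ENOMEM; on error the page is redirtied for a later retry.
 */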
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, blocksize;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_submitted = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	set_page_writeback(page);
	ClearPageError(page);

	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the page before submitting so that
	 * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = page_buffers(page);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			/*
			 * Comments copied from block_write_full_page_endio:
			 *
			 * The page straddles i_size.  It must be zeroed out on
			 * each and every writepage invocation because it may
			 * be mmapped.  "A file is mapped in multiples of the
			 * page size.  For a file that is not a multiple of
			 * the  page size, the remaining memory is zeroed when
			 * mapped, and writes to that region are not written
			 * out to the file."
			 */
			zero_user_segment(page, block_start,
					  block_start + blocksize);
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			if (io->io_bio)
				ext4_io_submit(io);
			continue;
		}
		if (buffer_new(bh)) {
			clear_buffer_new(bh);
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		}
		set_buffer_async_write(bh);
	} while ((bh = bh->b_this_page) != head);

	/* Now submit buffers to write */
	bh = head = page_buffers(page);
	do {
		if (!buffer_async_write(bh))
			continue;
		ret = io_submit_add_bh(io, inode, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			redirty_page_for_writepage(wbc, page);
			break;
		}
		nr_submitted++;
		clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);

	/* Error stopped previous loop? Clean up buffers... */
	if (ret) {
		do {
			clear_buffer_async_write(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	unlock_page(page);
	/* Nothing submitted - we have to end page writeback */
	if (!nr_submitted)
		end_page_writeback(page);
	return ret;
}