/*
 * linux/fs/ext4/page-io.c
 *
 * This contains the new page_io functions for ext4
 *
 * Written by Theodore Ts'o, 2010.
 */

#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"

static struct kmem_cache *io_end_cachep;

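/* Create the slab cache used for ext4_io_end allocations. */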
int __init ext4_init_pageio(void)
{
	io_end_cachep = KMEM_CACHE(ext4_io_end, SLAB_RECLAIM_ACCOUNT);
	if (io_end_cachep == NULL)
		return -ENOMEM;
	return 0;
}

void ext4_exit_pageio(void)
{
	kmem_cache_destroy(io_end_cachep);
}

/*
 * This function is called by ext4_evict_inode() to make sure there is
 * no more pending I/O completion work left to do.
 */
void ext4_ioend_shutdown(struct inode *inode)
{
	wait_queue_head_t *wq = ext4_ioend_wq(inode);

	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
	/*
	 * We need to make sure the work structure is finished being
	 * used before we let the inode get destroyed.
	 */
	if (work_pending(&EXT4_I(inode)->i_unwritten_work))
		cancel_work_sync(&EXT4_I(inode)->i_unwritten_work);
}

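/*
 * Free an io_end once its last reference is gone.  The io_end must already
 * be off any completed-IO list and must have finished unwritten extent
 * conversion (both enforced by the BUG_ONs below).  Dropping the last
 * pending io_end of an inode wakes up ext4_ioend_shutdown() waiters, and
 * direct IO / AIO completion is signalled from here as well.
 */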
static void ext4_release_io_end(ext4_io_end_t *io_end)
{
	BUG_ON(!list_empty(&io_end->list));
	BUG_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);

	if (atomic_dec_and_test(&EXT4_I(io_end->inode)->i_ioend_count))
		wake_up_all(ext4_ioend_wq(io_end->inode));
	if (io_end->flag & EXT4_IO_END_DIRECT)
		inode_dio_done(io_end->inode);
	if (io_end->iocb)
		aio_complete(io_end->iocb, io_end->result, 0);
	kmem_cache_free(io_end_cachep, io_end);
}

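/* Mark unwritten extent conversion done and wake up any waiters. */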
static void ext4_clear_io_unwritten_flag(ext4_io_end_t *io_end)
{
	struct inode *inode = io_end->inode;

	io_end->flag &= ~EXT4_IO_END_UNWRITTEN;
	/* Wake up anyone waiting on unwritten extent conversion */
	if (atomic_dec_and_test(&EXT4_I(inode)->i_unwritten))
		wake_up_all(ext4_ioend_wq(inode));
}

/* check a range of space and convert unwritten extents to written. */
static int ext4_end_io(ext4_io_end_t *io)
{
	struct inode *inode = io->inode;
	loff_t offset = io->offset;
	ssize_t size = io->size;
	int ret = 0;

	ext4_debug("ext4_end_io_nolock: io 0x%p from inode %lu,list->next 0x%p,"
		   "list->prev 0x%p\n",
		   io, inode->i_ino, io->list.next, io->list.prev);

	ret = ext4_convert_unwritten_extents(inode, offset, size);
	if (ret < 0) {
		ext4_msg(inode->i_sb, KERN_EMERG,
			 "failed to convert unwritten extents to written "
			 "extents -- potential data loss!  "
			 "(inode %lu, offset %llu, size %zd, error %d)",
			 inode->i_ino, offset, size, ret);
	}
	ext4_clear_io_unwritten_flag(io);
	ext4_release_io_end(io);
	return ret;
}

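/* Dump the per-inode completed-IO list (compiled in only with EXT4FS_DEBUG). */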
static void dump_completed_IO(struct inode *inode)
{
#ifdef	EXT4FS_DEBUG
	struct list_head *cur, *before, *after;
	ext4_io_end_t *io, *io0, *io1;

	if (list_empty(&EXT4_I(inode)->i_completed_io_list)) {
		ext4_debug("inode %lu completed_io list is empty\n",
			   inode->i_ino);
		return;
	}

	ext4_debug("Dump inode %lu completed_io list\n", inode->i_ino);
	list_for_each_entry(io, &EXT4_I(inode)->i_completed_io_list, list) {
		cur = &io->list;
		before = cur->prev;
		io0 = container_of(before, ext4_io_end_t, list);
		after = cur->next;
		io1 = container_of(after, ext4_io_end_t, list);

		ext4_debug("io 0x%p from inode %lu,prev 0x%p,next 0x%p\n",
			    io, inode->i_ino, io0, io1);
	}
#endif
}

/* Add the io_end to per-inode completed end_io list. */
static void ext4_add_complete_io(ext4_io_end_t *io_end)
{
	struct ext4_inode_info *ei = EXT4_I(io_end->inode);
	struct workqueue_struct *wq;
	unsigned long flags;

	BUG_ON(!(io_end->flag & EXT4_IO_END_UNWRITTEN));
	wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	if (list_empty(&ei->i_completed_io_list))
		queue_work(wq, &ei->i_unwritten_work);
	list_add_tail(&io_end->list, &ei->i_completed_io_list);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
}

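/*
 * Splice the inode's completed-IO list onto a private list under the
 * spinlock, then convert each io_end without the lock held.  Returns the
 * first error encountered, if any.
 */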
static int ext4_do_flush_completed_IO(struct inode *inode)
{
	ext4_io_end_t *io;
	struct list_head unwritten;
	unsigned long flags;
	struct ext4_inode_info *ei = EXT4_I(inode);
	int err, ret = 0;

	spin_lock_irqsave(&ei->i_completed_io_lock, flags);
	dump_completed_IO(inode);
	list_replace_init(&ei->i_completed_io_list, &unwritten);
	spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);

	while (!list_empty(&unwritten)) {
		io = list_entry(unwritten.next, ext4_io_end_t, list);
		BUG_ON(!(io->flag & EXT4_IO_END_UNWRITTEN));
		list_del_init(&io->list);

		err = ext4_end_io(io);
		if (unlikely(!ret && err))
			ret = err;
	}
	return ret;
}

/*
 * Work on completed aio dio IO, converting unwritten extents to written.
 */
void ext4_end_io_work(struct work_struct *work)
{
	struct ext4_inode_info *ei = container_of(work, struct ext4_inode_info,
						  i_unwritten_work);
	ext4_do_flush_completed_IO(&ei->vfs_inode);
}

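/*
 * Flush all pending unwritten conversions for the inode and wait until no
 * unwritten extents remain outstanding.  Callers must hold i_mutex unless
 * the inode is being freed.
 */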
int ext4_flush_unwritten_io(struct inode *inode)
{
	int ret;
	WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex) &&
		     !(inode->i_state & I_FREEING));
	ret = ext4_do_flush_completed_IO(inode);
	ext4_unwritten_wait(inode);
	return ret;
}

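/* Allocate a zeroed io_end with a single initial reference. */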
ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags)
{
	ext4_io_end_t *io = kmem_cache_zalloc(io_end_cachep, flags);
	if (io) {
		atomic_inc(&EXT4_I(inode)->i_ioend_count);
		io->inode = inode;
		INIT_LIST_HEAD(&io->list);
		atomic_set(&io->count, 1);
	}
	return io;
}

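/*
 * io_end reference counting: ext4_get_io_end() takes a reference (e.g. one
 * per submitted bio), ext4_put_io_end() drops one and performs any needed
 * unwritten extent conversion synchronously, while ext4_put_io_end_defer()
 * hands the conversion off to the per-inode work item so that it is safe
 * to call from bio completion context (see ext4_end_bio()).
 */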
void ext4_put_io_end_defer(ext4_io_end_t *io_end)
{
	if (atomic_dec_and_test(&io_end->count)) {
		if (!(io_end->flag & EXT4_IO_END_UNWRITTEN) || !io_end->size) {
			ext4_release_io_end(io_end);
			return;
		}
		ext4_add_complete_io(io_end);
	}
}

int ext4_put_io_end(ext4_io_end_t *io_end)
{
	int err = 0;

	if (atomic_dec_and_test(&io_end->count)) {
		if (io_end->flag & EXT4_IO_END_UNWRITTEN) {
			err = ext4_convert_unwritten_extents(io_end->inode,
						io_end->offset, io_end->size);
			ext4_clear_io_unwritten_flag(io_end);
		}
		ext4_release_io_end(io_end);
	}
	return err;
}

ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
{
	atomic_inc(&io_end->count);
	return io_end;
}

/*
 * Print a buffer I/O error compatible with fs/buffer.c.  This provides
 * compatibility with dmesg scrapers that look for a specific buffer I/O
 * error message.  We really need a unified error reporting structure to
 * userspace a la Digital Unix's uerf system, but it's probably not going
 * to happen in my lifetime, due to LKML politics...
 */
static void buffer_io_error(struct buffer_head *bh)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_ERR "Buffer I/O error on device %s, logical block %llu\n",
			bdevname(bh->b_bdev, b),
			(unsigned long long)bh->b_blocknr);
}

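/*
 * Bio completion handler for writeback: clear the async_write flag on every
 * buffer the bio covered, end page writeback once no buffer on the page is
 * still under IO, and drop the bio's reference on its io_end.
 */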
static void ext4_end_bio(struct bio *bio, int error)
{
	ext4_io_end_t *io_end = bio->bi_private;
	struct inode *inode;
	int i;
	int blocksize;
	sector_t bi_sector = bio->bi_sector;

	BUG_ON(!io_end);
	inode = io_end->inode;
	blocksize = 1 << inode->i_blkbits;
	bio->bi_private = NULL;
	bio->bi_end_io = NULL;
	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = 0;
	for (i = 0; i < bio->bi_vcnt; i++) {
		struct bio_vec *bvec = &bio->bi_io_vec[i];
		struct page *page = bvec->bv_page;
		struct buffer_head *bh, *head;
		unsigned bio_start = bvec->bv_offset;
		unsigned bio_end = bio_start + bvec->bv_len;
		unsigned under_io = 0;
		unsigned long flags;

		if (!page)
			continue;

		if (error) {
			SetPageError(page);
			set_bit(AS_EIO, &page->mapping->flags);
		}
		bh = head = page_buffers(page);
		/*
		 * We check all buffers in the page under BH_Uptodate_Lock
		 * to avoid races with other end io clearing async_write flags
		 */
		local_irq_save(flags);
		bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
		do {
			if (bh_offset(bh) < bio_start ||
			    bh_offset(bh) + blocksize > bio_end) {
				if (buffer_async_write(bh))
					under_io++;
				continue;
			}
			clear_buffer_async_write(bh);
			if (error)
				buffer_io_error(bh);
		} while ((bh = bh->b_this_page) != head);
		bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
		local_irq_restore(flags);
		if (!under_io)
			end_page_writeback(page);
	}
	bio_put(bio);

	if (error) {
		io_end->flag |= EXT4_IO_END_ERROR;
		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
			     "(offset %llu size %ld starting block %llu)",
			     inode->i_ino,
			     (unsigned long long) io_end->offset,
			     (long) io_end->size,
			     (unsigned long long)
			     bi_sector >> (inode->i_blkbits - 9));
	}

	ext4_put_io_end_defer(io_end);
}

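/*
 * Submit the bio accumulated in *io, if any.  The bio_get()/bio_put() pair
 * keeps the bio alive across submit_bio() so that the BIO_EOPNOTSUPP test
 * does not race with completion freeing the bio.
 */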
void ext4_io_submit(struct ext4_io_submit *io)
{
	struct bio *bio = io->io_bio;

	if (bio) {
		bio_get(io->io_bio);
		submit_bio(io->io_op, io->io_bio);
		BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
		bio_put(io->io_bio);
	}
	io->io_bio = NULL;
}

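/*
 * Prepare an ext4_io_submit context for a writeback pass.  Roughly (a
 * sketch of the expected pattern; the exact call sites live in
 * fs/ext4/inode.c) a caller does:
 *
 *	ext4_io_submit_init(&io, wbc);
 *	io.io_end = ext4_init_io_end(inode, GFP_NOFS);
 *	ext4_bio_write_page(&io, page, len, wbc);	(per dirty page)
 *	ext4_io_submit(&io);
 *	ext4_put_io_end(io.io_end);			(or the _defer variant)
 */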
void ext4_io_submit_init(struct ext4_io_submit *io,
			 struct writeback_control *wbc)
{
	io->io_op = (wbc->sync_mode == WB_SYNC_ALL ?  WRITE_SYNC : WRITE);
	io->io_bio = NULL;
	io->io_end = NULL;
}

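/*
 * Start a new bio at bh's block.  bio_alloc() with GFP_NOIO is mempool
 * backed and is expected not to return NULL here.  The bio takes its own
 * reference on the current io_end, and the first buffer added records the
 * logical file offset at which the io_end starts.
 */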
static int io_submit_init_bio(struct ext4_io_submit *io,
			      struct buffer_head *bh)
{
	int nvecs = bio_get_nr_vecs(bh->b_bdev);
	struct bio *bio;

	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
	bio->bi_bdev = bh->b_bdev;
	bio->bi_end_io = ext4_end_bio;
	bio->bi_private = ext4_get_io_end(io->io_end);
	if (!io->io_end->size)
		io->io_end->offset = (bh->b_page->index << PAGE_CACHE_SHIFT)
				     + bh_offset(bh);
	io->io_bio = bio;
	io->io_next_block = bh->b_blocknr;
	return 0;
}

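/*
 * Add bh to the bio under construction, submitting the current bio and
 * retrying with a fresh one when the block is discontiguous or the bio
 * is full.
 */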
static int io_submit_add_bh(struct ext4_io_submit *io,
			    struct inode *inode,
			    struct buffer_head *bh)
{
	ext4_io_end_t *io_end;
	int ret;

	if (io->io_bio && bh->b_blocknr != io->io_next_block) {
submit_and_retry:
		ext4_io_submit(io);
	}
	if (io->io_bio == NULL) {
		ret = io_submit_init_bio(io, bh);
		if (ret)
			return ret;
	}
	ret = bio_add_page(io->io_bio, bh->b_page, bh->b_size, bh_offset(bh));
	if (ret != bh->b_size)
		goto submit_and_retry;
	io_end = io->io_end;
	if (buffer_uninit(bh))
		ext4_set_io_unwritten_flag(inode, io_end);
	io_end->size += bh->b_size;
	io->io_next_block++;
	return 0;
}

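/*
 * Write out the dirty buffers of one page: the first loop marks every
 * buffer to be submitted with async_write, the second feeds them to
 * io_submit_add_bh().  See the comment above the first loop for why the
 * two passes cannot be merged.
 */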
int ext4_bio_write_page(struct ext4_io_submit *io,
			struct page *page,
			int len,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	unsigned block_start, blocksize;
	struct buffer_head *bh, *head;
	int ret = 0;
	int nr_submitted = 0;

	blocksize = 1 << inode->i_blkbits;

	BUG_ON(!PageLocked(page));
	BUG_ON(PageWriteback(page));

	set_page_writeback(page);
	ClearPageError(page);

	/*
	 * In the first loop we prepare and mark buffers to submit. We have to
	 * mark all buffers in the page before submitting so that
	 * end_page_writeback() cannot be called from ext4_bio_end_io() when IO
	 * on the first buffer finishes and we are still working on submitting
	 * the second buffer.
	 */
	bh = head = page_buffers(page);
	do {
		block_start = bh_offset(bh);
		if (block_start >= len) {
			/*
			 * Comments copied from block_write_full_page_endio:
			 *
			 * The page straddles i_size.  It must be zeroed out on
			 * each and every writepage invocation because it may
			 * be mmapped.  "A file is mapped in multiples of the
			 * page size.  For a file that is not a multiple of
			 * the  page size, the remaining memory is zeroed when
			 * mapped, and writes to that region are not written
			 * out to the file."
			 */
			zero_user_segment(page, block_start,
					  block_start + blocksize);
			clear_buffer_dirty(bh);
			set_buffer_uptodate(bh);
			continue;
		}
		if (!buffer_dirty(bh) || buffer_delay(bh) ||
		    !buffer_mapped(bh) || buffer_unwritten(bh)) {
			/* A hole? We can safely clear the dirty bit */
			if (!buffer_mapped(bh))
				clear_buffer_dirty(bh);
			if (io->io_bio)
				ext4_io_submit(io);
			continue;
		}
		if (buffer_new(bh)) {
			clear_buffer_new(bh);
			unmap_underlying_metadata(bh->b_bdev, bh->b_blocknr);
		}
		set_buffer_async_write(bh);
	} while ((bh = bh->b_this_page) != head);

	/* Now submit buffers to write */
	bh = head = page_buffers(page);
	do {
		if (!buffer_async_write(bh))
			continue;
		ret = io_submit_add_bh(io, inode, bh);
		if (ret) {
			/*
			 * We only get here on ENOMEM.  Not much else
			 * we can do but mark the page as dirty, and
			 * better luck next time.
			 */
			redirty_page_for_writepage(wbc, page);
			break;
		}
		nr_submitted++;
		clear_buffer_dirty(bh);
	} while ((bh = bh->b_this_page) != head);

	/* Error stopped previous loop? Clean up buffers... */
	if (ret) {
		do {
			clear_buffer_async_write(bh);
			bh = bh->b_this_page;
		} while (bh != head);
	}
	unlock_page(page);
	/* Nothing submitted - we have to end page writeback */
	if (!nr_submitted)
		end_page_writeback(page);
	return ret;
}