compression.c 27.7 KB
Newer Older
C
Chris Mason 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
34
#include <linux/slab.h>
C
Chris Mason 已提交
35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* the compression algorithm for this bio */
	int compress_type;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;

	/* mirror number the read was issued against (used in csum error
	 * reporting); 0 for writes */
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};

/* forward declaration: decompress a whole extent into orig_bio's pages */
static int btrfs_decompress_bio(int type, struct page **pages_in,
				   u64 disk_start, struct bio *orig_bio,
				   size_t srclen);

88 89 90
static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
{
91 92
	struct btrfs_fs_info *fs_info = root->fs_info;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
93

94
	return sizeof(struct compressed_bio) +
95
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
96 97
}

/*
 * Allocate a bio aimed at @first_byte on @bdev with room for
 * BIO_MAX_PAGES segments.
 */
static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	/* bi_sector is in 512-byte units, hence the >> 9 */
	return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
}

/*
 * Verify each compressed page of @cb against the checksum array that
 * starts at cb->sums (one u32 per page here).  @disk_start is only used
 * for error reporting.  Returns 0 on success or -EIO on the first
 * mismatch.  NODATASUM inodes are not checked at all.
 */
static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;	/* seed for the crc */

		/* map, checksum a full page, finalize, unmap */
		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_info(BTRFS_I(inode)->root->fs_info,
			   "csum failed ino %llu extent %llu csum %u wanted %u mirror %d",
			   btrfs_ino(inode), disk_start, csum, *cb_sum,
			   cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;

	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (bio->bi_error)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	/* bi_sector is 512-byte units; << 9 converts to a byte offset */
	ret = check_compressed_csum(inode, cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, lets start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb->compress_type,
				      cb->compressed_pages,
				      cb->start,
				      cb->orig_bio,
				      cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];	/* process the range in batches of 16 */
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			/* page not present any more; skip one and retry */
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			/* drop the ref taken by find_get_pages_contig */
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_error)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	/* temporarily attach page 0 so the end_io hook can find the inode */
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_error ? 0 : 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	/* callers must hand us a page-aligned file offset */
	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	if (!bio) {
		kfree(cb);
		return -ENOMEM;
	}
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			ret = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		/* submit the current bio when it is full or unmergeable */
		if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(root, inode, bio,
							 start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(root, bio, 0, 1);
			if (ret) {
				bio->bi_error = ret;
				bio_endio(bio);
			}

			bio_put(bio);

			/* start a fresh bio and retry this page in it */
			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			BUG_ON(!bio);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
					"bytes left %lu compress len %lu nr %lu",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	/* submit the final (possibly partial) bio */
	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, bio, 0, 1);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(bio);
	}

	bio_put(bio);
	return 0;
}

450 451 452 453 454 455 456
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

/*
 * Readahead helper: extend cb->orig_bio with extra uncompressed pages so
 * that one decompression pass covers as much of the compressed extent
 * (up to @compressed_end) as possible.  Pages are only added while they
 * are absent from the page cache and still map to this same compressed
 * extent on disk.  Always returns 0; it simply stops early on any
 * obstacle.
 */
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			/* page already cached; give up after a few misses */
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			/* someone else cached it first */
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		/* bail out if the page maps to a different extent */
		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			/* zero the tail of the last page beyond i_size */
			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			/* bio is full; undo and stop extending */
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret = -ENOMEM;
	int faili = 0;	/* highest allocated page index for error unwind */
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return -EIO;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = -ENOMEM;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra-bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	if (!comp_bio)
		goto fail2;
	bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			ret = tree->ops->merge_bio_hook(page, 0,
							PAGE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		/* submit the current bio when it is full or unmergeable */
		if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			atomic_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(root, inode,
							comp_bio, sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			/* advance past the sums consumed by this bio */
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_error = ret;
				bio_endio(comp_bio);
			}

			bio_put(comp_bio);

			/* start a fresh bio and retry this page in it */
			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			BUG_ON(!comp_bio);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);

	/* submit the final (possibly partial) bio */
	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_error = ret;
		bio_endio(comp_bio);
	}

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}
755

/* per-compression-type pool of reusable workspaces */
static struct {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

/* ops table indexed by (compression type - 1) */
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
};

/*
 * Module init: set up the workspace pools and try to preallocate one
 * workspace per compression type.
 */
void __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		struct list_head *workspace;

		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			/* not fatal; find_workspace() will retry later */
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantees and we do not return
 * errors.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;

	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
	int *free_ws			= &btrfs_comp_ws[idx].free_ws;
again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		/* reuse an idle workspace */
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;

	}
	if (atomic_read(total_ws) > cpus) {
		/* enough workspaces exist; sleep until one is freed */
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	workspace = btrfs_compress_op[idx]->alloc_workspace();
	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_ws	= &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock		= &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws		= &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait	= &btrfs_comp_ws[idx].ws_wait;
	int *free_ws			= &btrfs_comp_ws[idx].free_ws;

	spin_lock(ws_lock);
	if (*free_ws < num_online_cpus()) {
		/* keep up to num_online_cpus() workspaces cached */
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
910 911
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
912 913
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
914
			atomic_dec(&btrfs_comp_ws[i].total_ws);
915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965
		}
	}
}

/*
 * given an address space and start/len, compress the bytes.
 *
 * pages are allocated to hold the compressed result and stored
 * in 'pages'
 *
 * out_pages is used to return the number of pages allocated.  There
 * may be pages allocated even if we return an error
 *
 * total_in is used to return the number of bytes actually read.  It
 * may be smaller then len if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * total_out is used to return the total number of compressed bytes
 *
 * max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, unsigned long len,
			 struct page **pages,
			 unsigned long nr_dest_pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out,
			 unsigned long max_out)
{
	struct list_head *workspace;
	int ret;

	/* may sleep until a workspace is available */
	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
						      start, len, pages,
						      nr_dest_pages, out_pages,
						      total_in, total_out,
						      max_out);
	free_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(int type, struct page **pages_in,
				   u64 disk_start, struct bio *orig_bio,
				   size_t srclen)
{
	struct list_head *workspace;
	int ret;

	/* may sleep until a workspace is available */
	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->decompress_bio(workspace, pages_in,
							 disk_start, orig_bio,
							 srclen);
	free_workspace(type, workspace);
	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	/* may sleep until a workspace is available */
	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						  dest_page, start_byte,
						  srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

/* module exit: release all cached compression workspaces */
void btrfs_exit_compress(void)
{
	free_workspaces();
}
1016 1017 1018 1019 1020 1021 1022 1023 1024 1025
/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset we're of the start of our workspace buffer.
 *
 * total_out is the last byte of the buffer
 *
 * Returns 1 when the bio still needs more data (or the current buffer
 * does not cover the next page), 0 when the whole bio has been filled.
 */
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);

		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * make sure our new page is covered by this
		 * working buffer
		 */
		if (total_out <= start_byte)
			return 1;

		/*
		 * the next page in the biovec might not be adjacent
		 * to the last page, but it might still be found
		 * inside this working buffer. bump our offset pointer
		 */
		if (total_out > start_byte &&
		    current_buf_start < start_byte) {
			buf_offset = start_byte - buf_start;
			working_bytes = total_out - start_byte;
			current_buf_start = buf_start + buf_offset;
		}
	}

	return 1;
}