/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}
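
/*
 * Example (illustrative, assuming 4KiB sectors and the 4-byte crc32c
 * checksums btrfs uses here): a 128KiB compressed extent needs
 * sizeof(struct compressed_bio) plus 32 * 4 bytes of checksum space,
 * one u32 per sector.
 */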

static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
					*cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}
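
/*
 * Note: check_compressed_csum() checksums one PAGE_SIZE block per stored
 * sum, which lines up with the per-sector sums in cb->sums in the common
 * case of sectorsize == PAGE_SIZE.
 */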

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
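	/*
	 * The last argument of writepage_end_io_hook is the "uptodate"
	 * flag; BLK_STS_OK (0) and BLK_STS_NOTSUPP (1) are used below
	 * purely for their numeric values rather than their block layer
	 * meaning, so an errored bio passes uptodate == 0.
	 */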
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ?
					 BLK_STS_OK : BLK_STS_NOTSUPP);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages,
				 unsigned int write_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
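	/*
	 * Each pass below tries to append one compressed page to the current
	 * bio.  When merge_bio_hook signals the page can't be merged (e.g.
	 * it would cross a stripe boundary) or bio_add_page() can't take the
	 * full page, the bio is checksummed and submitted, and a fresh one
	 * is started at the current disk byte.
	 */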
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(bio);

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio_put(bio);

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
					"bytes left %lu compress len %lu nr %lu",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	bio_put(bio);
	return 0;
}

static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = &bio->bi_io_vec[bio->bi_vcnt - 1];

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

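/*
 * Populate the page cache with pages that sit past the end of the
 * original bio but still inside the same compressed extent, and add
 * them to cb->orig_bio.  A single decompression pass can then fill
 * them as well, giving a cheap form of readahead.
 */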
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							PAGE_SIZE,
							comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
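			/*
			 * Advance the csum cursor one u32 per sector covered
			 * by the bio just filled, so the next bio for this
			 * extent picks up its checksums where this one left
			 * off.
			 */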
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			bio_put(comp_bio);

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
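/*
 * With the usual BTRFS_MAX_UNCOMPRESSED of 128KiB this works out to
 * 131072 * 16 / 256 = 8192 bytes of sample per range.
 */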

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	struct list_head list;
};

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

struct workspaces_list {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};

static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

void __init btrfs_init_compress(void)
{
	struct list_head *workspace;
	int i;

	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

	workspace = alloc_heuristic_ws();
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
	} else {
		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
		btrfs_heuristic_ws.free_ws = 1;
		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation gives a forward progress guarantee and we do not return
 * errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
		free_ws	 = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
		free_ws	 = &btrfs_comp_ws[idx].free_ws;
	}

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	if (heuristic)
		workspace = alloc_heuristic_ws();
	else
		workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *find_workspace(int type)
{
	return __find_workspace(type, false);
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void __free_workspace(int type, struct list_head *workspace,
			     bool heuristic)
{
	int idx = type - 1;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
		free_ws	 = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
		free_ws	 = &btrfs_comp_ws[idx].free_ws;
	}

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	if (heuristic)
		free_heuristic_ws(workspace);
	else
		btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
{
	return __free_workspace(type, ws, false);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
		workspace = btrfs_heuristic_ws.idle_ws.next;
		list_del(workspace);
		free_heuristic_ws(workspace);
		atomic_dec(&btrfs_heuristic_ws.total_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - compression algo are 0-3
 * - the level are bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;
	int type = type_level & 0xF;

	workspace = find_workspace(type);

	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
	ret = btrfs_compress_op[type - 1]->compress_pages(workspace, mapping,
						      start, pages,
						      out_pages,
						      total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}
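
/*
 * Example of the @type_level encoding above (illustrative): zlib at level 3
 * is (BTRFS_COMPRESS_ZLIB | (3 << 4)) == 0x31, assuming the usual
 * BTRFS_COMPRESS_ZLIB == 1.
 */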

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * orig_bio contains the pages from the file that we want to decompress into
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type - 1]->decompress(workspace, data_in,
						  dest_page, start_byte,
						  srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset, within the uncompressed data, of the start
 * of our workspace buffer.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}
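
/*
 * Worked example (illustrative): if the working buffer holds decompressed
 * bytes 4096..8191 (buf_start == 4096, total_out == 8192) and the current
 * bvec page maps file offset disk_start + 4096, then start_byte == 4096,
 * buf_offset == 0 and working_bytes == 4096, so exactly that one page is
 * filled from the buffer.
 */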

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increasead precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}
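
/*
 * Worked example: for an 8192 byte sample of uniformly random data every
 * bucket holds 32 counts, so sz_base = ilog2(8192^4) = 52 and
 * p_base = ilog2(32^4) = 20; entropy_sum = 256 * 32 * (52 - 20) / 8192 = 32,
 * which is exactly entropy_max (8 * ilog2_w(2) = 32), ie. 100%.  A sample
 * of a single repeated byte value scores 0%.
 */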

/* Compare buckets by size, ascending */
static int bucket_comp_rev(const void *lv, const void *rv)
{
	const struct bucket_item *l = (const struct bucket_item *)lv;
	const struct bucket_item *r = (const struct bucket_item *)rv;

	return r->count - l->count;
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have a uniform distribution and with high
 *                       probability are not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	sort(bucket, BUCKET_SIZE, sizeof(*bucket), &bucket_comp_rev, NULL);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data the byte set is restricted to a limited
 * number of possible characters, and that restriction in most cases makes
 * the data easy to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
					SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap(page);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}
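
/*
 * Sampling arithmetic (illustrative, 4KiB pages): each page yields at most
 * 16 chunks of SAMPLING_READ_SIZE == 16 bytes (one every 256 bytes), ie.
 * 256 bytes per page, so a full 128KiB range contributes
 * 32 * 256 = 8192 bytes == MAX_SAMPLE_SIZE.
 */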

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = __find_workspace(0, true);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 * values, every bucket has counter at level ~54. The heuristic would
	 * be confused. This can happen when data have some internal repeated
	 * patterns like "abbacbbc...". This can be detected by analyzing
	 * pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	__free_workspace(0, ws_list, true);
	return ret;
}

unsigned int btrfs_compress_str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

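	/*
	 * e.g. "zlib:9" returns 9, while plain "zlib" (no level suffix)
	 * falls through to BTRFS_ZLIB_DEFAULT_LEVEL below.
	 */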
	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return BTRFS_ZLIB_DEFAULT_LEVEL;
}