// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	}

	return NULL;
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

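/*
 * A compressed_bio is followed by an array of checksums, one for each
 * sector of the compressed data on disk.
 */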
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * csum_size;
}

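/*
 * Verify the compressed pages against the checksums stored inline in the
 * compressed_bio (cb->sums, filled in at submit time), one checksum per
 * compressed page.
 */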
static int check_compressed_csum(struct btrfs_inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (inode->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (u8 *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_print_data_csum_error(inode, disk_start, csum,
					*cb_sum, cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;

	}
	ret = 0;
fail:
	return ret;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	ASSERT(btrfs_io_bio(cb->orig_bio));
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb have failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, lets start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
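	/*
	 * The hook's last argument is its "uptodate" flag, so an error here
	 * passes 0 (BLK_STS_OK) and success passes a non-zero status.
	 */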
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_status ?
					 BLK_STS_OK : BLK_STS_NOTSUPP);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
				 unsigned long len, u64 disk_start,
				 unsigned long compressed_len,
				 struct page **compressed_pages,
				 unsigned long nr_pages,
				 unsigned int write_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	blk_status_t ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = fs_info->fs_devices->latest_bdev;

	bio = btrfs_bio_alloc(bdev, first_byte);
	bio->bi_opf = REQ_OP_WRITE | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			submit = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0, 1);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(bdev, first_byte);
			bio->bi_opf = REQ_OP_WRITE | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
					"bytes left %lu compress len %lu nr %lu",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0, 1);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	return 0;
}

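/* Return the file offset just past the last byte covered by @bio */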
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

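/*
 * Populate the page cache range following cb->orig_bio (up to the end of
 * the compressed extent) with newly allocated pages and add them to the
 * bio, so a single decompression pass also fills this readahead window.
 * Offsets already present in the page cache are skipped.
 */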
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->i_pages, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio_first_page_all(bio)),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							      __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
	bio_set_op_attrs (comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		int submit = 0;

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = tree->ops->merge_bio_hook(page, 0,
							PAGE_SIZE,
							comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(inode, comp_bio,
							    sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     fs_info->sectorsize);

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(bdev, cur_disk_byte);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
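/* With BTRFS_MAX_UNCOMPRESSED = 128KiB: 131072 * 16 / 256 = 8192 bytes */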

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(void)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

struct workspaces_list {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
};

static struct workspaces_list btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static struct workspaces_list btrfs_heuristic_ws;

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

void __init btrfs_init_compress(void)
{
	struct list_head *workspace;
	int i;

	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);

	workspace = alloc_heuristic_ws();
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
	} else {
		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
		btrfs_heuristic_ws.free_ws = 1;
		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation guarantees forward progress and we do not return
 * errors.
 */
static struct list_head *__find_workspace(int type, bool heuristic)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
		free_ws	 = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
		free_ws	 = &btrfs_comp_ws[idx].free_ws;
	}

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;

	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	if (heuristic)
		workspace = alloc_heuristic_ws();
	else
		workspace = btrfs_compress_op[idx]->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
926 927 928 929
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *find_workspace(int type)
{
	return __find_workspace(type, false);
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void __free_workspace(int type, struct list_head *workspace,
			     bool heuristic)
{
	int idx = type - 1;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	if (heuristic) {
		idle_ws	 = &btrfs_heuristic_ws.idle_ws;
		ws_lock	 = &btrfs_heuristic_ws.ws_lock;
		total_ws = &btrfs_heuristic_ws.total_ws;
		ws_wait	 = &btrfs_heuristic_ws.ws_wait;
		free_ws	 = &btrfs_heuristic_ws.free_ws;
	} else {
		idle_ws	 = &btrfs_comp_ws[idx].idle_ws;
		ws_lock	 = &btrfs_comp_ws[idx].ws_lock;
		total_ws = &btrfs_comp_ws[idx].total_ws;
		ws_wait	 = &btrfs_comp_ws[idx].ws_wait;
		free_ws	 = &btrfs_comp_ws[idx].free_ws;
	}

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	if (heuristic)
		free_heuristic_ws(workspace);
	else
		btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

static void free_workspace(int type, struct list_head *ws)
{
	return __free_workspace(type, ws, false);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
		workspace = btrfs_heuristic_ws.idle_ws.next;
		list_del(workspace);
		free_heuristic_ws(workspace);
		atomic_dec(&btrfs_heuristic_ws.total_ws);
	}

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - compression algo are 0-3
 * - the level are bits 4-7
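 *   (eg. type_level 0x31 selects algorithm 1, zlib, at level 3)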
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 *
 * @max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	struct list_head *workspace;
	int ret;
	int type = type_level & 0xF;

	workspace = find_workspace(type);

	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
						      start, pages,
						      out_pages,
						      total_in, total_out);
	free_workspace(type, workspace);
	return ret;
}

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
1080
 * orig_bio contains the pages from the file that we want to decompress into
1081 1082 1083 1084 1085 1086 1087 1088
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = find_workspace(type);
	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
	free_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						  dest_page, start_byte,
						  srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}

void __cold btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the start of our workspace buffer within
 * the overall decompressed data.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);

		kaddr = kmap_atomic(bvec.bv_page);
		memcpy(kaddr + bvec.bv_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift) {
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array, to
		 * the main one.  But that requires some CPU time. Avoid that
		 * by doing another sort iteration to original array instead of
		 * memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i ++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Another possibility is a normal (Gaussian) distribution, where the data
 * could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
1403
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);
1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data the byte set is restricted to a limited
 * number of possible characters, and that restriction in most cases makes
 * the data easy to compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
					SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap(page);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = __find_workspace(0, true);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 * values, every bucket has counter at level ~54. The heuristic would
	 * be confused. This can happen when data have some internal repeated
	 * patterns like "abbacbbc...". This can be detected by analyzing
	 * pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	__free_workspace(0, ws_list, true);
	return ret;
}

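/*
 * eg. "zlib:3" returns 3, plain "zlib" returns BTRFS_ZLIB_DEFAULT_LEVEL and
 * any string not starting with "zlib" returns 0.
 */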
unsigned int btrfs_compress_str2level(const char *str)
{
	if (strncmp(str, "zlib", 4) != 0)
		return 0;

	/* Accepted form: zlib:1 up to zlib:9 and nothing left after the number */
	if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
		return str[5] - '0';

	return BTRFS_ZLIB_DEFAULT_LEVEL;
}