// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "zoned.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	default:
		break;
	}

	return NULL;
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}

static int compression_compress_pages(int type, struct list_head *ws,
               struct address_space *mapping, u64 start, struct page **pages,
               unsigned long *out_pages, unsigned long *total_in,
               unsigned long *total_out)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_LZO:
		return lzo_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can happen when compression races with remount setting
		 * it to 'no compress', while caller doesn't call
		 * inode_need_compress() to check if we really need to
		 * compress.
		 *
		 * Not a big deal, just need to inform caller that we
		 * haven't allocated any pages yet.
		 */
		*out_pages = 0;
		return -E2BIG;
	}
}

static int compression_decompress_bio(int type, struct list_head *ws,
		struct compressed_bio *cb)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int compression_decompress(int type, struct list_head *ws,
               unsigned char *data_in, struct page *dest_page,
               unsigned long start_byte, size_t srclen, size_t destlen)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
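	/*
	 * The allocation is the struct itself plus room for one checksum per
	 * compressed sector; check_compressed_csum() below walks that area
	 * through cb->sums.
	 */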
	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
}

static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
				 u64 disk_start)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	const u32 csum_size = fs_info->csum_size;
	const u32 sectorsize = fs_info->sectorsize;
	struct page *page;
	unsigned int i;
	char *kaddr;
	u8 csum[BTRFS_CSUM_SIZE];
	struct compressed_bio *cb = bio->bi_private;
	u8 *cb_sum = cb->sums;

	if (!fs_info->csum_root || (inode->flags & BTRFS_INODE_NODATASUM))
		return 0;

	shash->tfm = fs_info->csum_shash;

	for (i = 0; i < cb->nr_pages; i++) {
		u32 pg_offset;
		u32 bytes_left = PAGE_SIZE;
		page = cb->compressed_pages[i];

		/* Determine the remaining bytes inside the page first */
		if (i == cb->nr_pages - 1)
			bytes_left = cb->compressed_len - i * PAGE_SIZE;

		/* Hash through the page sector by sector */
		for (pg_offset = 0; pg_offset < bytes_left;
		     pg_offset += sectorsize) {
			kaddr = page_address(page);
			crypto_shash_digest(shash, kaddr + pg_offset,
					    sectorsize, csum);

			if (memcmp(&csum, cb_sum, csum_size) != 0) {
				btrfs_print_data_csum_error(inode, disk_start,
						csum, cb_sum, cb->mirror_num);
				if (btrfs_io_bio(bio)->device)
					btrfs_dev_stat_inc_and_print(
						btrfs_io_bio(bio)->device,
						BTRFS_DEV_STAT_CORRUPTION_ERRS);
				return -EIO;
			}
			cb_sum += csum_size;
			disk_start += sectorsize;
		}
	}
	return 0;
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned int index;
	unsigned int mirror = btrfs_io_bio(bio)->mirror_num;
	int ret = 0;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	btrfs_io_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb have failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), bio,
				    bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		struct bio_vec *bvec;
		struct bvec_iter_all iter_all;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned int index;

	if (bio->bi_status)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!refcount_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	btrfs_record_physical_zoned(inode, cb->start, bio);
	btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
			cb->start, cb->start + cb->len - 1,
			!cb->errors);

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	index = 0;
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
				 unsigned int len, u64 disk_start,
				 unsigned int compressed_len,
				 struct page **compressed_pages,
				 unsigned int nr_pages,
				 unsigned int write_flags,
				 struct cgroup_subsys_state *blkcg_css)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	blk_status_t ret;
	int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
	const bool use_append = btrfs_use_zone_append(inode, disk_start);
	const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;

	WARN_ON(!PAGE_ALIGNED(start));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = &inode->vfs_inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bio = btrfs_bio_alloc(first_byte);
	bio->bi_opf = bio_op | write_flags;
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;

	if (use_append) {
		struct btrfs_device *device;

		device = btrfs_zoned_get_device(fs_info, disk_start, PAGE_SIZE);
		if (IS_ERR(device)) {
			kfree(cb);
			bio_put(bio);
			return BLK_STS_NOTSUPP;
		}

		bio_set_dev(bio, device->bdev);
	}

	if (blkcg_css) {
		bio->bi_opf |= REQ_CGROUP_PUNT;
		kthread_associate_blkcg(blkcg_css);
	}
	refcount_set(&cb->pending_bios, 1);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		int submit = 0;
		int len = 0;

		page = compressed_pages[pg_index];
		page->mapping = inode->vfs_inode.i_mapping;
		if (bio->bi_iter.bi_size)
			submit = btrfs_bio_fits_in_stripe(page, PAGE_SIZE, bio,
							  0);

		/*
		 * Page can only be added to bio if the current bio fits in
		 * stripe.
		 */
		if (!submit) {
			if (pg_index == 0 && use_append)
				len = bio_add_zone_append_page(bio, page,
							       PAGE_SIZE, 0);
			else
				len = bio_add_page(bio, page, PAGE_SIZE, 0);
		}

		page->mapping = NULL;
		if (submit || len < PAGE_SIZE) {
			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(fs_info, bio, 0);
			if (ret) {
				bio->bi_status = ret;
				bio_endio(bio);
			}

			bio = btrfs_bio_alloc(first_byte);
			bio->bi_opf = bio_op | write_flags;
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			if (blkcg_css)
				bio->bi_opf |= REQ_CGROUP_PUNT;
			/*
			 * Use bio_add_page() to ensure the bio has at least one
			 * page.
			 */
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(fs_info,
					"bytes left %lu compress len %u nr %u",
			       bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(fs_info, bio, 0);
	if (ret) {
		bio->bi_status = ret;
		bio_endio(bio);
	}

	if (blkcg_css)
		kthread_associate_blkcg(NULL);

	return 0;
}

static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	last_offset = bio_end_offset(cb->orig_bio);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	/*
	 * For current subpage support, we only support 64K page size,
	 * which means maximum compressed extent size (128K) is just 2x page
	 * size.
	 * This makes readahead less effective, so here disable readahead for
	 * subpage for now, until full compressed write is supported.
	 */
	if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		page = xa_load(&mapping->i_pages, pg_index);
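		/*
		 * A real page (not a shadow entry) is already cached here, so
		 * readahead gains nothing for it; give up after a few such
		 * hits.
		 */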
		if (page && !xa_is_value(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		ret = set_page_extent_mapped(page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			break;
		}

		end = last_offset + PAGE_SIZE - 1;
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			size_t zero_offset = offset_in_page(isize);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				memzero_page(page, zero_offset, zeros);
				flush_dcache_page(page);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned int compressed_len;
	unsigned int nr_pages;
	unsigned int pg_index;
	struct page *page;
	struct bio *comp_bio;
	u64 cur_disk_byte = bio->bi_iter.bi_sector << 9;
	u64 file_offset;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u8 *sums;

	em_tree = &BTRFS_I(inode)->extent_tree;

	file_offset = bio_first_bvec_all(bio)->bv_offset +
		      page_offset(bio_first_page_all(bio));

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	comp_bio = btrfs_bio_alloc(cur_disk_byte);
	comp_bio->bi_opf = REQ_OP_READ;
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	refcount_set(&cb->pending_bios, 1);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		u32 pg_len = PAGE_SIZE;
		int submit = 0;

		/*
		 * To handle subpage case, we need to make sure the bio only
		 * covers the range we need.
		 *
		 * If we're at the last page, truncate the length to only cover
		 * the remaining part.
		 */
		if (pg_index == nr_pages - 1)
			pg_len = min_t(u32, PAGE_SIZE,
					compressed_len - pg_index * PAGE_SIZE);

		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			submit = btrfs_bio_fits_in_stripe(page, pg_len,
							  comp_bio, 0);

		page->mapping = NULL;
		if (submit || bio_add_page(comp_bio, page, pg_len, 0) < pg_len) {
			unsigned int nr_sectors;

			ret = btrfs_bio_wq_end_io(fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * inc the count before we submit the bio so
			 * we know the end IO handler won't happen before
			 * we inc the count.  Otherwise, the cb might get
			 * freed before we're done setting it up
			 */
			refcount_inc(&cb->pending_bios);

			ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
			BUG_ON(ret); /* -ENOMEM */

			nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
						  fs_info->sectorsize);
			sums += fs_info->csum_size * nr_sectors;

			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
			if (ret) {
				comp_bio->bi_status = ret;
				bio_endio(comp_bio);
			}

			comp_bio = btrfs_bio_alloc(cur_disk_byte);
			comp_bio->bi_opf = REQ_OP_READ;
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, pg_len, 0);
		}
		cur_disk_byte += pg_len;
	}

	ret = btrfs_bio_wq_end_io(fs_info, comp_bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
	BUG_ON(ret); /* -ENOMEM */

	ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
	if (ret) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}

	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - stride between consecutive sample positions in the range
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)
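/*
 * With the 128KiB BTRFS_MAX_UNCOMPRESSED chunk size used by the heuristic
 * below, this works out to 128K / 256 * 16 == 8192 bytes, the bound described
 * above.
 */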

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
	.workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

static struct list_head *alloc_workspace(int type, unsigned int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void free_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_init_workspace_manager(int type)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;

	wsm = btrfs_compress_op[type]->workspace_manager;
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case
	 */
	workspace = alloc_workspace(type, 0);
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate compression workspace, will try later\n");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
}

static void btrfs_cleanup_workspace_manager(int type)
{
	struct workspace_manager *wsman;
	struct list_head *ws;

	wsman = btrfs_compress_op[type]->workspace_manager;
	while (!list_empty(&wsman->idle_ws)) {
		ws = wsman->idle_ws.next;
		list_del(ws);
		free_workspace(type, ws);
		atomic_dec(&wsman->total_ws);
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantee and we do not return
 * errors.
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;
	int cpus = num_online_cpus();
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;

	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = alloc_workspace(type, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *get_workspace(int type, int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
	struct workspace_manager *wsm;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(ws, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	free_workspace(type, ws);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}

static void put_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to default
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
	const struct btrfs_compress_op *ops = btrfs_compress_op[type];

	if (level == 0)
		level = ops->default_level;
	else
		level = min(level, ops->max_level);

	return level;
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is stored in bits 0-3
 * - the compression level is stored in bits 4-7
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
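/*
 * For illustration, assuming the bit layout documented above: zlib at level 9
 * would be passed in as type_level == (9 << 4) | BTRFS_COMPRESS_ZLIB.
 */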
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_set_level(type, level);
	workspace = get_workspace(type, level);
	ret = compression_compress_pages(type, workspace, mapping, start, pages,
					 out_pages, total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}

static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = get_workspace(type, 0);
	ret = compression_decompress_bio(type, workspace, cb);
	put_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = get_workspace(type, 0);
	ret = compression_decompress(type, workspace, data_in, dest_page,
				     start_byte, srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}

void __init btrfs_init_compress(void)
{
	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_init_workspace_manager();
}

void __cold btrfs_exit_compress(void)
{
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_cleanup_workspace_manager();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the byte offset of the working buffer within the uncompressed data.
 *
 * total_out is the last byte of the buffer
 */
int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio *bio)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long prev_start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	struct bio_vec bvec = bio_iter_iovec(bio, bio->bi_iter);

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(bvec.bv_page) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min_t(unsigned long, bvec.bv_len,
				PAGE_SIZE - (buf_offset % PAGE_SIZE));
		bytes = min(bytes, working_bytes);

		memcpy_to_page(bvec.bv_page, bvec.bv_offset, buf + buf_offset,
			       bytes);
		flush_dcache_page(bvec.bv_page);

		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		bio_advance(bio, bytes);
		if (!bio->bi_iter.bi_size)
			return 0;
		bvec = bio_iter_iovec(bio, bio->bi_iter);
		prev_start_byte = start_byte;
		start_byte = page_offset(bvec.bv_page) - disk_start;

		/*
		 * We need to make sure we're only adjusting
		 * our offset into compression working buffer when
		 * we're switching pages.  Otherwise we can incorrectly
		 * keep copying when we were actually done.
		 */
		if (start_byte != prev_start_byte) {
			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer. bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increasead precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
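	/* ilog2_w() returns 4 * ilog2(), so entropy_max is 8 bits/byte in scaled units */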
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift) {
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
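	/*
	 * For illustration: a nibble value of 10 maps to bucket 15 - 10 == 5,
	 * so larger values land in lower buckets and the radix sort comes out
	 * in descending order.
	 */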
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array, to
		 * the main one.  But that requires some CPU time. Avoid that
		 * by doing another sort iteration to original array instead of
		 * memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i ++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Another possibility is a normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easy fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data the byte set is restricted to a limited
 * number of possible characters, which in most cases makes the data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap_local_page(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
					SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap_local(in_data);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = get_workspace(0, 0);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 * values, every bucket has counter at level ~54. The heuristic would
	 * be confused. This can happen when data have some internal repeated
	 * patterns like "abbacbbc...". This can be detected by analyzing
	 * pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	put_workspace(0, ws_list);
	return ret;
}

/*
 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 * level, unrecognized string will set the default level
 */
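/*
 * For illustration (hypothetical caller input): a mount option such as
 * "compress=zlib:9" would reach this function with str == ":9" and yield
 * level 9, clamped by btrfs_compress_set_level() below.
 */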
unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
	unsigned int level = 0;
	int ret;

	if (!type)
		return 0;

	if (str[0] == ':') {
		ret = kstrtouint(str + 1, 10, &level);
		if (ret)
			level = 0;
	}

	level = btrfs_compress_set_level(type, level);

	return level;
}