// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "subpage.h"
#include "zoned.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	default:
		break;
	}

	return NULL;
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}

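/*
 * Dispatch a compression request to the type-specific implementation.  The
 * workspace @ws must have been obtained from get_workspace() for the same
 * @type, so the compression level is already bound to it.
 */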
static int compression_compress_pages(int type, struct list_head *ws,
               struct address_space *mapping, u64 start, struct page **pages,
               unsigned long *out_pages, unsigned long *total_in,
               unsigned long *total_out)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_LZO:
		return lzo_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can happen when compression races with remount setting
		 * it to 'no compress', while caller doesn't call
		 * inode_need_compress() to check if we really need to
		 * compress.
		 *
		 * Not a big deal, just need to inform caller that we
		 * haven't allocated any pages yet.
		 */
		*out_pages = 0;
		return -E2BIG;
	}
}

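/*
 * Dispatchers for the type-specific decompression implementations: one
 * variant for a whole compressed bio, one for a single page.
 */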
static int compression_decompress_bio(struct list_head *ws,
				      struct compressed_bio *cb)
{
	switch (cb->compress_type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int compression_decompress(int type, struct list_head *ws,
               unsigned char *data_in, struct page *dest_page,
               unsigned long start_byte, size_t srclen, size_t destlen)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

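/*
 * A compressed_bio is allocated with one checksum slot per disk sector.
 * For example, with 4KiB sectors and the default crc32c checksum (4 bytes),
 * a 128KiB compressed extent needs DIV_ROUND_UP(128K, 4K) = 32 checksums,
 * i.e. 128 extra bytes after the struct itself.
 */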
static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
}

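/*
 * Verify the checksums of the compressed data in @bio, sector by sector,
 * against the sums recorded in the compressed_bio.  Return 0 if they all
 * match, -EIO on the first mismatch.
 */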
static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
				 u64 disk_start)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	const u32 csum_size = fs_info->csum_size;
	const u32 sectorsize = fs_info->sectorsize;
	struct page *page;
	unsigned int i;
	char *kaddr;
	u8 csum[BTRFS_CSUM_SIZE];
	struct compressed_bio *cb = bio->bi_private;
	u8 *cb_sum = cb->sums;

	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
	    test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))
		return 0;

	shash->tfm = fs_info->csum_shash;

	for (i = 0; i < cb->nr_pages; i++) {
		u32 pg_offset;
		u32 bytes_left = PAGE_SIZE;
		page = cb->compressed_pages[i];

		/* Determine the remaining bytes inside the page first */
		if (i == cb->nr_pages - 1)
			bytes_left = cb->compressed_len - i * PAGE_SIZE;

		/* Hash through the page sector by sector */
		for (pg_offset = 0; pg_offset < bytes_left;
		     pg_offset += sectorsize) {
			kaddr = kmap_atomic(page);
			crypto_shash_digest(shash, kaddr + pg_offset,
					    sectorsize, csum);
			kunmap_atomic(kaddr);

			if (memcmp(&csum, cb_sum, csum_size) != 0) {
				btrfs_print_data_csum_error(inode, disk_start,
						csum, cb_sum, cb->mirror_num);
				if (btrfs_bio(bio)->device)
					btrfs_dev_stat_inc_and_print(
						btrfs_bio(bio)->device,
						BTRFS_DEV_STAT_CORRUPTION_ERRS);
				return -EIO;
			}
			cb_sum += csum_size;
			disk_start += sectorsize;
		}
	}
	return 0;
}

/*
 * Reduce bio and io accounting for a compressed_bio with its corresponding bio.
 *
 * Return true if there is no pending bio nor io.
 * Return false otherwise.
 */
static bool dec_and_test_compressed_bio(struct compressed_bio *cb, struct bio *bio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	unsigned int bi_size = 0;
	bool last_io = false;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	/*
	 * At endio time, bi_iter.bi_size doesn't represent the real bio size.
	 * Thus here we have to iterate through all segments to grab the
	 * correct bio size.
	 */
	bio_for_each_segment_all(bvec, bio, iter_all)
		bi_size += bvec->bv_len;

	if (bio->bi_status)
		cb->errors = 1;

	ASSERT(bi_size && bi_size <= cb->compressed_len);
	last_io = refcount_sub_and_test(bi_size >> fs_info->sectorsize_bits,
					&cb->pending_sectors);
	/*
	 * Here we must wake up the possible error handler after all other
	 * operations on @cb finished, or we can race with
	 * finish_compressed_bio_*() which may free @cb.
	 */
	wake_up_var(cb);

	return last_io;
}

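/*
 * Finish a compressed read: release the compressed pages and complete or
 * fail the original bio, then free @cb.  Must only be called once nothing
 * else can be using @cb.
 */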
static void finish_compressed_bio_read(struct compressed_bio *cb, struct bio *bio)
{
	unsigned int index;
	struct page *page;

	/* Release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* Do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		struct bio_vec *bvec;
		struct bvec_iter_all iter_all;

		ASSERT(bio);
		ASSERT(!bio->bi_status);
		/*
		 * We have verified the checksum already, set page checked so
		 * the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all) {
			u64 bvec_start = page_offset(bvec->bv_page) +
					 bvec->bv_offset;

			btrfs_page_set_checked(btrfs_sb(cb->inode->i_sb),
					bvec->bv_page, bvec_start,
					bvec->bv_len);
		}

		bio_endio(cb->orig_bio);
	}

	/* Finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	unsigned int mirror = btrfs_bio(bio)->mirror_num;
	int ret = 0;

	if (!dec_and_test_compressed_bio(cb, bio))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	btrfs_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb has failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->errors == 1)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), bio,
				    bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/*
	 * Ok, we're the last bio for this extent, let's start the
	 * decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->errors = 1;
	finish_compressed_bio_read(cb, bio);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			btrfs_page_clamp_clear_writeback(fs_info, pages[i],
							 cb->start, cb->len);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

static void finish_compressed_bio_write(struct compressed_bio *cb)
{
	struct inode *inode = cb->inode;
	unsigned int index;

	/*
	 * Ok, we're the last bio for this extent, step one is to call back
	 * into the FS and do all the end_io operations.
	 */
	btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
			cb->start, cb->start + cb->len - 1,
			!cb->errors);

	end_compressed_writeback(inode, cb);
	/* Note, our inode could be gone now */

	/*
	 * Release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		struct page *page = cb->compressed_pages[index];

		page->mapping = NULL;
		put_page(page);
	}

	/* Finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
}

/*
 * Do the cleanup once all the compressed pages hit the disk.  This will clear
 * writeback on the file pages and free the compressed pages.
 *
 * This also calls the writeback end hooks for the file pages so that metadata
 * and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;

	if (!dec_and_test_compressed_bio(cb, bio))
		goto out;

	btrfs_record_physical_zoned(cb->inode, cb->start, bio);

	finish_compressed_bio_write(cb);
out:
	bio_put(bio);
}

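/*
 * Punt the endio handling of @bio to the data endio workqueue, then map and
 * submit it to the underlying devices.
 */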
static blk_status_t submit_compressed_bio(struct btrfs_fs_info *fs_info,
					  struct compressed_bio *cb,
					  struct bio *bio, int mirror_num)
{
	blk_status_t ret;

	ASSERT(bio->bi_iter.bi_size);
	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	if (ret)
		return ret;
	ret = btrfs_map_bio(fs_info, bio, mirror_num);
	return ret;
}

/*
 * Allocate a compressed_bio, which will be used to read/write on-disk
 * (aka, compressed) data.
 *
 * @cb:                 The compressed_bio structure, which records all the needed
 *                      information to bind the compressed data to the uncompressed
 *                      page cache.
 * @disk_bytenr:        The logical bytenr where the compressed data will be read
 *                      from or written to.
 * @endio_func:         The endio function to call after the IO for compressed data
 *                      is finished.
 * @next_stripe_start:  Return value of logical bytenr of where next stripe starts.
 *                      Let the caller know to only fill the bio up to the stripe
 *                      boundary.
 */
static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr,
					unsigned int opf, bio_end_io_t endio_func,
					u64 *next_stripe_start)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	struct btrfs_io_geometry geom;
	struct extent_map *em;
	struct bio *bio;
	int ret;

	bio = btrfs_bio_alloc(BIO_MAX_VECS);

	bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
	bio->bi_opf = opf;
	bio->bi_private = cb;
	bio->bi_end_io = endio_func;

	em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize);
	if (IS_ERR(em)) {
		bio_put(bio);
		return ERR_CAST(em);
	}

	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
		bio_set_dev(bio, em->map_lookup->stripes[0].dev->bdev);

	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), disk_bytenr, &geom);
	free_extent_map(em);
	if (ret < 0) {
		bio_put(bio);
		return ERR_PTR(ret);
	}
	*next_stripe_start = disk_bytenr + geom.len;

	return bio;
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
				 unsigned int len, u64 disk_start,
				 unsigned int compressed_len,
				 struct page **compressed_pages,
				 unsigned int nr_pages,
				 unsigned int write_flags,
				 struct cgroup_subsys_state *blkcg_css)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	u64 cur_disk_bytenr = disk_start;
	u64 next_stripe_start;
	blk_status_t ret;
	int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
	const bool use_append = btrfs_use_zone_append(inode, disk_start);
	const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
	cb->errors = 0;
	cb->inode = &inode->vfs_inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	while (cur_disk_bytenr < disk_start + compressed_len) {
		u64 offset = cur_disk_bytenr - disk_start;
		unsigned int index = offset >> PAGE_SHIFT;
		unsigned int real_size;
		unsigned int added;
		struct page *page = compressed_pages[index];
		bool submit = false;

		/* Allocate new bio if submitted or not yet allocated */
		if (!bio) {
			bio = alloc_compressed_bio(cb, cur_disk_bytenr,
				bio_op | write_flags, end_compressed_bio_write,
				&next_stripe_start);
			if (IS_ERR(bio)) {
				ret = errno_to_blk_status(PTR_ERR(bio));
				bio = NULL;
				goto finish_cb;
			}
		}
		/*
		 * We should never reach next_stripe_start, as we submit the
		 * bio immediately when we reach the boundary.
		 */
		ASSERT(cur_disk_bytenr != next_stripe_start);

		/*
		 * We have various limits on the real write size:
		 * - stripe boundary
		 * - page boundary
		 * - compressed length boundary
		 */
		real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_bytenr);
		real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
		real_size = min_t(u64, real_size, compressed_len - offset);
		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));

		if (use_append)
			added = bio_add_zone_append_page(bio, page, real_size,
					offset_in_page(offset));
		else
			added = bio_add_page(bio, page, real_size,
					offset_in_page(offset));
		/* Reached zoned boundary */
		if (added == 0)
			submit = true;

		cur_disk_bytenr += added;
		/* Reached stripe boundary */
		if (cur_disk_bytenr == next_stripe_start)
			submit = true;

		/* Finished the range */
		if (cur_disk_bytenr == disk_start + compressed_len)
			submit = true;

		if (submit) {
			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, true);
				if (ret)
					goto finish_cb;
			}

			ret = submit_compressed_bio(fs_info, cb, bio, 0);
			if (ret)
				goto finish_cb;
			bio = NULL;
		}
		cond_resched();
	}
	if (blkcg_css)
		kthread_associate_blkcg(NULL);

	return 0;

finish_cb:
	if (bio) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	/* Last byte of @cb is submitted, endio will free @cb */
	if (cur_disk_bytenr == disk_start + compressed_len)
		return ret;

	wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
			   (disk_start + compressed_len - cur_disk_bytenr) >>
			   fs_info->sectorsize_bits);
	/*
	 * Even with the previous bio ended, we should still have IO not yet
	 * submitted, thus we need to finish it manually.
	 */
	ASSERT(refcount_read(&cb->pending_sectors));
	/* Now we are the only one referring to @cb, we can finish it safely. */
	finish_compressed_bio_write(cb);
	return ret;
}

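/* Return the file offset just past the last byte covered by @bio. */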
static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

/*
 * Add extra pages in the same compressed file extent so that we don't need to
 * re-read the same extent again and again.
 *
 * NOTE: this won't work well for subpage, as for subpage read, we lock the
 * full page then submit bio for each compressed/regular extent.
 *
 * This means, if we have several sectors in the same page pointing to the
 * same on-disk compressed data, we will re-read the same extent many times
 * and this function can only help for the next page.
 */
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long end_index;
	u64 cur = bio_end_offset(cb->orig_bio);
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	int sectors_missed = 0;

	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	/*
	 * For current subpage support, we only support 64K page size,
	 * which means maximum compressed extent size (128K) is just 2x page
	 * size.
	 * This makes readahead less effective, so here disable readahead for
	 * subpage for now, until full compressed write is supported.
	 */
	if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (cur < compressed_end) {
		u64 page_end;
		u64 pg_index = cur >> PAGE_SHIFT;
		u32 add_size;

		if (pg_index > end_index)
			break;

		page = xa_load(&mapping->i_pages, pg_index);
		if (page && !xa_is_value(page)) {
			sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
					  fs_info->sectorsize_bits;

			/* Beyond threshold, no need to continue */
			if (sectors_missed > 4)
				break;

			/*
			 * Jump to next page start as we already have page for
			 * current offset.
			 */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			/* There is already a page, skip to page end */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}

		ret = set_page_extent_mapped(page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			break;
		}

		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
		lock_extent(tree, cur, page_end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
		read_unlock(&em_tree->lock);

		/*
		 * At this point, we have a locked page in the page cache for
		 * these bytes in the file.  But, we have to make sure they map
		 * to this compressed extent on disk.
		 */
		if (!em || cur < em->start ||
		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, cur, page_end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			size_t zero_offset = offset_in_page(isize);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				memzero_page(page, zero_offset, zeros);
				flush_dcache_page(page);
			}
		}

		add_size = min(em->start + em->len, page_end + 1) - cur;
		ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur));
		if (ret != add_size) {
			unlock_extent(tree, cur, page_end);
			unlock_page(page);
			put_page(page);
			break;
		}
		/*
		 * If it's subpage, we also need to increase its
		 * subpage::readers number, as at endio we will decrease
		 * subpage::readers and unlock the page.
		 */
		if (fs_info->sectorsize < PAGE_SIZE)
			btrfs_subpage_start_reader(fs_info, page, cur, add_size);
		put_page(page);
		cur += add_size;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned int compressed_len;
	unsigned int nr_pages;
	unsigned int pg_index;
	struct bio *comp_bio = NULL;
	const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	u64 cur_disk_byte = disk_bytenr;
	u64 next_stripe_start;
	u64 file_offset;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret = BLK_STS_RESOURCE;
	int faili = 0;
	u8 *sums;

	em_tree = &BTRFS_I(inode)->extent_tree;

	file_offset = bio_first_bvec_all(bio)->bv_offset +
		      page_offset(bio_first_page_all(bio));

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
	read_unlock(&em_tree->lock);
	if (!em)
		return BLK_STS_IOERR;

	ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = BLK_STS_RESOURCE;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	cb->len = bio->bi_iter.bi_size;

	while (cur_disk_byte < disk_bytenr + compressed_len) {
		u64 offset = cur_disk_byte - disk_bytenr;
		unsigned int index = offset >> PAGE_SHIFT;
		unsigned int real_size;
		unsigned int added;
		struct page *page = cb->compressed_pages[index];
		bool submit = false;

		/* Allocate new bio if submitted or not yet allocated */
		if (!comp_bio) {
			comp_bio = alloc_compressed_bio(cb, cur_disk_byte,
					REQ_OP_READ, end_compressed_bio_read,
					&next_stripe_start);
			if (IS_ERR(comp_bio)) {
				ret = errno_to_blk_status(PTR_ERR(comp_bio));
				comp_bio = NULL;
				goto finish_cb;
			}
		}
		/*
		 * We should never reach next_stripe_start, as we submit
		 * comp_bio immediately when we reach the boundary.
		 */
		ASSERT(cur_disk_byte != next_stripe_start);
		/*
		 * We have various limits on the real read size:
		 * - stripe boundary
		 * - page boundary
		 * - compressed length boundary
		 */
		real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_byte);
		real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
		real_size = min_t(u64, real_size, compressed_len - offset);
		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));

		added = bio_add_page(comp_bio, page, real_size, offset_in_page(offset));
		/*
		 * Maximum compressed extent is smaller than bio size limit,
		 * thus bio_add_page() should always succeed.
		 */
		ASSERT(added == real_size);
		cur_disk_byte += added;

		/* Reached stripe boundary, need to submit */
		if (cur_disk_byte == next_stripe_start)
			submit = true;

		/* Has finished the range, need to submit */
		if (cur_disk_byte == disk_bytenr + compressed_len)
			submit = true;

		if (submit) {
			unsigned int nr_sectors;

			ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
			if (ret)
				goto finish_cb;

			nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
						  fs_info->sectorsize);
			sums += fs_info->csum_size * nr_sectors;

			ret = submit_compressed_bio(fs_info, cb, comp_bio, mirror_num);
			if (ret)
				goto finish_cb;
			comp_bio = NULL;
		}
	}
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
finish_cb:
	if (comp_bio) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}
	/* All bytes of @cb are submitted, endio will free @cb */
	if (cur_disk_byte == disk_bytenr + compressed_len)
		return ret;

	wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
			   (disk_bytenr + compressed_len - cur_disk_byte) >>
			   fs_info->sectorsize_bits);
	/*
	 * Even with the previous bio ended, we should still have IO not yet
	 * submitted, thus we need to finish @cb manually.
	 */
	ASSERT(refcount_read(&cb->pending_sectors));
	/* Now we are the only one referring to @cb, we can finish it safely. */
	finish_compressed_bio_read(cb, NULL);
	return ret;
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied at each sample position
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
	.workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

static struct list_head *alloc_workspace(int type, unsigned int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void free_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_init_workspace_manager(int type)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;

	wsm = btrfs_compress_op[type]->workspace_manager;
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case
	 */
	workspace = alloc_workspace(type, 0);
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate compression workspace, will try later\n");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
}

static void btrfs_cleanup_workspace_manager(int type)
{
	struct workspace_manager *wsman;
	struct list_head *ws;

	wsman = btrfs_compress_op[type]->workspace_manager;
	while (!list_empty(&wsman->idle_ws)) {
		ws = wsman->idle_ws.next;
		list_del(ws);
		free_workspace(type, ws);
		atomic_dec(&wsman->total_ws);
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantee and we do not return
 * errors.
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;
	int cpus = num_online_cpus();
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = alloc_workspace(type, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *get_workspace(int type, int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
	struct workspace_manager *wsm;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(ws, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	free_workspace(type, ws);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}

static void put_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to default
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
	const struct btrfs_compress_op *ops = btrfs_compress_op[type];

	if (level == 0)
		level = ops->default_level;
	else
		level = min(level, ops->max_level);

	return level;
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is stored in the lower 4 bits (values 0-3)
 * - the level is stored in bits 4-7, e.g. zlib level 9 is encoded as
 *   (BTRFS_COMPRESS_ZLIB | 9 << 4)
 *
 * @out_pages is an in/out parameter, holds the maximum number of pages to
 * allocate and returns the number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_set_level(type, level);
	workspace = get_workspace(type, level);
	ret = compression_compress_pages(type, workspace, mapping, start, pages,
					 out_pages, total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}

static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = get_workspace(type, 0);
	ret = compression_decompress_bio(workspace, cb);
	put_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = get_workspace(type, 0);
	ret = compression_decompress(type, workspace, data_in, dest_page,
				     start_byte, srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}

void __init btrfs_init_compress(void)
{
	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_init_workspace_manager();
}

void __cold btrfs_exit_compress(void)
{
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_cleanup_workspace_manager();
}

/*
 * Copy decompressed data from working buffer to pages.
 *
 * @buf:		The decompressed data buffer
 * @buf_len:		The decompressed data length
 * @decompressed:	Number of bytes that are already decompressed inside the
 * 			compressed extent
 * @cb:			The compressed extent descriptor
 * @orig_bio:		The original bio that the caller wants to read data into
 *
 * An easier to understand graph is like below:
 *
 * 		|<- orig_bio ->|     |<- orig_bio->|
 * 	|<-------      full decompressed extent      ----->|
 * 	|<-----------    @cb range   ---->|
 * 	|			|<-- @buf_len -->|
 * 	|<--- @decompressed --->|
 *
 * Note that @cb can cover a sub-range of the full decompressed extent, but
 * @cb->start always equals the start offset of the full decompressed extent.
 *
 * When reading a compressed extent, we have to read the full compressed
 * extent, while @orig_bio may only want part of the range.
 * Thus this function ensures that only the data covered by @orig_bio gets
 * copied.
 *
 * Return 0 if we have copied all needed contents for @orig_bio.
 * Return >0 if we need to continue decompressing.
 */
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
			      struct compressed_bio *cb, u32 decompressed)
{
	struct bio *orig_bio = cb->orig_bio;
	/* Offset inside the full decompressed extent */
	u32 cur_offset;

	cur_offset = decompressed;
	/* The main loop to do the copy */
	while (cur_offset < decompressed + buf_len) {
		struct bio_vec bvec;
		size_t copy_len;
		u32 copy_start;
		/* Offset inside the full decompressed extent */
		u32 bvec_offset;

		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
		/*
		 * cb->start may underflow, but subtracting that value can still
		 * give us the correct offset inside the full decompressed extent.
		 */
		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;

		/* Haven't reached the bvec range, exit */
		if (decompressed + buf_len <= bvec_offset)
			return 1;

		copy_start = max(cur_offset, bvec_offset);
		copy_len = min(bvec_offset + bvec.bv_len,
			       decompressed + buf_len) - copy_start;
		ASSERT(copy_len);

		/*
		 * Extra range check to ensure we didn't go beyond
		 * @buf + @buf_len.
		 */
		ASSERT(copy_start - decompressed < buf_len);
		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
			       buf + copy_start - decompressed, copy_len);
		flush_dcache_page(bvec.bv_page);
		cur_offset += copy_len;

		bio_advance(orig_bio, copy_len);
		/* Finished the bio */
		if (!orig_bio->bi_iter.bi_size)
			return 0;
	}
	return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after the decimal point:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

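/*
 * Scaled Shannon entropy: H = -sum(p_i * log2(p_i)) over the byte buckets,
 * with p_i = count_i / sample_size.  Using ilog2_w() as 4 * log2(), the sum
 * below evaluates to roughly 4 * H * sample_size; after the division and
 * scaling, the result is the percentage of the maximum possible entropy of
 * 8 bits per byte.
 */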
static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift)
{
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}

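/*
 * Example: get4bits(0x1234, 4) looks at nibble 0x3 and returns 15 - 3 = 12,
 * so larger nibbles map to smaller bucket indexes and radix_sort() below
 * produces a descending order.
 */
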
/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix expects to move data from a temporary array, to
		 * the main one.  But that requires some CPU time. Avoid that
		 * by doing another sort iteration to original array instead of
		 * memcpy()
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easily fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data byte set is restricted to limited number of
 * possible characters, and that restriction in most cases makes data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

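/*
 * Fill ws->sample with SAMPLING_READ_SIZE byte chunks taken every
 * SAMPLING_INTERVAL bytes from the first BTRFS_MAX_UNCOMPRESSED bytes of
 * the range, and record the total in ws->sample_size.
 */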
static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap_local_page(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
					SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap_local(in_data);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/incompressible) to avoid wasting CPU time on incompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = get_workspace(0, 0);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 * values, every bucket has counter at level ~54. The heuristic would
	 * be confused. This can happen when data have some internal repeated
	 * patterns like "abbacbbc...". This can be detected by analyzing
	 * pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	put_workspace(0, ws_list);
	return ret;
}

/*
 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 * level, an unrecognized string will set the default level.
 * E.g. the mount option compress=zlib:9 passes ":9" here and yields level 9.
 */
unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
	unsigned int level = 0;
	int ret;

	if (!type)
		return 0;

	if (str[0] == ':') {
		ret = kstrtouint(str + 1, 10, &level);
		if (ret)
			level = 0;
	}

	level = btrfs_compress_set_level(type, level);

	return level;
}