// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/log2.h>
#include <crypto/hash.h>
#include "misc.h"
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"
#include "subpage.h"
#include "zoned.h"

static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };

const char* btrfs_compress_type2str(enum btrfs_compression_type type)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
	case BTRFS_COMPRESS_LZO:
	case BTRFS_COMPRESS_ZSTD:
	case BTRFS_COMPRESS_NONE:
		return btrfs_compress_types[type];
	default:
		break;
	}

	return NULL;
}

bool btrfs_compress_is_valid_type(const char *str, size_t len)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(btrfs_compress_types); i++) {
		size_t comp_len = strlen(btrfs_compress_types[i]);

		if (len < comp_len)
			continue;

		if (!strncmp(btrfs_compress_types[i], str, comp_len))
			return true;
	}
	return false;
}

static int compression_compress_pages(int type, struct list_head *ws,
               struct address_space *mapping, u64 start, struct page **pages,
               unsigned long *out_pages, unsigned long *total_in,
               unsigned long *total_out)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB:
		return zlib_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_LZO:
		return lzo_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_ZSTD:
		return zstd_compress_pages(ws, mapping, start, pages,
				out_pages, total_in, total_out);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can happen when compression races with remount setting
		 * it to 'no compress', while caller doesn't call
		 * inode_need_compress() to check if we really need to
		 * compress.
		 *
		 * Not a big deal, just need to inform caller that we
		 * haven't allocated any pages yet.
		 */
		*out_pages = 0;
		return -E2BIG;
	}
}

static int compression_decompress_bio(struct list_head *ws,
				      struct compressed_bio *cb)
{
	switch (cb->compress_type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress_bio(ws, cb);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int compression_decompress(int type, struct list_head *ws,
               unsigned char *data_in, struct page *dest_page,
               unsigned long start_byte, size_t srclen, size_t destlen)
{
	switch (type) {
	case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
						start_byte, srclen, destlen);
	case BTRFS_COMPRESS_NONE:
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static int btrfs_decompress_bio(struct compressed_bio *cb);

static inline int compressed_bio_size(struct btrfs_fs_info *fs_info,
				      unsigned long disk_size)
{
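	/*
	 * Illustrative sizing, assuming a 4KiB sectorsize and 4-byte crc32c
	 * checksums: a 128KiB compressed extent needs 32 per-sector csums,
	 * so the allocation is sizeof(struct compressed_bio) plus 32 * 4
	 * bytes, with cb->sums pointing into that trailing area.
	 */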
	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, fs_info->sectorsize)) * fs_info->csum_size;
}

static int check_compressed_csum(struct btrfs_inode *inode, struct bio *bio,
				 u64 disk_start)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u32 csum_size = fs_info->csum_size;
	const u32 sectorsize = fs_info->sectorsize;
	struct page *page;
	unsigned int i;
	u8 csum[BTRFS_CSUM_SIZE];
	struct compressed_bio *cb = bio->bi_private;
	u8 *cb_sum = cb->sums;

	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
	    test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state))
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		u32 pg_offset;
		u32 bytes_left = PAGE_SIZE;
		page = cb->compressed_pages[i];

		/* Determine the remaining bytes inside the page first */
		if (i == cb->nr_pages - 1)
			bytes_left = cb->compressed_len - i * PAGE_SIZE;

		/* Hash through the page sector by sector */
		for (pg_offset = 0; pg_offset < bytes_left;
		     pg_offset += sectorsize) {
			int ret;

			ret = btrfs_check_sector_csum(fs_info, page, pg_offset,
						      csum, cb_sum);
			if (ret) {
				btrfs_print_data_csum_error(inode, disk_start,
						csum, cb_sum, cb->mirror_num);
				if (btrfs_bio(bio)->device)
					btrfs_dev_stat_inc_and_print(
						btrfs_bio(bio)->device,
						BTRFS_DEV_STAT_CORRUPTION_ERRS);
				return -EIO;
			}
			cb_sum += csum_size;
			disk_start += sectorsize;
		}
	}
	return 0;
}

/*
 * Reduce bio and io accounting for a compressed_bio with its corresponding bio.
 *
 * Return true if there is no pending bio nor io.
 * Return false otherwise.
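 *
 * The accounting is done in sectors: e.g. a 16KiB bio on a 4KiB sectorsize
 * filesystem drops cb->pending_sectors by 4 (illustrative example).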
 */
static bool dec_and_test_compressed_bio(struct compressed_bio *cb, struct bio *bio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	unsigned int bi_size = 0;
	bool last_io = false;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	/*
	 * At endio time, bi_iter.bi_size doesn't represent the real bio size.
	 * Thus here we have to iterate through all segments to grab correct
	 * bio size.
	 */
	bio_for_each_segment_all(bvec, bio, iter_all)
		bi_size += bvec->bv_len;

	if (bio->bi_status)
		cb->status = bio->bi_status;

	ASSERT(bi_size && bi_size <= cb->compressed_len);
	last_io = refcount_sub_and_test(bi_size >> fs_info->sectorsize_bits,
					&cb->pending_sectors);
	/*
	 * Here we must wake up the possible error handler after all other
	 * operations on @cb finished, or we can race with
	 * finish_compressed_bio_*() which may free @cb.
	 */
	wake_up_var(cb);

	return last_io;
}

static void finish_compressed_bio_read(struct compressed_bio *cb)
{
	unsigned int index;
	struct page *page;

	/* Release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* Do io completion on the original bio */
	if (cb->status != BLK_STS_OK) {
		cb->orig_bio->bi_status = cb->status;
		bio_endio(cb->orig_bio);
	} else {
		struct bio_vec *bvec;
		struct bvec_iter_all iter_all;

		/*
		 * We have verified the checksum already, set page checked so
		 * the end_io handlers know about it
		 */
		ASSERT(!bio_flagged(cb->orig_bio, BIO_CLONED));
		bio_for_each_segment_all(bvec, cb->orig_bio, iter_all) {
			u64 bvec_start = page_offset(bvec->bv_page) +
					 bvec->bv_offset;

			btrfs_page_set_checked(btrfs_sb(cb->inode->i_sb),
					bvec->bv_page, bvec_start,
					bvec->bv_len);
		}

		bio_endio(cb->orig_bio);
	}

	/* Finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
}

/* when we finish reading compressed pages from the disk, we
 * decompress them and then run the bio end_io routines on the
 * decompressed pages (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally
 *
 * The compressed pages are freed here, and it must be run
 * in process context
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	unsigned int mirror = btrfs_bio(bio)->mirror_num;
	int ret = 0;

	if (!dec_and_test_compressed_bio(cb, bio))
		goto out;

	/*
	 * Record the correct mirror_num in cb->orig_bio so that
	 * read-repair can work properly.
	 */
	btrfs_bio(cb->orig_bio)->mirror_num = mirror;
	cb->mirror_num = mirror;

	/*
	 * Some IO in this cb have failed, just skip checksum as there
	 * is no way it could be correct.
	 */
	if (cb->status != BLK_STS_OK)
		goto csum_failed;

	inode = cb->inode;
	ret = check_compressed_csum(BTRFS_I(inode), bio,
				    bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_bio(cb);

csum_failed:
	if (ret)
		cb->status = errno_to_blk_status(ret);
	finish_compressed_bio_read(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	const int errno = blk_status_to_errno(cb->status);
	int i;
	int ret;

	if (errno)
		mapping_set_error(inode->i_mapping, errno);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
				     min_t(unsigned long,
				     nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (errno)
				SetPageError(pages[i]);
			btrfs_page_clamp_clear_writeback(fs_info, pages[i],
							 cb->start, cb->len);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

static void finish_compressed_bio_write(struct compressed_bio *cb)
{
	struct inode *inode = cb->inode;
	unsigned int index;

	/*
	 * Ok, we're the last bio for this extent, step one is to call back
	 * into the FS and do all the end_io operations.
	 */
	btrfs_writepage_endio_finish_ordered(BTRFS_I(inode), NULL,
			cb->start, cb->start + cb->len - 1,
			cb->status == BLK_STS_OK);

	if (cb->writeback)
		end_compressed_writeback(inode, cb);
	/* Note, our inode could be gone now */

	/*
	 * Release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		struct page *page = cb->compressed_pages[index];

		page->mapping = NULL;
		put_page(page);
	}

	/* Finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
}

static void btrfs_finish_compressed_write_work(struct work_struct *work)
{
	struct compressed_bio *cb =
		container_of(work, struct compressed_bio, write_end_work);

	finish_compressed_bio_write(cb);
}

/*
 * Do the cleanup once all the compressed pages hit the disk.  This will clear
 * writeback on the file pages and free the compressed pages.
 *
 * This also calls the writeback end hooks for the file pages so that metadata
 * and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;

	if (dec_and_test_compressed_bio(cb, bio)) {
		struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);

		btrfs_record_physical_zoned(cb->inode, cb->start, bio);
		queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
	}
	bio_put(bio);
}

/*
 * Allocate a compressed_bio, which will be used to read/write on-disk
 * (aka, compressed) data.
 *
 * @cb:                 The compressed_bio structure, which records all the needed
 *                      information to bind the compressed data to the uncompressed
 *                      page cache.
 * @disk_bytenr:        The logical bytenr where the compressed data will be read
 *                      from or written to.
 * @endio_func:         The endio function to call after the IO for compressed data
 *                      is finished.
 * @next_stripe_start:  Return value of logical bytenr of where next stripe starts.
 *                      Let the caller know to only fill the bio up to the stripe
 *                      boundary.
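 *
 * For example (illustrative), if @disk_bytenr sits 16KiB before a stripe
 * boundary, the returned bio can cover at most those 16KiB and
 * *next_stripe_start tells the caller where the following bio must begin.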
 */


static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr,
					unsigned int opf, bio_end_io_t endio_func,
					u64 *next_stripe_start)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
	struct btrfs_io_geometry geom;
	struct extent_map *em;
	struct bio *bio;
	int ret;

	bio = btrfs_bio_alloc(BIO_MAX_VECS);

	bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
	bio->bi_opf = opf;
	bio->bi_private = cb;
	bio->bi_end_io = endio_func;

	em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize);
	if (IS_ERR(em)) {
		bio_put(bio);
		return ERR_CAST(em);
	}

	if (bio_op(bio) == REQ_OP_ZONE_APPEND)
		bio_set_dev(bio, em->map_lookup->stripes[0].dev->bdev);

	ret = btrfs_get_io_geometry(fs_info, em, btrfs_op(bio), disk_bytenr, &geom);
	free_extent_map(em);
	if (ret < 0) {
		bio_put(bio);
		return ERR_PTR(ret);
	}
	*next_stripe_start = disk_bytenr + geom.len;

	return bio;
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
				 unsigned int len, u64 disk_start,
				 unsigned int compressed_len,
				 struct page **compressed_pages,
				 unsigned int nr_pages,
				 unsigned int write_flags,
				 struct cgroup_subsys_state *blkcg_css,
				 bool writeback)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct bio *bio = NULL;
	struct compressed_bio *cb;
	u64 cur_disk_bytenr = disk_start;
	u64 next_stripe_start;
	blk_status_t ret;
	int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
	const bool use_append = btrfs_use_zone_append(inode, disk_start);
	const unsigned int bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(len, fs_info->sectorsize));
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb)
		return BLK_STS_RESOURCE;
	refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
	cb->status = BLK_STS_OK;
	cb->inode = &inode->vfs_inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->writeback = writeback;
	INIT_WORK(&cb->write_end_work, btrfs_finish_compressed_write_work);
	cb->nr_pages = nr_pages;

	if (blkcg_css)
		kthread_associate_blkcg(blkcg_css);

	while (cur_disk_bytenr < disk_start + compressed_len) {
		u64 offset = cur_disk_bytenr - disk_start;
		unsigned int index = offset >> PAGE_SHIFT;
		unsigned int real_size;
		unsigned int added;
		struct page *page = compressed_pages[index];
		bool submit = false;

		/* Allocate new bio if submitted or not yet allocated */
		if (!bio) {
			bio = alloc_compressed_bio(cb, cur_disk_bytenr,
				bio_op | write_flags, end_compressed_bio_write,
				&next_stripe_start);
			if (IS_ERR(bio)) {
				ret = errno_to_blk_status(PTR_ERR(bio));
				bio = NULL;
				goto finish_cb;
			}
			if (blkcg_css)
				bio->bi_opf |= REQ_CGROUP_PUNT;
		}
		/*
		 * We should never reach next_stripe_start, as we submit the
		 * bio immediately when we reach the boundary.
		 */
		ASSERT(cur_disk_bytenr != next_stripe_start);

		/*
		 * We have various limits on the real read size:
		 * - stripe boundary
		 * - page boundary
		 * - compressed length boundary
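		 *
		 * The smallest of the three wins: e.g. (illustrative) a bio
		 * that is 12KiB short of the stripe boundary but whose current
		 * page only has 4KiB left will add 4KiB in this iteration.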
		 */
		real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_bytenr);
		real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
		real_size = min_t(u64, real_size, compressed_len - offset);
		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));

		if (use_append)
			added = bio_add_zone_append_page(bio, page, real_size,
					offset_in_page(offset));
		else
			added = bio_add_page(bio, page, real_size,
					offset_in_page(offset));
		/* Reached zoned boundary */
		if (added == 0)
			submit = true;

		cur_disk_bytenr += added;
		/* Reached stripe boundary */
		if (cur_disk_bytenr == next_stripe_start)
			submit = true;

		/* Finished the range */
		if (cur_disk_bytenr == disk_start + compressed_len)
			submit = true;

		if (submit) {
			if (!skip_sum) {
				ret = btrfs_csum_one_bio(inode, bio, start, true);
				if (ret)
					goto finish_cb;
			}

			ASSERT(bio->bi_iter.bi_size);
			ret = btrfs_map_bio(fs_info, bio, 0);
			if (ret)
				goto finish_cb;
			bio = NULL;
		}
		cond_resched();
	}
	if (blkcg_css)
		kthread_associate_blkcg(NULL);

	return 0;

finish_cb:
	if (blkcg_css)
		kthread_associate_blkcg(NULL);

	if (bio) {
		bio->bi_status = ret;
		bio_endio(bio);
	}
	/* Last byte of @cb is submitted, endio will free @cb */
	if (cur_disk_bytenr == disk_start + compressed_len)
		return ret;

	wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
			   (disk_start + compressed_len - cur_disk_bytenr) >>
			   fs_info->sectorsize_bits);
	/*
	 * Even with previous bio ended, we should still have io not yet
	 * submitted, thus need to finish manually.
	 */
	ASSERT(refcount_read(&cb->pending_sectors));
	/* Now we are the only one referring @cb, can finish it safely. */
	finish_compressed_bio_write(cb);
	return ret;
}

static u64 bio_end_offset(struct bio *bio)
{
	struct bio_vec *last = bio_last_bvec_all(bio);

	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
}

/*
 * Add extra pages in the same compressed file extent so that we don't need to
 * re-read the same extent again and again.
 *
 * NOTE: this won't work well for subpage, as for subpage read, we lock the
 * full page then submit bio for each compressed/regular extents.
 *
 * This means, if we have several sectors in the same page pointing to the same
 * on-disk compressed data, we will re-read the same extent many times and
 * this function can only help for the next page.
 */
static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	unsigned long end_index;
	u64 cur = bio_end_offset(cb->orig_bio);
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	int sectors_missed = 0;

	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	/*
	 * For current subpage support, we only support 64K page size,
	 * which means maximum compressed extent size (128K) is just 2x page
	 * size.
	 * This makes readahead less effective, so disable readahead for
	 * subpage for now, until full compressed write is supported.
	 */
	if (btrfs_sb(inode->i_sb)->sectorsize < PAGE_SIZE)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (cur < compressed_end) {
		u64 page_end;
		u64 pg_index = cur >> PAGE_SHIFT;
		u32 add_size;

		if (pg_index > end_index)
			break;

		page = xa_load(&mapping->i_pages, pg_index);
		if (page && !xa_is_value(page)) {
			sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
					  fs_info->sectorsize_bits;

			/* Beyond threshold, no need to continue */
			if (sectors_missed > 4)
				break;

			/*
			 * Jump to next page start as we already have page for
			 * current offset.
			 */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			/* There is already a page, skip to page end */
			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
			continue;
		}

		ret = set_page_extent_mapped(page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			break;
		}

		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
		lock_extent(tree, cur, page_end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
		read_unlock(&em_tree->lock);

		/*
		 * At this point, we have a locked page in the page cache for
		 * these bytes in the file.  But, we have to make sure they map
		 * to this compressed extent on disk.
		 */
		if (!em || cur < em->start ||
		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, cur, page_end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			size_t zero_offset = offset_in_page(isize);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				memzero_page(page, zero_offset, zeros);
			}
		}

		add_size = min(em->start + em->len, page_end + 1) - cur;
		ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur));
		if (ret != add_size) {
			unlock_extent(tree, cur, page_end);
			unlock_page(page);
			put_page(page);
			break;
		}
		/*
		 * If it's subpage, we also need to increase its
		 * subpage::readers number, as at endio we will decrease
		 * subpage::readers and to unlock the page.
		 */
		if (fs_info->sectorsize < PAGE_SIZE)
			btrfs_subpage_start_reader(fs_info, page, cur, add_size);
		put_page(page);
		cur += add_size;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				  int mirror_num)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	unsigned int compressed_len;
	struct bio *comp_bio = NULL;
	const u64 disk_bytenr = bio->bi_iter.bi_sector << SECTOR_SHIFT;
	u64 cur_disk_byte = disk_bytenr;
	u64 next_stripe_start;
	u64 file_offset;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	blk_status_t ret;
	int ret2;
	int i;
	u8 *sums;

	em_tree = &BTRFS_I(inode)->extent_tree;

	file_offset = bio_first_bvec_all(bio)->bv_offset +
		      page_offset(bio_first_page_all(bio));

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, file_offset, fs_info->sectorsize);
	read_unlock(&em_tree->lock);
	if (!em) {
		ret = BLK_STS_IOERR;
		goto out;
	}

	ASSERT(em->compress_type != BTRFS_COMPRESS_NONE);
	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(fs_info, compressed_len), GFP_NOFS);
	if (!cb) {
		ret = BLK_STS_RESOURCE;
		goto out;
	}

	refcount_set(&cb->pending_sectors, compressed_len >> fs_info->sectorsize_bits);
	cb->status = BLK_STS_OK;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	cb->len = bio->bi_iter.bi_size;
	cb->compressed_len = compressed_len;
	cb->compress_type = em->compress_type;
	cb->orig_bio = bio;

	free_extent_map(em);
	em = NULL;

	cb->nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(cb->nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!cb->compressed_pages) {
		ret = BLK_STS_RESOURCE;
		goto fail;
	}

	ret2 = btrfs_alloc_page_array(cb->nr_pages, cb->compressed_pages);
	if (ret2) {
		ret = BLK_STS_RESOURCE;
		goto fail;
	}

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages() */
	cb->len = bio->bi_iter.bi_size;

	while (cur_disk_byte < disk_bytenr + compressed_len) {
		u64 offset = cur_disk_byte - disk_bytenr;
		unsigned int index = offset >> PAGE_SHIFT;
		unsigned int real_size;
		unsigned int added;
		struct page *page = cb->compressed_pages[index];
		bool submit = false;

		/* Allocate new bio if submitted or not yet allocated */
		if (!comp_bio) {
			comp_bio = alloc_compressed_bio(cb, cur_disk_byte,
					REQ_OP_READ, end_compressed_bio_read,
					&next_stripe_start);
			if (IS_ERR(comp_bio)) {
				ret = errno_to_blk_status(PTR_ERR(comp_bio));
				comp_bio = NULL;
				goto finish_cb;
			}
		}
		/*
		 * We should never reach next_stripe_start, as we submit
		 * comp_bio immediately when we reach the boundary.
		 */
		ASSERT(cur_disk_byte != next_stripe_start);
		/*
		 * We have various limits on the real read size:
		 * - stripe boundary
		 * - page boundary
		 * - compressed length boundary
		 */
		real_size = min_t(u64, U32_MAX, next_stripe_start - cur_disk_byte);
		real_size = min_t(u64, real_size, PAGE_SIZE - offset_in_page(offset));
		real_size = min_t(u64, real_size, compressed_len - offset);
		ASSERT(IS_ALIGNED(real_size, fs_info->sectorsize));

		added = bio_add_page(comp_bio, page, real_size, offset_in_page(offset));
		/*
		 * The maximum compressed extent is smaller than the bio size
		 * limit, thus bio_add_page() should always succeed.
		 */
		ASSERT(added == real_size);
		cur_disk_byte += added;

		/* Reached stripe boundary, need to submit */
		if (cur_disk_byte == next_stripe_start)
			submit = true;

		/* Has finished the range, need to submit */
		if (cur_disk_byte == disk_bytenr + compressed_len)
			submit = true;

		if (submit) {
			unsigned int nr_sectors;

			ret = btrfs_lookup_bio_sums(inode, comp_bio, sums);
			if (ret)
				goto finish_cb;

			nr_sectors = DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
						  fs_info->sectorsize);
			sums += fs_info->csum_size * nr_sectors;

			ASSERT(comp_bio->bi_iter.bi_size);
			ret = btrfs_map_bio(fs_info, comp_bio, mirror_num);
			if (ret)
				goto finish_cb;
			comp_bio = NULL;
		}
	}
	return;

fail:
	if (cb->compressed_pages) {
		for (i = 0; i < cb->nr_pages; i++) {
			if (cb->compressed_pages[i])
				__free_page(cb->compressed_pages[i]);
		}
	}

	kfree(cb->compressed_pages);
	kfree(cb);
out:
	free_extent_map(em);
	bio->bi_status = ret;
	bio_endio(bio);
	return;
finish_cb:
	if (comp_bio) {
		comp_bio->bi_status = ret;
		bio_endio(comp_bio);
	}
	/* All bytes of @cb is submitted, endio will free @cb */
	if (cur_disk_byte == disk_bytenr + compressed_len)
		return;

	wait_var_event(cb, refcount_read(&cb->pending_sectors) ==
			   (disk_bytenr + compressed_len - cur_disk_byte) >>
			   fs_info->sectorsize_bits);
	/*
	 * Even with previous bio ended, we should still have io not yet
	 * submitted, thus need to finish @cb manually.
	 */
	ASSERT(refcount_read(&cb->pending_sectors));
	/* Now we are the only one referring @cb, can finish it safely. */
	finish_compressed_bio_read(cb);
}

/*
 * Heuristic uses systematic sampling to collect data from the input data
 * range, the logic can be tuned by the following constants:
 *
 * @SAMPLING_READ_SIZE - how many bytes will be copied for each sample
 * @SAMPLING_INTERVAL  - range from which the sampled data can be collected
 */
#define SAMPLING_READ_SIZE	(16)
#define SAMPLING_INTERVAL	(256)

/*
 * For statistical analysis of the input data we consider bytes that form a
 * Galois Field of 256 objects. Each object has an attribute count, ie. how
 * many times the object appeared in the sample.
 */
#define BUCKET_SIZE		(256)

/*
 * The size of the sample is based on a statistical sampling rule of thumb.
 * The common way is to perform sampling tests as long as the number of
 * elements in each cell is at least 5.
 *
 * Instead of 5, we choose 32 to obtain more accurate results.
 * If the data contain the maximum number of symbols, which is 256, we obtain a
 * sample size bound by 8192.
 *
 * For a sample of at most 8KB of data per data range: 16 consecutive bytes
 * from up to 512 locations.
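 * That is, with BTRFS_MAX_UNCOMPRESSED == 128KiB:
 * 131072 * 16 / 256 = 8192 bytes of sample per input range.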
 */
#define MAX_SAMPLE_SIZE		(BTRFS_MAX_UNCOMPRESSED *		\
				 SAMPLING_READ_SIZE / SAMPLING_INTERVAL)

struct bucket_item {
	u32 count;
};

struct heuristic_ws {
	/* Partial copy of input data */
	u8 *sample;
	u32 sample_size;
	/* Buckets store counters for each byte value */
	struct bucket_item *bucket;
	/* Sorting buffer */
	struct bucket_item *bucket_b;
	struct list_head list;
};

static struct workspace_manager heuristic_wsm;

static void free_heuristic_ws(struct list_head *ws)
{
	struct heuristic_ws *workspace;

	workspace = list_entry(ws, struct heuristic_ws, list);

	kvfree(workspace->sample);
	kfree(workspace->bucket);
	kfree(workspace->bucket_b);
	kfree(workspace);
}

static struct list_head *alloc_heuristic_ws(unsigned int level)
{
	struct heuristic_ws *ws;

	ws = kzalloc(sizeof(*ws), GFP_KERNEL);
	if (!ws)
		return ERR_PTR(-ENOMEM);

	ws->sample = kvmalloc(MAX_SAMPLE_SIZE, GFP_KERNEL);
	if (!ws->sample)
		goto fail;

	ws->bucket = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket), GFP_KERNEL);
	if (!ws->bucket)
		goto fail;

	ws->bucket_b = kcalloc(BUCKET_SIZE, sizeof(*ws->bucket_b), GFP_KERNEL);
	if (!ws->bucket_b)
		goto fail;

	INIT_LIST_HEAD(&ws->list);
	return &ws->list;
fail:
	free_heuristic_ws(&ws->list);
	return ERR_PTR(-ENOMEM);
}

const struct btrfs_compress_op btrfs_heuristic_compress = {
	.workspace_manager = &heuristic_wsm,
};

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	/* The heuristic is represented as compression type 0 */
	&btrfs_heuristic_compress,
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
	&btrfs_zstd_compress,
};

static struct list_head *alloc_workspace(int type, unsigned int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return alloc_heuristic_ws(level);
	case BTRFS_COMPRESS_ZLIB: return zlib_alloc_workspace(level);
	case BTRFS_COMPRESS_LZO:  return lzo_alloc_workspace(level);
	case BTRFS_COMPRESS_ZSTD: return zstd_alloc_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void free_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return free_heuristic_ws(ws);
	case BTRFS_COMPRESS_ZLIB: return zlib_free_workspace(ws);
	case BTRFS_COMPRESS_LZO:  return lzo_free_workspace(ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_free_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

static void btrfs_init_workspace_manager(int type)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;

	wsm = btrfs_compress_op[type]->workspace_manager;
	INIT_LIST_HEAD(&wsm->idle_ws);
	spin_lock_init(&wsm->ws_lock);
	atomic_set(&wsm->total_ws, 0);
	init_waitqueue_head(&wsm->ws_wait);

	/*
	 * Preallocate one workspace for each compression type so we can
	 * guarantee forward progress in the worst case
	 */
	workspace = alloc_workspace(type, 0);
	if (IS_ERR(workspace)) {
		pr_warn(
	"BTRFS: cannot preallocate compression workspace, will try later\n");
	} else {
		atomic_set(&wsm->total_ws, 1);
		wsm->free_ws = 1;
		list_add(workspace, &wsm->idle_ws);
	}
}

static void btrfs_cleanup_workspace_manager(int type)
{
	struct workspace_manager *wsman;
	struct list_head *ws;

	wsman = btrfs_compress_op[type]->workspace_manager;
	while (!list_empty(&wsman->idle_ws)) {
		ws = wsman->idle_ws.next;
		list_del(ws);
		free_workspace(type, ws);
		atomic_dec(&wsman->total_ws);
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation guarantees forward progress, so we do not return
 * errors.
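 *
 * Callers are expected to pair this with btrfs_put_workspace() (or the
 * put_workspace() wrapper) once the workspace is no longer needed.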
 */
struct list_head *btrfs_get_workspace(int type, unsigned int level)
{
	struct workspace_manager *wsm;
	struct list_head *workspace;
	int cpus = num_online_cpus();
	unsigned nofs_flag;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;

	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	/*
	 * Allocation helpers call vmalloc that can't use GFP_NOFS, so we have
	 * to turn it off here because we might get called from the restricted
	 * context of btrfs_compress_bio/btrfs_compress_pages
	 */
	nofs_flag = memalloc_nofs_save();
	workspace = alloc_workspace(type, level);
	memalloc_nofs_restore(nofs_flag);

	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting. There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually. This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				pr_warn("BTRFS: no compression workspaces, low memory, retrying\n");
			}
		}
		goto again;
	}
	return workspace;
}

static struct list_head *get_workspace(int type, int level)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZLIB: return zlib_get_workspace(level);
	case BTRFS_COMPRESS_LZO:  return btrfs_get_workspace(type, level);
	case BTRFS_COMPRESS_ZSTD: return zstd_get_workspace(level);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
void btrfs_put_workspace(int type, struct list_head *ws)
{
	struct workspace_manager *wsm;
	struct list_head *idle_ws;
	spinlock_t *ws_lock;
	atomic_t *total_ws;
	wait_queue_head_t *ws_wait;
	int *free_ws;

	wsm = btrfs_compress_op[type]->workspace_manager;
	idle_ws	 = &wsm->idle_ws;
	ws_lock	 = &wsm->ws_lock;
	total_ws = &wsm->total_ws;
	ws_wait	 = &wsm->ws_wait;
	free_ws	 = &wsm->free_ws;

	spin_lock(ws_lock);
	if (*free_ws <= num_online_cpus()) {
		list_add(ws, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	free_workspace(type, ws);
	atomic_dec(total_ws);
wake:
	cond_wake_up(ws_wait);
}

static void put_workspace(int type, struct list_head *ws)
{
	switch (type) {
	case BTRFS_COMPRESS_NONE: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZLIB: return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_LZO:  return btrfs_put_workspace(type, ws);
	case BTRFS_COMPRESS_ZSTD: return zstd_put_workspace(ws);
	default:
		/*
		 * This can't happen, the type is validated several times
		 * before we get here.
		 */
		BUG();
	}
}

/*
 * Adjust @level according to the limits of the compression algorithm or
 * fallback to default
 */
static unsigned int btrfs_compress_set_level(int type, unsigned level)
{
	const struct btrfs_compress_op *ops = btrfs_compress_op[type];

	if (level == 0)
		level = ops->default_level;
	else
		level = min(level, ops->max_level);

	return level;
}

/*
 * Given an address space and start and length, compress the bytes into @pages
 * that are allocated on demand.
 *
 * @type_level is encoded algorithm and level, where level 0 means whatever
 * default the algorithm chooses and is opaque here;
 * - the compression algorithm is in the low 4 bits (types 0-3)
 * - the level is stored in bits 4-7
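 *
 * For example, zstd (type 3) at level 5 would be encoded as (5 << 4) | 3,
 * i.e. 0x53 (illustrative of the encoding only).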
 *
 * @out_pages is an in/out parameter, holds maximum number of pages to allocate
 * and returns number of actually allocated pages
 *
 * @total_in is used to return the number of bytes actually read.  It
 * may be smaller than the input length if we had to exit early because we
 * ran out of room in the pages array or because we cross the
 * max_out threshold.
 *
 * @total_out is an in/out parameter, must be set to the input length and will
 * be also used to return the total number of compressed bytes
 */
int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
			 u64 start, struct page **pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out)
{
	int type = btrfs_compress_type(type_level);
	int level = btrfs_compress_level(type_level);
	struct list_head *workspace;
	int ret;

	level = btrfs_compress_set_level(type, level);
	workspace = get_workspace(type, level);
	ret = compression_compress_pages(type, workspace, mapping, start, pages,
					 out_pages, total_in, total_out);
	put_workspace(type, workspace);
	return ret;
}

static int btrfs_decompress_bio(struct compressed_bio *cb)
{
	struct list_head *workspace;
	int ret;
	int type = cb->compress_type;

	workspace = get_workspace(type, 0);
	ret = compression_decompress_bio(workspace, cb);
	put_workspace(type, workspace);

	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = get_workspace(type, 0);
	ret = compression_decompress(type, workspace, data_in, dest_page,
				     start_byte, srclen, destlen);
	put_workspace(type, workspace);

	return ret;
}

void __init btrfs_init_compress(void)
{
	btrfs_init_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_init_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_init_workspace_manager();
}

void __cold btrfs_exit_compress(void)
{
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_NONE);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_ZLIB);
	btrfs_cleanup_workspace_manager(BTRFS_COMPRESS_LZO);
	zstd_cleanup_workspace_manager();
}

/*
 * Copy decompressed data from working buffer to pages.
 *
 * @buf:		The decompressed data buffer
 * @buf_len:		The decompressed data length
 * @decompressed:	Number of bytes that are already decompressed inside the
 * 			compressed extent
 * @cb:			The compressed extent descriptor
 * @orig_bio:		The original bio that the caller wants to read for
 *
 * An easier to understand graph is like below:
 *
 * 		|<- orig_bio ->|     |<- orig_bio->|
 * 	|<-------      full decompressed extent      ----->|
 * 	|<-----------    @cb range   ---->|
 * 	|			|<-- @buf_len -->|
 * 	|<--- @decompressed --->|
 *
 * Note that @cb can be a subpage of the full decompressed extent, but
 * @cb->start always matches the file offset of the full decompressed
 * extent.
 *
 * When reading a compressed extent, we have to read the full compressed
 * extent, while @orig_bio may only want part of the range.
 * Thus this function will ensure that only data covered by @orig_bio gets
 * copied.
 *
 * Return 0 if we have copied all needed contents for @orig_bio.
 * Return >0 if we need to continue decompressing.
 */
int btrfs_decompress_buf2page(const char *buf, u32 buf_len,
			      struct compressed_bio *cb, u32 decompressed)
{
	struct bio *orig_bio = cb->orig_bio;
	/* Offset inside the full decompressed extent */
	u32 cur_offset;

	cur_offset = decompressed;
	/* The main loop to do the copy */
	while (cur_offset < decompressed + buf_len) {
		struct bio_vec bvec;
		size_t copy_len;
		u32 copy_start;
		/* Offset inside the full decompressed extent */
		u32 bvec_offset;

		bvec = bio_iter_iovec(orig_bio, orig_bio->bi_iter);
		/*
		 * cb->start may underflow, but subtracting that value can still
		 * give us correct offset inside the full decompressed extent.
		 */
		bvec_offset = page_offset(bvec.bv_page) + bvec.bv_offset - cb->start;

		/* Haven't reached the bvec range, exit */
		if (decompressed + buf_len <= bvec_offset)
			return 1;

		copy_start = max(cur_offset, bvec_offset);
		copy_len = min(bvec_offset + bvec.bv_len,
			       decompressed + buf_len) - copy_start;
		ASSERT(copy_len);

		/*
		 * Extra range check to ensure we didn't go beyond
		 * @buf + @buf_len.
		 */
		ASSERT(copy_start - decompressed < buf_len);
		memcpy_to_page(bvec.bv_page, bvec.bv_offset,
			       buf + copy_start - decompressed, copy_len);
		cur_offset += copy_len;

		bio_advance(orig_bio, copy_len);
		/* Finished the bio */
		if (!orig_bio->bi_iter.bi_size)
			return 0;
	}
	return 1;
}

/*
 * Shannon Entropy calculation
 *
 * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
 * For convenience, return the percentage of needed bits, instead of amount of
 * bits directly.
 *
 * @ENTROPY_LVL_ACEPTABLE - below that threshold, sample has low byte entropy
 *			    and can be compressible with high probability
 *
 * @ENTROPY_LVL_HIGH - data are not compressible with high probability
 *
 * Use of ilog2() decreases precision, we lower the LVL to 5 to compensate.
 */
#define ENTROPY_LVL_ACEPTABLE		(65)
#define ENTROPY_LVL_HIGH		(80)

/*
 * For increased precision in shannon_entropy calculation,
 * let's do pow(n, M) to save more digits after comma:
 *
 * - maximum int bit length is 64
 * - ilog2(MAX_SAMPLE_SIZE)	-> 13
 * - 13 * 4 = 52 < 64		-> M = 4
 *
 * So use pow(n, 4).
 */
static inline u32 ilog2_w(u64 n)
{
	return ilog2(n * n * n * n);
}

static u32 shannon_entropy(struct heuristic_ws *ws)
{
	const u32 entropy_max = 8 * ilog2_w(2);
	u32 entropy_sum = 0;
	u32 p, p_base, sz_base;
	u32 i;

	sz_base = ilog2_w(ws->sample_size);
	for (i = 0; i < BUCKET_SIZE && ws->bucket[i].count > 0; i++) {
		p = ws->bucket[i].count;
		p_base = ilog2_w(p);
		entropy_sum += p * (sz_base - p_base);
	}

	entropy_sum /= ws->sample_size;
	return entropy_sum * 100 / entropy_max;
}

#define RADIX_BASE		4U
#define COUNTERS_SIZE		(1U << RADIX_BASE)

static u8 get4bits(u64 num, int shift) {
	u8 low4bits;

	num >>= shift;
	/* Reverse order */
	low4bits = (COUNTERS_SIZE - 1) - (num % COUNTERS_SIZE);
	return low4bits;
}

/*
 * Use 4 bits as radix base
 * Use 16 u32 counters for calculating new position in buf array
 *
 * @array     - array that will be sorted
 * @array_buf - buffer array to store sorting results
 *              must be equal in size to @array
 * @num       - array size
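 *
 * Note: get4bits() reverses each 4-bit digit, so the array ends up sorted
 * in descending order of count (see byte_core_set_size() below).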
 */
static void radix_sort(struct bucket_item *array, struct bucket_item *array_buf,
		       int num)
{
	u64 max_num;
	u64 buf_num;
	u32 counters[COUNTERS_SIZE];
	u32 new_addr;
	u32 addr;
	int bitlen;
	int shift;
	int i;

	/*
	 * Try to avoid useless loop iterations for small numbers stored in big
	 * counters.  Example: 48 33 4 ... in 64bit array
	 */
	max_num = array[0].count;
	for (i = 1; i < num; i++) {
		buf_num = array[i].count;
		if (buf_num > max_num)
			max_num = buf_num;
	}

	buf_num = ilog2(max_num);
	bitlen = ALIGN(buf_num, RADIX_BASE * 2);

	shift = 0;
	while (shift < bitlen) {
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i++) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array_buf[new_addr] = array[i];
		}

		shift += RADIX_BASE;

		/*
		 * Normal radix sort expects to move data from a temporary
		 * array to the main one, but that requires some CPU time.
		 * Avoid that by doing another sort iteration into the
		 * original array instead of a memcpy().
		 */
		memset(counters, 0, sizeof(counters));

		for (i = 0; i < num; i ++) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]++;
		}

		for (i = 1; i < COUNTERS_SIZE; i++)
			counters[i] += counters[i - 1];

		for (i = num - 1; i >= 0; i--) {
			buf_num = array_buf[i].count;
			addr = get4bits(buf_num, shift);
			counters[addr]--;
			new_addr = counters[addr];
			array[new_addr] = array_buf[i];
		}

		shift += RADIX_BASE;
	}
}

/*
 * Size of the core byte set - how many bytes cover 90% of the sample
 *
 * There are several types of structured binary data that use nearly all byte
 * values. The distribution can be uniform and counts in all buckets will be
 * nearly the same (eg. encrypted data). Unlikely to be compressible.
 *
 * Other possibility is normal (Gaussian) distribution, where the data could
 * be potentially compressible, but we have to take a few more steps to decide
 * how much.
 *
 * @BYTE_CORE_SET_LOW  - main part of byte values repeated frequently,
 *                       compression algo can easy fix that
 * @BYTE_CORE_SET_HIGH - data have uniform distribution and with high
 *                       probability is not compressible
 */
#define BYTE_CORE_SET_LOW		(64)
#define BYTE_CORE_SET_HIGH		(200)

static int byte_core_set_size(struct heuristic_ws *ws)
{
	u32 i;
	u32 coreset_sum = 0;
	const u32 core_set_threshold = ws->sample_size * 90 / 100;
	struct bucket_item *bucket = ws->bucket;

	/* Sort in reverse order */
	radix_sort(ws->bucket, ws->bucket_b, BUCKET_SIZE);

	for (i = 0; i < BYTE_CORE_SET_LOW; i++)
		coreset_sum += bucket[i].count;

	if (coreset_sum > core_set_threshold)
		return i;

	for (; i < BYTE_CORE_SET_HIGH && bucket[i].count > 0; i++) {
		coreset_sum += bucket[i].count;
		if (coreset_sum > core_set_threshold)
			break;
	}

	return i;
}

/*
 * Count byte values in buckets.
 * This heuristic can detect textual data (configs, xml, json, html, etc).
 * Because in most text-like data byte set is restricted to limited number of
 * possible characters, and that restriction in most cases makes data easy to
 * compress.
 *
 * @BYTE_SET_THRESHOLD - consider all data within this byte set size:
 *	less - compressible
 *	more - need additional analysis
 */
#define BYTE_SET_THRESHOLD		(64)

static u32 byte_set_size(const struct heuristic_ws *ws)
{
	u32 i;
	u32 byte_set_size = 0;

	for (i = 0; i < BYTE_SET_THRESHOLD; i++) {
		if (ws->bucket[i].count > 0)
			byte_set_size++;
	}

	/*
	 * Continue collecting count of byte values in buckets.  If the byte
	 * set size is bigger than the threshold, it's pointless to continue,
	 * the detection technique would fail for this type of data.
	 */
	for (; i < BUCKET_SIZE; i++) {
		if (ws->bucket[i].count > 0) {
			byte_set_size++;
			if (byte_set_size > BYTE_SET_THRESHOLD)
				return byte_set_size;
		}
	}

	return byte_set_size;
}

static bool sample_repeated_patterns(struct heuristic_ws *ws)
{
	const u32 half_of_sample = ws->sample_size / 2;
	const u8 *data = ws->sample;

	return memcmp(&data[0], &data[half_of_sample], half_of_sample) == 0;
}

static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
				     struct heuristic_ws *ws)
{
	struct page *page;
	u64 index, index_end;
	u32 i, curr_sample_pos;
	u8 *in_data;

	/*
	 * Compression handles the input data by chunks of 128KiB
	 * (defined by BTRFS_MAX_UNCOMPRESSED)
	 *
	 * We do the same for the heuristic and loop over the whole range.
	 *
	 * MAX_SAMPLE_SIZE - calculated under assumption that heuristic will
	 * process no more than BTRFS_MAX_UNCOMPRESSED at a time.
	 */
	if (end - start > BTRFS_MAX_UNCOMPRESSED)
		end = start + BTRFS_MAX_UNCOMPRESSED;

	index = start >> PAGE_SHIFT;
	index_end = end >> PAGE_SHIFT;

	/* Don't miss unaligned end */
	if (!IS_ALIGNED(end, PAGE_SIZE))
		index_end++;

	curr_sample_pos = 0;
	while (index < index_end) {
		page = find_get_page(inode->i_mapping, index);
		in_data = kmap_local_page(page);
		/* Handle case where the start is not aligned to PAGE_SIZE */
		i = start % PAGE_SIZE;
		while (i < PAGE_SIZE - SAMPLING_READ_SIZE) {
			/* Don't sample any garbage from the last page */
			if (start > end - SAMPLING_READ_SIZE)
				break;
			memcpy(&ws->sample[curr_sample_pos], &in_data[i],
					SAMPLING_READ_SIZE);
			i += SAMPLING_INTERVAL;
			start += SAMPLING_INTERVAL;
			curr_sample_pos += SAMPLING_READ_SIZE;
		}
		kunmap_local(in_data);
		put_page(page);

		index++;
	}

	ws->sample_size = curr_sample_pos;
}

/*
 * Compression heuristic.
 *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
 * quickly (compared to direct compression) detect data characteristics
 * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
 * data.
 *
 * The following types of analysis can be performed:
 * - detect mostly zero data
 * - detect data with low "byte set" size (text, etc)
 * - detect data with low/high "core byte" set
 *
 * Return non-zero if the compression should be done, 0 otherwise.
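 *
 * The specific non-zero value encodes which detector made the decision
 * (1: repeated pattern, 2: small byte set, 3: small core byte set,
 * 4/5: entropy level), though callers only care about zero vs non-zero.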
 */
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
{
	struct list_head *ws_list = get_workspace(0, 0);
	struct heuristic_ws *ws;
	u32 i;
	u8 byte;
	int ret = 0;

	ws = list_entry(ws_list, struct heuristic_ws, list);

	heuristic_collect_sample(inode, start, end, ws);

	if (sample_repeated_patterns(ws)) {
		ret = 1;
		goto out;
	}

	memset(ws->bucket, 0, sizeof(*ws->bucket)*BUCKET_SIZE);

	for (i = 0; i < ws->sample_size; i++) {
		byte = ws->sample[i];
		ws->bucket[byte].count++;
	}

	i = byte_set_size(ws);
	if (i < BYTE_SET_THRESHOLD) {
		ret = 2;
		goto out;
	}

	i = byte_core_set_size(ws);
	if (i <= BYTE_CORE_SET_LOW) {
		ret = 3;
		goto out;
	}

	if (i >= BYTE_CORE_SET_HIGH) {
		ret = 0;
		goto out;
	}

	i = shannon_entropy(ws);
	if (i <= ENTROPY_LVL_ACEPTABLE) {
		ret = 4;
		goto out;
	}

	/*
	 * For the levels below ENTROPY_LVL_HIGH, additional analysis would be
	 * needed to give green light to compression.
	 *
	 * For now just assume that compression at that level is not worth the
	 * resources because:
	 *
	 * 1. it is possible to defrag the data later
	 *
	 * 2. the data would turn out to be hardly compressible, eg. 150 byte
	 * values, every bucket has counter at level ~54. The heuristic would
	 * be confused. This can happen when data have some internal repeated
	 * patterns like "abbacbbc...". This can be detected by analyzing
	 * pairs of bytes, which is too costly.
	 */
	if (i < ENTROPY_LVL_HIGH) {
		ret = 5;
		goto out;
	} else {
		ret = 0;
		goto out;
	}

out:
	put_workspace(0, ws_list);
	return ret;
}

/*
 * Convert the compression suffix (eg. after "zlib" starting with ":") to
 * level, unrecognized string will set the default level
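 *
 * For example, the mount option "compress=zlib:9" passes ":9" here and
 * yields level 9; a missing or unparsable suffix falls back to the
 * algorithm's default via btrfs_compress_set_level().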
 */
unsigned int btrfs_compress_str2level(unsigned int type, const char *str)
{
	unsigned int level = 0;
	int ret;

	if (!type)
		return 0;

	if (str[0] == ':') {
		ret = kstrtouint(str + 1, 10, &level);
		if (ret)
			level = 0;
	}

	level = btrfs_compress_set_level(type, level);

	return level;
}