/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", zram->init_done);
}

static ssize_t num_reads_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.num_reads));
}

static ssize_t num_writes_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.num_writes));
}

static ssize_t invalid_io_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.invalid_io));
}

static ssize_t notify_free_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.notify_free));
}

static ssize_t zero_pages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
		(u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t compr_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return sprintf(buf, "%llu\n",
			(u64)atomic64_read(&zram->stats.compr_size));
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (zram->init_done)
		val = zs_get_total_size_bytes(meta->mem_pool);
	up_read(&zram->init_lock);

	return sprintf(buf, "%llu\n", val);
}

/* flag operations need meta->tb_lock */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].flags & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].flags &= ~BIT(flag);
}

static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	kfree(meta->compress_workmem);
	free_pages((unsigned long)meta->compress_buffer, 1);
	vfree(meta->table);
	kfree(meta);
}

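/*
 * Allocate the per-device metadata: the LZO compression workspace, a
 * two-page compression buffer, the per-page table and the zsmalloc pool.
 */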
static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	if (!meta->compress_workmem)
		goto free_meta;

	meta->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!meta->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		goto free_workmem;
	}

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_buffer;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	rwlock_init(&meta->tb_lock);
	return meta;

free_table:
	vfree(meta->table);
free_buffer:
	free_pages((unsigned long)meta->compress_buffer, 1);
free_workmem:
	kfree(meta->compress_workmem);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

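/*
 * Advance (index, offset) past the bytes covered by @bvec; when the end
 * of the bvec reaches or crosses a page boundary, move to the next
 * zram page.
 */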
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

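/* Return 1 if the page at @ptr contains only zero bytes. */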
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

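/*
 * Satisfy a read of a zero-filled page: clear the whole page, or only
 * the requested range for a partial request.
 */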
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}

/* NOTE: caller should hold meta->tb_lock for writing */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;
	u16 size = meta->table[index].size;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic_dec(&zram->stats.pages_zero);
		}
		return;
	}

	if (unlikely(size > max_zpage_size))
		atomic_dec(&zram->stats.bad_compress);

	zs_free(meta->mem_pool, handle);

	if (size <= PAGE_SIZE / 2)
		atomic_dec(&zram->stats.good_compress);

	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
	atomic_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	meta->table[index].size = 0;
}

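/*
 * Decompress the slot at @index into @mem (one full page). Unallocated
 * and zero-filled slots simply yield a cleared page.
 */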
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = LZO_E_OK;
	size_t clen = PAGE_SIZE;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	u16 size;

	read_lock(&meta->tb_lock);
	handle = meta->table[index].handle;
	size = meta->table[index].size;

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
	zs_unmap_object(meta->mem_pool, handle);
	read_unlock(&meta->tb_lock);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		atomic64_inc(&zram->stats.failed_reads);
		return ret;
	}

	return 0;
}

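/*
 * Read one bio_vec worth of data. Partial requests are decompressed
 * into a bounce buffer first, then the requested range is copied out.
 */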
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	read_lock(&meta->tb_lock);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		read_unlock(&meta->tb_lock);
		handle_zero_page(bvec);
		return 0;
	}
	read_unlock(&meta->tb_lock);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

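/*
 * Compress and store one bio_vec. Zero-filled pages are recorded with
 * only a flag; incompressible pages (clen > max_zpage_size) are stored
 * uncompressed.
 */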
static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;

	page = bvec->bv_page;
	src = meta->compress_buffer;

	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		write_lock(&zram->meta->tb_lock);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		write_unlock(&zram->meta->tb_lock);

		atomic_inc(&zram->stats.pages_zero);
		ret = 0;
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
			       meta->compress_workmem);

	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	if (unlikely(clen > max_zpage_size)) {
		atomic_inc(&zram->stats.bad_compress);
		clen = PAGE_SIZE;
		src = NULL;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}
	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	write_lock(&zram->meta->tb_lock);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	meta->table[index].size = clen;
	write_unlock(&zram->meta->tb_lock);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_size);
	atomic_inc(&zram->stats.pages_stored);
	if (clen <= PAGE_SIZE / 2)
		atomic_inc(&zram->stats.good_compress);

out:
	if (is_partial_io(bvec))
		kfree(uncmem);

	if (ret)
		atomic64_inc(&zram->stats.failed_writes);
	return ret;
}

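/* Free every slot queued by zram_slot_free_notify(). */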
static void handle_pending_slot_free(struct zram *zram)
{
	struct zram_slot_free *free_rq;

	spin_lock(&zram->slot_free_lock);
	while (zram->slot_free_rq) {
		free_rq = zram->slot_free_rq;
		zram->slot_free_rq = free_rq->next;
		zram_free_page(zram, free_rq->index);
		kfree(free_rq);
	}
	spin_unlock(&zram->slot_free_lock);
}

static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio, int rw)
{
	int ret;

	if (rw == READ) {
		down_read(&zram->lock);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
		up_read(&zram->lock);
	} else {
		down_write(&zram->lock);
		handle_pending_slot_free(zram);
		ret = zram_bvec_write(zram, bvec, index, offset);
		up_write(&zram->lock);
	}

	return ret;
}

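/*
 * Tear down an initialized device: drop every stored page, free the
 * metadata and clear the stats.
 */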
static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);
	if (!zram->init_done) {
		up_write(&zram->init_lock);
		return;
	}

	flush_work(&zram->free_work);

	meta = zram->meta;
	zram->init_done = 0;

	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);
	up_write(&zram->init_lock);
}

static void zram_init_device(struct zram *zram, struct zram_meta *meta)
{
	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %lu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
		);
	}

	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);

	zram->meta = meta;
	zram->init_done = 1;

	pr_debug("Initialization done!\n");
}

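/*
 * Set the virtual disk size. Allowed only before the device is
 * initialized, e.g.:
 *	echo 1G > /sys/block/zram0/disksize
 */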
static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	down_write(&zram->init_lock);
	if (zram->init_done) {
		up_write(&zram->init_lock);
		zram_meta_free(meta);
		pr_info("Cannot change disksize for initialized device\n");
		return -EBUSY;
	}

	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	zram_init_device(zram, meta);
	up_write(&zram->init_lock);

	return len;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

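/*
 * Walk the bio segment by segment, splitting any bvec that spans two
 * zram pages, and hand each piece to zram_bvec_rw().
 */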
static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	switch (rw) {
	case READ:
		atomic64_inc(&zram->stats.num_reads);
		break;
	case WRITE:
		atomic64_inc(&zram->stats.num_writes);
		break;
	}

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
			    < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

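/* Work handler: drain the pending-free list under the write lock. */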
static void zram_slot_free(struct work_struct *work)
{
	struct zram *zram;

	zram = container_of(work, struct zram, free_work);
	down_write(&zram->lock);
	handle_pending_slot_free(zram);
	up_write(&zram->lock);
}

static void add_slot_free(struct zram *zram, struct zram_slot_free *free_rq)
{
	spin_lock(&zram->slot_free_lock);
	free_rq->next = zram->slot_free_rq;
	zram->slot_free_rq = free_rq;
	spin_unlock(&zram->slot_free_lock);
}

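/*
 * Called when swap frees a slot. We may not sleep in this context, so
 * queue the index and let the workqueue do the actual free.
 */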
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_slot_free *free_rq;

	zram = bdev->bd_disk->private_data;
	atomic64_inc(&zram->stats.notify_free);

	free_rq = kmalloc(sizeof(struct zram_slot_free), GFP_ATOMIC);
	if (!free_rq)
		return;

	free_rq->index = index;
	add_slot_free(zram, free_rq);
	schedule_work(&zram->free_work);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->lock);
	init_rwsem(&zram->init_lock);

	INIT_WORK(&zram->free_work, zram_slot_free);
	spin_lock_init(&zram->slot_free_lock);
	zram->slot_free_rq = NULL;

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	/* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);

	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE-sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group\n");
		goto out_free_disk;
	}

	zram->init_done = 0;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");