/*
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *               2012, 2013 Minchan Kim
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the licence that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 *
 */

#define KMSG_COMPONENT "zram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#ifdef CONFIG_ZRAM_DEBUG
#define DEBUG
#endif

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/device.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>

#include "zram_drv.h"

/* Globals */
static int zram_major;
static struct zram *zram_devices;
static const char *default_compressor = "lzo";

/* Module params (documentation at end) */
static unsigned int num_devices = 1;

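/*
 * Generates a read-only sysfs show handler plus the matching
 * device_attribute for the atomic64 counter zram->stats.<name>,
 * exposed as /sys/block/zram<id>/<name>.
 */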
#define ZRAM_ATTR_RO(name)						\
static ssize_t zram_attr_##name##_show(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	struct zram *zram = dev_to_zram(d);				\
	return scnprintf(b, PAGE_SIZE, "%llu\n",			\
		(u64)atomic64_read(&zram->stats.name));			\
}									\
static struct device_attribute dev_attr_##name =			\
	__ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);

static inline int init_done(struct zram *zram)
{
	return zram->meta != NULL;
}

static inline struct zram *dev_to_zram(struct device *dev)
{
	return (struct zram *)dev_to_disk(dev)->private_data;
}

static ssize_t disksize_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
}

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u32 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = init_done(zram);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t orig_data_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct zram *zram = dev_to_zram(dev);

	return scnprintf(buf, PAGE_SIZE, "%llu\n",
		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
}

static ssize_t mem_used_total_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = zs_get_total_pages(meta->mem_pool);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t max_comp_streams_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->max_comp_streams;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t mem_limit_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	val = zram->limit_pages;
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

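/*
 * Accepts a human-readable size parsed by memparse(), e.g. "256M".
 * Writing 0 disables the limit.
 */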
static ssize_t mem_limit_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 limit;
	char *tmp;
	struct zram *zram = dev_to_zram(dev);

	limit = memparse(buf, &tmp);
	if (buf == tmp) /* no chars parsed, invalid input */
		return -EINVAL;

	down_write(&zram->init_lock);
	zram->limit_pages = PAGE_ALIGN(limit) >> PAGE_SHIFT;
	up_write(&zram->init_lock);

	return len;
}

static ssize_t mem_used_max_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u64 val = 0;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	if (init_done(zram))
		val = atomic_long_read(&zram->stats.max_used_pages);
	up_read(&zram->init_lock);

	return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
}

static ssize_t mem_used_max_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int err;
	unsigned long val;
	struct zram *zram = dev_to_zram(dev);
	struct zram_meta *meta = zram->meta;

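	/* only "0" is accepted; it resets max_used_pages to current usage */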
	err = kstrtoul(buf, 10, &val);
	if (err || val != 0)
		return -EINVAL;

	down_read(&zram->init_lock);
	if (init_done(zram))
		atomic_long_set(&zram->stats.max_used_pages,
				zs_get_total_pages(meta->mem_pool));
	up_read(&zram->init_lock);

	return len;
}

static ssize_t max_comp_streams_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int num;
	struct zram *zram = dev_to_zram(dev);
	int ret;

	ret = kstrtoint(buf, 0, &num);
	if (ret < 0)
		return ret;
	if (num < 1)
		return -EINVAL;

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		if (!zcomp_set_max_streams(zram->comp, num)) {
			pr_info("Cannot change max compression streams\n");
			ret = -EINVAL;
			goto out;
		}
	}

	zram->max_comp_streams = num;
	ret = len;
out:
	up_write(&zram->init_lock);
	return ret;
}

static ssize_t comp_algorithm_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	size_t sz;
	struct zram *zram = dev_to_zram(dev);

	down_read(&zram->init_lock);
	sz = zcomp_available_show(zram->compressor, buf);
	up_read(&zram->init_lock);

	return sz;
}

static ssize_t comp_algorithm_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct zram *zram = dev_to_zram(dev);
	down_write(&zram->init_lock);
	if (init_done(zram)) {
		up_write(&zram->init_lock);
		pr_info("Can't change algorithm for initialized device\n");
		return -EBUSY;
	}
	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
	up_write(&zram->init_lock);
	return len;
}

/* flag operations need the table entry's bit_spinlock (ZRAM_ACCESS) */
static int zram_test_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	return meta->table[index].value & BIT(flag);
}

static void zram_set_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value |= BIT(flag);
}

static void zram_clear_flag(struct zram_meta *meta, u32 index,
			enum zram_pageflags flag)
{
	meta->table[index].value &= ~BIT(flag);
}

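/*
 * table[index].value packs the zram_pageflags above ZRAM_FLAG_SHIFT and
 * the compressed object size in the low ZRAM_FLAG_SHIFT bits.
 */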
static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
{
	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static void zram_set_obj_size(struct zram_meta *meta,
					u32 index, size_t size)
{
	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;

	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
}

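/* a partial I/O covers only part of a PAGE_SIZE page */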
static inline int is_partial_io(struct bio_vec *bvec)
{
	return bvec->bv_len != PAGE_SIZE;
}

/*
 * Check if request is within bounds and aligned on zram logical blocks.
 */
static inline int valid_io_request(struct zram *zram, struct bio *bio)
{
	u64 start, end, bound;

	/* unaligned request */
	if (unlikely(bio->bi_iter.bi_sector &
		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
		return 0;
	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
		return 0;

	start = bio->bi_iter.bi_sector;
	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
	bound = zram->disksize >> SECTOR_SHIFT;
	/* out of range */
	if (unlikely(start >= bound || end > bound || start > end))
		return 0;

	/* I/O request is valid */
	return 1;
}

static void zram_meta_free(struct zram_meta *meta)
{
	zs_destroy_pool(meta->mem_pool);
	vfree(meta->table);
	kfree(meta);
}

static struct zram_meta *zram_meta_alloc(u64 disksize)
{
	size_t num_pages;
	struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
	if (!meta)
		goto out;

	num_pages = disksize >> PAGE_SHIFT;
	meta->table = vzalloc(num_pages * sizeof(*meta->table));
	if (!meta->table) {
		pr_err("Error allocating zram address table\n");
		goto free_meta;
	}

	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
	if (!meta->mem_pool) {
		pr_err("Error creating memory pool\n");
		goto free_table;
	}

	return meta;

free_table:
	vfree(meta->table);
free_meta:
	kfree(meta);
	meta = NULL;
out:
	return meta;
}

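/* advance (index, offset) past the byte range covered by @bvec */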
static void update_position(u32 *index, int *offset, struct bio_vec *bvec)
{
	if (*offset + bvec->bv_len >= PAGE_SIZE)
		(*index)++;
	*offset = (*offset + bvec->bv_len) % PAGE_SIZE;
}

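/* return 1 iff the page contains only zero bytes, scanned a long at a time */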
static int page_zero_filled(void *ptr)
{
	unsigned int pos;
	unsigned long *page;

	page = (unsigned long *)ptr;

	for (pos = 0; pos != PAGE_SIZE / sizeof(*page); pos++) {
		if (page[pos])
			return 0;
	}

	return 1;
}

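/* serve a read of a zero-filled page by clearing the target buffer */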
static void handle_zero_page(struct bio_vec *bvec)
{
	struct page *page = bvec->bv_page;
	void *user_mem;

	user_mem = kmap_atomic(page);
	if (is_partial_io(bvec))
		memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
	else
		clear_page(user_mem);
	kunmap_atomic(user_mem);

	flush_dcache_page(page);
}


/*
 * To protect concurrent access to the same index entry,
 * caller should hold this table index entry's bit_spinlock to
 * indicate this index entry is being accessed.
 */
static void zram_free_page(struct zram *zram, size_t index)
{
	struct zram_meta *meta = zram->meta;
	unsigned long handle = meta->table[index].handle;

	if (unlikely(!handle)) {
		/*
		 * No memory is allocated for zero filled pages.
		 * Simply clear zero page flag.
		 */
		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
			zram_clear_flag(meta, index, ZRAM_ZERO);
			atomic64_dec(&zram->stats.zero_pages);
		}
		return;
	}

	zs_free(meta->mem_pool, handle);

	atomic64_sub(zram_get_obj_size(meta, index),
			&zram->stats.compr_data_size);
	atomic64_dec(&zram->stats.pages_stored);

	meta->table[index].handle = 0;
	zram_set_obj_size(meta, index, 0);
}

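/*
 * Decompress table[index] into @mem (a PAGE_SIZE buffer). Unallocated
 * and ZRAM_ZERO slots are served as a cleared page.
 */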
static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
{
	int ret = 0;
	unsigned char *cmem;
	struct zram_meta *meta = zram->meta;
	unsigned long handle;
	size_t size;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	handle = meta->table[index].handle;
	size = zram_get_obj_size(meta, index);

	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		clear_page(mem);
		return 0;
	}

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
	if (size == PAGE_SIZE)
		copy_page(mem, cmem);
	else
		ret = zcomp_decompress(zram->comp, cmem, size, mem);
	zs_unmap_object(meta->mem_pool, handle);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		return ret;
	}

	return 0;
}

static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
			  u32 index, int offset, struct bio *bio)
{
	int ret;
	struct page *page;
	unsigned char *user_mem, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	page = bvec->bv_page;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	if (unlikely(!meta->table[index].handle) ||
			zram_test_flag(meta, index, ZRAM_ZERO)) {
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		handle_zero_page(bvec);
		return 0;
	}
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	if (is_partial_io(bvec))
		/* Use a temporary buffer to decompress the page */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);

	user_mem = kmap_atomic(page);
	if (!is_partial_io(bvec))
		uncmem = user_mem;

	if (!uncmem) {
		pr_info("Unable to allocate temp memory\n");
		ret = -ENOMEM;
		goto out_cleanup;
	}

	ret = zram_decompress_page(zram, uncmem, index);
	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret))
		goto out_cleanup;

	if (is_partial_io(bvec))
		memcpy(user_mem + bvec->bv_offset, uncmem + offset,
				bvec->bv_len);

	flush_dcache_page(page);
	ret = 0;
out_cleanup:
	kunmap_atomic(user_mem);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

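/*
 * Lock-free monotonic maximum: retry atomic_long_cmpxchg() until the
 * stored max is >= @pages or @pages has been installed.
 */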
static inline void update_used_max(struct zram *zram,
					const unsigned long pages)
{
	int old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);

	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(
				&zram->stats.max_used_pages, cur_max, pages);
	} while (old_max != cur_max);
}

static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
			   int offset)
{
	int ret = 0;
	size_t clen;
	unsigned long handle;
	struct page *page;
	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
	struct zram_meta *meta = zram->meta;
	struct zcomp_strm *zstrm;
	bool locked = false;
	unsigned long alloced_pages;

	page = bvec->bv_page;
	if (is_partial_io(bvec)) {
		/*
		 * This is a partial IO. We need to read the full page
		 * before writing the changes.
		 */
		uncmem = kmalloc(PAGE_SIZE, GFP_NOIO);
		if (!uncmem) {
			ret = -ENOMEM;
			goto out;
		}
		ret = zram_decompress_page(zram, uncmem, index);
		if (ret)
			goto out;
	}

	zstrm = zcomp_strm_find(zram->comp);
	locked = true;
	user_mem = kmap_atomic(page);

	if (is_partial_io(bvec)) {
		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
		       bvec->bv_len);
		kunmap_atomic(user_mem);
		user_mem = NULL;
	} else {
		uncmem = user_mem;
	}

	if (page_zero_filled(uncmem)) {
		kunmap_atomic(user_mem);
		/* Free memory associated with this sector now. */
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		zram_set_flag(meta, index, ZRAM_ZERO);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

		atomic64_inc(&zram->stats.zero_pages);
		ret = 0;
		goto out;
	}

	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
	if (!is_partial_io(bvec)) {
		kunmap_atomic(user_mem);
		user_mem = NULL;
		uncmem = NULL;
	}

	if (unlikely(ret)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}
	src = zstrm->buffer;
	if (unlikely(clen > max_zpage_size)) {
		clen = PAGE_SIZE;
		if (is_partial_io(bvec))
			src = uncmem;
	}

	handle = zs_malloc(meta->mem_pool, clen);
	if (!handle) {
		pr_info("Error allocating memory for compressed page: %u, size=%zu\n",
			index, clen);
		ret = -ENOMEM;
		goto out;
	}

	alloced_pages = zs_get_total_pages(meta->mem_pool);
	if (zram->limit_pages && alloced_pages > zram->limit_pages) {
		zs_free(meta->mem_pool, handle);
		ret = -ENOMEM;
		goto out;
	}

	update_used_max(zram, alloced_pages);

	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_WO);

	if ((clen == PAGE_SIZE) && !is_partial_io(bvec)) {
		src = kmap_atomic(page);
		copy_page(cmem, src);
		kunmap_atomic(src);
	} else {
		memcpy(cmem, src, clen);
	}

	zcomp_strm_release(zram->comp, zstrm);
	locked = false;
	zs_unmap_object(meta->mem_pool, handle);

	/*
	 * Free memory associated with this sector
	 * before overwriting unused sectors.
	 */
	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);

	meta->table[index].handle = handle;
	zram_set_obj_size(meta, index, clen);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);

	/* Update stats */
	atomic64_add(clen, &zram->stats.compr_data_size);
	atomic64_inc(&zram->stats.pages_stored);
out:
	if (locked)
		zcomp_strm_release(zram->comp, zstrm);
	if (is_partial_io(bvec))
		kfree(uncmem);
	return ret;
}

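/* dispatch one single-page bio vector to the read or write path */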
static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
			int offset, struct bio *bio)
{
	int ret;
	int rw = bio_data_dir(bio);

	if (rw == READ) {
		atomic64_inc(&zram->stats.num_reads);
		ret = zram_bvec_read(zram, bvec, index, offset, bio);
	} else {
		atomic64_inc(&zram->stats.num_writes);
		ret = zram_bvec_write(zram, bvec, index, offset);
	}

	if (unlikely(ret)) {
		if (rw == READ)
			atomic64_inc(&zram->stats.failed_reads);
		else
			atomic64_inc(&zram->stats.failed_writes);
	}

	return ret;
}

/*
 * zram_bio_discard - handler on discard request
 * @index: physical block index in PAGE_SIZE units
 * @offset: byte offset within physical block
 */
static void zram_bio_discard(struct zram *zram, u32 index,
			     int offset, struct bio *bio)
{
	size_t n = bio->bi_iter.bi_size;
	struct zram_meta *meta = zram->meta;

	/*
	 * zram manages data in physical block size units. Because logical block
	 * size isn't identical with physical block size on some arch, we
	 * could get a discard request pointing to a specific offset within a
	 * certain physical block.  Although we can handle this request by
	 * reading that physical block and decompressing and partially zeroing
	 * and re-compressing and then re-storing it, this isn't reasonable
	 * because our intent with a discard request is to save memory.  So
	 * skipping this logical block is appropriate here.
	 */
	if (offset) {
		if (n <= (PAGE_SIZE - offset))
			return;

		n -= (PAGE_SIZE - offset);
		index++;
	}

	while (n >= PAGE_SIZE) {
		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
		zram_free_page(zram, index);
		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
		atomic64_inc(&zram->stats.notify_free);
		index++;
		n -= PAGE_SIZE;
	}
}

static void zram_reset_device(struct zram *zram, bool reset_capacity)
{
	size_t index;
	struct zram_meta *meta;

	down_write(&zram->init_lock);

	zram->limit_pages = 0;

	if (!init_done(zram)) {
		up_write(&zram->init_lock);
		return;
	}

	meta = zram->meta;
	/* Free all pages that are still in this zram device */
	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
		unsigned long handle = meta->table[index].handle;
		if (!handle)
			continue;

		zs_free(meta->mem_pool, handle);
	}

	zcomp_destroy(zram->comp);
	zram->max_comp_streams = 1;

	zram_meta_free(zram->meta);
	zram->meta = NULL;
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram->disksize = 0;
	if (reset_capacity)
		set_capacity(zram->disk, 0);

	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	if (reset_capacity)
		revalidate_disk(zram->disk);
}

static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	u64 disksize;
	struct zcomp *comp;
	struct zram_meta *meta;
	struct zram *zram = dev_to_zram(dev);
	int err;

	disksize = memparse(buf, NULL);
	if (!disksize)
		return -EINVAL;

	disksize = PAGE_ALIGN(disksize);
	meta = zram_meta_alloc(disksize);
	if (!meta)
		return -ENOMEM;

	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
	if (IS_ERR(comp)) {
		pr_info("Cannot initialise %s compressing backend\n",
				zram->compressor);
		err = PTR_ERR(comp);
		goto out_free_meta;
	}

	down_write(&zram->init_lock);
	if (init_done(zram)) {
		pr_info("Cannot change disksize for initialized device\n");
		err = -EBUSY;
		goto out_destroy_comp;
	}

	zram->meta = meta;
	zram->comp = comp;
	zram->disksize = disksize;
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	/*
	 * Revalidate disk out of the init_lock to avoid lockdep splat.
	 * It's okay because disk's capacity is protected by init_lock
	 * so that revalidate_disk always sees up-to-date capacity.
	 */
	revalidate_disk(zram->disk);

	return len;

out_destroy_comp:
	up_write(&zram->init_lock);
	zcomp_destroy(comp);
out_free_meta:
	zram_meta_free(meta);
	return err;
}

static ssize_t reset_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	unsigned short do_reset;
	struct zram *zram;
	struct block_device *bdev;

	zram = dev_to_zram(dev);
	bdev = bdget_disk(zram->disk, 0);

	if (!bdev)
		return -ENOMEM;

	/* Do not reset an active device! */
	if (bdev->bd_holders) {
		ret = -EBUSY;
		goto out;
	}

	ret = kstrtou16(buf, 10, &do_reset);
	if (ret)
		goto out;

	if (!do_reset) {
		ret = -EINVAL;
		goto out;
	}

	/* Make sure all pending I/O is finished */
	fsync_bdev(bdev);
	bdput(bdev);

	zram_reset_device(zram, true);
	return len;

out:
	bdput(bdev);
	return ret;
}

static void __zram_make_request(struct zram *zram, struct bio *bio)
{
	int offset;
	u32 index;
	struct bio_vec bvec;
	struct bvec_iter iter;

	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
	offset = (bio->bi_iter.bi_sector &
		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
		zram_bio_discard(zram, index, offset, bio);
		bio_endio(bio, 0);
		return;
	}

	bio_for_each_segment(bvec, bio, iter) {
		int max_transfer_size = PAGE_SIZE - offset;

		if (bvec.bv_len > max_transfer_size) {
			/*
			 * zram_bvec_rw() can only operate on a single
			 * zram page. Split the bio vector.
			 */
			struct bio_vec bv;

			bv.bv_page = bvec.bv_page;
			bv.bv_len = max_transfer_size;
			bv.bv_offset = bvec.bv_offset;

			if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
				goto out;

			bv.bv_len = bvec.bv_len - max_transfer_size;
			bv.bv_offset += max_transfer_size;
			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
				goto out;
		} else
			if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
				goto out;

		update_position(&index, &offset, &bvec);
	}

	set_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_endio(bio, 0);
	return;

out:
	bio_io_error(bio);
}

/*
 * Handler function for all zram I/O requests.
 */
static void zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	down_read(&zram->init_lock);
	if (unlikely(!init_done(zram)))
		goto error;

	if (!valid_io_request(zram, bio)) {
		atomic64_inc(&zram->stats.invalid_io);
		goto error;
	}

	__zram_make_request(zram, bio);
	up_read(&zram->init_lock);

	return;

error:
	up_read(&zram->init_lock);
	bio_io_error(bio);
}

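/*
 * Called by the swap subsystem when a swap slot on this device is
 * freed, letting zram drop the backing object immediately.
 */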
static void zram_slot_free_notify(struct block_device *bdev,
				unsigned long index)
{
	struct zram *zram;
	struct zram_meta *meta;

	zram = bdev->bd_disk->private_data;
	meta = zram->meta;

	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
	zram_free_page(zram, index);
	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
	atomic64_inc(&zram->stats.notify_free);
}

static const struct block_device_operations zram_devops = {
	.swap_slot_free_notify = zram_slot_free_notify,
	.owner = THIS_MODULE
};

static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
static DEVICE_ATTR(mem_limit, S_IRUGO | S_IWUSR, mem_limit_show,
		mem_limit_store);
static DEVICE_ATTR(mem_used_max, S_IRUGO | S_IWUSR, mem_used_max_show,
		mem_used_max_store);
static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
		max_comp_streams_show, max_comp_streams_store);
static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
		comp_algorithm_show, comp_algorithm_store);

ZRAM_ATTR_RO(num_reads);
ZRAM_ATTR_RO(num_writes);
ZRAM_ATTR_RO(failed_reads);
ZRAM_ATTR_RO(failed_writes);
ZRAM_ATTR_RO(invalid_io);
ZRAM_ATTR_RO(notify_free);
ZRAM_ATTR_RO(zero_pages);
ZRAM_ATTR_RO(compr_data_size);

static struct attribute *zram_disk_attrs[] = {
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,
	&dev_attr_num_reads.attr,
	&dev_attr_num_writes.attr,
	&dev_attr_failed_reads.attr,
	&dev_attr_failed_writes.attr,
	&dev_attr_invalid_io.attr,
	&dev_attr_notify_free.attr,
	&dev_attr_zero_pages.attr,
	&dev_attr_orig_data_size.attr,
	&dev_attr_compr_data_size.attr,
	&dev_attr_mem_used_total.attr,
	&dev_attr_mem_limit.attr,
	&dev_attr_mem_used_max.attr,
	&dev_attr_max_comp_streams.attr,
	&dev_attr_comp_algorithm.attr,
	NULL,
};

static struct attribute_group zram_disk_attr_group = {
	.attrs = zram_disk_attrs,
};

static int create_device(struct zram *zram, int device_id)
{
	int ret = -ENOMEM;

	init_rwsem(&zram->init_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
	if (!zram->queue) {
		pr_err("Error allocating disk queue for device %d\n",
			device_id);
		goto out;
	}

	blk_queue_make_request(zram->queue, zram_make_request);
	zram->queue->queuedata = zram;

	 /* gendisk structure */
	zram->disk = alloc_disk(1);
	if (!zram->disk) {
		pr_warn("Error allocating disk structure for device %d\n",
			device_id);
		goto out_free_queue;
	}

	zram->disk->major = zram_major;
	zram->disk->first_minor = device_id;
	zram->disk->fops = &zram_devops;
	zram->disk->queue = zram->queue;
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/* Actual capacity set using sysfs (/sys/block/zram<id>/disksize) */
	set_capacity(zram->disk, 0);
	/* zram devices sort of resemble non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zram->disk->queue);
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZE sized I/O requests.
	 */
	blk_queue_physical_block_size(zram->disk->queue, PAGE_SIZE);
	blk_queue_logical_block_size(zram->disk->queue,
					ZRAM_LOGICAL_BLOCK_SIZE);
	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
	/*
	 * zram_bio_discard() will clear all logical blocks if logical block
	 * size is identical with physical block size(PAGE_SIZE). But if it is
	 * different, we will skip discarding some parts of logical blocks in
	 * the part of the request range which isn't aligned to physical block
	 * size.  So we can't ensure that all discarded logical blocks are
	 * zeroed.
	 */
	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
		zram->disk->queue->limits.discard_zeroes_data = 1;
	else
		zram->disk->queue->limits.discard_zeroes_data = 0;
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);

	add_disk(zram->disk);

	ret = sysfs_create_group(&disk_to_dev(zram->disk)->kobj,
				&zram_disk_attr_group);
	if (ret < 0) {
		pr_warn("Error creating sysfs group");
		goto out_free_disk;
	}
	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
	zram->meta = NULL;
	zram->max_comp_streams = 1;
	return 0;

out_free_disk:
	del_gendisk(zram->disk);
	put_disk(zram->disk);
out_free_queue:
	blk_cleanup_queue(zram->queue);
out:
	return ret;
}

static void destroy_device(struct zram *zram)
{
	sysfs_remove_group(&disk_to_dev(zram->disk)->kobj,
			&zram_disk_attr_group);

	del_gendisk(zram->disk);
	put_disk(zram->disk);

	blk_cleanup_queue(zram->queue);
}

static int __init zram_init(void)
{
	int ret, dev_id;

	if (num_devices > max_num_devices) {
		pr_warn("Invalid value for num_devices: %u\n",
				num_devices);
		ret = -EINVAL;
		goto out;
	}

	zram_major = register_blkdev(0, "zram");
	if (zram_major <= 0) {
		pr_warn("Unable to get major number\n");
		ret = -EBUSY;
		goto out;
	}

	/* Allocate the device array and initialize each one */
	zram_devices = kzalloc(num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}

	for (dev_id = 0; dev_id < num_devices; dev_id++) {
		ret = create_device(&zram_devices[dev_id], dev_id);
		if (ret)
			goto free_devices;
	}

	pr_info("Created %u device(s) ...\n", num_devices);

	return 0;

free_devices:
	while (dev_id)
		destroy_device(&zram_devices[--dev_id]);
	kfree(zram_devices);
unregister:
	unregister_blkdev(zram_major, "zram");
out:
	return ret;
}

static void __exit zram_exit(void)
{
	int i;
	struct zram *zram;

	for (i = 0; i < num_devices; i++) {
		zram = &zram_devices[i];

		destroy_device(zram);
		/*
		 * Shouldn't access zram->disk after destroy_device
		 * because destroy_device already released zram->disk.
		 */
		zram_reset_device(zram, false);
	}

	unregister_blkdev(zram_major, "zram");

	kfree(zram_devices);
	pr_debug("Cleanup done!\n");
}

module_init(zram_init);
module_exit(zram_exit);

module_param(num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
MODULE_DESCRIPTION("Compressed RAM Block Device");
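
/*
 * Typical userspace setup, a sketch rather than part of this file: the
 * "lz4" backend is an assumption and only works when its zcomp module
 * is built; comp_algorithm and max_comp_streams must be set before
 * disksize, since both stores return -EBUSY on an initialized device.
 *
 *   modprobe zram num_devices=1
 *   echo 3 > /sys/block/zram0/max_comp_streams
 *   echo lz4 > /sys/block/zram0/comp_algorithm
 *   echo 512M > /sys/block/zram0/disksize
 *   mkswap /dev/zram0 && swapon /dev/zram0
 *   echo 1 > /sys/block/zram0/reset	(tears the device back down)
 */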