zsmalloc.c 59.3 KB
Newer Older
1 2 3 4
/*
 * zsmalloc memory allocator
 *
 * Copyright (C) 2011  Nitin Gupta
M
Minchan Kim 已提交
5
 * Copyright (C) 2012, 2013 Minchan Kim
6 7 8 9 10 11 12 13
 *
 * This code is released using a dual license strategy: BSD/GPL
 * You can choose the license that better fits your requirements.
 *
 * Released under the terms of 3-clause BSD License
 * Released under the terms of GNU General Public License Version 2.0
 */

N
Nitin Gupta 已提交
14 15 16 17 18
/*
 * Following is how we use various fields and flags of underlying
 * struct page(s) to form a zspage.
 *
 * Usage of struct page fields:
19
 *	page->private: points to zspage
M
Minchan Kim 已提交
20 21 22
 *	page->freelist(index): links together all component pages of a zspage
 *		For the huge page, this is always 0, so we use this field
 *		to store handle.
23
 *	page->units: first object offset in a subpage of zspage
N
Nitin Gupta 已提交
24 25 26
 *
 * Usage of struct page flags:
 *	PG_private: identifies the first component page
X
Xishi Qiu 已提交
27
 *	PG_owner_priv_1: identifies the huge component page
N
Nitin Gupta 已提交
28 29 30
 *
 */

31 32
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

33 34
#include <linux/module.h>
#include <linux/kernel.h>
M
Minchan Kim 已提交
35
#include <linux/sched.h>
36
#include <linux/magic.h>
37 38 39 40 41 42 43 44 45
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
46
#include <linux/vmalloc.h>
47
#include <linux/preempt.h>
48
#include <linux/spinlock.h>
49
#include <linux/shrinker.h>
50
#include <linux/types.h>
51
#include <linux/debugfs.h>
M
Minchan Kim 已提交
52
#include <linux/zsmalloc.h>
53
#include <linux/zpool.h>
M
Minchan Kim 已提交
54
#include <linux/mount.h>
55
#include <linux/migrate.h>
M
Minchan Kim 已提交
56
#include <linux/pagemap.h>
S
Sergey Senozhatsky 已提交
57
#include <linux/fs.h>
M
Minchan Kim 已提交
58 59

#define ZSPAGE_MAGIC	0x58
60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75

/*
 * This must be power of 2 and greater than of equal to sizeof(link_free).
 * These two conditions ensure that any 'struct link_free' itself doesn't
 * span more than 1 page which avoids complex case of mapping 2 pages simply
 * to restore link_free pointer values.
 */
#define ZS_ALIGN		8

/*
 * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
 * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
 */
#define ZS_MAX_ZSPAGE_ORDER 2
#define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)

76 77
#define ZS_HANDLE_SIZE (sizeof(unsigned long))

78 79
/*
 * Object location (<PFN>, <obj_idx>) is encoded as
N
Nitin Cupta 已提交
80
 * as single (unsigned long) handle value.
81
 *
M
Minchan Kim 已提交
82
 * Note that object index <obj_idx> starts from 0.
83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98
 *
 * This is made more complicated by various memory models and PAE.
 */

#ifndef MAX_PHYSMEM_BITS
#ifdef CONFIG_HIGHMEM64G
#define MAX_PHYSMEM_BITS 36
#else /* !CONFIG_HIGHMEM64G */
/*
 * If this definition of MAX_PHYSMEM_BITS is used, OBJ_INDEX_BITS will just
 * be PAGE_SHIFT
 */
#define MAX_PHYSMEM_BITS BITS_PER_LONG
#endif
#endif
#define _PFN_BITS		(MAX_PHYSMEM_BITS - PAGE_SHIFT)
M
Minchan Kim 已提交
99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118

/*
 * Memory for allocating for handle keeps object position by
 * encoding <page, obj_idx> and the encoded value has a room
 * in least bit(ie, look at obj_to_location).
 * We use the bit to synchronize between object access by
 * user and migration.
 */
#define HANDLE_PIN_BIT	0

/*
 * Head in allocated object should have OBJ_ALLOCATED_TAG
 * to identify the object was allocated or not.
 * It's okay to add the status bit in the least bit because
 * header keeps handle which is 4byte-aligned address so we
 * have room for two bit at least.
 */
#define OBJ_ALLOCATED_TAG 1
#define OBJ_TAG_BITS 1
#define OBJ_INDEX_BITS	(BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS)
119 120
#define OBJ_INDEX_MASK	((_AC(1, UL) << OBJ_INDEX_BITS) - 1)

121 122 123 124 125
#define FULLNESS_BITS	2
#define CLASS_BITS	8
#define ISOLATED_BITS	3
#define MAGIC_VAL_BITS	8

126 127 128 129
#define MAX(a, b) ((a) >= (b) ? (a) : (b))
/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
#define ZS_MIN_ALLOC_SIZE \
	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
130
/* each chunk includes extra space to keep handle */
131
#define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
132 133

/*
134
 * On systems with 4K page size, this gives 255 size classes! There is a
135 136 137 138 139 140 141 142 143 144 145
 * trader-off here:
 *  - Large number of size classes is potentially wasteful as free page are
 *    spread across these classes
 *  - Small number of size classes causes large internal fragmentation
 *  - Probably its better to use specific size classes (empirically
 *    determined). NOTE: all those class sizes must be set as multiple of
 *    ZS_ALIGN to make sure link_free itself never has to span 2 pages.
 *
 *  ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
 *  (reason above)
 */
146
#define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
147 148
#define ZS_SIZE_CLASSES	(DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
				      ZS_SIZE_CLASS_DELTA) + 1)
149 150 151

enum fullness_group {
	ZS_EMPTY,
M
Minchan Kim 已提交
152 153 154 155
	ZS_ALMOST_EMPTY,
	ZS_ALMOST_FULL,
	ZS_FULL,
	NR_ZS_FULLNESS,
156 157
};

158
enum zs_stat_type {
M
Minchan Kim 已提交
159 160 161 162
	CLASS_EMPTY,
	CLASS_ALMOST_EMPTY,
	CLASS_ALMOST_FULL,
	CLASS_FULL,
163 164
	OBJ_ALLOCATED,
	OBJ_USED,
M
Minchan Kim 已提交
165
	NR_ZS_STAT_TYPE,
166 167 168 169 170 171
};

struct zs_size_stat {
	unsigned long objs[NR_ZS_STAT_TYPE];
};

172 173
#ifdef CONFIG_ZSMALLOC_STAT
static struct dentry *zs_stat_root;
174 175
#endif

M
Minchan Kim 已提交
176 177 178 179
#ifdef CONFIG_COMPACTION
static struct vfsmount *zsmalloc_mnt;
#endif

180 181 182 183 184
/*
 * We assign a page to ZS_ALMOST_EMPTY fullness group when:
 *	n <= N / f, where
 * n = number of allocated objects
 * N = total number of objects zspage can store
185
 * f = fullness_threshold_frac
186 187 188 189 190 191 192 193 194 195 196
 *
 * Similarly, we assign zspage to:
 *	ZS_ALMOST_FULL	when n > N / f
 *	ZS_EMPTY	when n == 0
 *	ZS_FULL		when n == N
 *
 * (see: fix_fullness_group())
 */
static const int fullness_threshold_frac = 4;

struct size_class {
197
	spinlock_t lock;
M
Minchan Kim 已提交
198
	struct list_head fullness_list[NR_ZS_FULLNESS];
199 200 201 202 203
	/*
	 * Size of objects stored in this class. Must be multiple
	 * of ZS_ALIGN.
	 */
	int size;
204
	int objs_per_zspage;
205 206
	/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
	int pages_per_zspage;
M
Minchan Kim 已提交
207 208 209

	unsigned int index;
	struct zs_size_stat stats;
210 211
};

M
Minchan Kim 已提交
212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227
/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
static void SetPageHugeObject(struct page *page)
{
	SetPageOwnerPriv1(page);
}

static void ClearPageHugeObject(struct page *page)
{
	ClearPageOwnerPriv1(page);
}

static int PageHugeObject(struct page *page)
{
	return PageOwnerPriv1(page);
}

228 229
/*
 * Placed within free objects to form a singly linked list.
230
 * For every zspage, zspage->freeobj gives head of this list.
231 232 233 234
 *
 * This must be power of 2 and less than or equal to ZS_ALIGN
 */
struct link_free {
235 236
	union {
		/*
M
Minchan Kim 已提交
237
		 * Free object index;
238 239
		 * It's valid for non-allocated object
		 */
M
Minchan Kim 已提交
240
		unsigned long next;
241 242 243 244 245
		/*
		 * Handle of allocated object.
		 */
		unsigned long handle;
	};
246 247 248
};

struct zs_pool {
249
	const char *name;
250

251
	struct size_class *size_class[ZS_SIZE_CLASSES];
252
	struct kmem_cache *handle_cachep;
253
	struct kmem_cache *zspage_cachep;
254

255
	atomic_long_t pages_allocated;
256

257
	struct zs_pool_stats stats;
258 259 260

	/* Compact classes */
	struct shrinker shrinker;
261

262 263 264
#ifdef CONFIG_ZSMALLOC_STAT
	struct dentry *stat_dentry;
#endif
M
Minchan Kim 已提交
265 266 267 268
#ifdef CONFIG_COMPACTION
	struct inode *inode;
	struct work_struct free_work;
#endif
269
};
270

271 272 273
struct zspage {
	struct {
		unsigned int fullness:FULLNESS_BITS;
M
Minchan Kim 已提交
274
		unsigned int class:CLASS_BITS + 1;
M
Minchan Kim 已提交
275 276
		unsigned int isolated:ISOLATED_BITS;
		unsigned int magic:MAGIC_VAL_BITS;
277 278
	};
	unsigned int inuse;
M
Minchan Kim 已提交
279
	unsigned int freeobj;
280 281
	struct page *first_page;
	struct list_head list; /* fullness list */
M
Minchan Kim 已提交
282 283 284
#ifdef CONFIG_COMPACTION
	rwlock_t lock;
#endif
285
};
286

287
struct mapping_area {
288
#ifdef CONFIG_PGTABLE_MAPPING
289 290 291 292 293 294 295 296
	struct vm_struct *vm; /* vm area for mapping object that span pages */
#else
	char *vm_buf; /* copy buffer for objects that span pages */
#endif
	char *vm_addr; /* address of kmap_atomic()'ed pages */
	enum zs_mapmode vm_mm; /* mapping mode */
};

M
Minchan Kim 已提交
297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318
#ifdef CONFIG_COMPACTION
static int zs_register_migration(struct zs_pool *pool);
static void zs_unregister_migration(struct zs_pool *pool);
static void migrate_lock_init(struct zspage *zspage);
static void migrate_read_lock(struct zspage *zspage);
static void migrate_read_unlock(struct zspage *zspage);
static void kick_deferred_free(struct zs_pool *pool);
static void init_deferred_free(struct zs_pool *pool);
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
#else
static int zsmalloc_mount(void) { return 0; }
static void zsmalloc_unmount(void) {}
static int zs_register_migration(struct zs_pool *pool) { return 0; }
static void zs_unregister_migration(struct zs_pool *pool) {}
static void migrate_lock_init(struct zspage *zspage) {}
static void migrate_read_lock(struct zspage *zspage) {}
static void migrate_read_unlock(struct zspage *zspage) {}
static void kick_deferred_free(struct zs_pool *pool) {}
static void init_deferred_free(struct zs_pool *pool) {}
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
#endif

319
static int create_cache(struct zs_pool *pool)
320 321 322
{
	pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
					0, 0, NULL);
323 324 325 326 327 328 329 330 331 332 333 334
	if (!pool->handle_cachep)
		return 1;

	pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
					0, 0, NULL);
	if (!pool->zspage_cachep) {
		kmem_cache_destroy(pool->handle_cachep);
		pool->handle_cachep = NULL;
		return 1;
	}

	return 0;
335 336
}

337
static void destroy_cache(struct zs_pool *pool)
338
{
339
	kmem_cache_destroy(pool->handle_cachep);
340
	kmem_cache_destroy(pool->zspage_cachep);
341 342
}

343
static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
344 345
{
	return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
M
Minchan Kim 已提交
346
			gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
347 348
}

349
static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
350 351 352 353
{
	kmem_cache_free(pool->handle_cachep, (void *)handle);
}

354 355
static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
{
M
Minchan Kim 已提交
356 357
	return kmem_cache_alloc(pool->zspage_cachep,
			flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
X
Xishi Qiu 已提交
358
}
359 360 361 362 363 364

static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
{
	kmem_cache_free(pool->zspage_cachep, zspage);
}

365 366
static void record_obj(unsigned long handle, unsigned long obj)
{
367 368 369 370 371 372
	/*
	 * lsb of @obj represents handle lock while other bits
	 * represent object value the handle is pointing so
	 * updating shouldn't do store tearing.
	 */
	WRITE_ONCE(*(unsigned long *)handle, obj);
373 374
}

375 376 377 378
/* zpool driver */

#ifdef CONFIG_ZPOOL

379
static void *zs_zpool_create(const char *name, gfp_t gfp,
380
			     const struct zpool_ops *zpool_ops,
D
Dan Streetman 已提交
381
			     struct zpool *zpool)
382
{
383 384 385 386 387 388
	/*
	 * Ignore global gfp flags: zs_malloc() may be invoked from
	 * different contexts and its caller must provide a valid
	 * gfp mask.
	 */
	return zs_create_pool(name);
389 390 391 392 393 394 395 396 397 398
}

static void zs_zpool_destroy(void *pool)
{
	zs_destroy_pool(pool);
}

static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
			unsigned long *handle)
{
399
	*handle = zs_malloc(pool, size, gfp);
400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439
	return *handle ? 0 : -1;
}
static void zs_zpool_free(void *pool, unsigned long handle)
{
	zs_free(pool, handle);
}

static int zs_zpool_shrink(void *pool, unsigned int pages,
			unsigned int *reclaimed)
{
	return -EINVAL;
}

static void *zs_zpool_map(void *pool, unsigned long handle,
			enum zpool_mapmode mm)
{
	enum zs_mapmode zs_mm;

	switch (mm) {
	case ZPOOL_MM_RO:
		zs_mm = ZS_MM_RO;
		break;
	case ZPOOL_MM_WO:
		zs_mm = ZS_MM_WO;
		break;
	case ZPOOL_MM_RW: /* fallthru */
	default:
		zs_mm = ZS_MM_RW;
		break;
	}

	return zs_map_object(pool, handle, zs_mm);
}
static void zs_zpool_unmap(void *pool, unsigned long handle)
{
	zs_unmap_object(pool, handle);
}

static u64 zs_zpool_total_size(void *pool)
{
440
	return zs_get_total_pages(pool) << PAGE_SHIFT;
441 442 443 444 445 446 447 448 449 450 451 452 453 454 455
}

static struct zpool_driver zs_zpool_driver = {
	.type =		"zsmalloc",
	.owner =	THIS_MODULE,
	.create =	zs_zpool_create,
	.destroy =	zs_zpool_destroy,
	.malloc =	zs_zpool_malloc,
	.free =		zs_zpool_free,
	.shrink =	zs_zpool_shrink,
	.map =		zs_zpool_map,
	.unmap =	zs_zpool_unmap,
	.total_size =	zs_zpool_total_size,
};

456
MODULE_ALIAS("zpool-zsmalloc");
457 458
#endif /* CONFIG_ZPOOL */

459 460 461
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);

M
Minchan Kim 已提交
462 463 464 465 466
static bool is_zspage_isolated(struct zspage *zspage)
{
	return zspage->isolated;
}

467
static __maybe_unused int is_first_page(struct page *page)
468
{
469
	return PagePrivate(page);
470 471
}

M
Minchan Kim 已提交
472
/* Protected by class->lock */
473
static inline int get_zspage_inuse(struct zspage *zspage)
M
Minchan Kim 已提交
474
{
475
	return zspage->inuse;
M
Minchan Kim 已提交
476 477
}

478
static inline void set_zspage_inuse(struct zspage *zspage, int val)
M
Minchan Kim 已提交
479
{
480
	zspage->inuse = val;
M
Minchan Kim 已提交
481 482
}

483
static inline void mod_zspage_inuse(struct zspage *zspage, int val)
M
Minchan Kim 已提交
484
{
485
	zspage->inuse += val;
M
Minchan Kim 已提交
486 487
}

M
Minchan Kim 已提交
488
static inline struct page *get_first_page(struct zspage *zspage)
M
Minchan Kim 已提交
489
{
M
Minchan Kim 已提交
490
	struct page *first_page = zspage->first_page;
491

M
Minchan Kim 已提交
492 493
	VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
	return first_page;
M
Minchan Kim 已提交
494 495
}

M
Minchan Kim 已提交
496
static inline int get_first_obj_offset(struct page *page)
M
Minchan Kim 已提交
497
{
M
Minchan Kim 已提交
498 499
	return page->units;
}
500

M
Minchan Kim 已提交
501 502 503
static inline void set_first_obj_offset(struct page *page, int offset)
{
	page->units = offset;
M
Minchan Kim 已提交
504 505
}

M
Minchan Kim 已提交
506
static inline unsigned int get_freeobj(struct zspage *zspage)
M
Minchan Kim 已提交
507
{
M
Minchan Kim 已提交
508
	return zspage->freeobj;
M
Minchan Kim 已提交
509 510
}

M
Minchan Kim 已提交
511
static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
M
Minchan Kim 已提交
512
{
M
Minchan Kim 已提交
513
	zspage->freeobj = obj;
M
Minchan Kim 已提交
514 515
}

516
static void get_zspage_mapping(struct zspage *zspage,
517
				unsigned int *class_idx,
518 519
				enum fullness_group *fullness)
{
M
Minchan Kim 已提交
520 521
	BUG_ON(zspage->magic != ZSPAGE_MAGIC);

522 523
	*fullness = zspage->fullness;
	*class_idx = zspage->class;
524 525
}

526
static void set_zspage_mapping(struct zspage *zspage,
527
				unsigned int class_idx,
528 529
				enum fullness_group fullness)
{
530 531
	zspage->class = class_idx;
	zspage->fullness = fullness;
532 533
}

N
Nitin Cupta 已提交
534 535 536 537 538 539 540
/*
 * zsmalloc divides the pool into various size classes where each
 * class maintains a list of zspages where each zspage is divided
 * into equal sized chunks. Each allocation falls into one of these
 * classes depending on its size. This function returns index of the
 * size class which has chunk size big enough to hold the give size.
 */
541 542 543 544 545 546 547 548
static int get_size_class_index(int size)
{
	int idx = 0;

	if (likely(size > ZS_MIN_ALLOC_SIZE))
		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
				ZS_SIZE_CLASS_DELTA);

549
	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
550 551
}

552
/* type can be of enum type zs_stat_type or fullness_group */
M
Minchan Kim 已提交
553
static inline void zs_stat_inc(struct size_class *class,
554
				int type, unsigned long cnt)
M
Minchan Kim 已提交
555
{
M
Minchan Kim 已提交
556
	class->stats.objs[type] += cnt;
M
Minchan Kim 已提交
557 558
}

559
/* type can be of enum type zs_stat_type or fullness_group */
M
Minchan Kim 已提交
560
static inline void zs_stat_dec(struct size_class *class,
561
				int type, unsigned long cnt)
M
Minchan Kim 已提交
562
{
M
Minchan Kim 已提交
563
	class->stats.objs[type] -= cnt;
M
Minchan Kim 已提交
564 565
}

566
/* type can be of enum type zs_stat_type or fullness_group */
M
Minchan Kim 已提交
567
static inline unsigned long zs_stat_get(struct size_class *class,
568
				int type)
M
Minchan Kim 已提交
569
{
M
Minchan Kim 已提交
570
	return class->stats.objs[type];
M
Minchan Kim 已提交
571 572
}

573 574
#ifdef CONFIG_ZSMALLOC_STAT

575
static void __init zs_stat_init(void)
M
Minchan Kim 已提交
576
{
577 578 579 580
	if (!debugfs_initialized()) {
		pr_warn("debugfs not available, stat dir not created\n");
		return;
	}
M
Minchan Kim 已提交
581 582 583

	zs_stat_root = debugfs_create_dir("zsmalloc", NULL);
	if (!zs_stat_root)
584
		pr_warn("debugfs 'zsmalloc' stat dir creation failed\n");
M
Minchan Kim 已提交
585 586 587 588 589 590 591
}

static void __exit zs_stat_exit(void)
{
	debugfs_remove_recursive(zs_stat_root);
}

592 593
static unsigned long zs_can_compact(struct size_class *class);

M
Minchan Kim 已提交
594 595 596 597 598 599 600
static int zs_stats_size_show(struct seq_file *s, void *v)
{
	int i;
	struct zs_pool *pool = s->private;
	struct size_class *class;
	int objs_per_zspage;
	unsigned long class_almost_full, class_almost_empty;
601
	unsigned long obj_allocated, obj_used, pages_used, freeable;
M
Minchan Kim 已提交
602 603
	unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
	unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
604
	unsigned long total_freeable = 0;
M
Minchan Kim 已提交
605

606
	seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
M
Minchan Kim 已提交
607 608
			"class", "size", "almost_full", "almost_empty",
			"obj_allocated", "obj_used", "pages_used",
609
			"pages_per_zspage", "freeable");
M
Minchan Kim 已提交
610

611
	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
M
Minchan Kim 已提交
612 613 614 615 616 617 618 619 620 621
		class = pool->size_class[i];

		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
		class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
		obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
		obj_used = zs_stat_get(class, OBJ_USED);
622
		freeable = zs_can_compact(class);
M
Minchan Kim 已提交
623 624
		spin_unlock(&class->lock);

625
		objs_per_zspage = class->objs_per_zspage;
M
Minchan Kim 已提交
626 627 628
		pages_used = obj_allocated / objs_per_zspage *
				class->pages_per_zspage;

629 630
		seq_printf(s, " %5u %5u %11lu %12lu %13lu"
				" %10lu %10lu %16d %8lu\n",
M
Minchan Kim 已提交
631 632
			i, class->size, class_almost_full, class_almost_empty,
			obj_allocated, obj_used, pages_used,
633
			class->pages_per_zspage, freeable);
M
Minchan Kim 已提交
634 635 636 637 638 639

		total_class_almost_full += class_almost_full;
		total_class_almost_empty += class_almost_empty;
		total_objs += obj_allocated;
		total_used_objs += obj_used;
		total_pages += pages_used;
640
		total_freeable += freeable;
M
Minchan Kim 已提交
641 642 643
	}

	seq_puts(s, "\n");
644
	seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
M
Minchan Kim 已提交
645 646
			"Total", "", total_class_almost_full,
			total_class_almost_empty, total_objs,
647
			total_used_objs, total_pages, "", total_freeable);
M
Minchan Kim 已提交
648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663

	return 0;
}

static int zs_stats_size_open(struct inode *inode, struct file *file)
{
	return single_open(file, zs_stats_size_show, inode->i_private);
}

static const struct file_operations zs_stat_size_ops = {
	.open           = zs_stats_size_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

664
static void zs_pool_stat_create(struct zs_pool *pool, const char *name)
M
Minchan Kim 已提交
665 666 667
{
	struct dentry *entry;

668 669
	if (!zs_stat_root) {
		pr_warn("no root stat dir, not creating <%s> stat dir\n", name);
670
		return;
671
	}
M
Minchan Kim 已提交
672 673 674 675

	entry = debugfs_create_dir(name, zs_stat_root);
	if (!entry) {
		pr_warn("debugfs dir <%s> creation failed\n", name);
676
		return;
M
Minchan Kim 已提交
677 678 679 680 681 682 683 684
	}
	pool->stat_dentry = entry;

	entry = debugfs_create_file("classes", S_IFREG | S_IRUGO,
			pool->stat_dentry, pool, &zs_stat_size_ops);
	if (!entry) {
		pr_warn("%s: debugfs file entry <%s> creation failed\n",
				name, "classes");
685 686
		debugfs_remove_recursive(pool->stat_dentry);
		pool->stat_dentry = NULL;
M
Minchan Kim 已提交
687 688 689 690 691 692 693 694 695
	}
}

static void zs_pool_stat_destroy(struct zs_pool *pool)
{
	debugfs_remove_recursive(pool->stat_dentry);
}

#else /* CONFIG_ZSMALLOC_STAT */
696
static void __init zs_stat_init(void)
M
Minchan Kim 已提交
697 698 699 700 701 702 703
{
}

static void __exit zs_stat_exit(void)
{
}

704
static inline void zs_pool_stat_create(struct zs_pool *pool, const char *name)
M
Minchan Kim 已提交
705 706 707 708 709 710 711 712
{
}

static inline void zs_pool_stat_destroy(struct zs_pool *pool)
{
}
#endif

M
Minchan Kim 已提交
713

N
Nitin Cupta 已提交
714 715 716 717 718 719 720
/*
 * For each size class, zspages are divided into different groups
 * depending on how "full" they are. This was done so that we could
 * easily find empty or nearly empty zspages when we try to shrink
 * the pool (not yet implemented). This function returns fullness
 * status of the given page.
 */
721
static enum fullness_group get_fullness_group(struct size_class *class,
722
						struct zspage *zspage)
723
{
724
	int inuse, objs_per_zspage;
725
	enum fullness_group fg;
M
Minchan Kim 已提交
726

727
	inuse = get_zspage_inuse(zspage);
728
	objs_per_zspage = class->objs_per_zspage;
729 730 731

	if (inuse == 0)
		fg = ZS_EMPTY;
732
	else if (inuse == objs_per_zspage)
733
		fg = ZS_FULL;
734
	else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac)
735 736 737 738 739 740 741
		fg = ZS_ALMOST_EMPTY;
	else
		fg = ZS_ALMOST_FULL;

	return fg;
}

N
Nitin Cupta 已提交
742 743 744 745 746 747
/*
 * Each size class maintains various freelists and zspages are assigned
 * to one of these freelists based on the number of live objects they
 * have. This functions inserts the given zspage into the freelist
 * identified by <class, fullness_group>.
 */
748
static void insert_zspage(struct size_class *class,
749 750
				struct zspage *zspage,
				enum fullness_group fullness)
751
{
752
	struct zspage *head;
753

M
Minchan Kim 已提交
754
	zs_stat_inc(class, fullness, 1);
755 756
	head = list_first_entry_or_null(&class->fullness_list[fullness],
					struct zspage, list);
757
	/*
758 759
	 * We want to see more ZS_FULL pages and less almost empty/full.
	 * Put pages with higher ->inuse first.
760
	 */
761 762 763 764 765 766 767
	if (head) {
		if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) {
			list_add(&zspage->list, &head->list);
			return;
		}
	}
	list_add(&zspage->list, &class->fullness_list[fullness]);
768 769
}

N
Nitin Cupta 已提交
770 771 772 773
/*
 * This function removes the given zspage from the freelist identified
 * by <class, fullness_group>.
 */
774
static void remove_zspage(struct size_class *class,
775 776
				struct zspage *zspage,
				enum fullness_group fullness)
777
{
778
	VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
M
Minchan Kim 已提交
779
	VM_BUG_ON(is_zspage_isolated(zspage));
780

781
	list_del_init(&zspage->list);
M
Minchan Kim 已提交
782
	zs_stat_dec(class, fullness, 1);
783 784
}

N
Nitin Cupta 已提交
785 786 787 788 789 790 791 792 793
/*
 * Each size class maintains zspages in different fullness groups depending
 * on the number of live objects they contain. When allocating or freeing
 * objects, the fullness status of the page can change, say, from ALMOST_FULL
 * to ALMOST_EMPTY when freeing an object. This function checks if such
 * a status change has occurred for the given page and accordingly moves the
 * page from the freelist of the old fullness group to that of the new
 * fullness group.
 */
794
static enum fullness_group fix_fullness_group(struct size_class *class,
795
						struct zspage *zspage)
796 797 798 799
{
	int class_idx;
	enum fullness_group currfg, newfg;

800 801
	get_zspage_mapping(zspage, &class_idx, &currfg);
	newfg = get_fullness_group(class, zspage);
802 803 804
	if (newfg == currfg)
		goto out;

M
Minchan Kim 已提交
805 806 807 808 809
	if (!is_zspage_isolated(zspage)) {
		remove_zspage(class, zspage, currfg);
		insert_zspage(class, zspage, newfg);
	}

810
	set_zspage_mapping(zspage, class_idx, newfg);
811 812 813 814 815 816 817 818 819 820

out:
	return newfg;
}

/*
 * We have to decide on how many pages to link together
 * to form a zspage for each size class. This is important
 * to reduce wastage due to unusable space left at end of
 * each zspage which is given as:
821 822
 *     wastage = Zp % class_size
 *     usage = Zp - wastage
823 824 825 826 827 828
 * where Zp = zspage size = k * PAGE_SIZE where k = 1, 2, ...
 *
 * For example, for size class of 3/8 * PAGE_SIZE, we should
 * link together 3 PAGE_SIZE sized pages to form a zspage
 * since then we can perfectly fit in 8 such objects.
 */
829
static int get_pages_per_zspage(int class_size)
830 831 832 833 834
{
	int i, max_usedpc = 0;
	/* zspage order which gives maximum used size per KB */
	int max_usedpc_order = 1;

835
	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851
		int zspage_size;
		int waste, usedpc;

		zspage_size = i * PAGE_SIZE;
		waste = zspage_size % class_size;
		usedpc = (zspage_size - waste) * 100 / zspage_size;

		if (usedpc > max_usedpc) {
			max_usedpc = usedpc;
			max_usedpc_order = i;
		}
	}

	return max_usedpc_order;
}

852
static struct zspage *get_zspage(struct page *page)
853
{
M
Minchan Kim 已提交
854 855 856 857
	struct zspage *zspage = (struct zspage *)page->private;

	BUG_ON(zspage->magic != ZSPAGE_MAGIC);
	return zspage;
858 859 860 861
}

static struct page *get_next_page(struct page *page)
{
M
Minchan Kim 已提交
862 863 864 865
	if (unlikely(PageHugeObject(page)))
		return NULL;

	return page->freelist;
866 867
}

M
Minchan Kim 已提交
868 869 870 871
/**
 * obj_to_location - get (<page>, <obj_idx>) from encoded object value
 * @page: page object resides in zspage
 * @obj_idx: object index
872
 */
M
Minchan Kim 已提交
873 874
static void obj_to_location(unsigned long obj, struct page **page,
				unsigned int *obj_idx)
875
{
M
Minchan Kim 已提交
876 877 878 879
	obj >>= OBJ_TAG_BITS;
	*page = pfn_to_page(obj >> OBJ_INDEX_BITS);
	*obj_idx = (obj & OBJ_INDEX_MASK);
}
880

M
Minchan Kim 已提交
881 882 883 884 885 886 887 888
/**
 * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
 * @page: page object resides in zspage
 * @obj_idx: object index
 */
static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
{
	unsigned long obj;
889

M
Minchan Kim 已提交
890
	obj = page_to_pfn(page) << OBJ_INDEX_BITS;
M
Minchan Kim 已提交
891
	obj |= obj_idx & OBJ_INDEX_MASK;
M
Minchan Kim 已提交
892
	obj <<= OBJ_TAG_BITS;
893

M
Minchan Kim 已提交
894
	return obj;
895 896
}

897 898 899 900 901
static unsigned long handle_to_obj(unsigned long handle)
{
	return *(unsigned long *)handle;
}

M
Minchan Kim 已提交
902
static unsigned long obj_to_head(struct page *page, void *obj)
M
Minchan Kim 已提交
903
{
M
Minchan Kim 已提交
904
	if (unlikely(PageHugeObject(page))) {
M
Minchan Kim 已提交
905
		VM_BUG_ON_PAGE(!is_first_page(page), page);
906
		return page->index;
907 908
	} else
		return *(unsigned long *)obj;
M
Minchan Kim 已提交
909 910
}

M
Minchan Kim 已提交
911 912 913 914 915
static inline int testpin_tag(unsigned long handle)
{
	return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
}

M
Minchan Kim 已提交
916 917
static inline int trypin_tag(unsigned long handle)
{
M
Minchan Kim 已提交
918
	return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
M
Minchan Kim 已提交
919 920 921 922
}

static void pin_tag(unsigned long handle)
{
M
Minchan Kim 已提交
923
	bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
M
Minchan Kim 已提交
924 925 926 927
}

static void unpin_tag(unsigned long handle)
{
M
Minchan Kim 已提交
928
	bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
M
Minchan Kim 已提交
929 930
}

N
Nitin Gupta 已提交
931 932
static void reset_page(struct page *page)
{
M
Minchan Kim 已提交
933
	__ClearPageMovable(page);
934
	ClearPagePrivate(page);
N
Nitin Gupta 已提交
935
	set_page_private(page, 0);
M
Minchan Kim 已提交
936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972
	page_mapcount_reset(page);
	ClearPageHugeObject(page);
	page->freelist = NULL;
}

/*
 * To prevent zspage destroy during migration, zspage freeing should
 * hold locks of all pages in the zspage.
 */
void lock_zspage(struct zspage *zspage)
{
	struct page *page = get_first_page(zspage);

	do {
		lock_page(page);
	} while ((page = get_next_page(page)) != NULL);
}

int trylock_zspage(struct zspage *zspage)
{
	struct page *cursor, *fail;

	for (cursor = get_first_page(zspage); cursor != NULL; cursor =
					get_next_page(cursor)) {
		if (!trylock_page(cursor)) {
			fail = cursor;
			goto unlock;
		}
	}

	return 1;
unlock:
	for (cursor = get_first_page(zspage); cursor != fail; cursor =
					get_next_page(cursor))
		unlock_page(cursor);

	return 0;
N
Nitin Gupta 已提交
973 974
}

M
Minchan Kim 已提交
975 976
static void __free_zspage(struct zs_pool *pool, struct size_class *class,
				struct zspage *zspage)
977
{
978
	struct page *page, *next;
M
Minchan Kim 已提交
979 980 981 982 983 984
	enum fullness_group fg;
	unsigned int class_idx;

	get_zspage_mapping(zspage, &class_idx, &fg);

	assert_spin_locked(&class->lock);
985

986
	VM_BUG_ON(get_zspage_inuse(zspage));
M
Minchan Kim 已提交
987
	VM_BUG_ON(fg != ZS_EMPTY);
988

M
Minchan Kim 已提交
989
	next = page = get_first_page(zspage);
990
	do {
M
Minchan Kim 已提交
991 992
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		next = get_next_page(page);
993
		reset_page(page);
M
Minchan Kim 已提交
994
		unlock_page(page);
M
Minchan Kim 已提交
995
		dec_zone_page_state(page, NR_ZSPAGES);
996 997 998
		put_page(page);
		page = next;
	} while (page != NULL);
999

1000
	cache_free_zspage(pool, zspage);
M
Minchan Kim 已提交
1001

1002
	zs_stat_dec(class, OBJ_ALLOCATED, class->objs_per_zspage);
M
Minchan Kim 已提交
1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019
	atomic_long_sub(class->pages_per_zspage,
					&pool->pages_allocated);
}

static void free_zspage(struct zs_pool *pool, struct size_class *class,
				struct zspage *zspage)
{
	VM_BUG_ON(get_zspage_inuse(zspage));
	VM_BUG_ON(list_empty(&zspage->list));

	if (!trylock_zspage(zspage)) {
		kick_deferred_free(pool);
		return;
	}

	remove_zspage(class, zspage, ZS_EMPTY);
	__free_zspage(pool, class, zspage);
1020 1021 1022
}

/* Initialize a newly allocated zspage */
1023
static void init_zspage(struct size_class *class, struct zspage *zspage)
1024
{
M
Minchan Kim 已提交
1025
	unsigned int freeobj = 1;
1026
	unsigned long off = 0;
M
Minchan Kim 已提交
1027
	struct page *page = get_first_page(zspage);
M
Minchan Kim 已提交
1028

1029 1030 1031
	while (page) {
		struct page *next_page;
		struct link_free *link;
1032
		void *vaddr;
1033

1034
		set_first_obj_offset(page, off);
1035

1036 1037
		vaddr = kmap_atomic(page);
		link = (struct link_free *)vaddr + off / sizeof(*link);
1038 1039

		while ((off += class->size) < PAGE_SIZE) {
1040
			link->next = freeobj++ << OBJ_TAG_BITS;
1041
			link += class->size / sizeof(*link);
1042 1043 1044 1045 1046 1047 1048 1049
		}

		/*
		 * We now come to the last (full or partial) object on this
		 * page, which must point to the first object on the next
		 * page (if present)
		 */
		next_page = get_next_page(page);
M
Minchan Kim 已提交
1050
		if (next_page) {
1051
			link->next = freeobj++ << OBJ_TAG_BITS;
M
Minchan Kim 已提交
1052 1053
		} else {
			/*
1054
			 * Reset OBJ_TAG_BITS bit to last link to tell
M
Minchan Kim 已提交
1055 1056
			 * whether it's allocated object or not.
			 */
1057
			link->next = -1 << OBJ_TAG_BITS;
M
Minchan Kim 已提交
1058
		}
1059
		kunmap_atomic(vaddr);
1060
		page = next_page;
1061
		off %= PAGE_SIZE;
1062
	}
1063

M
Minchan Kim 已提交
1064
	set_freeobj(zspage, 0);
1065 1066
}

M
Minchan Kim 已提交
1067 1068
static void create_page_chain(struct size_class *class, struct zspage *zspage,
				struct page *pages[])
1069
{
1070 1071 1072
	int i;
	struct page *page;
	struct page *prev_page = NULL;
M
Minchan Kim 已提交
1073
	int nr_pages = class->pages_per_zspage;
1074 1075 1076

	/*
	 * Allocate individual pages and link them together as:
M
Minchan Kim 已提交
1077
	 * 1. all pages are linked together using page->freelist
1078
	 * 2. each sub-page point to zspage using page->private
1079
	 *
1080
	 * we set PG_private to identify the first page (i.e. no other sub-page
1081
	 * has this flag set).
1082
	 */
1083 1084
	for (i = 0; i < nr_pages; i++) {
		page = pages[i];
1085
		set_page_private(page, (unsigned long)zspage);
M
Minchan Kim 已提交
1086
		page->freelist = NULL;
1087
		if (i == 0) {
1088
			zspage->first_page = page;
1089
			SetPagePrivate(page);
M
Minchan Kim 已提交
1090 1091 1092
			if (unlikely(class->objs_per_zspage == 1 &&
					class->pages_per_zspage == 1))
				SetPageHugeObject(page);
1093
		} else {
M
Minchan Kim 已提交
1094
			prev_page->freelist = page;
1095 1096 1097
		}
		prev_page = page;
	}
1098
}
1099

1100 1101 1102
/*
 * Allocate a zspage for the given size class
 */
1103 1104 1105
static struct zspage *alloc_zspage(struct zs_pool *pool,
					struct size_class *class,
					gfp_t gfp)
1106 1107 1108
{
	int i;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
1109 1110 1111 1112 1113 1114
	struct zspage *zspage = cache_alloc_zspage(pool, gfp);

	if (!zspage)
		return NULL;

	memset(zspage, 0, sizeof(struct zspage));
M
Minchan Kim 已提交
1115 1116
	zspage->magic = ZSPAGE_MAGIC;
	migrate_lock_init(zspage);
1117

1118 1119
	for (i = 0; i < class->pages_per_zspage; i++) {
		struct page *page;
1120

1121
		page = alloc_page(gfp);
1122
		if (!page) {
M
Minchan Kim 已提交
1123 1124
			while (--i >= 0) {
				dec_zone_page_state(pages[i], NR_ZSPAGES);
1125
				__free_page(pages[i]);
M
Minchan Kim 已提交
1126
			}
1127
			cache_free_zspage(pool, zspage);
1128 1129
			return NULL;
		}
M
Minchan Kim 已提交
1130 1131

		inc_zone_page_state(page, NR_ZSPAGES);
1132
		pages[i] = page;
1133 1134
	}

M
Minchan Kim 已提交
1135
	create_page_chain(class, zspage, pages);
1136
	init_zspage(class, zspage);
1137

1138
	return zspage;
1139 1140
}

1141
static struct zspage *find_get_zspage(struct size_class *class)
1142 1143
{
	int i;
1144
	struct zspage *zspage;
1145

M
Minchan Kim 已提交
1146
	for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) {
1147 1148 1149
		zspage = list_first_entry_or_null(&class->fullness_list[i],
				struct zspage, list);
		if (zspage)
1150 1151 1152
			break;
	}

1153
	return zspage;
1154 1155
}

1156
#ifdef CONFIG_PGTABLE_MAPPING
1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180
static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm)
		return 0;
	area->vm = alloc_vm_area(PAGE_SIZE * 2, NULL);
	if (!area->vm)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
	if (area->vm)
		free_vm_area(area->vm);
	area->vm = NULL;
}

static inline void *__zs_map_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
1181
	BUG_ON(map_vm_area(area->vm, PAGE_KERNEL, pages));
1182 1183 1184 1185 1186 1187 1188 1189 1190
	area->vm_addr = area->vm->addr;
	return area->vm_addr + off;
}

static inline void __zs_unmap_object(struct mapping_area *area,
				struct page *pages[2], int off, int size)
{
	unsigned long addr = (unsigned long)area->vm_addr;

1191
	unmap_kernel_range(addr, PAGE_SIZE * 2);
1192 1193
}

1194
#else /* CONFIG_PGTABLE_MAPPING */
1195 1196 1197 1198 1199 1200 1201 1202 1203

static inline int __zs_cpu_up(struct mapping_area *area)
{
	/*
	 * Make sure we don't leak memory if a cpu UP notification
	 * and zs_init() race and both call zs_cpu_up() on the same cpu
	 */
	if (area->vm_buf)
		return 0;
1204
	area->vm_buf = kmalloc(ZS_MAX_ALLOC_SIZE, GFP_KERNEL);
1205 1206 1207 1208 1209 1210 1211
	if (!area->vm_buf)
		return -ENOMEM;
	return 0;
}

static inline void __zs_cpu_down(struct mapping_area *area)
{
1212
	kfree(area->vm_buf);
1213 1214 1215 1216 1217
	area->vm_buf = NULL;
}

static void *__zs_map_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
1218 1219 1220
{
	int sizes[2];
	void *addr;
1221
	char *buf = area->vm_buf;
1222

1223 1224 1225 1226 1227 1228
	/* disable page faults to match kmap_atomic() return conditions */
	pagefault_disable();

	/* no read fastpath */
	if (area->vm_mm == ZS_MM_WO)
		goto out;
1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239

	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy object to per-cpu buffer */
	addr = kmap_atomic(pages[0]);
	memcpy(buf, addr + off, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(buf + sizes[0], addr, sizes[1]);
	kunmap_atomic(addr);
1240 1241
out:
	return area->vm_buf;
1242 1243
}

1244 1245
static void __zs_unmap_object(struct mapping_area *area,
			struct page *pages[2], int off, int size)
1246 1247 1248
{
	int sizes[2];
	void *addr;
1249
	char *buf;
1250

1251 1252 1253
	/* no write fastpath */
	if (area->vm_mm == ZS_MM_RO)
		goto out;
1254

1255
	buf = area->vm_buf;
1256 1257 1258
	buf = buf + ZS_HANDLE_SIZE;
	size -= ZS_HANDLE_SIZE;
	off += ZS_HANDLE_SIZE;
1259

1260 1261 1262 1263 1264 1265 1266 1267 1268 1269
	sizes[0] = PAGE_SIZE - off;
	sizes[1] = size - sizes[0];

	/* copy per-cpu buffer to object */
	addr = kmap_atomic(pages[0]);
	memcpy(addr + off, buf, sizes[0]);
	kunmap_atomic(addr);
	addr = kmap_atomic(pages[1]);
	memcpy(addr, buf + sizes[0], sizes[1]);
	kunmap_atomic(addr);
1270 1271 1272 1273

out:
	/* enable page faults to match kunmap_atomic() return conditions */
	pagefault_enable();
1274
}
1275

1276
#endif /* CONFIG_PGTABLE_MAPPING */
1277

1278
static int zs_cpu_prepare(unsigned int cpu)
1279 1280 1281
{
	struct mapping_area *area;

1282 1283
	area = &per_cpu(zs_map_area, cpu);
	return __zs_cpu_up(area);
1284 1285
}

1286
static int zs_cpu_dead(unsigned int cpu)
1287
{
1288
	struct mapping_area *area;
1289

1290 1291 1292
	area = &per_cpu(zs_map_area, cpu);
	__zs_cpu_down(area);
	return 0;
1293 1294
}

1295 1296
static bool can_merge(struct size_class *prev, int pages_per_zspage,
					int objs_per_zspage)
1297
{
1298 1299 1300
	if (prev->pages_per_zspage == pages_per_zspage &&
		prev->objs_per_zspage == objs_per_zspage)
		return true;
1301

1302
	return false;
1303 1304
}

1305
static bool zspage_full(struct size_class *class, struct zspage *zspage)
M
Minchan Kim 已提交
1306
{
1307
	return get_zspage_inuse(zspage) == class->objs_per_zspage;
M
Minchan Kim 已提交
1308 1309
}

1310 1311 1312 1313 1314 1315
unsigned long zs_get_total_pages(struct zs_pool *pool)
{
	return atomic_long_read(&pool->pages_allocated);
}
EXPORT_SYMBOL_GPL(zs_get_total_pages);

1316
/**
1317 1318 1319
 * zs_map_object - get address of allocated object from handle.
 * @pool: pool from which the object was allocated
 * @handle: handle returned from zs_malloc
1320
 *
1321 1322 1323
 * Before using an object allocated from zs_malloc, it must be mapped using
 * this function. When done with the object, it must be unmapped using
 * zs_unmap_object.
1324
 *
1325 1326 1327 1328
 * Only one object can be mapped per cpu at a time. There is no protection
 * against nested mappings.
 *
 * This function returns with preemption and page faults disabled.
1329
 */
1330 1331
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
			enum zs_mapmode mm)
1332
{
1333
	struct zspage *zspage;
1334
	struct page *page;
M
Minchan Kim 已提交
1335 1336
	unsigned long obj, off;
	unsigned int obj_idx;
1337

1338 1339 1340 1341 1342
	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
	struct page *pages[2];
1343
	void *ret;
1344

1345
	/*
1346 1347 1348
	 * Because we use per-cpu mapping areas shared among the
	 * pools/users, we can't allow mapping in interrupt context
	 * because it can corrupt another users mappings.
1349
	 */
1350
	BUG_ON(in_interrupt());
1351

M
Minchan Kim 已提交
1352 1353 1354
	/* From now on, migration cannot move the object */
	pin_tag(handle);

1355 1356
	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
1357
	zspage = get_zspage(page);
M
Minchan Kim 已提交
1358 1359 1360 1361

	/* migration cannot move any subpage in this zspage */
	migrate_read_lock(zspage);

1362
	get_zspage_mapping(zspage, &class_idx, &fg);
1363
	class = pool->size_class[class_idx];
M
Minchan Kim 已提交
1364
	off = (class->size * obj_idx) & ~PAGE_MASK;
1365

1366 1367 1368 1369 1370
	area = &get_cpu_var(zs_map_area);
	area->vm_mm = mm;
	if (off + class->size <= PAGE_SIZE) {
		/* this object is contained entirely within a page */
		area->vm_addr = kmap_atomic(page);
1371 1372
		ret = area->vm_addr + off;
		goto out;
1373 1374
	}

1375 1376 1377 1378
	/* this object spans two pages */
	pages[0] = page;
	pages[1] = get_next_page(page);
	BUG_ON(!pages[1]);
1379

1380 1381
	ret = __zs_map_object(area, pages, off, class->size);
out:
M
Minchan Kim 已提交
1382
	if (likely(!PageHugeObject(page)))
1383 1384 1385
		ret += ZS_HANDLE_SIZE;

	return ret;
1386
}
1387
EXPORT_SYMBOL_GPL(zs_map_object);
1388

1389
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
1390
{
1391
	struct zspage *zspage;
1392
	struct page *page;
M
Minchan Kim 已提交
1393 1394
	unsigned long obj, off;
	unsigned int obj_idx;
1395

1396 1397 1398 1399
	unsigned int class_idx;
	enum fullness_group fg;
	struct size_class *class;
	struct mapping_area *area;
1400

1401 1402
	obj = handle_to_obj(handle);
	obj_to_location(obj, &page, &obj_idx);
1403 1404
	zspage = get_zspage(page);
	get_zspage_mapping(zspage, &class_idx, &fg);
1405
	class = pool->size_class[class_idx];
M
Minchan Kim 已提交
1406
	off = (class->size * obj_idx) & ~PAGE_MASK;
1407

1408 1409 1410 1411 1412
	area = this_cpu_ptr(&zs_map_area);
	if (off + class->size <= PAGE_SIZE)
		kunmap_atomic(area->vm_addr);
	else {
		struct page *pages[2];
1413

1414 1415 1416 1417 1418 1419 1420
		pages[0] = page;
		pages[1] = get_next_page(page);
		BUG_ON(!pages[1]);

		__zs_unmap_object(area, pages, off, class->size);
	}
	put_cpu_var(zs_map_area);
M
Minchan Kim 已提交
1421 1422

	migrate_read_unlock(zspage);
M
Minchan Kim 已提交
1423
	unpin_tag(handle);
1424
}
1425
EXPORT_SYMBOL_GPL(zs_unmap_object);
1426

1427
static unsigned long obj_malloc(struct size_class *class,
1428
				struct zspage *zspage, unsigned long handle)
1429
{
M
Minchan Kim 已提交
1430
	int i, nr_page, offset;
1431 1432 1433 1434
	unsigned long obj;
	struct link_free *link;

	struct page *m_page;
M
Minchan Kim 已提交
1435
	unsigned long m_offset;
1436 1437
	void *vaddr;

M
Minchan Kim 已提交
1438
	handle |= OBJ_ALLOCATED_TAG;
1439
	obj = get_freeobj(zspage);
M
Minchan Kim 已提交
1440 1441 1442 1443 1444 1445 1446 1447

	offset = obj * class->size;
	nr_page = offset >> PAGE_SHIFT;
	m_offset = offset & ~PAGE_MASK;
	m_page = get_first_page(zspage);

	for (i = 0; i < nr_page; i++)
		m_page = get_next_page(m_page);
1448 1449 1450

	vaddr = kmap_atomic(m_page);
	link = (struct link_free *)vaddr + m_offset / sizeof(*link);
1451
	set_freeobj(zspage, link->next >> OBJ_TAG_BITS);
M
Minchan Kim 已提交
1452
	if (likely(!PageHugeObject(m_page)))
1453 1454 1455
		/* record handle in the header of allocated chunk */
		link->handle = handle;
	else
1456 1457 1458
		/* record handle to page->index */
		zspage->first_page->index = handle;

1459
	kunmap_atomic(vaddr);
1460
	mod_zspage_inuse(zspage, 1);
1461 1462
	zs_stat_inc(class, OBJ_USED, 1);

M
Minchan Kim 已提交
1463 1464
	obj = location_to_obj(m_page, obj);

1465 1466 1467 1468
	return obj;
}


1469 1470 1471 1472
/**
 * zs_malloc - Allocate block of given size from pool.
 * @pool: pool to allocate from
 * @size: size of block to allocate
1473
 * @gfp: gfp flags when allocating object
1474
 *
1475
 * On success, handle to the allocated object is returned,
1476
 * otherwise 0.
1477 1478
 * Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
 */
1479
unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
1480
{
1481
	unsigned long handle, obj;
1482
	struct size_class *class;
M
Minchan Kim 已提交
1483
	enum fullness_group newfg;
1484
	struct zspage *zspage;
1485

1486
	if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
1487 1488
		return 0;

1489
	handle = cache_alloc_handle(pool, gfp);
1490
	if (!handle)
1491
		return 0;
1492

1493 1494
	/* extra space in chunk to keep the handle */
	size += ZS_HANDLE_SIZE;
1495
	class = pool->size_class[get_size_class_index(size)];
1496 1497

	spin_lock(&class->lock);
1498
	zspage = find_get_zspage(class);
M
Minchan Kim 已提交
1499 1500 1501 1502 1503
	if (likely(zspage)) {
		obj = obj_malloc(class, zspage, handle);
		/* Now move the zspage to another fullness group, if required */
		fix_fullness_group(class, zspage);
		record_obj(handle, obj);
1504 1505
		spin_unlock(&class->lock);

M
Minchan Kim 已提交
1506 1507
		return handle;
	}
1508

M
Minchan Kim 已提交
1509 1510 1511 1512 1513 1514
	spin_unlock(&class->lock);

	zspage = alloc_zspage(pool, class, gfp);
	if (!zspage) {
		cache_free_handle(pool, handle);
		return 0;
1515 1516
	}

M
Minchan Kim 已提交
1517
	spin_lock(&class->lock);
1518
	obj = obj_malloc(class, zspage, handle);
M
Minchan Kim 已提交
1519 1520 1521
	newfg = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, newfg);
	set_zspage_mapping(zspage, class->index, newfg);
1522
	record_obj(handle, obj);
M
Minchan Kim 已提交
1523 1524
	atomic_long_add(class->pages_per_zspage,
				&pool->pages_allocated);
1525
	zs_stat_inc(class, OBJ_ALLOCATED, class->objs_per_zspage);
M
Minchan Kim 已提交
1526 1527 1528

	/* We completely set up zspage so mark them as movable */
	SetZsPageMovable(pool, zspage);
1529 1530
	spin_unlock(&class->lock);

1531
	return handle;
1532 1533 1534
}
EXPORT_SYMBOL_GPL(zs_malloc);

1535
static void obj_free(struct size_class *class, unsigned long obj)
1536 1537
{
	struct link_free *link;
1538 1539
	struct zspage *zspage;
	struct page *f_page;
M
Minchan Kim 已提交
1540 1541
	unsigned long f_offset;
	unsigned int f_objidx;
1542
	void *vaddr;
1543

M
Minchan Kim 已提交
1544
	obj &= ~OBJ_ALLOCATED_TAG;
1545
	obj_to_location(obj, &f_page, &f_objidx);
M
Minchan Kim 已提交
1546
	f_offset = (class->size * f_objidx) & ~PAGE_MASK;
1547
	zspage = get_zspage(f_page);
1548

1549
	vaddr = kmap_atomic(f_page);
1550 1551

	/* Insert this object in containing zspage's freelist */
1552
	link = (struct link_free *)(vaddr + f_offset);
1553
	link->next = get_freeobj(zspage) << OBJ_TAG_BITS;
1554
	kunmap_atomic(vaddr);
M
Minchan Kim 已提交
1555
	set_freeobj(zspage, f_objidx);
1556
	mod_zspage_inuse(zspage, -1);
1557
	zs_stat_dec(class, OBJ_USED, 1);
1558 1559 1560 1561
}

void zs_free(struct zs_pool *pool, unsigned long handle)
{
1562 1563
	struct zspage *zspage;
	struct page *f_page;
M
Minchan Kim 已提交
1564 1565
	unsigned long obj;
	unsigned int f_objidx;
1566 1567 1568
	int class_idx;
	struct size_class *class;
	enum fullness_group fullness;
M
Minchan Kim 已提交
1569
	bool isolated;
1570 1571 1572 1573

	if (unlikely(!handle))
		return;

M
Minchan Kim 已提交
1574
	pin_tag(handle);
1575 1576
	obj = handle_to_obj(handle);
	obj_to_location(obj, &f_page, &f_objidx);
1577
	zspage = get_zspage(f_page);
1578

M
Minchan Kim 已提交
1579 1580
	migrate_read_lock(zspage);

1581
	get_zspage_mapping(zspage, &class_idx, &fullness);
1582 1583 1584
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
1585
	obj_free(class, obj);
1586
	fullness = fix_fullness_group(class, zspage);
M
Minchan Kim 已提交
1587 1588 1589
	if (fullness != ZS_EMPTY) {
		migrate_read_unlock(zspage);
		goto out;
M
Minchan Kim 已提交
1590
	}
M
Minchan Kim 已提交
1591 1592 1593 1594 1595 1596 1597 1598

	isolated = is_zspage_isolated(zspage);
	migrate_read_unlock(zspage);
	/* If zspage is isolated, zs_page_putback will free the zspage */
	if (likely(!isolated))
		free_zspage(pool, class, zspage);
out:

1599
	spin_unlock(&class->lock);
M
Minchan Kim 已提交
1600
	unpin_tag(handle);
1601
	cache_free_handle(pool, handle);
M
Minchan Kim 已提交
1602 1603 1604
}
EXPORT_SYMBOL_GPL(zs_free);

1605 1606
static void zs_object_copy(struct size_class *class, unsigned long dst,
				unsigned long src)
M
Minchan Kim 已提交
1607 1608
{
	struct page *s_page, *d_page;
M
Minchan Kim 已提交
1609
	unsigned int s_objidx, d_objidx;
M
Minchan Kim 已提交
1610 1611 1612 1613 1614 1615 1616 1617 1618 1619
	unsigned long s_off, d_off;
	void *s_addr, *d_addr;
	int s_size, d_size, size;
	int written = 0;

	s_size = d_size = class->size;

	obj_to_location(src, &s_page, &s_objidx);
	obj_to_location(dst, &d_page, &d_objidx);

M
Minchan Kim 已提交
1620 1621
	s_off = (class->size * s_objidx) & ~PAGE_MASK;
	d_off = (class->size * d_objidx) & ~PAGE_MASK;
M
Minchan Kim 已提交
1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 1635 1636 1637 1638 1639

	if (s_off + class->size > PAGE_SIZE)
		s_size = PAGE_SIZE - s_off;

	if (d_off + class->size > PAGE_SIZE)
		d_size = PAGE_SIZE - d_off;

	s_addr = kmap_atomic(s_page);
	d_addr = kmap_atomic(d_page);

	while (1) {
		size = min(s_size, d_size);
		memcpy(d_addr + d_off, s_addr + s_off, size);
		written += size;

		if (written == class->size)
			break;

1640 1641 1642 1643 1644 1645
		s_off += size;
		s_size -= size;
		d_off += size;
		d_size -= size;

		if (s_off >= PAGE_SIZE) {
M
Minchan Kim 已提交
1646 1647 1648 1649 1650 1651 1652 1653 1654
			kunmap_atomic(d_addr);
			kunmap_atomic(s_addr);
			s_page = get_next_page(s_page);
			s_addr = kmap_atomic(s_page);
			d_addr = kmap_atomic(d_page);
			s_size = class->size - written;
			s_off = 0;
		}

1655
		if (d_off >= PAGE_SIZE) {
M
Minchan Kim 已提交
1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671
			kunmap_atomic(d_addr);
			d_page = get_next_page(d_page);
			d_addr = kmap_atomic(d_page);
			d_size = class->size - written;
			d_off = 0;
		}
	}

	kunmap_atomic(d_addr);
	kunmap_atomic(s_addr);
}

/*
 * Find alloced object in zspage from index object and
 * return handle.
 */
1672
static unsigned long find_alloced_obj(struct size_class *class,
1673
					struct page *page, int *obj_idx)
M
Minchan Kim 已提交
1674 1675 1676
{
	unsigned long head;
	int offset = 0;
1677
	int index = *obj_idx;
M
Minchan Kim 已提交
1678 1679 1680
	unsigned long handle = 0;
	void *addr = kmap_atomic(page);

1681
	offset = get_first_obj_offset(page);
M
Minchan Kim 已提交
1682 1683 1684
	offset += class->size * index;

	while (offset < PAGE_SIZE) {
M
Minchan Kim 已提交
1685
		head = obj_to_head(page, addr + offset);
M
Minchan Kim 已提交
1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (trypin_tag(handle))
				break;
			handle = 0;
		}

		offset += class->size;
		index++;
	}

	kunmap_atomic(addr);
1698 1699 1700

	*obj_idx = index;

M
Minchan Kim 已提交
1701 1702 1703 1704
	return handle;
}

struct zs_compact_control {
1705
	/* Source spage for migration which could be a subpage of zspage */
M
Minchan Kim 已提交
1706 1707 1708 1709 1710 1711
	struct page *s_page;
	/* Destination page for migration which should be a first page
	 * of zspage. */
	struct page *d_page;
	 /* Starting object index within @s_page which used for live object
	  * in the subpage. */
1712
	int obj_idx;
M
Minchan Kim 已提交
1713 1714 1715 1716 1717 1718 1719 1720 1721
};

static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
				struct zs_compact_control *cc)
{
	unsigned long used_obj, free_obj;
	unsigned long handle;
	struct page *s_page = cc->s_page;
	struct page *d_page = cc->d_page;
1722
	int obj_idx = cc->obj_idx;
M
Minchan Kim 已提交
1723 1724 1725
	int ret = 0;

	while (1) {
1726
		handle = find_alloced_obj(class, s_page, &obj_idx);
M
Minchan Kim 已提交
1727 1728 1729 1730
		if (!handle) {
			s_page = get_next_page(s_page);
			if (!s_page)
				break;
1731
			obj_idx = 0;
M
Minchan Kim 已提交
1732 1733 1734 1735
			continue;
		}

		/* Stop if there is no more space */
1736
		if (zspage_full(class, get_zspage(d_page))) {
M
Minchan Kim 已提交
1737 1738 1739 1740 1741 1742
			unpin_tag(handle);
			ret = -ENOMEM;
			break;
		}

		used_obj = handle_to_obj(handle);
1743
		free_obj = obj_malloc(class, get_zspage(d_page), handle);
1744
		zs_object_copy(class, free_obj, used_obj);
1745
		obj_idx++;
1746 1747 1748 1749 1750 1751 1752
		/*
		 * record_obj updates handle's value to free_obj and it will
		 * invalidate lock bit(ie, HANDLE_PIN_BIT) of handle, which
		 * breaks synchronization using pin_tag(e,g, zs_free) so
		 * let's keep the lock bit.
		 */
		free_obj |= BIT(HANDLE_PIN_BIT);
M
Minchan Kim 已提交
1753 1754
		record_obj(handle, free_obj);
		unpin_tag(handle);
1755
		obj_free(class, used_obj);
M
Minchan Kim 已提交
1756 1757 1758 1759
	}

	/* Remember last position in this iteration */
	cc->s_page = s_page;
1760
	cc->obj_idx = obj_idx;
M
Minchan Kim 已提交
1761 1762 1763 1764

	return ret;
}

1765
static struct zspage *isolate_zspage(struct size_class *class, bool source)
M
Minchan Kim 已提交
1766 1767
{
	int i;
1768 1769
	struct zspage *zspage;
	enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL};
M
Minchan Kim 已提交
1770

1771 1772 1773 1774 1775 1776 1777 1778 1779
	if (!source) {
		fg[0] = ZS_ALMOST_FULL;
		fg[1] = ZS_ALMOST_EMPTY;
	}

	for (i = 0; i < 2; i++) {
		zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
							struct zspage, list);
		if (zspage) {
M
Minchan Kim 已提交
1780
			VM_BUG_ON(is_zspage_isolated(zspage));
1781 1782
			remove_zspage(class, zspage, fg[i]);
			return zspage;
M
Minchan Kim 已提交
1783 1784 1785
		}
	}

1786
	return zspage;
M
Minchan Kim 已提交
1787 1788
}

1789
/*
1790
 * putback_zspage - add @zspage into right class's fullness list
1791
 * @class: destination class
1792
 * @zspage: target page
1793
 *
1794
 * Return @zspage's fullness_group
1795
 */
1796
static enum fullness_group putback_zspage(struct size_class *class,
1797
			struct zspage *zspage)
M
Minchan Kim 已提交
1798 1799 1800
{
	enum fullness_group fullness;

M
Minchan Kim 已提交
1801 1802
	VM_BUG_ON(is_zspage_isolated(zspage));

1803 1804 1805
	fullness = get_fullness_group(class, zspage);
	insert_zspage(class, zspage, fullness);
	set_zspage_mapping(zspage, class->index, fullness);
1806

1807
	return fullness;
1808
}
M
Minchan Kim 已提交
1809

M
Minchan Kim 已提交
1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 1863 1864 1865 1866 1867 1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885 1886 1887 1888 1889 1890 1891 1892 1893 1894 1895 1896 1897 1898 1899 1900 1901 1902 1903 1904 1905 1906 1907 1908 1909 1910 1911 1912 1913 1914 1915 1916 1917 1918 1919 1920 1921 1922 1923 1924 1925 1926 1927 1928 1929 1930 1931 1932 1933 1934 1935 1936 1937 1938 1939 1940 1941 1942 1943 1944 1945 1946 1947 1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964 1965 1966 1967 1968 1969 1970 1971 1972
#ifdef CONFIG_COMPACTION
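/*
 * zsmalloc uses a tiny pseudo filesystem so that each pool can have an
 * anonymous inode; the inode's address_space supplies the a_ops used by
 * the page migration core (see zs_register_migration() below).
 */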
static struct dentry *zs_mount(struct file_system_type *fs_type,
				int flags, const char *dev_name, void *data)
{
	static const struct dentry_operations ops = {
		.d_dname = simple_dname,
	};

	return mount_pseudo(fs_type, "zsmalloc:", NULL, &ops, ZSMALLOC_MAGIC);
}

static struct file_system_type zsmalloc_fs = {
	.name		= "zsmalloc",
	.mount		= zs_mount,
	.kill_sb	= kill_anon_super,
};

static int zsmalloc_mount(void)
{
	int ret = 0;

	zsmalloc_mnt = kern_mount(&zsmalloc_fs);
	if (IS_ERR(zsmalloc_mnt))
		ret = PTR_ERR(zsmalloc_mnt);

	return ret;
}

static void zsmalloc_unmount(void)
{
	kern_unmount(zsmalloc_mnt);
}

static void migrate_lock_init(struct zspage *zspage)
{
	rwlock_init(&zspage->lock);
}

static void migrate_read_lock(struct zspage *zspage)
{
	read_lock(&zspage->lock);
}

static void migrate_read_unlock(struct zspage *zspage)
{
	read_unlock(&zspage->lock);
}

static void migrate_write_lock(struct zspage *zspage)
{
	write_lock(&zspage->lock);
}

static void migrate_write_unlock(struct zspage *zspage)
{
	write_unlock(&zspage->lock);
}

/* Number of isolated subpage for *page migration* in this zspage */
static void inc_zspage_isolation(struct zspage *zspage)
{
	zspage->isolated++;
}

static void dec_zspage_isolation(struct zspage *zspage)
{
	zspage->isolated--;
}

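/*
 * Rebuild the zspage's page chain with @newpage taking @oldpage's place,
 * carrying over the first-object offset (and, for a huge object, the
 * handle stored in page->index) and marking the new page movable.
 */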
static void replace_sub_page(struct size_class *class, struct zspage *zspage,
				struct page *newpage, struct page *oldpage)
{
	struct page *page;
	struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
	int idx = 0;

	page = get_first_page(zspage);
	do {
		if (page == oldpage)
			pages[idx] = newpage;
		else
			pages[idx] = page;
		idx++;
	} while ((page = get_next_page(page)) != NULL);

	create_page_chain(class, zspage, pages);
	set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
	if (unlikely(PageHugeObject(oldpage)))
		newpage->index = oldpage->index;
	__SetPageMovable(newpage, page_mapping(oldpage));
}

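/*
 * Called by the page migration core to isolate a subpage of a zspage.
 * On the first isolation the zspage is removed from its fullness list so
 * that no new objects are allocated from it while migration is in flight.
 */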
bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage;
	struct address_space *mapping;

	/*
	 * Page is locked so zspage couldn't be destroyed. For detail, look at
	 * lock_zspage in free_zspage.
	 */
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(PageIsolated(page), page);

	zspage = get_zspage(page);

	/*
	 * Without class lock, fullness could be stale while class_idx is okay
	 * because class_idx is constant unless page is freed so we should get
	 * fullness again under class lock.
	 */
	get_zspage_mapping(zspage, &class_idx, &fullness);
	mapping = page_mapping(page);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	if (get_zspage_inuse(zspage) == 0) {
		spin_unlock(&class->lock);
		return false;
	}

	/* zspage is isolated for object migration */
	if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
		spin_unlock(&class->lock);
		return false;
	}

	/*
	 * If this is first time isolation for the zspage, isolate zspage from
	 * size_class to prevent further object allocation from the zspage.
	 */
	if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
		get_zspage_mapping(zspage, &class_idx, &fullness);
		remove_zspage(class, zspage, fullness);
	}

	inc_zspage_isolation(zspage);
	spin_unlock(&class->lock);

	return true;
}

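/*
 * Migration callback: pin every allocated object in the old page, copy the
 * page contents, rewrite each live handle to point at the new page, then
 * swap the new page into the zspage's page chain. Fails with -EAGAIN if
 * any object is pinned by a concurrent user.
 */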
int zs_page_migrate(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	struct zs_pool *pool;
	struct size_class *class;
	int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage;
	struct page *dummy;
	void *s_addr, *d_addr, *addr;
	int offset, pos;
	unsigned long handle, head;
	unsigned long old_obj, new_obj;
	unsigned int obj_idx;
	int ret = -EAGAIN;


	/*
	 * We cannot support the _NO_COPY case here, because copy needs to
	 * happen under the zs lock, which does not work with
	 * MIGRATE_SYNC_NO_COPY workflow.
	 */
	if (mode == MIGRATE_SYNC_NO_COPY)
		return -EINVAL;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zspage = get_zspage(page);

	/* Concurrent compactor cannot migrate any subpage in zspage */
	migrate_write_lock(zspage);
	get_zspage_mapping(zspage, &class_idx, &fullness);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];
	offset = get_first_obj_offset(page);

	spin_lock(&class->lock);
	if (!get_zspage_inuse(zspage)) {
		/*
		 * Set "offset" to the end of the page so that every loop
		 * skips unnecessary object scanning.
		 */
		offset = PAGE_SIZE;
	}

	pos = offset;
	s_addr = kmap_atomic(page);
	while (pos < PAGE_SIZE) {
		head = obj_to_head(page, s_addr + pos);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!trypin_tag(handle))
				goto unpin_objects;
		}
		pos += class->size;
	}

	/*
	 * Here, any user cannot access all objects in the zspage so let's move.
	 */
	d_addr = kmap_atomic(newpage);
	memcpy(d_addr, s_addr, PAGE_SIZE);
	kunmap_atomic(d_addr);

	for (addr = s_addr + offset; addr < s_addr + pos;
					addr += class->size) {
		head = obj_to_head(page, addr);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!testpin_tag(handle))
				BUG();

			old_obj = handle_to_obj(handle);
			obj_to_location(old_obj, &dummy, &obj_idx);
			new_obj = (unsigned long)location_to_obj(newpage,
								obj_idx);
			new_obj |= BIT(HANDLE_PIN_BIT);
			record_obj(handle, new_obj);
		}
	}

	replace_sub_page(class, zspage, newpage, page);
	get_page(newpage);

	dec_zspage_isolation(zspage);

	/*
	 * Page migration is done so let's putback isolated zspage to
	 * the list if @page is final isolated subpage in the zspage.
	 */
	if (!is_zspage_isolated(zspage))
		putback_zspage(class, zspage);

	reset_page(page);
	put_page(page);
	page = newpage;

	ret = MIGRATEPAGE_SUCCESS;
unpin_objects:
	for (addr = s_addr + offset; addr < s_addr + pos;
						addr += class->size) {
		head = obj_to_head(page, addr);
		if (head & OBJ_ALLOCATED_TAG) {
			handle = head & ~OBJ_ALLOCATED_TAG;
			if (!testpin_tag(handle))
				BUG();
			unpin_tag(handle);
		}
	}
	kunmap_atomic(s_addr);
	spin_unlock(&class->lock);
	migrate_write_unlock(zspage);

	return ret;
}

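/*
 * Called when the page migration core gives an isolated page back. Once
 * the last isolated subpage returns, the zspage goes back on a fullness
 * list; freeing an empty zspage is deferred because the page is still
 * locked here.
 */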
void zs_page_putback(struct page *page)
{
	struct zs_pool *pool;
	struct size_class *class;
	int class_idx;
	enum fullness_group fg;
	struct address_space *mapping;
	struct zspage *zspage;

	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	zspage = get_zspage(page);
	get_zspage_mapping(zspage, &class_idx, &fg);
	mapping = page_mapping(page);
	pool = mapping->private_data;
	class = pool->size_class[class_idx];

	spin_lock(&class->lock);
	dec_zspage_isolation(zspage);
	if (!is_zspage_isolated(zspage)) {
		fg = putback_zspage(class, zspage);
		/*
		 * Due to page_lock, we cannot free zspage immediately
		 * so let's defer.
		 */
		if (fg == ZS_EMPTY)
			schedule_work(&pool->free_work);
	}
	spin_unlock(&class->lock);
}

const struct address_space_operations zsmalloc_aops = {
	.isolate_page = zs_page_isolate,
	.migratepage = zs_page_migrate,
	.putback_page = zs_page_putback,
};

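/*
 * Give the pool an anonymous inode on the zsmalloc pseudo-fs so that its
 * pages have a mapping with zsmalloc_aops; that is how the migration core
 * finds zs_page_isolate/zs_page_migrate/zs_page_putback for our pages.
 */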
static int zs_register_migration(struct zs_pool *pool)
{
	pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);
	if (IS_ERR(pool->inode)) {
		pool->inode = NULL;
		return 1;
	}

	pool->inode->i_mapping->private_data = pool;
	pool->inode->i_mapping->a_ops = &zsmalloc_aops;
	return 0;
}

static void zs_unregister_migration(struct zs_pool *pool)
{
	flush_work(&pool->free_work);
	iput(pool->inode);
}

/*
 * Caller should hold page_lock of all pages in the zspage
 * In here, we cannot use zspage meta data.
 */
static void async_free_zspage(struct work_struct *work)
{
	int i;
	struct size_class *class;
	unsigned int class_idx;
	enum fullness_group fullness;
	struct zspage *zspage, *tmp;
	LIST_HEAD(free_pages);
	struct zs_pool *pool = container_of(work, struct zs_pool,
					free_work);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		class = pool->size_class[i];
		if (class->index != i)
			continue;

		spin_lock(&class->lock);
		list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
		spin_unlock(&class->lock);
	}


	list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
		list_del(&zspage->list);
		lock_zspage(zspage);

		get_zspage_mapping(zspage, &class_idx, &fullness);
		VM_BUG_ON(fullness != ZS_EMPTY);
		class = pool->size_class[class_idx];
		spin_lock(&class->lock);
		__free_zspage(pool, pool->size_class[class_idx], zspage);
		spin_unlock(&class->lock);
	}
}

static void kick_deferred_free(struct zs_pool *pool)
{
	schedule_work(&pool->free_work);
}

static void init_deferred_free(struct zs_pool *pool)
{
	INIT_WORK(&pool->free_work, async_free_zspage);
}

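/* Mark every subpage of a zspage movable so the callbacks above apply. */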
static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
{
	struct page *page = get_first_page(zspage);

	do {
		WARN_ON(!trylock_page(page));
		__SetPageMovable(page, pool->inode->i_mapping);
		unlock_page(page);
	} while ((page = get_next_page(page)) != NULL);
}
#endif

/*
 * Based on the number of unused allocated objects, calculate and return
 * the number of pages that compaction could free.
 */
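/*
 * Example: in a class whose 3-page zspages hold 12 objects each, having
 * 120 objects allocated but only 60 in use wastes 60 slots, i.e. 5 whole
 * zspages, so compaction could release up to 5 * 3 = 15 pages.
 */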
static unsigned long zs_can_compact(struct size_class *class)
{
	unsigned long obj_wasted;
	unsigned long obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
	unsigned long obj_used = zs_stat_get(class, OBJ_USED);

	if (obj_allocated <= obj_used)
		return 0;

	obj_wasted = obj_allocated - obj_used;
	obj_wasted /= class->objs_per_zspage;

	return obj_wasted * class->pages_per_zspage;
}

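/*
 * Compact a single size class: repeatedly grab an almost-empty source
 * zspage, migrate its live objects into almost-full destination zspages,
 * and free the source once it drains. Runs with class->lock held,
 * dropping it periodically to stay preemptible.
 */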
static void __zs_compact(struct zs_pool *pool, struct size_class *class)
{
	struct zs_compact_control cc;
	struct zspage *src_zspage;
	struct zspage *dst_zspage = NULL;

	spin_lock(&class->lock);
	while ((src_zspage = isolate_zspage(class, true))) {

		if (!zs_can_compact(class))
			break;

		cc.obj_idx = 0;
		cc.s_page = get_first_page(src_zspage);

		while ((dst_zspage = isolate_zspage(class, false))) {
			cc.d_page = get_first_page(dst_zspage);
			/*
			 * If there is no more space in dst_page, reschedule
			 * and see if anyone has allocated another zspage.
			 */
			if (!migrate_zspage(pool, class, &cc))
				break;

			putback_zspage(class, dst_zspage);
		}

		/* Stop if we couldn't find a slot */
		if (dst_zspage == NULL)
			break;

		putback_zspage(class, dst_zspage);
		if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
			free_zspage(pool, class, src_zspage);
			pool->stats.pages_compacted += class->pages_per_zspage;
		}
		spin_unlock(&class->lock);
		cond_resched();
		spin_lock(&class->lock);
	}

	if (src_zspage)
		putback_zspage(class, src_zspage);

	spin_unlock(&class->lock);
}

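/**
 * zs_compact - migrate objects out of sparsely used zspages in every size
 *	class and free the emptied zspages.
 * @pool: pool to compact
 *
 * Return: the cumulative number of pages compacted over the pool's
 * lifetime (pool->stats.pages_compacted), not just by this call.
 */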
unsigned long zs_compact(struct zs_pool *pool)
{
	int i;
	struct size_class *class;

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;
		__zs_compact(pool, class);
	}

	return pool->stats.pages_compacted;
}
EXPORT_SYMBOL_GPL(zs_compact);

void zs_pool_stats(struct zs_pool *pool, struct zs_pool_stats *stats)
{
	memcpy(stats, &pool->stats, sizeof(struct zs_pool_stats));
}
EXPORT_SYMBOL_GPL(zs_pool_stats);

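/*
 * Shrinker "scan" callback: run a full pool compaction and report how many
 * pages it released, or SHRINK_STOP when nothing could be reclaimed.
 */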
static unsigned long zs_shrinker_scan(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	unsigned long pages_freed;
	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
			shrinker);

	pages_freed = pool->stats.pages_compacted;
	/*
	 * Compact classes and calculate compaction delta.
	 * Can run concurrently with a manually triggered
	 * (by user) compaction.
	 */
	pages_freed = zs_compact(pool) - pages_freed;

	return pages_freed ? pages_freed : SHRINK_STOP;
}

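/*
 * Shrinker "count" callback: estimate the reclaimable pages by summing
 * zs_can_compact() over all primary (non-merged) size classes.
 */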
static unsigned long zs_shrinker_count(struct shrinker *shrinker,
		struct shrink_control *sc)
{
	int i;
	struct size_class *class;
	unsigned long pages_to_free = 0;
	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
			shrinker);

	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		class = pool->size_class[i];
		if (!class)
			continue;
		if (class->index != i)
			continue;

		pages_to_free += zs_can_compact(class);
	}

	return pages_to_free;
}

static void zs_unregister_shrinker(struct zs_pool *pool)
{
{
	unregister_shrinker(&pool->shrinker);
}

static int zs_register_shrinker(struct zs_pool *pool)
{
	pool->shrinker.scan_objects = zs_shrinker_scan;
	pool->shrinker.count_objects = zs_shrinker_count;
	pool->shrinker.batch = 0;
	pool->shrinker.seeks = DEFAULT_SEEKS;

	return register_shrinker(&pool->shrinker);
}

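/*
 * A minimal usage sketch (error handling elided; prototypes as declared
 * in include/linux/zsmalloc.h):
 *
 *	struct zs_pool *pool = zs_create_pool("example");
 *	unsigned long handle = zs_malloc(pool, 128, GFP_KERNEL);
 *	void *obj = zs_map_object(pool, handle, ZS_MM_WO);
 *
 *	memset(obj, 0, 128);
 *	zs_unmap_object(pool, handle);
 *	...
 *	zs_free(pool, handle);
 *	zs_destroy_pool(pool);
 */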
/**
 * zs_create_pool - Creates an allocation pool to work from.
 * @name: pool name to be created
 *
 * This function must be called before anything when using
 * the zsmalloc allocator.
 *
 * On success, a pointer to the newly created pool is returned,
 * otherwise NULL.
 */
struct zs_pool *zs_create_pool(const char *name)
{
	int i;
	struct zs_pool *pool;
	struct size_class *prev_class = NULL;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	init_deferred_free(pool);

	pool->name = kstrdup(name, GFP_KERNEL);
	if (!pool->name)
		goto err;

	if (create_cache(pool))
		goto err;

	/*
	 * Iterate in reverse, because the size of a size_class that we want
	 * to use for merging should be larger than or equal to the current
	 * size.
	 */
	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
		int size;
		int pages_per_zspage;
		int objs_per_zspage;
		struct size_class *class;
		int fullness = 0;

		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
		if (size > ZS_MAX_ALLOC_SIZE)
			size = ZS_MAX_ALLOC_SIZE;
		pages_per_zspage = get_pages_per_zspage(size);
		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;

		/*
		 * size_class is used for normal zsmalloc operation such
		 * as alloc/free for that size. Although it is natural that we
		 * have one size_class for each size, there is a chance that we
		 * can get more memory utilization if we use one size_class for
		 * many different sizes whose size_classes have the same
		 * characteristics. So we make size_class point to the
		 * previous size_class if possible.
		 */
		if (prev_class) {
			if (can_merge(prev_class, pages_per_zspage, objs_per_zspage)) {
				pool->size_class[i] = prev_class;
				continue;
			}
		}

		class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
		if (!class)
			goto err;

		class->size = size;
		class->index = i;
		class->pages_per_zspage = pages_per_zspage;
		class->objs_per_zspage = objs_per_zspage;
		spin_lock_init(&class->lock);
		pool->size_class[i] = class;
		for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
							fullness++)
			INIT_LIST_HEAD(&class->fullness_list[fullness]);

		prev_class = class;
	}

	/* debug only, don't abort if it fails */
	zs_pool_stat_create(pool, name);

	if (zs_register_migration(pool))
		goto err;

	/*
	 * Not critical since the shrinker is only used to trigger internal
	 * defragmentation of the pool, which is a pretty optional thing.  If
	 * registration fails we can still use the pool normally and the user
	 * can trigger compaction manually. Thus, ignore the return code.
	 */
	zs_register_shrinker(pool);

	return pool;

err:
	zs_destroy_pool(pool);
	return NULL;
}
EXPORT_SYMBOL_GPL(zs_create_pool);

void zs_destroy_pool(struct zs_pool *pool)
{
	int i;

	zs_unregister_shrinker(pool);
	zs_unregister_migration(pool);
	zs_pool_stat_destroy(pool);

	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
		int fg;
		struct size_class *class = pool->size_class[i];

		if (!class)
			continue;

		if (class->index != i)
			continue;

		for (fg = ZS_EMPTY; fg < NR_ZS_FULLNESS; fg++) {
			if (!list_empty(&class->fullness_list[fg])) {
				pr_info("Freeing non-empty class with size %db, fullness group %d\n",
					class->size, fg);
			}
		}
		kfree(class);
	}

	destroy_cache(pool);
	kfree(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(zs_destroy_pool);

static int __init zs_init(void)
{
	int ret;

	ret = zsmalloc_mount();
	if (ret)
		goto out;

	ret = cpuhp_setup_state(CPUHP_MM_ZS_PREPARE, "mm/zsmalloc:prepare",
				zs_cpu_prepare, zs_cpu_dead);
	if (ret)
		goto hp_setup_fail;

#ifdef CONFIG_ZPOOL
	zpool_register_driver(&zs_zpool_driver);
#endif

	zs_stat_init();

	return 0;

hp_setup_fail:
	zsmalloc_unmount();
out:
	return ret;
}

static void __exit zs_exit(void)
{
#ifdef CONFIG_ZPOOL
	zpool_unregister_driver(&zs_zpool_driver);
#endif
	zsmalloc_unmount();
	cpuhp_remove_state(CPUHP_MM_ZS_PREPARE);

	zs_stat_exit();
}

module_init(zs_init);
module_exit(zs_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");