vmw_balloon.c 31.9 KB
Newer Older
D
Dmitry Torokhov 已提交
1 2 3
/*
 * VMware Balloon driver.
 *
4
 * Copyright (C) 2000-2013, VMware, Inc. All Rights Reserved.
D
Dmitry Torokhov 已提交
5 6 7 8 9 10 11 12 13 14 15 16 17 18 19
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
20 21
 * Maintained by:	Xavier Deguillard <xdeguillard@vmware.com>
 *			Philip Moltmann <moltmann@vmware.com>
D
Dmitry Torokhov 已提交
22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39
 */

/*
 * This is VMware physical memory management driver for Linux. The driver
 * acts like a "balloon" that can be inflated to reclaim physical pages by
 * reserving them in the guest and invalidating them in the monitor,
 * freeing up the underlying machine pages so they can be allocated to
 * other guests.  The balloon can also be deflated to allow the guest to
 * use more physical memory. Higher level policies can control the sizes
 * of balloons in VMs in order to manage physical memory resources.
 */

//#define DEBUG
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
40
#include <linux/vmalloc.h>
D
Dmitry Torokhov 已提交
41 42 43 44 45
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
46
#include <asm/hypervisor.h>
D
Dmitry Torokhov 已提交
47 48 49

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Memory Control (Balloon) Driver");
50
MODULE_VERSION("1.4.1.0-k");
D
Dmitry Torokhov 已提交
51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89
MODULE_ALIAS("dmi:*:svnVMware*:*");
MODULE_ALIAS("vmware_vmmemctl");
MODULE_LICENSE("GPL");

/*
 * Various constants controlling the rate of inflating/deflating the
 * balloon, measured in pages.
 */

/*
 * Rates of memory allocation when guest experiences memory pressure
 * (driver performs sleeping allocations).
 */
#define VMW_BALLOON_RATE_ALLOC_MIN	512U
#define VMW_BALLOON_RATE_ALLOC_MAX	2048U
#define VMW_BALLOON_RATE_ALLOC_INC	16U

/*
 * When guest is under memory pressure, use a reduced page allocation
 * rate for next several cycles.
 */
#define VMW_BALLOON_SLOW_CYCLES		4

/*
 * Use __GFP_HIGHMEM to allow pages from HIGHMEM zone. We don't
 * allow wait (__GFP_WAIT) for NOSLEEP page allocations. Use
 * __GFP_NOWARN, to suppress page allocation failure warnings.
 */
#define VMW_PAGE_ALLOC_NOSLEEP		(__GFP_HIGHMEM|__GFP_NOWARN)

/*
 * Use GFP_HIGHUSER when executing in a separate kernel thread
 * context and allocation can sleep.  This is less stressful to
 * the guest memory system, since it allows the thread to block
 * while memory is reclaimed, and won't take pages from emergency
 * low-memory pools.
 */
#define VMW_PAGE_ALLOC_CANSLEEP		(GFP_HIGHUSER)

90 91
/* Maximum number of refused pages we accumulate during inflation cycle */
#define VMW_BALLOON_MAX_REFUSED		16
D
Dmitry Torokhov 已提交
92 93 94 95 96 97 98 99

/*
 * Hypervisor communication port definitions.
 */
#define VMW_BALLOON_HV_PORT		0x5670
#define VMW_BALLOON_HV_MAGIC		0x456c6d6f
#define VMW_BALLOON_GUEST_ID		1	/* Linux */

100 101 102 103 104
/* Capability bits negotiated with the hypervisor via the START command. */
enum vmwballoon_capabilities {
	/*
	 * Bit 0 is reserved and not associated to any capability.
	 */
	VMW_BALLOON_BASIC_CMDS		= (1 << 1),
	VMW_BALLOON_BATCHED_CMDS	= (1 << 2),
	VMW_BALLOON_BATCHED_2M_CMDS     = (1 << 3),
};

/* Full capability set this driver advertises to the host. */
#define VMW_BALLOON_CAPABILITIES	(VMW_BALLOON_BASIC_CMDS \
					| VMW_BALLOON_BATCHED_CMDS \
					| VMW_BALLOON_BATCHED_2M_CMDS)

/* A 2m page spans 1 << 9 == 512 4k pages; two page sizes are tracked. */
#define VMW_BALLOON_2M_SHIFT		(9)
#define VMW_BALLOON_NUM_PAGE_SIZES	(2)
115

116 117 118 119 120 121 122 123 124
/*
 * Backdoor commands availability:
 *
 * START, GET_TARGET and GUEST_ID are always available,
 *
 * VMW_BALLOON_BASIC_CMDS:
 *	LOCK and UNLOCK commands,
 * VMW_BALLOON_BATCHED_CMDS:
 *	BATCHED_LOCK and BATCHED_UNLOCK commands.
125 126
 * VMW BALLOON_BATCHED_2M_CMDS:
 *	BATCHED_2M_LOCK and BATCHED_2M_UNLOCK commands.
127
 */
128 129 130 131 132 133 134 135 136 137
#define VMW_BALLOON_CMD_START			0
#define VMW_BALLOON_CMD_GET_TARGET		1
#define VMW_BALLOON_CMD_LOCK			2
#define VMW_BALLOON_CMD_UNLOCK			3
#define VMW_BALLOON_CMD_GUEST_ID		4
#define VMW_BALLOON_CMD_BATCHED_LOCK		6
#define VMW_BALLOON_CMD_BATCHED_UNLOCK		7
#define VMW_BALLOON_CMD_BATCHED_2M_LOCK		8
#define VMW_BALLOON_CMD_BATCHED_2M_UNLOCK	9

D
Dmitry Torokhov 已提交
138 139

/* error codes */
140 141 142 143 144 145 146 147 148 149 150 151 152
#define VMW_BALLOON_SUCCESS		        0
#define VMW_BALLOON_FAILURE		        -1
#define VMW_BALLOON_ERROR_CMD_INVALID	        1
#define VMW_BALLOON_ERROR_PPN_INVALID	        2
#define VMW_BALLOON_ERROR_PPN_LOCKED	        3
#define VMW_BALLOON_ERROR_PPN_UNLOCKED	        4
#define VMW_BALLOON_ERROR_PPN_PINNED	        5
#define VMW_BALLOON_ERROR_PPN_NOTNEEDED	        6
#define VMW_BALLOON_ERROR_RESET		        7
#define VMW_BALLOON_ERROR_BUSY		        8

#define VMW_BALLOON_SUCCESS_WITH_CAPABILITIES	(0x03000000)

153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193
/* Batch page description */

/*
 * Layout of a page in the batch page:
 *
 * +-------------+----------+--------+
 * |             |          |        |
 * | Page number | Reserved | Status |
 * |             |          |        |
 * +-------------+----------+--------+
 * 64  PAGE_SHIFT          6         0
 *
 * The reserved field should be set to 0.
 */
#define VMW_BALLOON_BATCH_MAX_PAGES	(PAGE_SIZE / sizeof(u64))
#define VMW_BALLOON_BATCH_STATUS_MASK	((1UL << 5) - 1)
#define VMW_BALLOON_BATCH_PAGE_MASK	(~((1UL << PAGE_SHIFT) - 1))

/* One guest page holding an array of batch entries (one u64 per entry). */
struct vmballoon_batch_page {
	u64 pages[VMW_BALLOON_BATCH_MAX_PAGES];
};

/* Physical address stored in batch entry @idx, low status bits masked off. */
static u64 vmballoon_batch_get_pa(struct vmballoon_batch_page *batch, int idx)
{
	u64 entry = batch->pages[idx];

	return entry & VMW_BALLOON_BATCH_PAGE_MASK;
}

static int vmballoon_batch_get_status(struct vmballoon_batch_page *batch,
				int idx)
{
	return (int)(batch->pages[idx] & VMW_BALLOON_BATCH_STATUS_MASK);
}

/* Store a page's physical address into batch entry @idx. */
static void vmballoon_batch_set_pa(struct vmballoon_batch_page *batch, int idx,
				u64 pa)
{
	batch->pages[idx] = pa;
}


/*
 * Issue a balloon backdoor command to the hypervisor via the "inl" I/O
 * port protocol: magic number in %eax, command in %ecx, port in %edx,
 * arguments in %ebx and %esi.  The hypervisor returns the status in
 * %eax and the command result in %ebx — except for START, whose result
 * comes back in %ecx (hence the fixup below).  Evaluates to the status;
 * "result" is updated as a side effect.
 */
#define VMWARE_BALLOON_CMD(cmd, arg1, arg2, result)		\
({								\
	unsigned long __status, __dummy1, __dummy2, __dummy3;	\
	__asm__ __volatile__ ("inl %%dx" :			\
		"=a"(__status),					\
		"=c"(__dummy1),					\
		"=d"(__dummy2),					\
		"=b"(result),					\
		"=S" (__dummy3) :				\
		"0"(VMW_BALLOON_HV_MAGIC),			\
		"1"(VMW_BALLOON_CMD_##cmd),			\
		"2"(VMW_BALLOON_HV_PORT),			\
		"3"(arg1),					\
		"4" (arg2) :					\
		"memory");					\
	if (VMW_BALLOON_CMD_##cmd == VMW_BALLOON_CMD_START)	\
		result = __dummy1;				\
	result &= -1UL;						\
	__status & -1UL;					\
})

#ifdef CONFIG_DEBUG_FS
struct vmballoon_stats {
	unsigned int timer;

218
	/* allocation statistics */
219 220
	unsigned int alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int alloc_fail[VMW_BALLOON_NUM_PAGE_SIZES];
D
Dmitry Torokhov 已提交
221 222
	unsigned int sleep_alloc;
	unsigned int sleep_alloc_fail;
223 224 225
	unsigned int refused_alloc[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int refused_free[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int free[VMW_BALLOON_NUM_PAGE_SIZES];
D
Dmitry Torokhov 已提交
226 227

	/* monitor operations */
228 229 230 231
	unsigned int lock[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int lock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int unlock[VMW_BALLOON_NUM_PAGE_SIZES];
	unsigned int unlock_fail[VMW_BALLOON_NUM_PAGE_SIZES];
D
Dmitry Torokhov 已提交
232 233 234 235 236 237 238 239 240 241 242 243 244
	unsigned int target;
	unsigned int target_fail;
	unsigned int start;
	unsigned int start_fail;
	unsigned int guest_type;
	unsigned int guest_type_fail;
};

#define STATS_INC(stat) (stat)++
#else
#define STATS_INC(stat)
#endif

245 246 247 248
struct vmballoon;

/*
 * Operations that differ between the basic protocol (one page per
 * hypercall) and the batched protocol (an array of pages per hypercall).
 */
struct vmballoon_ops {
	void (*add_page)(struct vmballoon *b, int idx, struct page *p);
	int (*lock)(struct vmballoon *b, unsigned int num_pages,
			bool is_2m_pages, unsigned int *target);
	int (*unlock)(struct vmballoon *b, unsigned int num_pages,
			bool is_2m_pages, unsigned int *target);
};

/* Per-page-size bookkeeping; one instance for 4k pages, one for 2m. */
struct vmballoon_page_size {
	/* list of reserved physical pages */
	struct list_head pages;

	/* transient list of non-balloonable pages */
	struct list_head refused_pages;
	unsigned int n_refused_pages;
};

/* Driver-wide balloon state; a single static instance is defined below. */
struct vmballoon {
	struct vmballoon_page_size page_sizes[VMW_BALLOON_NUM_PAGE_SIZES];

	/* supported page sizes. 1 == 4k pages only, 2 == 4k and 2m pages */
	unsigned supported_page_sizes;

	/* balloon size in pages */
	unsigned int size;
	unsigned int target;

	/* reset flag */
	bool reset_required;

	/* adjustment rates (pages per second) */
	unsigned int rate_alloc;

	/* slowdown page allocations for next few cycles */
	unsigned int slow_allocation_cycles;

	/* capability mask negotiated with the host via the START command */
	unsigned long capabilities;

	/* kernel mapping of "page", used as the hypercall batch array */
	struct vmballoon_batch_page *batch_page;
	unsigned int batch_max_pages;
	struct page *page;

	const struct vmballoon_ops *ops;

#ifdef CONFIG_DEBUG_FS
	/* statistics */
	struct vmballoon_stats stats;

	/* debugfs file exporting statistics */
	struct dentry *dbg_entry;
#endif

	struct sysinfo sysinfo;

	struct delayed_work dwork;
};

static struct vmballoon balloon;

/*
 * Send "start" command to the host, communicating supported version
 * of the protocol.
 *
 * On success b->capabilities holds the negotiated capability mask and
 * b->supported_page_sizes is derived from it.
 */
static bool vmballoon_send_start(struct vmballoon *b, unsigned long req_caps)
{
	unsigned long status, capabilities, dummy = 0;
	bool success;

	STATS_INC(b->stats.start);

	status = VMWARE_BALLOON_CMD(START, req_caps, dummy, capabilities);

	switch (status) {
	case VMW_BALLOON_SUCCESS_WITH_CAPABILITIES:
		/* host reported its capability mask; remember it */
		b->capabilities = capabilities;
		success = true;
		break;
	case VMW_BALLOON_SUCCESS:
		/* host did not report capabilities; assume basic commands */
		b->capabilities = VMW_BALLOON_BASIC_CMDS;
		success = true;
		break;
	default:
		success = false;
	}

	/* 2 == 4k and 2m pages supported, 1 == 4k pages only */
	if (b->capabilities & VMW_BALLOON_BATCHED_2M_CMDS)
		b->supported_page_sizes = 2;
	else
		b->supported_page_sizes = 1;

	if (!success) {
		pr_debug("%s - failed, hv returns %ld\n", __func__, status);
		STATS_INC(b->stats.start_fail);
	}
	return success;
}

/*
 * Translate a hypervisor status code into a boolean, latching the
 * reset request when the host returns VMW_BALLOON_ERROR_RESET.
 */
static bool vmballoon_check_status(struct vmballoon *b, unsigned long status)
{
	if (status == VMW_BALLOON_SUCCESS)
		return true;

	/* the host asked us to restart the protocol; remember that */
	if (status == VMW_BALLOON_ERROR_RESET)
		b->reset_required = true;

	return false;
}

/*
 * Communicate guest type to the host so that it can adjust ballooning
 * algorithm to the one most appropriate for the guest. This command
 * is normally issued after sending "start" command and is part of
 * standard reset sequence.
 */
static bool vmballoon_send_guest_id(struct vmballoon *b)
{
	unsigned long status, dummy = 0;

	status = VMWARE_BALLOON_CMD(GUEST_ID, VMW_BALLOON_GUEST_ID, dummy,
				dummy);

	STATS_INC(b->stats.guest_type);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.guest_type_fail);
	return false;
}

382 383 384 385 386 387 388 389
/* Number of 4k pages covered by one balloon page of the given size. */
static u16 vmballoon_page_size(bool is_2m_page)
{
	return is_2m_page ? 1 << VMW_BALLOON_2M_SHIFT : 1;
}

D
Dmitry Torokhov 已提交
390 391 392 393 394 395 396 397
/*
 * Retrieve desired balloon size from the host.
 *
 * Returns false — leaving *new_target untouched — when the hypercall
 * fails or when total RAM does not fit in 32 bits.
 */
static bool vmballoon_send_get_target(struct vmballoon *b, u32 *new_target)
{
	unsigned long status;
	unsigned long target;
	unsigned long limit;
	unsigned long dummy = 0;
	u32 limit32;

	/*
	 * si_meminfo() is cheap. Moreover, we want to provide dynamic
	 * max balloon size later. So let us call si_meminfo() every
	 * iteration.
	 */
	si_meminfo(&b->sysinfo);
	limit = b->sysinfo.totalram;

	/* Ensure limit fits in 32-bits */
	limit32 = (u32)limit;
	if (limit != limit32)
		return false;

	/* update stats */
	STATS_INC(b->stats.target);

	status = VMWARE_BALLOON_CMD(GET_TARGET, limit, dummy, target);
	if (vmballoon_check_status(b, status)) {
		*new_target = target;
		return true;
	}

	pr_debug("%s - failed, hv returns %ld\n", __func__, status);
	STATS_INC(b->stats.target_fail);
	return false;
}

/*
 * Notify the host about allocated page so that host can use it without
 * fear that guest will need it. Host may reject some pages, we need to
 * check the return value and maybe submit a different page.
 *
 * Returns 0 on success, -1 when the pfn does not fit in 32 bits, and 1
 * when the host refused the page (*hv_status then holds the host error).
 */
static int vmballoon_send_lock_page(struct vmballoon *b, unsigned long pfn,
				unsigned int *hv_status, unsigned int *target)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	/* the single-page protocol only carries 32-bit PFNs */
	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return -1;

	STATS_INC(b->stats.lock[false]);

	*hv_status = status = VMWARE_BALLOON_CMD(LOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail[false]);
	return 1;
}

454
/*
 * Lock a whole batch of pages in a single hypercall.  The batch page
 * (b->page) holds the entry array; num_pages entries are submitted.
 * Returns 0 on success, 1 on failure.
 */
static int vmballoon_send_batched_lock(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = page_to_pfn(b->page);

	STATS_INC(b->stats.lock[is_2m_pages]);

	if (is_2m_pages)
		status = VMWARE_BALLOON_CMD(BATCHED_2M_LOCK, pfn, num_pages,
				*target);
	else
		status = VMWARE_BALLOON_CMD(BATCHED_LOCK, pfn, num_pages,
				*target);

	if (vmballoon_check_status(b, status))
		return 0;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.lock_fail[is_2m_pages]);
	return 1;
}

D
Dmitry Torokhov 已提交
477 478 479 480
/*
 * Notify the host that guest intends to release given page back into
 * the pool of available (to the guest) pages.
 */
static bool vmballoon_send_unlock_page(struct vmballoon *b, unsigned long pfn,
							unsigned int *target)
{
	unsigned long status, dummy = 0;
	u32 pfn32;

	/* the single-page protocol only carries 32-bit PFNs */
	pfn32 = (u32)pfn;
	if (pfn32 != pfn)
		return false;

	STATS_INC(b->stats.unlock[false]);

	status = VMWARE_BALLOON_CMD(UNLOCK, pfn, dummy, *target);
	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail[false]);
	return false;
}

502
/*
 * Unlock a whole batch of pages in a single hypercall, mirroring
 * vmballoon_send_batched_lock().  Returns true on success.
 */
static bool vmballoon_send_batched_unlock(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	unsigned long status;
	unsigned long pfn = page_to_pfn(b->page);

	STATS_INC(b->stats.unlock[is_2m_pages]);

	if (is_2m_pages)
		status = VMWARE_BALLOON_CMD(BATCHED_2M_UNLOCK, pfn, num_pages,
				*target);
	else
		status = VMWARE_BALLOON_CMD(BATCHED_UNLOCK, pfn, num_pages,
				*target);

	if (vmballoon_check_status(b, status))
		return true;

	pr_debug("%s - batch ppn %lx, hv returns %ld\n", __func__, pfn, status);
	STATS_INC(b->stats.unlock_fail[is_2m_pages]);
	return false;
}

525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540
/* Allocate one balloon page of the requested size (4k or 2m). */
static struct page *vmballoon_alloc_page(gfp_t flags, bool is_2m_page)
{
	unsigned int order = is_2m_page ? VMW_BALLOON_2M_SHIFT : 0;

	return alloc_pages(flags, order);
}

/* Release one balloon page of the requested size (4k or 2m). */
static void vmballoon_free_page(struct page *page, bool is_2m_page)
{
	__free_pages(page, is_2m_page ? VMW_BALLOON_2M_SHIFT : 0);
}

D
Dmitry Torokhov 已提交
541 542 543 544 545 546 547 548 549
/*
 * Quickly release all pages allocated for the balloon. This function is
 * called when host decides to "reset" balloon for one reason or another.
 * Unlike normal "deflate" we do not (shall not) notify host of the pages
 * being released.
 */
static void vmballoon_pop(struct vmballoon *b)
{
	struct page *page, *next;
	unsigned is_2m_pages;

	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];
		u16 size_per_page = vmballoon_page_size(is_2m_pages);

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			list_del(&page->lru);
			vmballoon_free_page(page, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);
			b->size -= size_per_page;
			cond_resched();
		}
	}

	/* tear down batching state set up by vmballoon_init_batching() */
	if (b->batch_page) {
		vunmap(b->batch_page);
		b->batch_page = NULL;
	}

	if (b->page) {
		__free_page(b->page);
		b->page = NULL;
	}
}

/*
 * Notify the host of a ballooned page. If host rejects the page put it on the
 * refuse list, those refused page are then released at the end of the
 * inflation cycle.
 *
 * Returns 0 when the page was accepted and added to the balloon, -EIO
 * otherwise (the page is then either freed or parked on the refuse list).
 */
static int vmballoon_lock_page(struct vmballoon *b, unsigned int num_pages,
				bool is_2m_pages, unsigned int *target)
{
	int locked, hv_status;
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never happen as 2m pages support implies batching */

	locked = vmballoon_send_lock_page(b, page_to_pfn(page), &hv_status,
								target);
	if (locked > 0) {
		STATS_INC(b->stats.refused_alloc[false]);

		/* on fatal host errors, give the page straight back */
		if (hv_status == VMW_BALLOON_ERROR_RESET ||
				hv_status == VMW_BALLOON_ERROR_PPN_NOTNEEDED) {
			vmballoon_free_page(page, false);
			return -EIO;
		}

		/*
		 * Place page on the list of non-balloonable pages
		 * and retry allocation, unless we already accumulated
		 * too many of them, in which case take a breather.
		 */
		if (page_size->n_refused_pages < VMW_BALLOON_MAX_REFUSED) {
			page_size->n_refused_pages++;
			list_add(&page->lru, &page_size->refused_pages);
		} else {
			vmballoon_free_page(page, false);
		}
		return -EIO;
	}

	/* track allocated page */
	list_add(&page->lru, &page_size->pages);

	/* update balloon size */
	b->size++;

	return 0;
}

626
/*
 * Lock a batch of pages.  If the batch hypercall itself fails, all pages
 * are returned to the kernel.  Otherwise each per-page status decides
 * whether the page joins the balloon, is parked on the refused list, or
 * is freed.
 */
static int vmballoon_lock_batched_page(struct vmballoon *b,
		unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
	int locked, i;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	locked = vmballoon_send_batched_lock(b, num_pages, is_2m_pages,
			target);
	if (locked > 0) {
		/* whole batch failed: free every submitted page */
		for (i = 0; i < num_pages; i++) {
			u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
			struct page *p = pfn_to_page(pa >> PAGE_SHIFT);

			vmballoon_free_page(p, is_2m_pages);
		}

		return -EIO;
	}

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = vmballoon_batch_get_status(b->batch_page, i);

		switch (locked) {
		case VMW_BALLOON_SUCCESS:
			list_add(&p->lru, &page_size->pages);
			b->size += size_per_page;
			break;
		case VMW_BALLOON_ERROR_PPN_PINNED:
		case VMW_BALLOON_ERROR_PPN_INVALID:
			if (page_size->n_refused_pages
					< VMW_BALLOON_MAX_REFUSED) {
				list_add(&p->lru, &page_size->refused_pages);
				page_size->n_refused_pages++;
				break;
			}
			/* Fallthrough */
		case VMW_BALLOON_ERROR_RESET:
		case VMW_BALLOON_ERROR_PPN_NOTNEEDED:
			vmballoon_free_page(p, is_2m_pages);
			break;
		default:
			/* This should never happen */
			WARN_ON_ONCE(true);
		}
	}

	return 0;
}

D
Dmitry Torokhov 已提交
680 681 682 683 684
/*
 * Release the page allocated for the balloon. Note that we first notify
 * the host so it can make sure the page will be available for the guest
 * to use, if needed.
 */
static int vmballoon_unlock_page(struct vmballoon *b, unsigned int num_pages,
		bool is_2m_pages, unsigned int *target)
{
	struct page *page = b->page;
	struct vmballoon_page_size *page_size = &b->page_sizes[false];

	/* is_2m_pages can never happen as 2m pages support implies batching */

	if (!vmballoon_send_unlock_page(b, page_to_pfn(page), target)) {
		/* host refused the unlock: the page stays in the balloon */
		list_add(&page->lru, &page_size->pages);
		return -EIO;
	}

	/* deallocate page */
	vmballoon_free_page(page, false);
	STATS_INC(b->stats.free[false]);

	/* update balloon size */
	b->size--;

	return 0;
}

708
/*
 * Unlock a batch of pages.  Pages the hypervisor failed to unlock stay
 * owned by the balloon; successfully unlocked pages are freed back to
 * the kernel.  Returns 0 on success, -EIO if the hypercall failed.
 */
static int vmballoon_unlock_batched_page(struct vmballoon *b,
				unsigned int num_pages, bool is_2m_pages,
				unsigned int *target)
{
	int locked, i, ret = 0;
	bool hv_success;
	u16 size_per_page = vmballoon_page_size(is_2m_pages);

	hv_success = vmballoon_send_batched_unlock(b, num_pages, is_2m_pages,
			target);
	if (!hv_success)
		ret = -EIO;

	for (i = 0; i < num_pages; i++) {
		u64 pa = vmballoon_batch_get_pa(b->batch_page, i);
		struct page *p = pfn_to_page(pa >> PAGE_SHIFT);
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		locked = vmballoon_batch_get_status(b->batch_page, i);
		if (!hv_success || locked != VMW_BALLOON_SUCCESS) {
			/*
			 * That page wasn't successfully unlocked by the
			 * hypervisor, re-add it to the list of pages owned by
			 * the balloon driver.
			 */
			list_add(&p->lru, &page_size->pages);
		} else {
			/* deallocate page */
			vmballoon_free_page(p, is_2m_pages);
			STATS_INC(b->stats.free[is_2m_pages]);

			/* update balloon size */
			b->size -= size_per_page;
		}
	}

	return ret;
}

D
Dmitry Torokhov 已提交
748 749 750 751
/*
 * Release pages that were allocated while attempting to inflate the
 * balloon but were refused by the host for one reason or another.
 */
static void vmballoon_release_refused_pages(struct vmballoon *b,
		bool is_2m_pages)
{
	struct page *page, *next;
	struct vmballoon_page_size *page_size =
			&b->page_sizes[is_2m_pages];

	list_for_each_entry_safe(page, next, &page_size->refused_pages, lru) {
		list_del(&page->lru);
		vmballoon_free_page(page, is_2m_pages);
		STATS_INC(b->stats.refused_free[is_2m_pages]);
	}

	page_size->n_refused_pages = 0;
}

768 769 770 771 772 773 774 775 776 777 778 779
/* Basic protocol: remember the single page to submit to the host. */
static void vmballoon_add_page(struct vmballoon *b, int idx, struct page *p)
{
	b->page = p;
}

/* Batched protocol: record the page's physical address in the batch array. */
static void vmballoon_add_batched_page(struct vmballoon *b, int idx,
				struct page *p)
{
	vmballoon_batch_set_pa(b->batch_page, idx,
			(u64)page_to_pfn(p) << PAGE_SHIFT);
}

D
Dmitry Torokhov 已提交
780 781 782 783 784 785 786
/*
 * Inflate the balloon towards its target size. Note that we try to limit
 * the rate of allocation to make sure we are not choking the rest of the
 * system.
 */
static void vmballoon_inflate(struct vmballoon *b)
{
787
	unsigned rate;
D
Dmitry Torokhov 已提交
788
	unsigned int allocations = 0;
789
	unsigned int num_pages = 0;
D
Dmitry Torokhov 已提交
790
	int error = 0;
791
	gfp_t flags = VMW_PAGE_ALLOC_NOSLEEP;
792
	bool is_2m_pages;
D
Dmitry Torokhov 已提交
793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/*
	 * First try NOSLEEP page allocations to inflate balloon.
	 *
	 * If we do not throttle nosleep allocations, we can drain all
	 * free pages in the guest quickly (if the balloon target is high).
	 * As a side-effect, draining free pages helps to inform (force)
	 * the guest to start swapping if balloon target is not met yet,
	 * which is a desired behavior. However, balloon driver can consume
	 * all available CPU cycles if too many pages are allocated in a
	 * second. Therefore, we throttle nosleep allocations even when
	 * the guest is not under memory pressure. OTOH, if we have already
	 * predicted that the guest is under memory pressure, then we
	 * slowdown page allocations considerably.
	 */

	/*
	 * Start with no sleep allocation rate which may be higher
	 * than sleeping allocation rate.
	 */
815 816 817 818 819 820 821 822
	if (b->slow_allocation_cycles) {
		rate = b->rate_alloc;
		is_2m_pages = false;
	} else {
		rate = UINT_MAX;
		is_2m_pages =
			b->supported_page_sizes == VMW_BALLOON_NUM_PAGE_SIZES;
	}
D
Dmitry Torokhov 已提交
823

824
	pr_debug("%s - goal: %d, no-sleep rate: %u, sleep rate: %d\n",
825
		 __func__, b->target - b->size, rate, b->rate_alloc);
D
Dmitry Torokhov 已提交
826

827
	while (!b->reset_required &&
828 829
		b->size + num_pages * vmballoon_page_size(is_2m_pages)
		< b->target) {
830
		struct page *page;
D
Dmitry Torokhov 已提交
831

832
		if (flags == VMW_PAGE_ALLOC_NOSLEEP)
833
			STATS_INC(b->stats.alloc[is_2m_pages]);
834 835
		else
			STATS_INC(b->stats.sleep_alloc);
D
Dmitry Torokhov 已提交
836

837
		page = vmballoon_alloc_page(flags, is_2m_pages);
838
		if (!page) {
839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854
			STATS_INC(b->stats.alloc_fail[is_2m_pages]);

			if (is_2m_pages) {
				b->ops->lock(b, num_pages, true, &b->target);

				/*
				 * ignore errors from locking as we now switch
				 * to 4k pages and we might get different
				 * errors.
				 */

				num_pages = 0;
				is_2m_pages = false;
				continue;
			}

855
			if (flags == VMW_PAGE_ALLOC_CANSLEEP) {
D
Dmitry Torokhov 已提交
856 857 858 859 860 861 862
				/*
				 * CANSLEEP page allocation failed, so guest
				 * is under severe memory pressure. Quickly
				 * decrease allocation rate.
				 */
				b->rate_alloc = max(b->rate_alloc / 2,
						    VMW_BALLOON_RATE_ALLOC_MIN);
863
				STATS_INC(b->stats.sleep_alloc_fail);
D
Dmitry Torokhov 已提交
864 865 866 867 868 869 870 871 872 873 874 875 876
				break;
			}

			/*
			 * NOSLEEP page allocation failed, so the guest is
			 * under memory pressure. Let us slow down page
			 * allocations for next few cycles so that the guest
			 * gets out of memory pressure. Also, if we already
			 * allocated b->rate_alloc pages, let's pause,
			 * otherwise switch to sleeping allocations.
			 */
			b->slow_allocation_cycles = VMW_BALLOON_SLOW_CYCLES;

877
			if (allocations >= b->rate_alloc)
D
Dmitry Torokhov 已提交
878 879
				break;

880
			flags = VMW_PAGE_ALLOC_CANSLEEP;
D
Dmitry Torokhov 已提交
881 882
			/* Lower rate for sleeping allocations. */
			rate = b->rate_alloc;
883
			continue;
D
Dmitry Torokhov 已提交
884 885
		}

886 887
		b->ops->add_page(b, num_pages++, page);
		if (num_pages == b->batch_max_pages) {
888 889
			error = b->ops->lock(b, num_pages, is_2m_pages,
					&b->target);
890 891 892 893
			num_pages = 0;
			if (error)
				break;
		}
894

895
		cond_resched();
D
Dmitry Torokhov 已提交
896

897
		if (allocations >= rate) {
D
Dmitry Torokhov 已提交
898 899 900 901 902
			/* We allocated enough pages, let's take a break. */
			break;
		}
	}

903
	if (num_pages > 0)
904
		b->ops->lock(b, num_pages, is_2m_pages, &b->target);
905

D
Dmitry Torokhov 已提交
906 907 908 909
	/*
	 * We reached our goal without failures so try increasing
	 * allocation rate.
	 */
910 911
	if (error == 0 && allocations >= b->rate_alloc) {
		unsigned int mult = allocations / b->rate_alloc;
D
Dmitry Torokhov 已提交
912 913 914 915 916 917

		b->rate_alloc =
			min(b->rate_alloc + mult * VMW_BALLOON_RATE_ALLOC_INC,
			    VMW_BALLOON_RATE_ALLOC_MAX);
	}

918 919
	vmballoon_release_refused_pages(b, true);
	vmballoon_release_refused_pages(b, false);
D
Dmitry Torokhov 已提交
920 921 922 923 924 925 926
}

/*
 * Decrease the size of the balloon allowing guest to use more memory.
 */
static void vmballoon_deflate(struct vmballoon *b)
{
	unsigned is_2m_pages;

	pr_debug("%s - size: %d, target %d\n", __func__, b->size, b->target);

	/* free pages to reach target */
	for (is_2m_pages = 0; is_2m_pages < b->supported_page_sizes;
			is_2m_pages++) {
		struct page *page, *next;
		unsigned int num_pages = 0;
		struct vmballoon_page_size *page_size =
				&b->page_sizes[is_2m_pages];

		list_for_each_entry_safe(page, next, &page_size->pages, lru) {
			/* stop once we are close enough to the target */
			if (b->reset_required ||
				(b->target > 0 &&
					b->size - num_pages
					* vmballoon_page_size(is_2m_pages)
				< b->target + vmballoon_page_size(true)))
				break;

			list_del(&page->lru);
			b->ops->add_page(b, num_pages++, page);

			if (num_pages == b->batch_max_pages) {
				int error;

				error = b->ops->unlock(b, num_pages,
						is_2m_pages, &b->target);
				num_pages = 0;
				if (error)
					return;
			}

			cond_resched();
		}

		/* submit any partially filled batch */
		if (num_pages > 0)
			b->ops->unlock(b, num_pages, is_2m_pages, &b->target);
	}
}

/* Protocol without batching: one page per hypercall. */
static const struct vmballoon_ops vmballoon_basic_ops = {
	.add_page = vmballoon_add_page,
	.lock = vmballoon_lock_page,
	.unlock = vmballoon_unlock_page
};

/* Batched protocol: an array of pages per hypercall. */
static const struct vmballoon_ops vmballoon_batched_ops = {
	.add_page = vmballoon_add_batched_page,
	.lock = vmballoon_lock_batched_page,
	.unlock = vmballoon_unlock_batched_page
};

static bool vmballoon_init_batching(struct vmballoon *b)
{
	b->page = alloc_page(VMW_PAGE_ALLOC_NOSLEEP);
	if (!b->page)
		return false;

	b->batch_page = vmap(&b->page, 1, VM_MAP, PAGE_KERNEL);
	if (!b->batch_page) {
		__free_page(b->page);
		return false;
	}

	return true;
}

/*
 * Perform standard reset sequence by popping the balloon (in case it
 * is not empty) and then restarting protocol. This operation normally
 * happens when host responds with VMW_BALLOON_ERROR_RESET to a command.
 */
static void vmballoon_reset(struct vmballoon *b)
{
	/* free all pages, skipping monitor unlock */
	vmballoon_pop(b);

	/* re-negotiate capabilities; if the host refuses, stay in reset state */
	if (!vmballoon_send_start(b, VMW_BALLOON_CAPABILITIES))
		return;

	/* pick batched or one-page-at-a-time ops based on what was granted */
	if ((b->capabilities & VMW_BALLOON_BATCHED_CMDS) != 0) {
		b->ops = &vmballoon_batched_ops;
		b->batch_max_pages = VMW_BALLOON_BATCH_MAX_PAGES;
		if (!vmballoon_init_batching(b)) {
			/*
			 * We failed to initialize batching, inform the monitor
			 * about it by sending a null capability.
			 *
			 * The guest will retry in one second.
			 */
			vmballoon_send_start(b, 0);
			return;
		}
	} else if ((b->capabilities & VMW_BALLOON_BASIC_CMDS) != 0) {
		b->ops = &vmballoon_basic_ops;
		b->batch_max_pages = 1;
	}

	b->reset_required = false;
	if (!vmballoon_send_guest_id(b))
		pr_err("failed to send guest ID to the host\n");
}

/*
 * Balloon work function: reset protocol, if needed, get the new size and
 * adjust balloon as needed. Repeat in 1 sec.
 */
static void vmballoon_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct vmballoon *b = container_of(dwork, struct vmballoon, dwork);
	unsigned int target;

	STATS_INC(b->stats.timer);

	if (b->reset_required)
		vmballoon_reset(b);

	/* count down ticks remaining in the slow-allocation back-off */
	if (b->slow_allocation_cycles > 0)
		b->slow_allocation_cycles--;

	if (!b->reset_required && vmballoon_send_get_target(b, &target)) {
		/* update target, adjust size */
		b->target = target;

		/*
		 * Inflate when below target; deflate only when the overshoot
		 * exceeds one large-page worth of slack (or target is zero),
		 * so small fluctuations don't cause thrashing.
		 */
		if (b->size < target)
			vmballoon_inflate(b);
		else if (target == 0 ||
				b->size > target + vmballoon_page_size(true))
			vmballoon_deflate(b);
	}

	/*
	 * We are using a freezable workqueue so that balloon operations are
	 * stopped while the system transitions to/from sleep/hibernation.
	 */
	queue_delayed_work(system_freezable_wq,
			   dwork, round_jiffies_relative(HZ));
}

/*
 * DEBUGFS Interface
 */
#ifdef CONFIG_DEBUG_FS

/*
 * seq_file show routine for the "vmmemctl" debugfs entry: dumps
 * capabilities, current/target size, allocation rate and all counters.
 * Stats arrays are indexed by page size: [true] = 2M pages, [false] = 4K.
 */
static int vmballoon_debug_show(struct seq_file *f, void *offset)
{
	struct vmballoon *b = f->private;
	struct vmballoon_stats *stats = &b->stats;

	/* format capabilities info */
	seq_printf(f,
		   "balloon capabilities:   %#4x\n"
		   "used capabilities:      %#4lx\n"
		   "is resetting:           %c\n",
		   VMW_BALLOON_CAPABILITIES, b->capabilities,
		   b->reset_required ? 'y' : 'n');

	/* format size info */
	seq_printf(f,
		   "target:             %8d pages\n"
		   "current:            %8d pages\n",
		   b->target, b->size);

	/* format rate info */
	seq_printf(f,
		   "rateSleepAlloc:     %8d pages/sec\n",
		   b->rate_alloc);

	/* raw counters; each "(%4u failed)" pairs a count with its failures */
	seq_printf(f,
		   "\n"
		   "timer:              %8u\n"
		   "start:              %8u (%4u failed)\n"
		   "guestType:          %8u (%4u failed)\n"
		   "2m-lock:            %8u (%4u failed)\n"
		   "lock:               %8u (%4u failed)\n"
		   "2m-unlock:          %8u (%4u failed)\n"
		   "unlock:             %8u (%4u failed)\n"
		   "target:             %8u (%4u failed)\n"
		   "prim2mAlloc:        %8u (%4u failed)\n"
		   "primNoSleepAlloc:   %8u (%4u failed)\n"
		   "primCanSleepAlloc:  %8u (%4u failed)\n"
		   "prim2mFree:         %8u\n"
		   "primFree:           %8u\n"
		   "err2mAlloc:         %8u\n"
		   "errAlloc:           %8u\n"
		   "err2mFree:          %8u\n"
		   "errFree:            %8u\n",
		   stats->timer,
		   stats->start, stats->start_fail,
		   stats->guest_type, stats->guest_type_fail,
		   stats->lock[true],  stats->lock_fail[true],
		   stats->lock[false],  stats->lock_fail[false],
		   stats->unlock[true], stats->unlock_fail[true],
		   stats->unlock[false], stats->unlock_fail[false],
		   stats->target, stats->target_fail,
		   stats->alloc[true], stats->alloc_fail[true],
		   stats->alloc[false], stats->alloc_fail[false],
		   stats->sleep_alloc, stats->sleep_alloc_fail,
		   stats->free[true],
		   stats->free[false],
		   stats->refused_alloc[true], stats->refused_alloc[false],
		   stats->refused_free[true], stats->refused_free[false]);

	return 0;
}

/* Open handler: wire the show routine to the balloon stashed in i_private. */
static int vmballoon_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vmballoon_debug_show, inode->i_private);
}

/* file_operations for the debugfs entry; standard single-record seq_file. */
static const struct file_operations vmballoon_debug_fops = {
	.owner		= THIS_MODULE,
	.open		= vmballoon_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * Create the "vmmemctl" debugfs entry exposing balloon state and stats.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * Note: with debugfs compiled in, debugfs_create_file() reports failure
 * by returning NULL rather than an ERR_PTR, so a bare IS_ERR() check
 * would silently miss allocation failures.  Treat NULL as -ENOMEM.
 */
static int __init vmballoon_debugfs_init(struct vmballoon *b)
{
	int error;

	b->dbg_entry = debugfs_create_file("vmmemctl", S_IRUGO, NULL, b,
					   &vmballoon_debug_fops);
	if (IS_ERR_OR_NULL(b->dbg_entry)) {
		error = b->dbg_entry ? PTR_ERR(b->dbg_entry) : -ENOMEM;
		pr_err("failed to create debugfs entry, error: %d\n", error);
		return error;
	}

	return 0;
}

/* Remove the debugfs entry; debugfs_remove() tolerates NULL/error values. */
static void __exit vmballoon_debugfs_exit(struct vmballoon *b)
{
	debugfs_remove(b->dbg_entry);
}

#else

/* CONFIG_DEBUG_FS=n stub: always succeeds. */
static inline int vmballoon_debugfs_init(struct vmballoon *b)
{
	return 0;
}

/* CONFIG_DEBUG_FS=n stub: nothing to tear down. */
static inline void vmballoon_debugfs_exit(struct vmballoon *b)
{
}

#endif	/* CONFIG_DEBUG_FS */

/*
 * Module init: verify we run under VMware, set up balloon state, and
 * kick off the periodic worker.  The first worker run performs the
 * actual protocol start because reset_required is set here.
 */
static int __init vmballoon_init(void)
{
	int error;
	unsigned is_2m_pages;
	/*
	 * Check if we are running on VMware's hypervisor and bail out
	 * if we are not.
	 */
	if (x86_hyper != &x86_hyper_vmware)
		return -ENODEV;

	/* one page list + one refused-page list per supported page size */
	for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES;
			is_2m_pages++) {
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].pages);
		INIT_LIST_HEAD(&balloon.page_sizes[is_2m_pages].refused_pages);
	}

	/* initialize rates */
	balloon.rate_alloc = VMW_BALLOON_RATE_ALLOC_MAX;

	INIT_DELAYED_WORK(&balloon.dwork, vmballoon_work);

	error = vmballoon_debugfs_init(&balloon);
	if (error)
		return error;

	balloon.batch_page = NULL;
	balloon.page = NULL;
	/* force the first worker run through vmballoon_reset() */
	balloon.reset_required = true;

	/* schedule immediately; subsequent runs re-arm themselves at ~1s */
	queue_delayed_work(system_freezable_wq, &balloon.dwork, 0);

	return 0;
}
module_init(vmballoon_init);

/*
 * Module exit: stop the worker first so nothing races the teardown,
 * then remove debugfs and return all ballooned memory to the guest.
 */
static void __exit vmballoon_exit(void)
{
	cancel_delayed_work_sync(&balloon.dwork);

	vmballoon_debugfs_exit(&balloon);

	/*
	 * Deallocate all reserved memory, and reset connection with monitor.
	 * Reset connection before deallocating memory to avoid potential for
	 * additional spurious resets from guest touching deallocated pages.
	 */
	vmballoon_send_start(&balloon, 0);
	vmballoon_pop(&balloon);
}
module_exit(vmballoon_exit);