/*
 * Frontswap frontend
 *
 * This code provides the generic "frontend" layer to call a matching
 * "backend" driver implementation of frontswap.  See
 * Documentation/vm/frontswap.txt for more information.
 *
 * Copyright (C) 2009-2012 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/security.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>

/*
 * frontswap_ops is set by frontswap_register_ops to contain the pointers
 * to the frontswap "backend" implementation functions.
 */
static struct frontswap_ops *frontswap_ops __read_mostly;

/*
 * If enabled, frontswap_store will return failure even on success.  As
 * a result, the swap subsystem will always write the page to swap, in
 * effect converting frontswap into a writethrough cache.  In this mode,
 * there is no direct reduction in swap writes, but a frontswap backend
 * can unilaterally "reclaim" any pages in use with no data loss, thus
 * providing increased control over maximum memory usage due to frontswap.
 */
static bool frontswap_writethrough_enabled __read_mostly;

/*
 * If enabled, the underlying tmem implementation is capable of doing
 * exclusive gets, so frontswap_load, on a successful tmem_get must
 * mark the page as no longer in frontswap AND mark it dirty.
 */
static bool frontswap_tmem_exclusive_gets_enabled __read_mostly;

#ifdef CONFIG_DEBUG_FS
/*
 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
 * properly configured).  These are for information only so are not protected
 * against increment races.
 */
static u64 frontswap_loads;
static u64 frontswap_succ_stores;
static u64 frontswap_failed_stores;
static u64 frontswap_invalidates;

static inline void inc_frontswap_loads(void) {
	frontswap_loads++;
}
static inline void inc_frontswap_succ_stores(void) {
	frontswap_succ_stores++;
}
static inline void inc_frontswap_failed_stores(void) {
	frontswap_failed_stores++;
}
static inline void inc_frontswap_invalidates(void) {
	frontswap_invalidates++;
}
#else
static inline void inc_frontswap_loads(void) { }
static inline void inc_frontswap_succ_stores(void) { }
static inline void inc_frontswap_failed_stores(void) { }
static inline void inc_frontswap_invalidates(void) { }
#endif

/*
 * Due to the asynchronous nature of the backends loading potentially
 * _after_ the swap system has been activated, we have chokepoints
 * on all frontswap functions to not call the backend until the backend
 * has registered.
 *
 * Specifically when no backend is registered (nobody called
 * frontswap_register_ops) all calls to frontswap_init (which is done via
 * swapon -> enable_swap_info -> frontswap_init) are registered and remembered
 * (via the setting of need_init bitmap) but fail to create tmem_pools. When a
 * backend registers with frontswap at some later point the previous
 * calls to frontswap_init are executed (by iterating over the need_init
 * bitmap) to create tmem_pools and set the respective poolids. All of that is
 * guarded by us using atomic bit operations on the 'need_init' bitmap.
 *
 * This does not guard us against the user deciding to call swapoff right
 * as we are calling the backend to initialize (so swapon is in action).
 * Fortunately for us, the swapon_mutex has been taken by the callee so we
 * are OK. The other scenario where calls to frontswap_store (called via
 * swap_writepage) race with frontswap_invalidate_area (called via
 * swapoff) is again guarded by the swap subsystem.
 *
 * While no backend is registered all calls to frontswap_[store|load|
 * invalidate_area|invalidate_page] are ignored or fail.
 *
 * The time between the backend being registered and the swap file system
 * calling the backend (via the frontswap_* functions) is indeterminate as
 * frontswap_ops is not atomic_t (or a value guarded by a spinlock).
 * That is OK as we are comfortable missing some of these calls to the newly
 * registered backend.
 *
 * Obviously the opposite (unloading the backend) must be done after all
 * the frontswap_[store|load|invalidate_area|invalidate_page] start
 * ignoring or failing the requests - at which point frontswap_ops
 * would have to be made in some fashion atomic.
 */
static DECLARE_BITMAP(need_init, MAX_SWAPFILES);

/*
 * Register operations for frontswap, returning the previous ops and
 * thus allowing detection of multiple backends and possible nesting.
 */
struct frontswap_ops *frontswap_register_ops(struct frontswap_ops *ops)
{
	struct frontswap_ops *old = frontswap_ops;
	int i;

	for (i = 0; i < MAX_SWAPFILES; i++) {
		if (test_and_clear_bit(i, need_init)) {
			struct swap_info_struct *sis = swap_info[i];
			/* __frontswap_init _should_ have set it! */
			if (!sis->frontswap_map)
				return ERR_PTR(-EINVAL);
			ops->init(i);
		}
	}
	/*
	 * We MUST have frontswap_ops set _after_ the frontswap_init's
	 * have been called. Otherwise __frontswap_store might fail. Hence
	 * the barrier to make sure the compiler does not re-order us.
	 */
	barrier();
	frontswap_ops = ops;
	return old;
}
EXPORT_SYMBOL(frontswap_register_ops);
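
/*
 * Example (illustrative sketch only, not part of this file): a backend
 * module would fill in the five callbacks this file invokes and register
 * them once its own state is ready.  The example_* names are
 * hypothetical; the struct layout and return conventions are taken from
 * the call sites below.
 *
 *	static struct frontswap_ops example_ops = {
 *		.init		 = example_init,	// called once per swap device
 *		.store		 = example_store,	// return 0 on success, -1 to reject
 *		.load		 = example_load,	// return 0 if the page was filled
 *		.invalidate_page = example_invalidate_page,
 *		.invalidate_area = example_invalidate_area,
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		struct frontswap_ops *old = frontswap_register_ops(&example_ops);
 *
 *		if (IS_ERR(old))
 *			return PTR_ERR(old);
 *		// a non-NULL 'old' means another backend was registered first
 *		return 0;
 *	}
 */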

/*
 * Enable/disable frontswap writethrough (see above).
 */
void frontswap_writethrough(bool enable)
{
	frontswap_writethrough_enabled = enable;
}
EXPORT_SYMBOL(frontswap_writethrough);
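
/*
 * Example (hypothetical backend code): a backend whose stored copies are
 * expendable, e.g. one that may drop pages under memory pressure, can
 * opt in to writethrough from its own initialization path:
 *
 *	frontswap_writethrough(true);	// every store also hits the swap device
 */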

/*
 * Enable/disable frontswap exclusive gets (see above).
 */
void frontswap_tmem_exclusive_gets(bool enable)
{
	frontswap_tmem_exclusive_gets_enabled = enable;
}
EXPORT_SYMBOL(frontswap_tmem_exclusive_gets);
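
/*
 * Example (hypothetical backend code): a tmem implementation whose gets
 * are destructive would announce that during its own initialization:
 *
 *	frontswap_tmem_exclusive_gets(true);	// a load consumes the stored copy
 */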

/*
 * Called when a swap device is swapon'd.
 */
void __frontswap_init(unsigned type, unsigned long *map)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);

	/*
	 * p->frontswap is a bitmap that we MUST have to figure out which
	 * pages have gone into frontswap. Without it there is no point in
	 * continuing.
	 */
	if (WARN_ON(!map))
		return;
	/*
	 * Regardless of whether the frontswap backend has been loaded
	 * before this function or will be loaded later, we _MUST_ have the
	 * p->frontswap set to something valid to work properly.
	 */
	frontswap_map_set(sis, map);
	if (frontswap_ops)
		frontswap_ops->init(type);
	else {
		BUG_ON(type >= MAX_SWAPFILES);
		set_bit(type, need_init);
	}
}
EXPORT_SYMBOL(__frontswap_init);

bool __frontswap_test(struct swap_info_struct *sis,
				pgoff_t offset)
{
	bool ret = false;

	if (frontswap_ops && sis->frontswap_map)
		ret = test_bit(offset, sis->frontswap_map);
	return ret;
}
EXPORT_SYMBOL(__frontswap_test);

static inline void __frontswap_clear(struct swap_info_struct *sis,
				pgoff_t offset)
{
	clear_bit(offset, sis->frontswap_map);
	atomic_dec(&sis->frontswap_pages);
}

/*
 * "Store" data from a page to frontswap and associate it with the page's
 * swaptype and offset.  Page must be locked and in the swap cache.
 * If frontswap already contains a page with matching swaptype and
 * offset, the frontswap implementation may either overwrite the data and
 * return success or invalidate the page from frontswap and return failure.
 */
int __frontswap_store(struct page *page)
{
	int ret = -1, dup = 0;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	/*
	 * Return if no backend is registered.
	 * Don't need to inc frontswap_failed_stores here.
	 */
	if (!frontswap_ops)
		return ret;

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	if (__frontswap_test(sis, offset))
		dup = 1;
	ret = frontswap_ops->store(type, offset, page);
	if (ret == 0) {
		set_bit(offset, sis->frontswap_map);
		inc_frontswap_succ_stores();
		if (!dup)
			atomic_inc(&sis->frontswap_pages);
	} else {
		/*
		 * A failed dup always results in an automatic invalidate of
		 * the (older) page from frontswap.
		 */
		inc_frontswap_failed_stores();
		if (dup)
			__frontswap_clear(sis, offset);
	}
	if (frontswap_writethrough_enabled)
		/* report failure so swap also writes to swap device */
		ret = -1;
	return ret;
}
EXPORT_SYMBOL(__frontswap_store);

/*
 * "Get" data from frontswap associated with swaptype and offset that were
 * specified when the data was put to frontswap and use it to fill the
 * specified page with data. Page must be locked and in the swap cache.
 */
int __frontswap_load(struct page *page)
{
	int ret = -1;
	swp_entry_t entry = { .val = page_private(page), };
	int type = swp_type(entry);
	struct swap_info_struct *sis = swap_info[type];
	pgoff_t offset = swp_offset(entry);

	BUG_ON(!PageLocked(page));
	BUG_ON(sis == NULL);
	/*
	 * __frontswap_test() will check whether there is a backend registered.
	 */
	if (__frontswap_test(sis, offset))
		ret = frontswap_ops->load(type, offset, page);
	if (ret == 0) {
		inc_frontswap_loads();
		if (frontswap_tmem_exclusive_gets_enabled) {
			SetPageDirty(page);
			__frontswap_clear(sis, offset);
		}
	}
	return ret;
}
EXPORT_SYMBOL(__frontswap_load);

/*
 * Invalidate any data from frontswap associated with the specified swaptype
 * and offset so that a subsequent "get" will fail.
 */
void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
{
	struct swap_info_struct *sis = swap_info[type];

	BUG_ON(sis == NULL);
	/*
	 * __frontswap_test() will check whether there is a backend registered.
	 */
	if (__frontswap_test(sis, offset)) {
		frontswap_ops->invalidate_page(type, offset);
		__frontswap_clear(sis, offset);
		inc_frontswap_invalidates();
	}
}
EXPORT_SYMBOL(__frontswap_invalidate_page);

/*
 * Invalidate all data from frontswap associated with all offsets for the
 * specified swaptype.
 */
void __frontswap_invalidate_area(unsigned type)
{
	struct swap_info_struct *sis = swap_info[type];

	if (frontswap_ops) {
		BUG_ON(sis == NULL);
		if (sis->frontswap_map == NULL)
			return;
		frontswap_ops->invalidate_area(type);
		atomic_set(&sis->frontswap_pages, 0);
		/* frontswap_map holds sis->max bits; clear them all */
		bitmap_zero(sis->frontswap_map, sis->max);
	}
	clear_bit(type, need_init);
}
EXPORT_SYMBOL(__frontswap_invalidate_area);

static unsigned long __frontswap_curr_pages(void)
{
	int type;
	unsigned long totalpages = 0;
	struct swap_info_struct *si = NULL;

	assert_spin_locked(&swap_lock);
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		totalpages += atomic_read(&si->frontswap_pages);
	}
	return totalpages;
}

static int __frontswap_unuse_pages(unsigned long total, unsigned long *unused,
					int *swapid)
{
	int ret = -EINVAL;
	struct swap_info_struct *si = NULL;
	int si_frontswap_pages;
	unsigned long total_pages_to_unuse = total;
	unsigned long pages = 0, pages_to_unuse = 0;
	int type;

	assert_spin_locked(&swap_lock);
	for (type = swap_list.head; type >= 0; type = si->next) {
		si = swap_info[type];
		si_frontswap_pages = atomic_read(&si->frontswap_pages);
		if (total_pages_to_unuse < si_frontswap_pages) {
			pages = pages_to_unuse = total_pages_to_unuse;
		} else {
			pages = si_frontswap_pages;
			pages_to_unuse = 0; /* unuse all */
		}
		/* ensure there is enough RAM to fetch pages from frontswap */
		if (security_vm_enough_memory_mm(current->mm, pages)) {
			ret = -ENOMEM;
			continue;
		}
		vm_unacct_memory(pages);
		*unused = pages_to_unuse;
		*swapid = type;
		ret = 0;
		break;
	}

	return ret;
}

/*
 * Used to check if it's necessary and feasible to unuse pages.
 * Returns 1 when there is nothing to do, 0 when pages need to be
 * unused, or an error code when there is an error.
 */
static int __frontswap_shrink(unsigned long target_pages,
				unsigned long *pages_to_unuse,
				int *type)
{
	unsigned long total_pages = 0, total_pages_to_unuse;

	assert_spin_locked(&swap_lock);

	total_pages = __frontswap_curr_pages();
	if (total_pages <= target_pages) {
		/* Nothing to do */
		*pages_to_unuse = 0;
		return 1;
	}
	total_pages_to_unuse = total_pages - target_pages;
	return __frontswap_unuse_pages(total_pages_to_unuse, pages_to_unuse, type);
}

/*
 * Frontswap, like a true swap device, may unnecessarily retain pages
 * under certain circumstances; "shrink" frontswap is essentially a
 * "partial swapoff" and works by calling try_to_unuse to unuse enough
 * frontswap pages to -- subject to memory constraints -- reduce the
 * number of pages in frontswap to the number given in the parameter
 * target_pages.
 */
void frontswap_shrink(unsigned long target_pages)
{
	unsigned long pages_to_unuse = 0;
	int uninitialized_var(type), ret;

	/*
	 * we don't want to hold swap_lock while doing a very
	 * lengthy try_to_unuse, but swap_list may change
	 * so restart scan from swap_list.head each time
	 */
	spin_lock(&swap_lock);
	ret = __frontswap_shrink(target_pages, &pages_to_unuse, &type);
	spin_unlock(&swap_lock);
	if (ret == 0)
		try_to_unuse(type, true, pages_to_unuse);
	return;
}
EXPORT_SYMBOL(frontswap_shrink);
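
/*
 * Example (hypothetical backend code): a backend reacting to memory
 * pressure could ask frontswap to shed pages down to an arbitrary
 * target; the figure below is purely illustrative:
 *
 *	frontswap_shrink(1024);	// try to reduce frontswap to <= 1024 pages
 */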

/*
 * Count and return the number of frontswap pages across all
 * swap devices.  This is exported so that backend drivers can
 * determine current usage without reading debugfs.
 */
unsigned long frontswap_curr_pages(void)
{
	unsigned long totalpages = 0;

	spin_lock(&swap_lock);
	totalpages = __frontswap_curr_pages();
	spin_unlock(&swap_lock);

	return totalpages;
}
EXPORT_SYMBOL(frontswap_curr_pages);
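
/*
 * Example (hypothetical backend code): a backend could poll current
 * usage to decide when a shrink is worthwhile; pages_limit here is an
 * assumed backend-defined policy value:
 *
 *	if (frontswap_curr_pages() > pages_limit)
 *		frontswap_shrink(pages_limit);
 */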

static int __init init_frontswap(void)
{
#ifdef CONFIG_DEBUG_FS
	struct dentry *root = debugfs_create_dir("frontswap", NULL);
	if (root == NULL)
		return -ENXIO;
	debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
	debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
	debugfs_create_u64("failed_stores", S_IRUGO, root,
				&frontswap_failed_stores);
	debugfs_create_u64("invalidates", S_IRUGO,
				root, &frontswap_invalidates);
#endif
	return 0;
}

module_init(init_frontswap);