/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

25 26 27 28 29 30 31 32 33 34 35 36
#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);
#endif

#ifdef CONFIG_CLEANCACHE
/* "cleancache=0" on the module command line disables the cleancache backend. */
static bool cleancache __read_mostly = true;
module_param(cleancache, bool, S_IRUGO);
/* "selfballooning=0" disables driving unused memory back to Xen. */
static bool selfballooning __read_mostly = true;
module_param(selfballooning, bool, S_IRUGO);
#endif /* CONFIG_CLEANCACHE */

#ifdef CONFIG_FRONTSWAP
/* "frontswap=0" on the module command line disables the frontswap backend. */
static bool frontswap __read_mostly = true;
module_param(frontswap, bool, S_IRUGO);
#else /* CONFIG_FRONTSWAP */
#define frontswap (0)
#endif /* CONFIG_FRONTSWAP */

#ifdef CONFIG_XEN_SELFBALLOONING
/* "selfshrinking=0" disables the selfballoon driver's shrinking behaviour. */
static bool selfshrinking __read_mostly = true;
module_param(selfshrinking, bool, S_IRUGO);
#endif /* CONFIG_XEN_SELFBALLOONING */

/* tmem hypercall sub-op numbers; must match the Xen tmem ABI. */
#define TMEM_CONTROL               0
#define TMEM_NEW_POOL              1
#define TMEM_DESTROY_POOL          2
#define TMEM_NEW_PAGE              3
#define TMEM_PUT_PAGE              4
#define TMEM_GET_PAGE              5
#define TMEM_FLUSH_PAGE            6
#define TMEM_FLUSH_OBJECT          7
#define TMEM_READ                  8
#define TMEM_WRITE                 9
#define TMEM_XCHG                 10

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST          1
#define TMEM_POOL_SHARED           2
#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT        24

/* 128-bit pool identifier; all-zeroes denotes a private (non-shared) pool. */
struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

/* 192-bit object identifier within a pool. */
struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/* xen tmem foundation ops/hypercalls */

/*
 * Issue a generic tmem hypercall.  Packs the command, pool, object id,
 * page index and transfer parameters into a struct tmem_op and returns
 * the hypervisor's result unchanged.
 */
static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	return HYPERVISOR_tmem_op(&op);
}

/*
 * Ask the hypervisor to create a new tmem pool.  pagesize must be a
 * power of two; its log2 (relative to the 4K base) and the tmem spec
 * version are folded into the flags word.  Returns the new pool id
 * (>= 0) or a negative error from the hypervisor.
 */
static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
				u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int pageshift = 0;

	while (pagesize != 1) {
		pagesize >>= 1;
		pageshift++;
	}
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	return HYPERVISOR_tmem_op(&op);
}

/* xen generic tmem ops */

/* Copy the page at pfn into tmem at (pool_id, oid, index). */
static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = pfn_to_gfn(pfn);

	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

/* Copy the tmem page at (pool_id, oid, index) into the page at pfn. */
static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = pfn_to_gfn(pfn);

	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

/* Invalidate the single tmem page at (pool_id, oid, index). */
static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
		0, 0, 0, 0);
}

/* Invalidate every page belonging to object oid in pool pool_id. */
static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}


#ifdef CONFIG_CLEANCACHE
/* Destroy an entire tmem pool; the oid argument is unused by this sub-op. */
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250
/* cleancache ops */

static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
				     pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

/*
 * Fetch a previously stored page cache page from tmem into *page.
 * Returns 0 on success, -1 on miss or any failure (linux semantics;
 * the hypercall itself returns 1 for success).
 */
static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
				    pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	/* translate return values to linux semantics */
	if (pool < 0 || ind != index)
		return -1;
	return (xen_tmem_get_page((u32)pool, oid, ind, pfn) == 1) ? 0 : -1;
}

/* Invalidate one page cache page from tmem (no-op for invalid pool/index). */
static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
				       pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0 || ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

/* Invalidate all tmem pages belonging to one inode (keyed by filekey). */
static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool >= 0)
		(void)xen_tmem_flush_object((u32)pool, oid);
}

/* Drop the whole pool when its filesystem is unmounted. */
static void tmem_cleancache_flush_fs(int pool)
{
	if (pool >= 0)
		(void)xen_tmem_destroy_pool((u32)pool);
}

/*
 * Create a private (ephemeral, non-shared) tmem pool for a newly
 * mounted filesystem.  Returns the pool id or a negative error.
 */
static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid priv_uuid = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(priv_uuid, 0, pagesize);
}

/*
 * Create a shared tmem pool for a cluster filesystem, keyed by its
 * 16-byte uuid.  uuid is a char pointer with no alignment guarantee,
 * so assemble the two 64-bit halves with memcpy instead of
 * dereferencing casted pointers (avoids misaligned access and
 * strict-aliasing violations); byte-for-byte the result is identical.
 */
static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	memcpy(&shared_uuid.uuid_lo, uuid, sizeof(shared_uuid.uuid_lo));
	memcpy(&shared_uuid.uuid_hi, uuid + 8, sizeof(shared_uuid.uuid_hi));
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}

251
static struct cleancache_ops tmem_cleancache_ops = {
252 253
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
254 255 256
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
257 258 259
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
260
#endif
261

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, larger nr_cpus -> larger SWIZ_BITS
 */
#define SWIZ_BITS		4
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	((_type << SWIZ_BITS) | (_ind & SWIZ_MASK))
#define iswiz(_ind)		(_ind >> SWIZ_BITS)

/* Build the tmem object id for a (swap type, swizzle bucket) pair. */
static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };

	oid.oid[0] = _oswiz(type, ind);
	return oid;
}

284
/* returns 0 if the page was successfully put into frontswap, -1 if not */
285
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310
				   struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * was not present (should never happen!)
 */
311
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367
				   struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	int pool = tmem_frontswap_poolid;
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;

	if (pool < 0 || ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}

/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
	/* swizzling spread this type over SWIZ_MASK+1 objects; flush each one */
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

/* Lazily create the one persistent pool backing all frontswap types. */
static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
		    xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}

368
static struct frontswap_ops tmem_frontswap_ops = {
369 370
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
371 372
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
373 374 375 376
	.init = tmem_frontswap_init
};
#endif

377
static int __init xen_tmem_init(void)
378
{
379 380
	if (!xen_domain())
		return 0;
381
#ifdef CONFIG_FRONTSWAP
382
	if (tmem_enabled && frontswap) {
383 384 385
		char *s = "";

		tmem_frontswap_poolid = -1;
386
		frontswap_register_ops(&tmem_frontswap_ops);
J
Joe Perches 已提交
387 388
		pr_info("frontswap enabled, RAM provided by Xen Transcendent Memory%s\n",
			s);
389 390
	}
#endif
391
#ifdef CONFIG_CLEANCACHE
392
	BUILD_BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
393
	if (tmem_enabled && cleancache) {
394 395 396 397 398 399 400 401 402
		int err;

		err = cleancache_register_ops(&tmem_cleancache_ops);
		if (err)
			pr_warn("xen-tmem: failed to enable cleancache: %d\n",
				err);
		else
			pr_info("cleancache enabled, RAM provided by "
				"Xen Transcendent Memory\n");
403
	}
404 405
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
406 407 408 409 410 411 412 413
	/*
	 * There is no point of driving pages to the swap system if they
	 * aren't going anywhere in tmem universe.
	 */
	if (!frontswap) {
		selfshrinking = false;
		selfballooning = false;
	}
414
	xen_selfballoon_init(selfballooning, selfshrinking);
415 416 417 418 419
#endif
	return 0;
}

module_init(xen_tmem_init)
420 421 422
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");