Commit 669c7433 authored by Linus Torvalds

Merge tag 'stable/for-linus-3.10-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

Pull Xen fixes from Konrad Rzeszutek Wilk:
 - Regression fix in xen privcmd fixing a memory leak.
 - Add Documentation for tmem driver.
 - Simplify and remove code in the tmem driver.
 - Cleanups.

* tag 'stable/for-linus-3.10-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen: Fixed assignment error in if statement
  xen/xenbus: Fixed over 80 character limit issue
  xen/xenbus: Fixed indentation error in switch case
  xen/tmem: Don't use self[ballooning|shrinking] if frontswap is off.
  xen/tmem: Remove the usage of '[no|]selfballoon' and use 'tmem.selfballooning' bool instead.
  xen/tmem: Remove the usage of 'noselfshrink' and use 'tmem.selfshrink' bool instead.
  xen/tmem: Remove the boot options and fold them in the tmem.X parameters.
  xen/tmem: s/disable_// and change the logic.
  xen/tmem: Fix compile warning.
  xen/tmem: Split out the different module/boot options.
  xen/tmem: Move all of the boot and module parameters to the top of the file.
  xen/tmem: Cleanup. Remove the parts that say temporary.
  xen/privcmd: fix condition in privcmd_close()
@@ -3005,6 +3005,27 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Force threading of all interrupt handlers except those
 			marked explicitly IRQF_NO_THREAD.
 
+	tmem		[KNL,XEN]
+			Enable the Transcendent memory driver if built-in.
+
+	tmem.cleancache=0|1 [KNL, XEN]
+			Default is on (1). Disable the usage of the cleancache
+			API to send anonymous pages to the hypervisor.
+
+	tmem.frontswap=0|1 [KNL, XEN]
+			Default is on (1). Disable the usage of the frontswap
+			API to send swap pages to the hypervisor. If disabled
+			the selfballooning and selfshrinking are force disabled.
+
+	tmem.selfballooning=0|1 [KNL, XEN]
+			Default is on (1). Disable the driving of swap pages
+			to the hypervisor.
+
+	tmem.selfshrinking=0|1 [KNL, XEN]
+			Default is on (1). Partial swapoff that immediately
+			transfers pages from Xen hypervisor back to the
+			kernel based on different criteria.
+
 	topology=	[S390]
 			Format: {off | on}
 			Specify if the kernel should make use of the cpu
......
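To illustrate how the newly documented options combine (a hypothetical command line, not part of the patch), a built-in tmem could be enabled while turning frontswap off like this:

    tmem tmem.cleancache=1 tmem.frontswap=0

Per the text above, disabling tmem.frontswap also force-disables selfballooning and selfshrinking, whatever their own settings.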
@@ -19,11 +19,10 @@ config XEN_SELFBALLOONING
 	  by the current usage of anonymous memory ("committed AS") and
 	  controlled by various sysfs-settable parameters. Configuring
 	  FRONTSWAP is highly recommended; if it is not configured, self-
-	  ballooning is disabled by default but can be enabled with the
-	  'selfballooning' kernel boot parameter. If FRONTSWAP is configured,
+	  ballooning is disabled by default. If FRONTSWAP is configured,
 	  frontswap-selfshrinking is enabled by default but can be disabled
-	  with the 'noselfshrink' kernel boot parameter; and self-ballooning
-	  is enabled by default but can be disabled with the 'noselfballooning'
+	  with the 'tmem.selfshrink=0' kernel boot parameter; and self-ballooning
+	  is enabled by default but can be disabled with the 'tmem.selfballooning=0'
 	  kernel boot parameter. Note that systems without a sufficiently
 	  large swap device should not enable self-ballooning.
......
@@ -407,7 +407,8 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 		nr_pages = ARRAY_SIZE(frame_list);
 
 	for (i = 0; i < nr_pages; i++) {
-		if ((page = alloc_page(gfp)) == NULL) {
+		page = alloc_page(gfp);
+		if (page == NULL) {
 			nr_pages = i;
 			state = BP_EAGAIN;
 			break;
......
@@ -504,7 +504,7 @@ static void privcmd_close(struct vm_area_struct *vma)
 	struct page **pages = vma->vm_private_data;
 	int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
-	if (!xen_feature(XENFEAT_auto_translated_physmap || !numpgs || !pages))
+	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
 	xen_unmap_domain_mfn_range(vma, numpgs, pages);
......
@@ -11,11 +11,7 @@
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/cleancache.h>
-
-/* temporary ifdef until include/linux/frontswap.h is upstream */
-#ifdef CONFIG_FRONTSWAP
 #include <linux/frontswap.h>
-#endif
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -24,6 +20,34 @@
 #include <asm/xen/hypervisor.h>
 #include <xen/tmem.h>
 
+#ifndef CONFIG_XEN_TMEM_MODULE
+bool __read_mostly tmem_enabled = false;
+
+static int __init enable_tmem(char *s)
+{
+	tmem_enabled = true;
+	return 1;
+}
+__setup("tmem", enable_tmem);
+#endif
+
+#ifdef CONFIG_CLEANCACHE
+static bool cleancache __read_mostly = true;
+module_param(cleancache, bool, S_IRUGO);
+static bool selfballooning __read_mostly = true;
+module_param(selfballooning, bool, S_IRUGO);
+#endif /* CONFIG_CLEANCACHE */
+
+#ifdef CONFIG_FRONTSWAP
+static bool frontswap __read_mostly = true;
+module_param(frontswap, bool, S_IRUGO);
+#endif /* CONFIG_FRONTSWAP */
+
+#ifdef CONFIG_XEN_SELFBALLOONING
+static bool selfshrinking __read_mostly = true;
+module_param(selfshrinking, bool, S_IRUGO);
+#endif /* CONFIG_XEN_SELFBALLOONING */
+
 #define TMEM_CONTROL		0
 #define TMEM_NEW_POOL		1
 #define TMEM_DESTROY_POOL	2
@@ -129,16 +153,6 @@ static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
 	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
 }
 
-#ifndef CONFIG_XEN_TMEM_MODULE
-bool __read_mostly tmem_enabled = false;
-
-static int __init enable_tmem(char *s)
-{
-	tmem_enabled = true;
-	return 1;
-}
-__setup("tmem", enable_tmem);
-#endif
 
 #ifdef CONFIG_CLEANCACHE
 static int xen_tmem_destroy_pool(u32 pool_id)
@@ -230,20 +244,6 @@ static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
 	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
 }
 
-static bool disable_cleancache __read_mostly;
-static bool disable_selfballooning __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_cleancache, bool, S_IRUGO);
-module_param(disable_selfballooning, bool, S_IRUGO);
-#else
-static int __init no_cleancache(char *s)
-{
-	disable_cleancache = true;
-	return 1;
-}
-__setup("nocleancache", no_cleancache);
-#endif
-
 static struct cleancache_ops tmem_cleancache_ops = {
 	.put_page = tmem_cleancache_put_page,
 	.get_page = tmem_cleancache_get_page,
@@ -361,20 +361,6 @@ static void tmem_frontswap_init(unsigned ignored)
 	xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
 }
 
-static bool disable_frontswap __read_mostly;
-static bool disable_frontswap_selfshrinking __read_mostly;
-#ifdef CONFIG_XEN_TMEM_MODULE
-module_param(disable_frontswap, bool, S_IRUGO);
-module_param(disable_frontswap_selfshrinking, bool, S_IRUGO);
-#else
-static int __init no_frontswap(char *s)
-{
-	disable_frontswap = true;
-	return 1;
-}
-__setup("nofrontswap", no_frontswap);
-#endif
-
 static struct frontswap_ops tmem_frontswap_ops = {
 	.store = tmem_frontswap_store,
 	.load = tmem_frontswap_load,
@@ -382,8 +368,6 @@ static struct frontswap_ops tmem_frontswap_ops = {
 	.invalidate_area = tmem_frontswap_flush_area,
 	.init = tmem_frontswap_init
 };
-#else /* CONFIG_FRONTSWAP */
-#define disable_frontswap_selfshrinking 1
 #endif
 
 static int xen_tmem_init(void)
@@ -391,7 +375,7 @@ static int xen_tmem_init(void)
 	if (!xen_domain())
 		return 0;
 #ifdef CONFIG_FRONTSWAP
-	if (tmem_enabled && !disable_frontswap) {
+	if (tmem_enabled && frontswap) {
 		char *s = "";
 		struct frontswap_ops *old_ops =
 			frontswap_register_ops(&tmem_frontswap_ops);
@@ -408,7 +392,7 @@ static int xen_tmem_init(void)
 #endif
 #ifdef CONFIG_CLEANCACHE
 	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
-	if (tmem_enabled && !disable_cleancache) {
+	if (tmem_enabled && cleancache) {
 		char *s = "";
 		struct cleancache_ops *old_ops =
 			cleancache_register_ops(&tmem_cleancache_ops);
@@ -419,8 +403,15 @@ static int xen_tmem_init(void)
 	}
 #endif
 #ifdef CONFIG_XEN_SELFBALLOONING
-	xen_selfballoon_init(!disable_selfballooning,
-			     !disable_frontswap_selfshrinking);
+	/*
+	 * There is no point of driving pages to the swap system if they
+	 * aren't going anywhere in tmem universe.
+	 */
+	if (!frontswap) {
+		selfshrinking = false;
+		selfballooning = false;
+	}
+	xen_selfballoon_init(selfballooning, selfshrinking);
 #endif
 	return 0;
 }
......
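The switches above are ordinary module_param() declarations, so how they are set depends on how the driver is built. A hypothetical invocation for each case, assuming the module is named 'tmem' (which is what the 'tmem.' prefix in the documentation hunk implies):

    # built into the kernel: options go on the boot command line
    tmem tmem.frontswap=0 tmem.selfshrinking=0

    # built as a module (CONFIG_XEN_TMEM_MODULE): options go to modprobe
    modprobe tmem frontswap=0 selfshrinking=0

The bare 'tmem' enable switch comes from the __setup("tmem", enable_tmem) handler, which the patch only compiles for built-in configurations (#ifndef CONFIG_XEN_TMEM_MODULE).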
@@ -53,15 +53,12 @@
  * System configuration note: Selfballooning should not be enabled on
  * systems without a sufficiently large swap device configured; for best
  * results, it is recommended that total swap be increased by the size
- * of the guest memory. Also, while technically not required to be
- * configured, it is highly recommended that frontswap also be configured
- * and enabled when selfballooning is running. So, selfballooning
- * is disabled by default if frontswap is not configured and can only
- * be enabled with the "selfballooning" kernel boot option; similarly
- * selfballooning is enabled by default if frontswap is configured and
- * can be disabled with the "noselfballooning" kernel boot option. Finally,
- * when frontswap is configured, frontswap-selfshrinking can be disabled
- * with the "noselfshrink" kernel boot option.
+ * of the guest memory. Note, that selfballooning should be disabled by default
+ * if frontswap is not configured. Similarly selfballooning should be enabled
+ * by default if frontswap is configured and can be disabled with the
+ * "tmem.selfballooning=0" kernel boot option. Finally, when frontswap is
+ * configured, frontswap-selfshrinking can be disabled with the
+ * "tmem.selfshrink=0" kernel boot option.
  *
  * Selfballooning is disallowed in domain0 and force-disabled.
  *
@@ -120,9 +117,6 @@ static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
 /* Enable/disable with sysfs. */
 static bool frontswap_selfshrinking __read_mostly;
 
-/* Enable/disable with kernel boot option. */
-static bool use_frontswap_selfshrink = true;
-
 /*
  * The default values for the following parameters were deemed reasonable
  * by experimentation, may be workload-dependent, and can all be
@@ -176,35 +170,6 @@ static void frontswap_selfshrink(void)
 	frontswap_shrink(tgt_frontswap_pages);
 }
 
-static int __init xen_nofrontswap_selfshrink_setup(char *s)
-{
-	use_frontswap_selfshrink = false;
-	return 1;
-}
-__setup("noselfshrink", xen_nofrontswap_selfshrink_setup);
-
-/* Disable with kernel boot option. */
-static bool use_selfballooning = true;
-
-static int __init xen_noselfballooning_setup(char *s)
-{
-	use_selfballooning = false;
-	return 1;
-}
-__setup("noselfballooning", xen_noselfballooning_setup);
-
-#else /* !CONFIG_FRONTSWAP */
-
-/* Enable with kernel boot option. */
-static bool use_selfballooning;
-
-static int __init xen_selfballooning_setup(char *s)
-{
-	use_selfballooning = true;
-	return 1;
-}
-__setup("selfballooning", xen_selfballooning_setup);
-
 #endif /* CONFIG_FRONTSWAP */
 
 #define MB2PAGES(mb)	((mb) << (20 - PAGE_SHIFT))
......
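As a rough worked example of the swap-sizing advice in the selfballooning comment above (illustrative numbers, not from the patch): a guest with 4 GB of RAM and 2 GB of swap would, following the "increase total swap by the size of the guest memory" recommendation, be configured with roughly 2 GB + 4 GB = 6 GB of swap before selfballooning is enabled.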
@@ -70,22 +70,21 @@ static long xenbus_alloc(domid_t domid)
 	return err;
 }
 
-static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
+static long xenbus_backend_ioctl(struct file *file, unsigned int cmd,
+				 unsigned long data)
 {
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
 
 	switch (cmd) {
-		case IOCTL_XENBUS_BACKEND_EVTCHN:
-			if (xen_store_evtchn > 0)
-				return xen_store_evtchn;
-			return -ENODEV;
-
-		case IOCTL_XENBUS_BACKEND_SETUP:
-			return xenbus_alloc(data);
-
-		default:
-			return -ENOTTY;
+	case IOCTL_XENBUS_BACKEND_EVTCHN:
+		if (xen_store_evtchn > 0)
+			return xen_store_evtchn;
+		return -ENODEV;
+	case IOCTL_XENBUS_BACKEND_SETUP:
+		return xenbus_alloc(data);
+	default:
+		return -ENOTTY;
 	}
 }
......