diff --git a/Documentation/ABI/stable/sysfs-devices-system-xen_memory b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
index caa311d59ac1d24c92643f37b396407a1ab654f0..6d83f95a8a8e131c9a2f0454cd39128ffd3d7723 100644
--- a/Documentation/ABI/stable/sysfs-devices-system-xen_memory
+++ b/Documentation/ABI/stable/sysfs-devices-system-xen_memory
@@ -75,3 +75,12 @@ Contact:	Konrad Rzeszutek Wilk
 Description:
 		Amount (in KiB) of low (or normal) memory in the
 		balloon.
+
+What:		/sys/devices/system/xen_memory/xen_memory0/scrub_pages
+Date:		September 2018
+KernelVersion:	4.20
+Contact:	xen-devel@lists.xenproject.org
+Description:
+		Control scrubbing pages before returning them to Xen for use
+		by other domains. Can be set with the xen_scrub_pages cmdline
+		parameter. Default value is controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 64a3bf54b97492e3b2b25508a7785d1f1e9e4186..92eb1f42240d7168354dc7129898e2500ef95c1a 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -5000,6 +5000,12 @@
 			Disables the PV optimizations forcing the HVM guest to
 			run as generic HVM guest with no PV drivers.
 
+	xen_scrub_pages=	[XEN]
+			Boolean option to control scrubbing pages before giving them back
+			to Xen, for use by other domains. Can also be changed at runtime
+			with /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+			Default value is controlled with CONFIG_XEN_SCRUB_PAGES_DEFAULT.
+
 	xirc2ps_cs=	[NET,PCMCIA]
 			Format:
 			<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index b459edfacff35e0526fdc912cd0179ef8126ea74..90d387b50ab747f505597e87b439f1dcfe5f489f 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -79,15 +79,19 @@ config XEN_BALLOON_MEMORY_HOTPLUG_LIMIT
 	  This value is used to allocate enough space in internal
 	  tables needed for physical memory administration.
 
-config XEN_SCRUB_PAGES
-	bool "Scrub pages before returning them to system"
+config XEN_SCRUB_PAGES_DEFAULT
+	bool "Scrub pages before returning them to system by default"
 	depends on XEN_BALLOON
 	default y
 	help
 	  Scrub pages before returning them to the system for reuse by
 	  other domains.  This makes sure that any confidential data
 	  is not accidentally visible to other domains.  Is it more
-	  secure, but slightly less efficient.
+	  secure, but slightly less efficient. This can be controlled with
+	  the xen_scrub_pages=0 parameter and
+	  /sys/devices/system/xen_memory/xen_memory0/scrub_pages.
+	  This option only sets the default value.
+
 	  If in doubt, say yes.
 
 config XEN_DEV_EVTCHN
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index d4265c8ebb22a24b1ab8794ab79b592793d14908..b1357aa4bc552eb3a5989dab5eeacf9295d2a2d8 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -19,15 +19,16 @@ static void enable_hotplug_cpu(int cpu)
 
 static void disable_hotplug_cpu(int cpu)
 {
-	if (cpu_online(cpu)) {
-		lock_device_hotplug();
+	if (!cpu_is_hotpluggable(cpu))
+		return;
+	lock_device_hotplug();
+	if (cpu_online(cpu))
 		device_offline(get_cpu_device(cpu));
-		unlock_device_hotplug();
-	}
-	if (cpu_present(cpu))
+	if (!cpu_online(cpu) && cpu_present(cpu)) {
 		xen_arch_unregister_cpu(cpu);
-
-	set_cpu_present(cpu, false);
+		set_cpu_present(cpu, false);
+	}
+	unlock_device_hotplug();
 }
 
 static int vcpu_online(unsigned int cpu)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 08e4af04d6f2c32850a049a83721933a82883b8c..e6c1934734b7d9bdde87e8a9e006e1584c88f15d 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -138,7 +138,7 @@ static int set_evtchn_to_irq(unsigned evtchn, unsigned irq)
 		clear_evtchn_to_irq_row(row);
 	}
 
-	evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+	evtchn_to_irq[row][col] = irq;
 	return 0;
 }
 
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 57390c7666e5dd8d44bfe9bdf1e503afb13de189..b0b02a5011672b6670e136728b2c2a8a8f2ee68e 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -492,12 +492,19 @@ static bool in_range(struct gntdev_grant_map *map,
 	return true;
 }
 
-static void unmap_if_in_range(struct gntdev_grant_map *map,
-			      unsigned long start, unsigned long end)
+static int unmap_if_in_range(struct gntdev_grant_map *map,
+			     unsigned long start, unsigned long end,
+			     bool blockable)
 {
 	unsigned long mstart, mend;
 	int err;
 
+	if (!in_range(map, start, end))
+		return 0;
+
+	if (!blockable)
+		return -EAGAIN;
+
 	mstart = max(start, map->vma->vm_start);
 	mend = min(end, map->vma->vm_end);
 	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
@@ -508,6 +515,8 @@ static void unmap_if_in_range(struct gntdev_grant_map *map,
 			(mstart - map->vma->vm_start) >> PAGE_SHIFT,
 			(mend - mstart) >> PAGE_SHIFT);
 	WARN_ON(err);
+
+	return 0;
 }
 
 static int mn_invl_range_start(struct mmu_notifier *mn,
@@ -519,25 +528,20 @@
 	struct gntdev_grant_map *map;
 	int ret = 0;
 
-	/* TODO do we really need a mutex here? */
 	if (blockable)
 		mutex_lock(&priv->lock);
 	else if (!mutex_trylock(&priv->lock))
 		return -EAGAIN;
 
 	list_for_each_entry(map, &priv->maps, next) {
-		if (in_range(map, start, end)) {
-			ret = -EAGAIN;
+		ret = unmap_if_in_range(map, start, end, blockable);
+		if (ret)
 			goto out_unlock;
-		}
-		unmap_if_in_range(map, start, end);
 	}
 	list_for_each_entry(map, &priv->freeable_maps, next) {
-		if (in_range(map, start, end)) {
-			ret = -EAGAIN;
+		ret = unmap_if_in_range(map, start, end, blockable);
+		if (ret)
 			goto out_unlock;
-		}
-		unmap_if_in_range(map, start, end);
 	}
 
 out_unlock:
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c93d8ef8df3483bbc393b2101c189120f844b634..5bb01a62f214da835ca2e941c3c928b50cf733a0 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -280,9 +280,11 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
 		/*
 		 * The Xenstore watch fires directly after registering it and
 		 * after a suspend/resume cycle. So ENOENT is no error but
-		 * might happen in those cases.
+		 * might happen in those cases. ERANGE is observed when we get
+		 * an empty value (''); this happens when we acknowledge the
+		 * request by writing '\0' below.
 		 */
-		if (err != -ENOENT)
+		if (err != -ENOENT && err != -ERANGE)
 			pr_err("Error %d reading sysrq code in control/sysrq\n",
 			       err);
 		xenbus_transaction_end(xbt, 1);
diff --git a/drivers/xen/mem-reservation.c b/drivers/xen/mem-reservation.c
index 084799c6180e6e6cb281eb882637b42999a1e683..3782cf070338e3fa5f830184a784dd9e2d0c666a 100644
--- a/drivers/xen/mem-reservation.c
+++ b/drivers/xen/mem-reservation.c
@@ -14,6 +14,10 @@
 
 #include <xen/interface/memory.h>
 #include <xen/mem-reservation.h>
+#include <linux/moduleparam.h>
+
+bool __read_mostly xen_scrub_pages = IS_ENABLED(CONFIG_XEN_SCRUB_PAGES_DEFAULT);
+core_param(xen_scrub_pages, xen_scrub_pages, bool, 0);
 
 /*
  * Use one extent per PAGE_SIZE to avoid to break down the page into
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index 294f35ce9e46bc063a7e01061462cf3e298b4364..63c1494a8d73bf2f53f0f0a1887290f338ae454a 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -44,6 +44,7 @@
 #include <xen/xenbus.h>
 #include <xen/features.h>
 #include <xen/page.h>
+#include <xen/mem-reservation.h>
 
 #define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
 
@@ -137,6 +138,7 @@ static DEVICE_ULONG_ATTR(schedule_delay, 0444, balloon_stats.schedule_delay);
 static DEVICE_ULONG_ATTR(max_schedule_delay, 0644, balloon_stats.max_schedule_delay);
 static DEVICE_ULONG_ATTR(retry_count, 0444, balloon_stats.retry_count);
 static DEVICE_ULONG_ATTR(max_retry_count, 0644, balloon_stats.max_retry_count);
+static DEVICE_BOOL_ATTR(scrub_pages, 0644, xen_scrub_pages);
 
 static ssize_t show_target_kb(struct device *dev, struct device_attribute *attr,
 			      char *buf)
@@ -203,6 +205,7 @@ static struct attribute *balloon_attrs[] = {
 	&dev_attr_max_schedule_delay.attr.attr,
 	&dev_attr_retry_count.attr.attr,
 	&dev_attr_max_retry_count.attr.attr,
+	&dev_attr_scrub_pages.attr.attr,
 	NULL
 };
 
diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h
index 80b52b4945e965d96ce068fe3d729e9364cc9868..a2ab516fcd2caf33167f3e64a76c2e77b726d6df 100644
--- a/include/xen/mem-reservation.h
+++ b/include/xen/mem-reservation.h
@@ -17,11 +17,12 @@
 
 #include <xen/page.h>
 
+extern bool xen_scrub_pages;
+
 static inline void xenmem_reservation_scrub_page(struct page *page)
 {
-#ifdef CONFIG_XEN_SCRUB_PAGES
-	clear_highpage(page);
-#endif
+	if (xen_scrub_pages)
+		clear_highpage(page);
 }
 
 #ifdef CONFIG_XEN_HAVE_PVMMU
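
Note: for context on how the now-runtime scrub hook is consumed, here is a minimal, illustrative C sketch of a balloon-style caller handing a page back to the hypervisor. Only xen_scrub_pages and xenmem_reservation_scrub_page() come from this series; release_page_to_xen() and everything around it are hypothetical placeholders, not code from the kernel tree.

/*
 * Illustrative sketch only (not part of the patch): a caller that
 * releases a page back to Xen relies on the runtime toggle after this
 * series. xenmem_reservation_scrub_page() checks the xen_scrub_pages
 * bool, which is set from the xen_scrub_pages= command line
 * (core_param) or via
 * /sys/devices/system/xen_memory/xen_memory0/scrub_pages, instead of
 * being decided at build time by CONFIG_XEN_SCRUB_PAGES.
 */
#include <xen/mem-reservation.h>

static void release_page_to_xen(struct page *page)	/* hypothetical helper */
{
	/* Wipe the page contents if scrubbing is enabled at runtime. */
	xenmem_reservation_scrub_page(page);

	/* ... hand the underlying frame back to the hypervisor ... */
}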