Commit a413f474 authored by Ian Munsie, committed by Benjamin Herrenschmidt

powerpc: Disable relocation on exceptions whenever PR KVM is active

For PR KVM we allow userspace to map 0xc000000000000000. Because
transitioning from userspace to the guest kernel may use the relocated
exception vectors we have to disable relocation on exceptions whenever
PR KVM is active as we cannot trust that address.

This issue does not apply to HV KVM, since changing from a guest to the
hypervisor will never use the relocated exception vectors.

Currently the hypervisor interface only allows us to toggle relocation
on exceptions on a partition wide scope, so we need to globally disable
relocation on exceptions when the first PR KVM instance is started and
only re-enable them when all PR KVM instances have been destroyed.

It's a bit heavy-handed, but until the hypervisor gives us a lightweight
way to toggle relocation on exceptions for a single thread, it's our only
real option.
Signed-off-by: Ian Munsie <imunsie@au1.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Parent 96f013fe
@@ -395,6 +395,15 @@ static inline unsigned long cmo_get_page_size(void)
 {
 	return CMO_PageSize;
 }
+
+extern long pSeries_enable_reloc_on_exc(void);
+extern long pSeries_disable_reloc_on_exc(void);
+
+#else
+
+#define pSeries_enable_reloc_on_exc() do {} while (0)
+#define pSeries_disable_reloc_on_exc() do {} while (0)
+
 #endif /* CONFIG_PPC_PSERIES */
 
 #endif /* __ASSEMBLY__ */
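The #else branch above stubs both calls out as empty do {} while (0) statements on kernels built without CONFIG_PPC_PSERIES, so callers compile unchanged and the calls vanish entirely; since the real functions' return values are ignored by the callers below, a statement-shaped macro is sufficient. A minimal, self-contained sketch of that stub pattern (the HAVE_PSERIES switch and pseries_disable_reloc() helper are made up for illustration, not part of the patch):

```c
#include <stdio.h>

/* Flip to 1 to emulate a CONFIG_PPC_PSERIES=y build. */
#define HAVE_PSERIES 0

#if HAVE_PSERIES
long pseries_disable_reloc(void) { puts("hcall: reloc off"); return 0; }
#else
/* Stub: a valid statement that compiles to nothing, mirroring the header hunk. */
#define pseries_disable_reloc() do {} while (0)
#endif

int main(void)
{
	/* Callers need no #ifdef of their own around the call site. */
	pseries_disable_reloc();
	return 0;
}
```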
@@ -34,6 +34,7 @@
 #include <asm/kvm_book3s.h>
 #include <asm/mmu_context.h>
 #include <asm/switch_to.h>
+#include <asm/firmware.h>
 #include <linux/gfp.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
@@ -1284,12 +1285,21 @@ void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 }
 
+static unsigned int kvm_global_user_count = 0;
+static DEFINE_SPINLOCK(kvm_global_user_count_lock);
+
 int kvmppc_core_init_vm(struct kvm *kvm)
 {
 #ifdef CONFIG_PPC64
 	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
 #endif
 
+	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+		spin_lock(&kvm_global_user_count_lock);
+		if (++kvm_global_user_count == 1)
+			pSeries_disable_reloc_on_exc();
+		spin_unlock(&kvm_global_user_count_lock);
+	}
 	return 0;
 }
@@ -1298,6 +1308,14 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
 #ifdef CONFIG_PPC64
 	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
 #endif
+
+	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
+		spin_lock(&kvm_global_user_count_lock);
+		BUG_ON(kvm_global_user_count == 0);
+		if (--kvm_global_user_count == 0)
+			pSeries_enable_reloc_on_exc();
+		spin_unlock(&kvm_global_user_count_lock);
+	}
 }
 
 static int kvmppc_book3s_init(void)
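Taken together, the two hunks above implement a first-user/last-user toggle: the partition-wide state change happens only on the 0→1 and 1→0 transitions of a lock-protected counter, and the hypervisor call is made while the lock is held so racing VM creations or teardowns cannot both observe the transition. A small userspace analogue of the same pattern (the pthread mutex and the reloc_on()/reloc_off() stand-ins are illustrative, not kernel code):

```c
#include <pthread.h>
#include <stdio.h>

static unsigned int user_count;
static pthread_mutex_t user_count_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for the partition-wide hypervisor calls. */
static void reloc_off(void) { puts("relocation on exceptions: disabled"); }
static void reloc_on(void)  { puts("relocation on exceptions: enabled");  }

static void vm_created(void)
{
	pthread_mutex_lock(&user_count_lock);
	if (++user_count == 1)		/* first PR guest: disable globally */
		reloc_off();
	pthread_mutex_unlock(&user_count_lock);
}

static void vm_destroyed(void)
{
	pthread_mutex_lock(&user_count_lock);
	if (--user_count == 0)		/* last PR guest gone: restore */
		reloc_on();
	pthread_mutex_unlock(&user_count_lock);
}

int main(void)
{
	vm_created();	/* 0 -> 1: toggles off */
	vm_created();	/* 1 -> 2: no toggle */
	vm_destroyed();	/* 2 -> 1: still one user left */
	vm_destroyed();	/* 1 -> 0: toggles back on */
	return 0;
}
```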
@@ -375,7 +375,7 @@ static void pSeries_idle(void)
  * to ever be a problem in practice we can move this into a kernel thread to
  * finish off the process later in boot.
  */
-static int __init pSeries_enable_reloc_on_exc(void)
+long pSeries_enable_reloc_on_exc(void)
 {
 	long rc;
 	unsigned int delay, total_delay = 0;
@@ -397,9 +397,9 @@ static int __init pSeries_enable_reloc_on_exc(void)
 		mdelay(delay);
 	}
 }
+EXPORT_SYMBOL(pSeries_enable_reloc_on_exc);
 
-#ifdef CONFIG_KEXEC
-static long pSeries_disable_reloc_on_exc(void)
+long pSeries_disable_reloc_on_exc(void)
 {
 	long rc;
@@ -410,7 +410,9 @@ static long pSeries_disable_reloc_on_exc(void)
 		mdelay(get_longbusy_msecs(rc));
 	}
 }
+EXPORT_SYMBOL(pSeries_disable_reloc_on_exc);
 
+#ifdef CONFIG_KEXEC
 static void pSeries_machine_kexec(struct kimage *image)
 {
 	long rc;
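The signature and EXPORT_SYMBOL() changes in the last two hunks are what let PR KVM, which may be built as a module, call these routines: only exported symbols can be resolved at module load time, and dropping static and __init matters too, since __init code is discarded after boot and must not be called from a VM-creation path later on. A hedged sketch of a module-side caller, only to illustrate the linkage (the reloc_demo module is hypothetical and not part of the patch; the extern prototypes simply repeat the ones the patch adds to the header):

```c
#include <linux/module.h>
#include <linux/kernel.h>

/* Prototypes as added by the patch; normally they come from the pSeries header. */
extern long pSeries_enable_reloc_on_exc(void);
extern long pSeries_disable_reloc_on_exc(void);

static int __init reloc_demo_init(void)
{
	/* Resolvable from a module only because of the EXPORT_SYMBOL() calls above. */
	long rc = pSeries_disable_reloc_on_exc();

	pr_info("reloc_demo: disable_reloc_on_exc returned %ld\n", rc);
	return 0;
}

static void __exit reloc_demo_exit(void)
{
	pSeries_enable_reloc_on_exc();
}

module_init(reloc_demo_init);
module_exit(reloc_demo_exit);
MODULE_LICENSE("GPL");
```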