/* enlighten_hvm.c */

#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/memblock.h>

#include <xen/features.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>

#include <asm/xen/cpuid.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>

#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"

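/*
 * Map the hypervisor's shared info page into the guest physmap at the
 * pfn backing HYPERVISOR_shared_info (set up by reserve_shared_info()).
 */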
void xen_hvm_init_shared_info(void)
{
	struct xen_add_to_physmap xatp;

	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();
}

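/*
 * Find a free RAM page, low in memory, and reserve it with memblock so
 * nothing else uses it; it will back the Xen shared info page.
 */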
static void __init reserve_shared_info(void)
{
	u64 pa;

	/*
	 * Search for a free page starting at 4kB physical address.
	 * Low memory is preferred to avoid an EPT large page split up
	 * by the mapping.
	 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
	 * the BIOS used for HVM guests is well behaved and won't
	 * clobber memory other than the first 4kB.
	 */
	for (pa = PAGE_SIZE;
	     !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
	     memblock_is_reserved(pa);
	     pa += PAGE_SIZE)
		;

	memblock_reserve(pa, PAGE_SIZE);
	HYPERVISOR_shared_info = __va(pa);
}

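/*
 * Read the Xen CPUID leaves: log the hypervisor version, install the
 * hypercall page via the MSR reported by leaf 2 (plain HVM only; PVH
 * does this in xen_prepare_pvh()), set up the feature flags and record
 * this CPU's Xen vCPU id.
 */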
static void __init init_hvm_pv_info(void)
{
	int major, minor;
	uint32_t eax, ebx, ecx, edx, base;

	base = xen_cpuid_base();
	eax = cpuid_eax(base + 1);

	major = eax >> 16;
	minor = eax & 0xffff;
	printk(KERN_INFO "Xen version %d.%d.\n", major, minor);

	xen_domain_type = XEN_HVM_DOMAIN;

	/* PVH sets up the hypercall page in xen_prepare_pvh(). */
	if (xen_pvh_domain())
		pv_info.name = "Xen PVH";
	else {
		u64 pfn;
		uint32_t msr;

		pv_info.name = "Xen HVM";
		msr = cpuid_ebx(base + 2);
		pfn = __pa(hypercall_page);
		wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
	}

	xen_setup_features();

	cpuid(base + 4, &eax, &ebx, &ecx, &edx);
	if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
		this_cpu_write(xen_vcpu_id, ebx);
	else
		this_cpu_write(xen_vcpu_id, smp_processor_id());
}

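/*
 * For kexec and crash kernels, ask Xen for a soft reset so the
 * hypervisor drops its references to guest state (shared info,
 * vcpu_info, event channels) before the new kernel takes over.
 */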
#ifdef CONFIG_KEXEC_CORE
static void xen_hvm_shutdown(void)
{
	native_machine_shutdown();
	if (kexec_in_progress)
		xen_reboot(SHUTDOWN_soft_reset);
}

static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
	native_machine_crash_shutdown(regs);
	xen_reboot(SHUTDOWN_soft_reset);
}
#endif

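/*
 * CPU hotplug "prepare" callback: (re)register the vCPU with Xen and
 * set up its timer and IPI/event interrupts before the CPU comes up.
 */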
static int xen_cpu_up_prepare_hvm(unsigned int cpu)
{
	int rc = 0;

	/*
	 * This can happen if the CPU was offlined earlier and
	 * offlining timed out in common_cpu_die().
	 */
	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
	}

	if (cpu_acpi_id(cpu) != U32_MAX)
		per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
	else
		per_cpu(xen_vcpu_id, cpu) = cpu;
	rc = xen_vcpu_setup(cpu);
	if (rc)
		return rc;

	if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
		xen_setup_timer(cpu);

	rc = xen_smp_intr_init(cpu);
	if (rc) {
		WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
		     cpu, rc);
	}
	return rc;
}

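/* CPU hotplug teardown: undo the per-CPU setup done above. */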
static int xen_cpu_dead_hvm(unsigned int cpu)
{
	xen_smp_intr_free(cpu);

	if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
		xen_teardown_timer(cpu);

	return 0;
}

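/*
 * Main platform setup for Xen HVM/PVH guests: shared info page,
 * vcpu_info, panic handler, vector callback, SMP hooks, emulated
 * device unplug, and the time/MMU/interrupt operations.
 */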
static void __init xen_hvm_guest_init(void)
{
	if (xen_pv_domain())
		return;

	init_hvm_pv_info();

	reserve_shared_info();
	xen_hvm_init_shared_info();

	/*
	 * xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page; we use it in the event channel upcall and in some pvclock
	 * related functions.
	 */
	xen_vcpu_info_reset(0);

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;

	xen_hvm_smp_init();
	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
	xen_unplug_emulated_devices();
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();

	if (xen_pvh_domain())
		machine_ops.emergency_restart = xen_emergency_restart;
#ifdef CONFIG_KEXEC_CORE
	machine_ops.shutdown = xen_hvm_shutdown;
	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}

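/* "xen_nopv" on the kernel command line disables the PV extensions. */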
static bool xen_nopv;
static __init int xen_parse_nopv(char *arg)
{
	xen_nopv = true;
	return 0;
}
early_param("xen_nopv", xen_parse_nopv);

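/*
 * An HVM guest needs the emulated local APIC unless Xen provides both
 * PV interrupts (PIRQs) and a vector callback for event delivery.
 */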
bool xen_hvm_need_lapic(void)
{
	if (xen_nopv)
		return false;
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);

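/*
 * Detection hook: a nonzero Xen CPUID base means we run as an HVM
 * guest, unless we are a PV domain or "xen_nopv" was given.
 */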
static uint32_t __init xen_platform_hvm(void)
{
	if (xen_pv_domain() || xen_nopv)
		return 0;

	return xen_cpuid_base();
}

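/* Hooks registered with the x86 hypervisor framework for Xen HVM. */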
const struct hypervisor_x86 x86_hyper_xen_hvm = {
	.name                   = "Xen HVM",
	.detect                 = xen_platform_hvm,
	.init_platform          = xen_hvm_guest_init,
	.pin_vcpu               = xen_pin_vcpu,
	.x2apic_available       = xen_x2apic_para_available,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);