#include <linux/cpu.h>
#include <linux/kexec.h>
#include <linux/memblock.h>

#include <xen/features.h>
#include <xen/events.h>
#include <xen/interface/memory.h>

#include <asm/cpu.h>
#include <asm/smp.h>
#include <asm/reboot.h>
#include <asm/setup.h>
#include <asm/hypervisor.h>
#include <asm/e820/api.h>

#include <asm/xen/cpuid.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/page.h>

#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"

/*
 * Map the Xen shared_info page into this guest.  On first call a free
 * low-memory page is found and reserved to back it; subsequent calls
 * (e.g. after resume) just re-register the existing page.
 */
void __ref xen_hvm_init_shared_info(void)
{
	struct xen_add_to_physmap xatp;
	u64 pa;

	if (HYPERVISOR_shared_info == &xen_dummy_shared_info) {
		/*
		 * Scan upward from the 4kB physical address for a free
		 * RAM page.  Low memory is preferred so the mapping does
		 * not split up an EPT large page.  Going below
		 * X86_RESERVE_LOW (usually 64kB) is fine here: the BIOS
		 * used for HVM guests is well behaved and won't clobber
		 * memory other than the first 4kB.
		 */
		pa = PAGE_SIZE;
		while (!e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
		       memblock_is_reserved(pa))
			pa += PAGE_SIZE;

		memblock_reserve(pa, PAGE_SIZE);
		HYPERVISOR_shared_info = __va(pa);
	}

	/* Ask the hypervisor to place shared_info at the chosen frame. */
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();
}

/*
 * Gather basic Xen information for an HVM guest: hypervisor version,
 * domain type/name, the hypercall page (plain HVM only), enabled Xen
 * features, and this CPU's vcpu id.
 */
static void __init init_hvm_pv_info(void)
{
	int major, minor;
	uint32_t eax, ebx, ecx, edx, base;

	base = xen_cpuid_base();
	eax = cpuid_eax(base + 1);

	/* CPUID leaf base+1 encodes the Xen version as major:16 / minor:16. */
	major = eax >> 16;
	minor = eax & 0xffff;
	printk(KERN_INFO "Xen version %d.%d.\n", major, minor);

	xen_domain_type = XEN_HVM_DOMAIN;

	/* PVH set up hypercall page in xen_prepare_pvh(). */
	if (xen_pvh_domain())
		pv_info.name = "Xen PVH";
	else {
		u64 pfn;
		uint32_t msr;

		pv_info.name = "Xen HVM";
		/*
		 * Leaf base+2 provides the MSR through which the physical
		 * address of the hypercall page is registered with Xen.
		 */
		msr = cpuid_ebx(base + 2);
		pfn = __pa(hypercall_page);
		wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
	}

	xen_setup_features();

	/* Leaf base+4: prefer the hypervisor-provided vcpu id if present. */
	cpuid(base + 4, &eax, &ebx, &ecx, &edx);
	if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
		this_cpu_write(xen_vcpu_id, ebx);
	else
		this_cpu_write(xen_vcpu_id, smp_processor_id());
}

#ifdef CONFIG_KEXEC_CORE
/*
 * kexec shutdown hook: after the native shutdown, request a Xen soft
 * reset if a kexec is actually in progress.
 */
static void xen_hvm_shutdown(void)
{
	native_machine_shutdown();
	if (kexec_in_progress)
		xen_reboot(SHUTDOWN_soft_reset);
}

/* Crash-kexec variant: always soft-reset after the crash shutdown. */
static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
	native_machine_crash_shutdown(regs);
	xen_reboot(SHUTDOWN_soft_reset);
}
#endif

static int xen_cpu_up_prepare_hvm(unsigned int cpu)
{
109
	int rc = 0;
110 111 112 113 114 115 116 117 118 119 120 121 122 123

	/*
	 * This can happen if CPU was offlined earlier and
	 * offlining timed out in common_cpu_die().
	 */
	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
	}

	if (cpu_acpi_id(cpu) != U32_MAX)
		per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
	else
		per_cpu(xen_vcpu_id, cpu) = cpu;
124 125 126
	rc = xen_vcpu_setup(cpu);
	if (rc)
		return rc;
127

128
	if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
129 130 131 132 133 134 135
		xen_setup_timer(cpu);

	rc = xen_smp_intr_init(cpu);
	if (rc) {
		WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
		     cpu, rc);
	}
136
	return rc;
137 138 139 140 141 142
}

static int xen_cpu_dead_hvm(unsigned int cpu)
{
	xen_smp_intr_free(cpu);

143
	if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
144 145 146 147 148 149 150 151 152 153 154 155 156 157
		xen_teardown_timer(cpu);

       return 0;
}

/* One-time platform setup when running as a Xen HVM (or PVH) guest. */
static void __init xen_hvm_guest_init(void)
{
	/* PV guests are handled elsewhere; nothing to do here. */
	if (xen_pv_domain())
		return;

	init_hvm_pv_info();

	xen_hvm_init_shared_info();

	/*
	 * xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page; it is used by the event channel upcall and by some pvclock
	 * related functions.
	 */
	xen_vcpu_info_reset(0);

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;

	xen_hvm_smp_init();
	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
	xen_unplug_emulated_devices();
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();

	/* PVH guests use the Xen-specific emergency restart path. */
	if (xen_pvh_domain())
		machine_ops.emergency_restart = xen_emergency_restart;
#ifdef CONFIG_KEXEC_CORE
	machine_ops.shutdown = xen_hvm_shutdown;
	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}

/*
 * Set by the "xen_nopv" command line option; it makes the Xen HVM
 * platform detection report no hypervisor (see xen_platform_hvm()).
 */
static bool xen_nopv;

static __init int xen_parse_nopv(char *arg)
{
	xen_nopv = true;
	return 0;
}
early_param("xen_nopv", xen_parse_nopv);

bool xen_hvm_need_lapic(void)
{
	if (xen_nopv)
		return false;
	if (xen_pv_domain())
		return false;
	if (!xen_hvm_domain())
		return false;
201
	if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222
		return false;
	return true;
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);

/*
 * Hypervisor detection hook: the Xen CPUID base when running as a Xen
 * HVM guest, 0 (not detected) for PV guests or when "xen_nopv" was set.
 */
static uint32_t __init xen_platform_hvm(void)
{
	return (xen_pv_domain() || xen_nopv) ? 0 : xen_cpuid_base();
}

/* Hypervisor interface descriptor registered for Xen HVM guests. */
const struct hypervisor_x86 x86_hyper_xen_hvm = {
	.name                   = "Xen HVM",
	.detect                 = xen_platform_hvm,
	.init_platform          = xen_hvm_guest_init,
	.pin_vcpu               = xen_pin_vcpu,
	.x2apic_available       = xen_x2apic_para_available,
};
EXPORT_SYMBOL(x86_hyper_xen_hvm);