/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/virt.h>

#define	VCPU_WORKAROUND_2_FLAG_SHIFT	0
#define	VCPU_WORKAROUND_2_FLAG		(_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))
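
/*
 * Illustrative sketch, not part of the original header: decoding an exit
 * code that carries a pending SError with the helpers above. The variable
 * name is hypothetical.
 *
 *	u32 exit = ARM_EXCEPTION_TRAP | (1U << ARM_EXIT_WITH_SERROR_BIT);
 *
 *	ARM_SERROR_PENDING(exit);	// 1: bit 31 is set
 *	ARM_EXCEPTION_CODE(exit);	// ARM_EXCEPTION_TRAP, bit 31 masked
 *	ARM_EXCEPTION_IS_TRAP(exit);	// true, compares the masked code
 */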

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR, 	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP, 		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}
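
/*
 * Illustrative sketch, not in the original header: the {value, "name"}
 * table above is shaped for trace-event pretty-printing, e.g. in a
 * hypothetical TRACE_EVENT() print format:
 *
 *	__print_symbolic(exit_code, kvm_arm_exception_type)
 */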

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)
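
/*
 * Illustrative note, not in the original header: AArch64 instructions are
 * a fixed 4 bytes (AARCH64_INSN_SIZE), so the preamble is the first 8
 * bytes of a vector slot and the generated branch resumes execution at
 * slot_base + KVM_VECTOR_PREAMBLE.
 */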

#define __SMCCC_WORKAROUND_1_SMC_SZ 36

#ifndef __ASSEMBLY__

#include <linux/mm.h>

/* Translate a kernel address of @sym into its equivalent linear mapping */
#define kvm_ksym_ref(sym)						\
	({								\
		void *val = &sym;					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias(&sym);				\
		val;							\
	 })
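
/*
 * Illustrative sketch, not part of the original header: resolving a hyp
 * symbol (here one declared further down in this file) before handing it
 * to EL2.
 *
 *	void *vector = kvm_ksym_ref(__kvm_hyp_vector);
 *
 * With VHE (is_kernel_in_hyp_mode()) the kernel address is used as-is;
 * otherwise the linear-map alias from lm_alias() is returned.
 */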

struct kvm;
struct kvm_vcpu;

extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];

extern char __kvm_hyp_vector[];

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);

extern int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu);

extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);

extern void __kvm_enable_ssbs(void);

extern u64 __vgic_v3_get_ich_vtr_el2(void);
extern u64 __vgic_v3_read_vmcr(void);
extern void __vgic_v3_write_vmcr(u32 vmcr);
extern void __vgic_v3_init_lrs(void);

extern u32 __kvm_get_mdcr_el2(void);

extern char __smccc_workaround_1_smc[__SMCCC_WORKAROUND_1_SMC_SZ];

/*
 * Home-grown __this_cpu_{ptr,read} variants that always work at HYP,
 * provided that sym is really a *symbol* and not a pointer obtained from
 * a data structure. As for SHIFT_PERCPU_PTR(), the creative casting keeps
 * sparse quiet.
 */
#define __hyp_this_cpu_ptr(sym)						\
	({								\
		void *__ptr;						\
		__verify_pcpu_ptr(&sym);				\
		__ptr = hyp_symbol_addr(sym);				\
		__ptr += read_sysreg(tpidr_el2);			\
		(typeof(sym) __kernel __force *)__ptr;			\
	 })

#define __hyp_this_cpu_read(sym)					\
	({								\
		*__hyp_this_cpu_ptr(sym);				\
	 })
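
/*
 * Illustrative sketch, not in the original header: per-CPU access from hyp
 * code. kvm_host_data is the per-CPU variable referenced by the assembly
 * macros below; the host_ctxt member and the SSBD symbol are assumptions
 * for illustration.
 *
 *	struct kvm_cpu_context *ctxt;
 *
 *	ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
 *	if (__hyp_this_cpu_read(arm64_ssbd_callback_required))
 *		...;
 */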

#else /* __ASSEMBLY__ */

.macro hyp_adr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	add	\reg, \reg, \tmp
.endm

.macro hyp_ldr_this_cpu reg, sym, tmp
	adr_l	\reg, \sym
	mrs	\tmp, tpidr_el2
	ldr	\reg,  [\reg, \tmp]
.endm
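
/*
 * Illustrative sketch, not in the original header: loading a per-CPU word
 * from EL2 assembly. The symbol name is hypothetical, and \tmp must be a
 * different register from \reg, since the macro computes the address in
 * \reg before reading tpidr_el2 into \tmp.
 *
 *	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x1
 */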

.macro get_host_ctxt reg, tmp
	hyp_adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
	kern_hyp_va	\vcpu
.endm
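
/*
 * Illustrative sketch, not in the original header: typical exception-entry
 * use, leaving the hyp VA of the current vcpu in x1 and the host context
 * pointer in x0. The register choice is an example only.
 *
 *	get_vcpu_ptr x1, x0
 */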

#endif /* __ASSEMBLY__ */

#endif /* __ARM_KVM_ASM_H__ */