diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index d08ce9c41df4f2751b7d7ba280c4b3862a6341ec..48edb1f4ced4167beba364a6c3b81168ce09b518 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -306,6 +306,11 @@ static inline void *kvm_get_hyp_vector(void)
 		return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
 	}
 
+	case ARM_CPU_PART_CORTEX_A15:
+	{
+		extern char __kvm_hyp_vector_ic_inv[];
+		return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
+	}
 #endif
 	default:
 	{
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
index e789f52a51290e2b8c6b5b011b3992c25aa28174..918a05dd2d63b41dfdcf68c958dada6cb610939f 100644
--- a/arch/arm/kvm/hyp/hyp-entry.S
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -72,6 +72,28 @@ __kvm_hyp_vector:
 	W(b)	hyp_fiq
 
 #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+	.align 5
+__kvm_hyp_vector_ic_inv:
+	.global __kvm_hyp_vector_ic_inv
+
+	/*
+	 * We encode the exception entry in the bottom 3 bits of
+	 * SP, and we have to guarantee to be 8 bytes aligned.
+	 */
+	W(add)	sp, sp, #1	/* Reset          7 */
+	W(add)	sp, sp, #1	/* Undef          6 */
+	W(add)	sp, sp, #1	/* Syscall        5 */
+	W(add)	sp, sp, #1	/* Prefetch abort 4 */
+	W(add)	sp, sp, #1	/* Data abort     3 */
+	W(add)	sp, sp, #1	/* HVC            2 */
+	W(add)	sp, sp, #1	/* IRQ            1 */
+	W(nop)			/* FIQ            0 */
+
+	mcr	p15, 0, r0, c7, c5, 0	/* ICIALLU */
+	isb
+
+	b	decode_vectors
+
 	.align 5
 __kvm_hyp_vector_bp_inv:
 	.global __kvm_hyp_vector_bp_inv
@@ -92,6 +114,8 @@ __kvm_hyp_vector_bp_inv:
 	mcr	p15, 0, r0, c7, c5, 6	/* BPIALL */
 	isb
 
+decode_vectors:
+
 #ifdef CONFIG_THUMB2_KERNEL
 	/*
 	 * Yet another silly hack: Use VPIDR as a temp register.
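
The new __kvm_hyp_vector_ic_inv stub relies on the same trick as the existing __kvm_hyp_vector_bp_inv one: each of the eight exception entries adds a known amount to SP, so when control reaches decode_vectors the bottom three bits of SP identify which entry was taken, while the real stack pointer is recoverable because it was 8-byte aligned on entry. The sketch below is a minimal C illustration of that encode/decode idea, not part of the patch; the names decode_entry and restore_sp are made up for illustration.

	/*
	 * Illustrative sketch only (not in the kernel sources): recover the
	 * exception entry number and the original stack pointer from an SP
	 * value whose low 3 bits were used as scratch encoding space.
	 */
	#include <stdint.h>

	static inline unsigned int decode_entry(uintptr_t sp)
	{
		/* Low 3 bits hold 0..7: FIQ=0, IRQ=1, HVC=2, ... Reset=7. */
		return (unsigned int)(sp & 7);
	}

	static inline uintptr_t restore_sp(uintptr_t sp)
	{
		/* Clearing the low bits gives back the 8-byte aligned SP. */
		return sp & ~(uintptr_t)7;
	}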