Commit facaaec1, authored by James Hogan, committed by Paolo Bonzini

MIPS: KVM: Use local_flush_icache_range to fix RI on XBurst

MIPS KVM uses mips32_SyncICache to synchronise the icache with the
dcache after dynamically modifying guest instructions or writing guest
exception vector. However this uses rdhwr to get the SYNCI step, which
causes a reserved instruction exception on Ingenic XBurst cores.

It would seem to make more sense to use local_flush_icache_range()
instead which does the same thing but is more portable.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: kvm@vger.kernel.org
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: Sanjay Lal <sanjayl@kymasys.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Parent 90f91356
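Every call site in the hunks below follows the same substitution: mips32_SyncICache() took a start address and a byte count, while local_flush_icache_range() (declared via <asm/cacheflush.h>) takes a start and an end address, so each caller now passes start + size as the second argument. A minimal sketch of that pattern, using a hypothetical patch_guest_insn() helper purely for illustration (it is not part of this patch):

	#include <linux/kernel.h>
	#include <linux/string.h>
	#include <asm/cacheflush.h>	/* local_flush_icache_range() */

	/*
	 * Hypothetical helper (illustration only): write one 32-bit guest
	 * instruction, then make it visible to the instruction cache.
	 */
	static void patch_guest_insn(u32 *addr, u32 insn)
	{
		memcpy(addr, &insn, sizeof(insn));

		/*
		 * Old call shape (MIPS32r2-specific; rdhwr of the SYNCI step
		 * raises a reserved instruction exception on Ingenic XBurst):
		 *
		 *	mips32_SyncICache((unsigned long)addr, 32);
		 *
		 * New call shape (portable, routed through the kernel's
		 * per-CPU cache operations):
		 */
		local_flush_icache_range((unsigned long)addr,
					 (unsigned long)addr + 32);
	}

The 32-byte range mirrors the existing call sites in this patch, each of which patches a single instruction.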
@@ -646,7 +646,6 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
 			       struct kvm_vcpu *vcpu);
 
 /* Misc */
-extern void mips32_SyncICache(unsigned long addr, unsigned long size);
 extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu);
 extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
 
@@ -611,35 +611,3 @@ MIPSX(exceptions):
 	.word _C_LABEL(MIPSX(GuestException))	# 29
 	.word _C_LABEL(MIPSX(GuestException))	# 30
 	.word _C_LABEL(MIPSX(GuestException))	# 31
-
-/* This routine makes changes to the instruction stream effective to the hardware.
- * It should be called after the instruction stream is written.
- * On return, the new instructions are effective.
- * Inputs:
- * a0 = Start address of new instruction stream
- * a1 = Size, in bytes, of new instruction stream
- */
-
-#define HW_SYNCI_Step	$1
-LEAF(MIPSX(SyncICache))
-	.set	push
-	.set	mips32r2
-	beq	a1, zero, 20f
-	 nop
-	REG_ADDU	a1, a0, a1
-	rdhwr	v0, HW_SYNCI_Step
-	beq	v0, zero, 20f
-	 nop
-10:
-	synci	0(a0)
-	REG_ADDU	a0, a0, v0
-	sltu	v1, a0, a1
-	bne	v1, zero, 10b
-	 nop
-	sync
-20:
-	jr.hb	ra
-	 nop
-	.set	pop
-END(MIPSX(SyncICache))
@@ -345,7 +345,8 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
 	       mips32_GuestExceptionEnd - mips32_GuestException);
 
 	/* Invalidate the icache for these ranges */
-	mips32_SyncICache((unsigned long) gebase, ALIGN(size, PAGE_SIZE));
+	local_flush_icache_range((unsigned long)gebase,
+				 (unsigned long)gebase + ALIGN(size, PAGE_SIZE));
 
 	/* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */
 	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);
@@ -16,6 +16,7 @@
 #include <linux/vmalloc.h>
 #include <linux/fs.h>
 #include <linux/bootmem.h>
+#include <asm/cacheflush.h>
 
 #include "kvm_mips_comm.h"
@@ -40,7 +41,7 @@ kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
 		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
 			       (vcpu, (unsigned long) opc));
 		memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
-		mips32_SyncICache(kseg0_opc, 32);
+		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
 
 	return result;
 }
@@ -66,7 +67,7 @@ kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
 		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
 			       (vcpu, (unsigned long) opc));
 		memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
-		mips32_SyncICache(kseg0_opc, 32);
+		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
 
 	return result;
 }
@@ -99,11 +100,12 @@ kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
 			       (vcpu, (unsigned long) opc));
 		memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
-		mips32_SyncICache(kseg0_opc, 32);
+		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
 	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
 		local_irq_save(flags);
 		memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
-		mips32_SyncICache((unsigned long) opc, 32);
+		local_flush_icache_range((unsigned long)opc,
+					 (unsigned long)opc + 32);
 		local_irq_restore(flags);
 	} else {
 		kvm_err("%s: Invalid address: %p\n", __func__, opc);
@@ -134,11 +136,12 @@ kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
 		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
 			       (vcpu, (unsigned long) opc));
 		memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
-		mips32_SyncICache(kseg0_opc, 32);
+		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
 	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
 		local_irq_save(flags);
 		memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
-		mips32_SyncICache((unsigned long) opc, 32);
+		local_flush_icache_range((unsigned long)opc,
+					 (unsigned long)opc + 32);
 		local_irq_restore(flags);
 	} else {
 		kvm_err("%s: Invalid address: %p\n", __func__, opc);
@@ -887,7 +887,7 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
 	printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
 
-	mips32_SyncICache(CKSEG0ADDR(pa), 32);
+	local_flush_icache_range(CKSEG0ADDR(pa), 32);
 
 	return 0;
 }