Commit a98dd741 authored by James Hogan

KVM: MIPS/MMU: Drop kvm_get_new_mmu_context()

MIPS KVM uses its own variation of get_new_mmu_context() which takes an
extra vcpu pointer (unused) and does exactly the same thing.

Switch to just using get_new_mmu_context() directly and drop KVM's
version of it as it doesn't really serve any purpose.

The nearby declarations of kvm_mips_alloc_new_mmu_context(),
kvm_mips_vcpu_load() and kvm_mips_vcpu_put() are also removed from
kvm_host.h, as no definitions or users exist.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Parent 7071a885
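For reference, the ASID roll-over algorithm that both functions implement appears verbatim in the mmu.c hunk below. As a self-contained illustration, here is a simplified single-CPU model of that algorithm; the fixed ASID_MASK/ASID_INC/ASID_FIRST_VERSION constants and the model_get_new_asid() name are stand-ins for the kernel's per-CPU cpu_asid_mask()/cpu_asid_inc()/asid_first_version() helpers, and the cache/TLB flushes are reduced to comments:

#include <stdio.h>

/* Simplified model: an 8-bit ASID field with the generation ("version")
 * held in the upper bits. In the kernel these widths come from the
 * per-CPU cpu_asid_mask() and asid_first_version() helpers. */
#define ASID_MASK          0xffUL
#define ASID_INC           1UL
#define ASID_FIRST_VERSION 0x100UL

static unsigned long asid_cache = ASID_FIRST_VERSION;

/* The shared roll-over logic: bump the per-CPU ASID cache; when the ASID
 * field wraps to zero, a TLB flush would start a new generation, and a
 * full unsigned overflow restarts at the first version. */
static unsigned long model_get_new_asid(void)
{
	unsigned long asid = asid_cache + ASID_INC;

	if (!(asid & ASID_MASK)) {
		/* flush_icache_all()/local_flush_tlb_all() happen here */
		if (!asid)			/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	return asid_cache = asid;	/* kernel also sets cpu_context() */
}

int main(void)
{
	asid_cache = ASID_FIRST_VERSION + ASID_MASK - 1;	/* near wrap */
	printf("%#lx\n", model_get_new_asid());	/* 0x1ff: last of gen 1 */
	printf("%#lx\n", model_get_new_asid());	/* 0x200: new generation */
	return 0;
}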
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -638,11 +638,6 @@ void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags);
 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
 void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
 				  bool user);
-extern void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
-				    struct kvm_vcpu *vcpu);
-extern void kvm_mips_alloc_new_mmu_context(struct kvm_vcpu *vcpu);
-extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
-extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);

 /* Emulation */
 u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu);
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1198,8 +1198,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
 	 */
 	preempt_disable();
 	cpu = smp_processor_id();
-	kvm_get_new_mmu_context(kern_mm,
-				cpu, vcpu);
+	get_new_mmu_context(kern_mm, cpu);
 	for_each_possible_cpu(i)
 		if (i != cpu)
 			cpu_context(i, kern_mm) = 0;
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -443,25 +443,6 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
 	return 0;
 }

-void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
-			     struct kvm_vcpu *vcpu)
-{
-	unsigned long asid = asid_cache(cpu);
-
-	asid += cpu_asid_inc();
-	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
-		if (cpu_has_vtag_icache)
-			flush_icache_all();
-
-		local_flush_tlb_all();      /* start new asid cycle */
-
-		if (!asid)      /* fix version if needed */
-			asid = asid_first_version(cpu);
-	}
-
-	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
-
 /**
  * kvm_mips_migrate_count() - Migrate timer.
  * @vcpu:	Virtual CPU.
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -706,7 +706,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

 	if ((cpu_context(cpu, kern_mm) ^ asid_cache(cpu)) &
 						asid_version_mask(cpu)) {
-		kvm_get_new_mmu_context(kern_mm, cpu, vcpu);
+		get_new_mmu_context(kern_mm, cpu);

 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
 			  cpu_context(cpu, current->mm));
@@ -716,7 +716,7 @@ static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

 	if ((cpu_context(cpu, user_mm) ^ asid_cache(cpu)) &
 						asid_version_mask(cpu)) {
-		kvm_get_new_mmu_context(user_mm, cpu, vcpu);
+		get_new_mmu_context(user_mm, cpu);

 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
 			  cpu_context(cpu, current->mm));
@@ -779,7 +779,7 @@ static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
 		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
 		if (gasid != vcpu->arch.last_user_gasid) {
 			kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
-			kvm_get_new_mmu_context(user_mm, cpu, vcpu);
+			get_new_mmu_context(user_mm, cpu);
 			for_each_possible_cpu(i)
 				if (i != cpu)
 					cpu_context(i, user_mm) = 0;
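The guards around these call sites rely on the standard MIPS ASID versioning idiom: the upper bits of cpu_context() hold a generation number, and an mm needs a fresh ASID whenever that generation no longer matches the per-CPU asid_cache. A minimal sketch of the check, assuming the same hypothetical 8-bit ASID field as in the model above:

/* Hypothetical 8-bit ASID field; the upper bits are the generation. */
#define ASID_VERSION_MASK (~0xffUL)

/* Non-zero when mm_ctx was allocated in an older ASID generation than
 * the CPU's current asid_cache, i.e. a new ASID must be taken. */
static inline unsigned long asid_stale(unsigned long mm_ctx,
				       unsigned long cpu_asid_cache)
{
	return (mm_ctx ^ cpu_asid_cache) & ASID_VERSION_MASK;
}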