Commit 4a969980, authored by Guo Chao, committed by Marcelo Tosatti

KVM: x86: Fix typos in x86.c

Signed-off-by: Guo Chao <yan@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent c5ec2e56
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1093,7 +1093,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	 * For each generation, we track the original measured
 	 * nanosecond time, offset, and write, so if TSCs are in
 	 * sync, we can match exact offset, and if not, we can match
-	 * exact software computaion in compute_guest_tsc()
+	 * exact software computation in compute_guest_tsc()
 	 *
 	 * These values are tracked in kvm->arch.cur_xxx variables.
 	 */
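The comment above refers to compute_guest_tsc(). As a rough, self-contained illustration of the idea only (not the kernel's actual implementation; the struct and field names below are made up), the guest TSC can be rebuilt from the last value the guest wrote and the nanoseconds elapsed since that write, scaled by the virtual TSC frequency:

/* Illustrative sketch, not kernel code. */
#include <stdint.h>

struct tsc_record {
	uint64_t last_tsc_write;   /* value the guest last wrote */
	int64_t  last_tsc_nsec;    /* nanosecond timestamp of that write */
	uint64_t virtual_tsc_khz;  /* guest TSC frequency in kHz */
};

static uint64_t compute_guest_tsc_sketch(const struct tsc_record *r,
					 int64_t kernel_ns)
{
	/* elapsed ns * kHz / 10^6 = elapsed TSC ticks since the write */
	uint64_t elapsed_ticks =
		(uint64_t)(kernel_ns - r->last_tsc_nsec) *
		r->virtual_tsc_khz / 1000000ULL;
	return r->last_tsc_write + elapsed_ticks;
}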
@@ -1500,7 +1500,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 {
 	gpa_t gpa = data & ~0x3f;
-	/* Bits 2:5 are resrved, Should be zero */
+	/* Bits 2:5 are reserved, Should be zero */
 	if (data & 0x3c)
 		return 1;
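For context on the check touched above, here is a minimal sketch of the same validation (illustrative only, mirroring the masks visible in the hunk rather than the full kvm_pv_enable_async_pf() logic): the low 6 bits of the MSR value carry flag/reserved bits, the remainder is a guest physical address, and bits 2:5 (mask 0x3c) must be zero.

/* Illustrative sketch, not kernel code. */
#include <stdbool.h>
#include <stdint.h>

static bool async_pf_msr_valid(uint64_t data, uint64_t *gpa_out)
{
	if (data & 0x3c)                /* reserved bits 2:5 set -> reject */
		return false;
	*gpa_out = data & ~0x3fULL;     /* strip the low 6 flag bits */
	return true;
}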
@@ -1723,7 +1723,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		 * Ignore all writes to this no longer documented MSR.
 		 * Writes are only relevant for old K7 processors,
 		 * all pre-dating SVM, but a recommended workaround from
-		 * AMD for these chips. It is possible to speicify the
+		 * AMD for these chips. It is possible to specify the
 		 * affected processor models on the command line, hence
 		 * the need to ignore the workaround.
 		 */
@@ -4491,7 +4491,7 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
 	/*
 	 * if emulation was due to access to shadowed page table
-	 * and it failed try to unshadow page and re-entetr the
+	 * and it failed try to unshadow page and re-enter the
 	 * guest to let CPU execute the instruction.
 	 */
 	if (kvm_mmu_unprotect_page_virt(vcpu, gva))
@@ -5587,7 +5587,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 		/*
 		 * We are here if userspace calls get_regs() in the middle of
 		 * instruction emulation. Registers state needs to be copied
-		 * back from emulation context to vcpu. Usrapace shouldn't do
+		 * back from emulation context to vcpu. Userspace shouldn't do
 		 * that usually, but some bad designed PV devices (vmware
 		 * backdoor interface) need this to work
 		 */
@@ -6116,7 +6116,7 @@ int kvm_arch_hardware_enable(void *garbage)
 	 * as we reset last_host_tsc on all VCPUs to stop this from being
 	 * called multiple times (one for each physical CPU bringup).
 	 *
-	 * Platforms with unnreliable TSCs don't have to deal with this, they
+	 * Platforms with unreliable TSCs don't have to deal with this, they
 	 * will be compensated by the logic in vcpu_load, which sets the TSC to
 	 * catchup mode. This will catchup all VCPUs to real time, but cannot
 	 * guarantee that they stay in perfect synchronization.
@@ -6391,7 +6391,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	map_flags = MAP_SHARED | MAP_ANONYMOUS;
 	/*To keep backward compatibility with older userspace,
-	 *x86 needs to hanlde !user_alloc case.
+	 *x86 needs to handle !user_alloc case.
 	 */
 	if (!user_alloc) {
 		if (npages && !old.rmap) {
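The !user_alloc path mentioned in this comment maps anonymous shared memory on behalf of older userspace. As a userspace analogue of the same flag combination (this is not the kernel-side code, just an illustration of what MAP_SHARED | MAP_ANONYMOUS provides):

/* Illustrative userspace sketch, not kernel code. */
#include <stddef.h>
#include <sys/mman.h>

static void *alloc_shared_anon(size_t len)
{
	/* Anonymous (not file-backed) memory, shared across fork(). */
	void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	return mem == MAP_FAILED ? NULL : mem;
}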