Commit 8a403249 authored by Avi Kivity

Merge tag 'powerpc-fixes' of git://github.com/paulusmack/linux into new/master

Five fixes for bugs that have crept into the powerpc KVM implementations.
These are all small simple patches that only affect arch/powerpc/kvm.
They come from the series that Alex Graf put together but which was too
late for the 3.4 merge window.

* tag 'powerpc-fixes' of git://github.com/paulusmack/linux:
  KVM: PPC: Book3S: PR: Fix preemption
  KVM: PPC: Save/Restore CR over vcpu_run
  KVM: PPC: Book3S HV: Save and restore CR in __kvmppc_vcore_entry
  KVM: PPC: Book3S HV: Fix kvm_alloc_linear in case where no linears exist
  KVM: PPC: Book3S: Compile fix for ppc32 in HIOR access code
Signed-off-by: Avi Kivity <avi@redhat.com>
@@ -173,9 +173,9 @@ static void __init kvm_linear_init_one(ulong size, int count, int type)
 
 static struct kvmppc_linear_info *kvm_alloc_linear(int type)
 {
-	struct kvmppc_linear_info *ri;
+	struct kvmppc_linear_info *ri, *ret;
 
-	ri = NULL;
+	ret = NULL;
 	spin_lock(&linear_lock);
 	list_for_each_entry(ri, &free_linears, list) {
 		if (ri->type != type)
@@ -183,11 +183,12 @@ static struct kvmppc_linear_info *kvm_alloc_linear(int type)
 		list_del(&ri->list);
 		atomic_inc(&ri->use_count);
+		memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
+		ret = ri;
 		break;
 	}
 	spin_unlock(&linear_lock);
-	memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
-	return ri;
+	return ret;
 }
 
 static void kvm_release_linear(struct kvmppc_linear_info *ri)
......
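The bug this kvm_alloc_linear() hunk fixes: when free_linears is empty or holds no entry of the requested type, list_for_each_entry() leaves its cursor aliasing the list head (cast to the entry type), not NULL, so the old code memset and returned a bogus pointer. A minimal userspace sketch of the fixed pattern, using ad-hoc stand-ins for the kernel's list macros (not the kernel API):

    #include <stddef.h>
    #include <stdio.h>

    /* Ad-hoc stand-ins for the kernel's circular list and list_for_each_entry(),
     * only to show why returning the loop cursor was wrong. */
    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    #define list_for_each_entry(pos, head, member)                          \
            for (pos = container_of((head)->next, typeof(*pos), member);    \
                 &pos->member != (head);                                    \
                 pos = container_of(pos->member.next, typeof(*pos), member))

    struct linear_info {
            struct list_head list;
            int type;
    };

    static struct linear_info *find_linear(struct list_head *free_list, int type)
    {
            struct linear_info *ri, *ret = NULL;

            list_for_each_entry(ri, free_list, list) {
                    if (ri->type != type)
                            continue;
                    ret = ri;       /* record the match explicitly */
                    break;
            }
            /* After an unsuccessful walk, 'ri' aliases the list head, not NULL;
             * 'ret' is the only pointer that is reliably NULL on no-match. */
            return ret;
    }

    int main(void)
    {
            struct list_head free_list = { &free_list, &free_list }; /* empty list */

            printf("no match -> %p\n", (void *)find_linear(&free_list, 1));
            return 0;
    }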
@@ -46,8 +46,10 @@ _GLOBAL(__kvmppc_vcore_entry)
 	/* Save host state to the stack */
 	stdu	r1, -SWITCH_FRAME_SIZE(r1)
 
-	/* Save non-volatile registers (r14 - r31) */
+	/* Save non-volatile registers (r14 - r31) and CR */
 	SAVE_NVGPRS(r1)
+	mfcr	r3
+	std	r3, _CCR(r1)
 
 	/* Save host DSCR */
 BEGIN_FTR_SECTION
@@ -157,8 +159,10 @@ kvmppc_handler_highmem:
 	 * R13 = PACA
 	 */
 
-	/* Restore non-volatile host registers (r14 - r31) */
+	/* Restore non-volatile host registers (r14 - r31) and CR */
 	REST_NVGPRS(r1)
+	ld	r4, _CCR(r1)
+	mtcr	r4
 
 	addi	r1, r1, SWITCH_FRAME_SIZE
 	ld	r0, PPC_LR_STKOFF(r1)
......
@@ -84,6 +84,10 @@ kvm_start_entry:
 	/* Save non-volatile registers (r14 - r31) */
 	SAVE_NVGPRS(r1)
 
+	/* Save CR */
+	mfcr	r14
+	stw	r14, _CCR(r1)
+
 	/* Save LR */
 	PPC_STL	r0, _LINK(r1)
@@ -165,6 +169,9 @@ kvm_exit_loop:
 	PPC_LL	r4, _LINK(r1)
 	mtlr	r4
 
+	lwz	r14, _CCR(r1)
+	mtcr	r14
+
 	/* Restore non-volatile host registers (r14 - r31) */
 	REST_NVGPRS(r1)
......
@@ -777,6 +777,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		}
 	}
 
+	preempt_disable();
 	if (!(r & RESUME_HOST)) {
 		/* To avoid clobbering exit_reason, only check for signals if
 		 * we aren't already exiting to userspace for some other
@@ -798,8 +799,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			run->exit_reason = KVM_EXIT_INTR;
 			r = -EINTR;
 		} else {
-			preempt_disable();
-
 			/* In case an interrupt came in that was triggered
 			 * from userspace (like DEC), we need to check what
 			 * to inject now! */
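Structurally, the preemption fix hoists preempt_disable() so it runs once before the RESUME_HOST decision, instead of only inside the "no signal pending" branch, so the pending-interrupt check and the decision to re-enter the guest are no longer separated by a possible reschedule. A rough userspace control-flow sketch (stub names, not the kernel code; the matching preempt_enable() lives elsewhere in the real function and is placed here only to keep the stub self-contained):

    #include <stdio.h>

    #define RESUME_GUEST 0
    #define RESUME_HOST  1

    /* Userspace stubs standing in for the kernel primitives. */
    static void preempt_disable(void) { puts("preempt_disable()"); }
    static void preempt_enable(void)  { puts("preempt_enable()"); }
    static int  signal_pending(void)  { return 0; }

    static int handle_exit_tail(int r)
    {
            preempt_disable();              /* hoisted: was only in the else branch */
            if (!(r & RESUME_HOST)) {
                    if (signal_pending()) {
                            r = RESUME_HOST;        /* -EINTR path in the real code */
                    } else {
                            /* check/inject pending interrupts and resume the guest,
                             * all within the same preempt-off region */
                    }
            }
            preempt_enable();               /* placement simplified for this sketch */
            return r;
    }

    int main(void)
    {
            printf("r = %d\n", handle_exit_tail(RESUME_GUEST));
            return 0;
    }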
@@ -881,7 +880,8 @@ int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	switch (reg->id) {
 	case KVM_REG_PPC_HIOR:
-		r = put_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+		r = copy_to_user((u64 __user *)(long)reg->addr,
+				 &to_book3s(vcpu)->hior, sizeof(u64));
 		break;
 	default:
 		break;
@@ -896,7 +896,8 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
 	switch (reg->id) {
 	case KVM_REG_PPC_HIOR:
-		r = get_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
+		r = copy_from_user(&to_book3s(vcpu)->hior,
+				   (u64 __user *)(long)reg->addr, sizeof(u64));
 		if (!r)
 			to_book3s(vcpu)->hior_explicit = true;
 		break;
......
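Why the HIOR accessors switch APIs: per the "compile fix for ppc32" subject, put_user()/get_user() on a u64 does not build on 32-bit powerpc, while a sized copy_to_user()/copy_from_user() works on both ppc32 and ppc64. Those copy routines return the number of bytes left uncopied (0 on success), so the existing `if (!r)` check still means success. A small userspace illustration of that convention, with a memcpy-based stand-in (hypothetical helper name):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical stand-in for copy_to_user(): copies 'n' bytes and returns
     * the number of bytes it could NOT copy, so 0 means complete success. */
    static unsigned long fake_copy_to_user(void *to, const void *from, unsigned long n)
    {
            memcpy(to, from, n);
            return 0;
    }

    int main(void)
    {
            uint64_t hior = 0x100;          /* value the ioctl would export */
            uint64_t user_buf = 0;          /* stands in for the userspace address */
            unsigned long r;

            r = fake_copy_to_user(&user_buf, &hior, sizeof(uint64_t));
            if (!r)                         /* same success check as the patched code */
                    printf("copied hior = 0x%llx\n", (unsigned long long)user_buf);
            return 0;
    }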
@@ -34,7 +34,8 @@
 /* r2 is special: it holds 'current', and it made nonvolatile in the
  * kernel with the -ffixed-r2 gcc option. */
 #define HOST_R2         12
-#define HOST_NV_GPRS    16
+#define HOST_CR         16
+#define HOST_NV_GPRS    20
 #define HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * 4))
 #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
 #define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
@@ -296,8 +297,10 @@ heavyweight_exit:
 	/* Return to kvm_vcpu_run(). */
 	lwz	r4, HOST_STACK_LR(r1)
+	lwz	r5, HOST_CR(r1)
 	addi	r1, r1, HOST_STACK_SIZE
 	mtlr	r4
+	mtcr	r5
 	/* r3 still contains the return code from kvmppc_handle_exit(). */
 	blr
@@ -314,6 +317,8 @@ _GLOBAL(__kvmppc_vcpu_run)
 	stw	r3, HOST_RUN(r1)
 	mflr	r3
 	stw	r3, HOST_STACK_LR(r1)
+	mfcr	r5
+	stw	r5, HOST_CR(r1)
 
 	/* Save host non-volatile register state to stack. */
 	stw	r14, HOST_NV_GPR(r14)(r1)
......
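A quick check of the new frame layout in the hunk above: HOST_CR takes the old HOST_NV_GPRS slot at offset 16, the non-volatile GPR area shifts to 20, and the frame still rounds up to a 16-byte multiple. The macros below are copied from that hunk and merely evaluated in userspace:

    #include <stdio.h>

    /* Offsets copied from the patched defines, evaluated to sanity-check the
     * new stack frame layout. */
    #define HOST_R2             12
    #define HOST_CR             16
    #define HOST_NV_GPRS        20
    #define HOST_NV_GPR(n)      (HOST_NV_GPRS + ((n - 14) * 4))
    #define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
    #define HOST_STACK_SIZE     (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */

    int main(void)
    {
            printf("HOST_CR             = %d\n", HOST_CR);             /* 16 */
            printf("HOST_NV_GPR(14)     = %d\n", HOST_NV_GPR(14));     /* 20 */
            printf("HOST_NV_GPR(31)     = %d\n", HOST_NV_GPR(31));     /* 88 */
            printf("HOST_MIN_STACK_SIZE = %d\n", HOST_MIN_STACK_SIZE); /* 92 */
            printf("HOST_STACK_SIZE     = %d\n", HOST_STACK_SIZE);     /* 96, 16-byte aligned */
            return 0;
    }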