Commit cea5d8c9, authored by Hollis Blanchard, committed by Avi Kivity

KVM: ppc: use macros instead of hardcoded literals for instruction decoding

Signed-off-by: Hollis Blanchard <hollisb@us.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Parent: f4435361
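
For orientation before the diff: the constants introduced below follow the PowerPC instruction encoding, where the primary opcode occupies the top six bits of the 32-bit instruction word and X-form instructions under primary opcode 31 carry a ten-bit extended opcode in bits 21-30. The emulation code extracts those fields with the accessors from asm/disassemble.h (get_op(), get_xop(), get_rt(), get_rs(), get_ra(), get_rb(), get_sprn()). The snippet below is a minimal user-space sketch of that field extraction, written as an illustration only; the ex_* names and bodies are assumptions for this page, not the kernel's actual asm/disassemble.h definitions.

#include <stdint.h>

/* Illustrative stand-ins for the asm/disassemble.h accessors used in the
 * diff below. Field positions follow the Power ISA big-endian bit numbering
 * (bit 0 is the most significant bit of the 32-bit word). */
static inline uint32_t ex_op(uint32_t inst)  { return inst >> 26; }           /* primary opcode, bits 0-5  */
static inline uint32_t ex_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; }  /* X-form extended op, 21-30 */
static inline uint32_t ex_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; }  /* RT/RS register field      */
static inline uint32_t ex_ra(uint32_t inst)  { return (inst >> 16) & 0x1f; }  /* RA register field         */
static inline uint32_t ex_rb(uint32_t inst)  { return (inst >> 11) & 0x1f; }  /* RB register field         */
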
@@ -30,6 +30,39 @@
 #include <asm/disassemble.h>
 #include "timing.h"
 
+#define OP_TRAP 3
+
+#define OP_31_XOP_LWZX      23
+#define OP_31_XOP_LBZX      87
+#define OP_31_XOP_STWX      151
+#define OP_31_XOP_STBX      215
+#define OP_31_XOP_STBUX     247
+#define OP_31_XOP_LHZX      279
+#define OP_31_XOP_LHZUX     311
+#define OP_31_XOP_MFSPR     339
+#define OP_31_XOP_STHX      407
+#define OP_31_XOP_STHUX     439
+#define OP_31_XOP_MTSPR     467
+#define OP_31_XOP_DCBI      470
+#define OP_31_XOP_LWBRX     534
+#define OP_31_XOP_TLBSYNC   566
+#define OP_31_XOP_STWBRX    662
+#define OP_31_XOP_LHBRX     790
+#define OP_31_XOP_STHBRX    918
+
+#define OP_LWZ  32
+#define OP_LWZU 33
+#define OP_LBZ  34
+#define OP_LBZU 35
+#define OP_STW  36
+#define OP_STWU 37
+#define OP_STB  38
+#define OP_STBU 39
+#define OP_LHZ  40
+#define OP_LHZU 41
+#define OP_STH  44
+#define OP_STHU 45
+
 void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->arch.tcr & TCR_DIE) {
@@ -78,7 +111,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
 
 	switch (get_op(inst)) {
-	case 3: /* trap */
+	case OP_TRAP:
 		vcpu->arch.esr |= ESR_PTR;
 		kvmppc_core_queue_program(vcpu);
 		advance = 0;
@@ -87,31 +120,31 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	case 31:
 		switch (get_xop(inst)) {
 
-		case 23: /* lwzx */
+		case OP_31_XOP_LWZX:
 			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 			break;
 
-		case 87: /* lbzx */
+		case OP_31_XOP_LBZX:
 			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 			break;
 
-		case 151: /* stwx */
+		case OP_31_XOP_STWX:
 			rs = get_rs(inst);
 			emulated = kvmppc_handle_store(run, vcpu,
 			                               vcpu->arch.gpr[rs],
 			                               4, 1);
 			break;
 
-		case 215: /* stbx */
+		case OP_31_XOP_STBX:
 			rs = get_rs(inst);
 			emulated = kvmppc_handle_store(run, vcpu,
 			                               vcpu->arch.gpr[rs],
 			                               1, 1);
 			break;
 
-		case 247: /* stbux */
+		case OP_31_XOP_STBUX:
 			rs = get_rs(inst);
 			ra = get_ra(inst);
 			rb = get_rb(inst);
@@ -126,12 +159,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			vcpu->arch.gpr[rs] = ea;
 			break;
 
-		case 279: /* lhzx */
+		case OP_31_XOP_LHZX:
 			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 			break;
 
-		case 311: /* lhzux */
+		case OP_31_XOP_LHZUX:
 			rt = get_rt(inst);
 			ra = get_ra(inst);
 			rb = get_rb(inst);
@@ -144,7 +177,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			vcpu->arch.gpr[ra] = ea;
 			break;
 
-		case 339: /* mfspr */
+		case OP_31_XOP_MFSPR:
 			sprn = get_sprn(inst);
 			rt = get_rt(inst);
@@ -185,7 +218,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			}
 			break;
 
-		case 407: /* sthx */
+		case OP_31_XOP_STHX:
 			rs = get_rs(inst);
 			ra = get_ra(inst);
 			rb = get_rb(inst);
@@ -195,7 +228,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			                               2, 1);
 			break;
 
-		case 439: /* sthux */
+		case OP_31_XOP_STHUX:
 			rs = get_rs(inst);
 			ra = get_ra(inst);
 			rb = get_rb(inst);
@@ -210,7 +243,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			vcpu->arch.gpr[ra] = ea;
 			break;
 
-		case 467: /* mtspr */
+		case OP_31_XOP_MTSPR:
 			sprn = get_sprn(inst);
 			rs = get_rs(inst);
 			switch (sprn) {
@@ -246,7 +279,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			}
 			break;
 
-		case 470: /* dcbi */
+		case OP_31_XOP_DCBI:
 			/* Do nothing. The guest is performing dcbi because
 			 * hardware DMA is not snooped by the dcache, but
 			 * emulated DMA either goes through the dcache as
@@ -254,15 +287,15 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			 * coherence. */
 			break;
 
-		case 534: /* lwbrx */
+		case OP_31_XOP_LWBRX:
 			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
 			break;
 
-		case 566: /* tlbsync */
+		case OP_31_XOP_TLBSYNC:
 			break;
 
-		case 662: /* stwbrx */
+		case OP_31_XOP_STWBRX:
 			rs = get_rs(inst);
 			ra = get_ra(inst);
 			rb = get_rb(inst);
@@ -272,12 +305,12 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 			                               4, 0);
 			break;
 
-		case 790: /* lhbrx */
+		case OP_31_XOP_LHBRX:
 			rt = get_rt(inst);
 			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
 			break;
 
-		case 918: /* sthbrx */
+		case OP_31_XOP_STHBRX:
 			rs = get_rs(inst);
 			ra = get_ra(inst);
 			rb = get_rb(inst);
@@ -293,37 +326,37 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		}
 		break;
 
-	case 32: /* lwz */
+	case OP_LWZ:
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		break;
 
-	case 33: /* lwzu */
+	case OP_LWZU:
 		ra = get_ra(inst);
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
 		break;
 
-	case 34: /* lbz */
+	case OP_LBZ:
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 		break;
 
-	case 35: /* lbzu */
+	case OP_LBZU:
 		ra = get_ra(inst);
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
 		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
 		break;
 
-	case 36: /* stw */
+	case OP_STW:
 		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
 		                               4, 1);
 		break;
 
-	case 37: /* stwu */
+	case OP_STWU:
 		ra = get_ra(inst);
 		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
@@ -331,13 +364,13 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
 		break;
 
-	case 38: /* stb */
+	case OP_STB:
 		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
 		                               1, 1);
 		break;
 
-	case 39: /* stbu */
+	case OP_STBU:
 		ra = get_ra(inst);
 		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
@@ -345,25 +378,25 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
 		break;
 
-	case 40: /* lhz */
+	case OP_LHZ:
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 		break;
 
-	case 41: /* lhzu */
+	case OP_LHZU:
 		ra = get_ra(inst);
 		rt = get_rt(inst);
 		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
 		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
 		break;
 
-	case 44: /* sth */
+	case OP_STH:
 		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
 		                               2, 1);
 		break;
 
-	case 45: /* sthu */
+	case OP_STHU:
 		ra = get_ra(inst);
 		rs = get_rs(inst);
 		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
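
As a quick sanity check on the constants above: the hand-assembled word 0x7C64282E encodes lwzx r3, r4, r5. Decoding it with the same field shifts as the illustrative sketch near the top of this page yields primary opcode 31 and extended opcode 23, i.e. OP_31_XOP_LWZX, which is the path that calls kvmppc_handle_load() with a 4-byte size in the diff. The instruction word and the inlined shifts are illustrative assumptions, not part of the commit.

#include <stdint.h>
#include <stdio.h>

/* Decode a hand-assembled lwzx r3, r4, r5 (0x7C64282E) using the same
 * shifts as the illustrative ex_* helpers shown earlier on this page. */
int main(void)
{
	uint32_t inst = 0x7C64282E;

	printf("op  = %u\n", inst >> 26);          /* 31 -> secondary (get_xop) decode */
	printf("xop = %u\n", (inst >> 1) & 0x3ff); /* 23 -> OP_31_XOP_LWZX             */
	printf("rt  = %u\n", (inst >> 21) & 0x1f); /* 3  -> destination GPR            */
	printf("ra  = %u\n", (inst >> 16) & 0x1f); /* 4  -> base register              */
	printf("rb  = %u\n", (inst >> 11) & 0x1f); /* 5  -> index register             */
	return 0;
}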