diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index b36c6b3fe1444ae7398a235e4f523edb30de627f..b48c133c95ab91eb5bb23b37ea88ca5f5c51da24 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -159,7 +159,10 @@ struct operand {
 	};
 	union {
 		unsigned long *reg;
-		unsigned long mem;
+		struct segmented_address {
+			ulong ea;
+			unsigned seg;
+		} mem;
 	} addr;
 	union {
 		unsigned long val;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 3325b4747394b1c7ab31cc2f5ad44facbd7c8d0e..e96705542634b2dfa33db23ba38065ffd072504e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -410,9 +410,9 @@ address_mask(struct decode_cache *c, unsigned long reg)
 }
 
 static inline unsigned long
-register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
+register_address(struct decode_cache *c, unsigned long reg)
 {
-	return base + address_mask(c, reg);
+	return address_mask(c, reg);
 }
 
 static inline void
@@ -444,26 +444,26 @@ static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
 	return ops->get_cached_segment_base(seg, ctxt->vcpu);
 }
 
-static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
-				       struct x86_emulate_ops *ops,
-				       struct decode_cache *c)
+static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
+			     struct x86_emulate_ops *ops,
+			     struct decode_cache *c)
 {
 	if (!c->has_seg_override)
 		return 0;
 
-	return seg_base(ctxt, ops, c->seg_override);
+	return c->seg_override;
 }
 
-static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
+static ulong linear(struct x86_emulate_ctxt *ctxt,
+		    struct segmented_address addr)
 {
-	return seg_base(ctxt, ops, VCPU_SREG_ES);
-}
+	struct decode_cache *c = &ctxt->decode;
+	ulong la;
 
-static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
-{
-	return seg_base(ctxt, ops, VCPU_SREG_SS);
+	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
+	if (c->ad_bytes != 8)
+		la &= (u32)-1;
+	return la;
 }
 
 static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
@@ -556,7 +556,7 @@ static void *decode_register(u8 modrm_reg, unsigned long *regs,
 
 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 			   struct x86_emulate_ops *ops,
-			   ulong addr,
+			   struct segmented_address addr,
 			   u16 *size, unsigned long *address, int op_bytes)
 {
 	int rc;
@@ -564,10 +564,12 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 	if (op_bytes == 2)
 		op_bytes = 3;
 	*address = 0;
-	rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
+	rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
+			   ctxt->vcpu, NULL);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
+	rc = ops->read_std(linear(ctxt, addr) + 2, address, op_bytes,
+			   ctxt->vcpu, NULL);
 	return rc;
 }
 
@@ -760,7 +762,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 			break;
 		}
 	}
-	op->addr.mem = modrm_ea;
+	op->addr.mem.ea = modrm_ea;
 done:
 	return rc;
 }
@@ -775,13 +777,13 @@ static int decode_abs(struct x86_emulate_ctxt *ctxt,
 	op->type = OP_MEM;
 	switch (c->ad_bytes) {
 	case 2:
-		op->addr.mem = insn_fetch(u16, 2, c->eip);
+		op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
 		break;
 	case 4:
-		op->addr.mem = insn_fetch(u32, 4, c->eip);
+		op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
 		break;
 	case 8:
-		op->addr.mem = insn_fetch(u64, 8, c->eip);
+		op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
 		break;
 	}
 done:
@@ -800,7 +802,7 @@ static void fetch_bit_operand(struct decode_cache *c)
 		else if (c->src.bytes == 4)
 			sv = (s32)c->src.val & (s32)mask;
 
-		c->dst.addr.mem += (sv >> 3);
+		c->dst.addr.mem.ea += (sv >> 3);
 	}
 
 	/* only subword offset */
@@ -1093,7 +1095,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 	case OP_MEM:
 		if (c->lock_prefix)
 			rc = ops->cmpxchg_emulated(
-					c->dst.addr.mem,
+					linear(ctxt, c->dst.addr.mem),
 					&c->dst.orig_val,
 					&c->dst.val,
 					c->dst.bytes,
@@ -1101,7 +1103,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 					ctxt->vcpu);
 		else
 			rc = ops->write_emulated(
-					c->dst.addr.mem,
+					linear(ctxt, c->dst.addr.mem),
 					&c->dst.val,
 					c->dst.bytes,
 					&err,
@@ -1129,8 +1131,8 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
 	c->dst.bytes = c->op_bytes;
 	c->dst.val = c->src.val;
 	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
-	c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
-					   c->regs[VCPU_REGS_RSP]);
+	c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	c->dst.addr.mem.seg = VCPU_SREG_SS;
 }
 
 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
@@ -1139,10 +1141,11 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
 {
 	struct decode_cache *c = &ctxt->decode;
 	int rc;
+	struct segmented_address addr;
 
-	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
-						       c->regs[VCPU_REGS_RSP]),
-			   dest, len);
+	addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	addr.seg = VCPU_SREG_SS;
+	rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -2223,14 +2226,15 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
 	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
 }
 
-static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
+static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
 			    int reg, struct operand *op)
 {
 	struct decode_cache *c = &ctxt->decode;
 	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
 
 	register_address_increment(c, &c->regs[reg], df * op->bytes);
-	op->addr.mem = register_address(c, base, c->regs[reg]);
+	op->addr.mem.ea = register_address(c, c->regs[reg]);
+	op->addr.mem.seg = seg;
 }
 
 static int em_push(struct x86_emulate_ctxt *ctxt)
@@ -2639,7 +2643,7 @@ static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
 
 	op->type = OP_IMM;
 	op->bytes = size;
-	op->addr.mem = c->eip;
+	op->addr.mem.ea = c->eip;
 	/* NB. Immediates are sign-extended as necessary. */
 	switch (op->bytes) {
 	case 1:
@@ -2821,14 +2825,13 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt)
 	if (!c->has_seg_override)
 		set_seg_override(c, VCPU_SREG_DS);
 
-	if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
-		memop.addr.mem += seg_override_base(ctxt, ops, c);
+	memop.addr.mem.seg = seg_override(ctxt, ops, c);
 
 	if (memop.type == OP_MEM && c->ad_bytes != 8)
-		memop.addr.mem = (u32)memop.addr.mem;
+		memop.addr.mem.ea = (u32)memop.addr.mem.ea;
 
 	if (memop.type == OP_MEM && c->rip_relative)
-		memop.addr.mem += c->eip;
+		memop.addr.mem.ea += c->eip;
 
 	/*
 	 * Decode and fetch the source operand: register, memory
@@ -2880,14 +2883,14 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt)
 	case SrcSI:
 		c->src.type = OP_MEM;
 		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-		c->src.addr.mem =
-			register_address(c, seg_override_base(ctxt, ops, c),
-					 c->regs[VCPU_REGS_RSI]);
+		c->src.addr.mem.ea =
+			register_address(c, c->regs[VCPU_REGS_RSI]);
+		c->src.addr.mem.seg = seg_override(ctxt, ops, c),
 		c->src.val = 0;
 		break;
 	case SrcImmFAddr:
 		c->src.type = OP_IMM;
-		c->src.addr.mem = c->eip;
+		c->src.addr.mem.ea = c->eip;
 		c->src.bytes = c->op_bytes + 2;
 		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
 		break;
@@ -2934,7 +2937,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt)
 		break;
 	case DstImmUByte:
 		c->dst.type = OP_IMM;
-		c->dst.addr.mem = c->eip;
+		c->dst.addr.mem.ea = c->eip;
 		c->dst.bytes = 1;
 		c->dst.val = insn_fetch(u8, 1, c->eip);
 		break;
@@ -2959,9 +2962,9 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt)
 	case DstDI:
 		c->dst.type = OP_MEM;
 		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-		c->dst.addr.mem =
-			register_address(c, es_base(ctxt, ops),
-					 c->regs[VCPU_REGS_RDI]);
+		c->dst.addr.mem.ea =
+			register_address(c, c->regs[VCPU_REGS_RDI]);
+		c->dst.addr.mem.seg = VCPU_SREG_ES;
 		c->dst.val = 0;
 		break;
 	case ImplicitOps:
@@ -3040,7 +3043,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
-		rc = read_emulated(ctxt, ops, c->src.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
 					c->src.valptr, c->src.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3048,7 +3051,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	if (c->src2.type == OP_MEM) {
-		rc = read_emulated(ctxt, ops, c->src2.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
 					&c->src2.val, c->src2.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3060,7 +3063,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 
 	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
 		/* optimisation - avoid slow emulated read if Mov */
-		rc = read_emulated(ctxt, ops, c->dst.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
 				   &c->dst.val, c->dst.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3211,7 +3214,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
 		break;
 	case 0x8d: /* lea r16/r32, m */
-		c->dst.val = c->src.addr.mem;
+		c->dst.val = c->src.addr.mem.ea;
 		break;
 	case 0x8e: { /* mov seg, r/m16 */
 		uint16_t sel;
@@ -3438,11 +3441,11 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	c->dst.type = saved_dst_type;
 
 	if ((c->d & SrcMask) == SrcSI)
-		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
+		string_addr_inc(ctxt, seg_override(ctxt, ops, c),
 				VCPU_REGS_RSI, &c->src);
 
 	if ((c->d & DstMask) == DstDI)
-		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
+		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
 				&c->dst);
 
 	if (c->rep_prefix && (c->d & String)) {
@@ -3535,7 +3538,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 			emulate_ud(ctxt);
 			goto done;
 		case 7: /* invlpg*/
-			emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
+			emulate_invlpg(ctxt->vcpu,
+				       linear(ctxt, c->src.addr.mem));
 			/* Disable writeback. */
 			c->dst.type = OP_NONE;
 			break;
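A minimal standalone sketch (not kernel code) of the addressing model the patch introduces: an operand's memory address is kept as a (segment, effective address) pair, and the linear address is only formed at access time by adding the segment base and masking to 32 bits unless the address size is 8 bytes, mirroring the shape of the new linear() helper. The fake_seg_base() helper, the explicit ad_bytes parameter, and the values used in main() are illustrative assumptions, not part of the patch.

#include <stdint.h>
#include <stdio.h>

/* Same idea as the patch: a segment register index plus an effective
 * address, combined into a linear address only when memory is accessed. */
struct segmented_address {
	uint64_t ea;	/* effective address (offset within the segment) */
	unsigned seg;	/* segment register index (SS, ES, an override, ...) */
};

/* Hypothetical stand-in for the emulator's cached segment base lookup;
 * it just fabricates a base per segment index so the example runs. */
static uint64_t fake_seg_base(unsigned seg)
{
	return (uint64_t)seg << 20;
}

/* Mirrors the shape of the new linear() helper: base + offset, truncated
 * to 32 bits unless the effective address size is 8 bytes. */
static uint64_t linear(struct segmented_address addr, int ad_bytes)
{
	uint64_t la = fake_seg_base(addr.seg) + addr.ea;

	if (ad_bytes != 8)
		la &= (uint32_t)-1;
	return la;
}

int main(void)
{
	/* e.g. a stack operand with an offset near the 4 GiB boundary */
	struct segmented_address push = { .ea = 0xfffffff8ull, .seg = 2 };

	printf("32-bit address size: %#llx\n",
	       (unsigned long long)linear(push, 4));	/* wraps within 4 GiB */
	printf("64-bit address size: %#llx\n",
	       (unsigned long long)linear(push, 8));	/* no truncation */
	return 0;
}

Keeping the segment identity on the operand, instead of folding the base into the address at decode time as the removed seg_override_base()/es_base()/ss_base() callers did, lets every access site in the patch (writeback, push/pop, string increments, invlpg) compute the linear address with linear() at the point of use.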