Commit 5ad105e5, authored by Avi Kivity, committed by Marcelo Tosatti

KVM: x86 emulator: use stack size attribute to mask rsp in stack ops

The sub-register used to access the stack (sp, esp, or rsp) is not
determined by the address size attribute like other memory references,
but by the stack segment's B bit (if not in x86_64 mode).

Fix by using the existing stack_mask() to figure out the correct mask.
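
For reference, the two helpers this fix leans on look roughly as follows (a sketch of the emulator code of that period, reproduced from memory rather than verbatim): stack_mask() returns an all-ones mask in 64-bit mode and otherwise derives a 16- or 32-bit mask from the SS descriptor's B/D bit, while assign_masked() rewrites only the bits selected by that mask.

    static void assign_masked(ulong *dest, ulong src, ulong mask)
    {
            /* Update only the masked bits; the rest of *dest is preserved. */
            *dest = (*dest & ~mask) | (src & mask);
    }

    static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
    {
            u16 sel;
            struct desc_struct ss;

            if (ctxt->mode == X86EMUL_MODE_PROT64)
                    return ~0UL;
            ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
            return ~0U >> ((ss.d ^ 1) * 16);  /* B=0: 0xffff, B=1: 0xffffffff */
    }

With a 16-bit stack segment the mask is 0xffff, so stack-pointer updates and the effective addresses derived from them are confined to sp, matching real hardware.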

This long-existing bug was exposed by a combination of a27685c3
(emulate invalid guest state by default), which causes many more
instructions to be emulated, and a seabios change (possibly a bug) which
causes the high 16 bits of esp to become polluted across calls to real
mode software interrupts.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent 35f2d16b
@@ -475,13 +475,26 @@ register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 	return address_mask(ctxt, reg);
 }
 
+static void masked_increment(ulong *reg, ulong mask, int inc)
+{
+	assign_masked(reg, *reg + inc, mask);
+}
+
 static inline void
 register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
 {
+	ulong mask;
+
 	if (ctxt->ad_bytes == sizeof(unsigned long))
-		*reg += inc;
+		mask = ~0UL;
 	else
-		*reg = (*reg & ~ad_mask(ctxt)) | ((*reg + inc) & ad_mask(ctxt));
+		mask = ad_mask(ctxt);
+	masked_increment(reg, mask, inc);
+}
+
+static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
+{
+	masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
 }
 
 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -1522,8 +1535,8 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
 {
 	struct segmented_address addr;
 
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes);
-	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+	rsp_increment(ctxt, -bytes);
+	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
 	addr.seg = VCPU_SREG_SS;
 
 	return segmented_write(ctxt, addr, data, bytes);
@@ -1542,13 +1555,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	struct segmented_address addr;
 
-	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
+	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
 	addr.seg = VCPU_SREG_SS;
 	rc = segmented_read(ctxt, addr, dest, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], len);
+	rsp_increment(ctxt, len);
 	return rc;
 }
 
@@ -1688,8 +1701,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
 
 	while (reg >= VCPU_REGS_RAX) {
 		if (reg == VCPU_REGS_RSP) {
-			register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP],
-						   ctxt->op_bytes);
+			rsp_increment(ctxt, ctxt->op_bytes);
 			--reg;
 		}
 
@@ -2825,7 +2837,7 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
 	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
+	rsp_increment(ctxt, ctxt->src.val);
 	return X86EMUL_CONTINUE;
 }
 
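
As a concrete illustration of the new rsp_increment() path, the standalone demo below (hypothetical values, not part of the patch) mimics a 4-byte push with a 16-bit stack segment, where stack_mask() would return 0xffff:

    #include <stdio.h>

    typedef unsigned long ulong;

    static void assign_masked(ulong *dest, ulong src, ulong mask)
    {
            *dest = (*dest & ~mask) | (src & mask);
    }

    static void masked_increment(ulong *reg, ulong mask, int inc)
    {
            assign_masked(reg, *reg + inc, mask);
    }

    int main(void)
    {
            ulong rsp  = 0x0001fffcUL;  /* upper bits polluted by earlier real-mode code */
            ulong mask = 0xffff;        /* 16-bit stack segment: SS.B = 0 */

            masked_increment(&rsp, mask, -4);   /* emulated 4-byte push */
            printf("rsp = %#lx, ea = %#lx\n", rsp, rsp & mask);
            /* Prints rsp = 0x1fff8, ea = 0xfff8: only sp moved, and the
             * polluted upper bits no longer leak into the stack address. */
            return 0;
    }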