提交 1aa36616 编写于 作者: A Avi Kivity

KVM: x86 emulator: consolidate segment accessors

Instead of separate accessors for the segment selector and cached descriptor,
use one accessor for both.  This simplifies the code somewhat.
Signed-off-by: Avi Kivity <avi@redhat.com>
上级 0a434bb2
...@@ -164,15 +164,10 @@ struct x86_emulate_ops { ...@@ -164,15 +164,10 @@ struct x86_emulate_ops {
int size, unsigned short port, const void *val, int size, unsigned short port, const void *val,
unsigned int count); unsigned int count);
bool (*get_cached_descriptor)(struct x86_emulate_ctxt *ctxt, bool (*get_segment)(struct x86_emulate_ctxt *ctxt, u16 *selector,
struct desc_struct *desc, u32 *base3, struct desc_struct *desc, u32 *base3, int seg);
int seg); void (*set_segment)(struct x86_emulate_ctxt *ctxt, u16 selector,
void (*set_cached_descriptor)(struct x86_emulate_ctxt *ctxt, struct desc_struct *desc, u32 base3, int seg);
struct desc_struct *desc, u32 base3,
int seg);
u16 (*get_segment_selector)(struct x86_emulate_ctxt *ctxt, int seg);
void (*set_segment_selector)(struct x86_emulate_ctxt *ctxt,
u16 sel, int seg);
unsigned long (*get_cached_segment_base)(struct x86_emulate_ctxt *ctxt, unsigned long (*get_cached_segment_base)(struct x86_emulate_ctxt *ctxt,
int seg); int seg);
void (*get_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt); void (*get_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
......
...@@ -553,6 +553,26 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt) ...@@ -553,6 +553,26 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
return emulate_exception(ctxt, NM_VECTOR, 0, false); return emulate_exception(ctxt, NM_VECTOR, 0, false);
} }
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
{
u16 selector;
struct desc_struct desc;
ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
return selector;
}
/*
 * Overwrite only the selector of segment register @seg: the currently
 * cached descriptor (and high base word) is read back first and written
 * out again unchanged together with the new selector.
 */
static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
				 unsigned seg)
{
	struct desc_struct cur_desc;
	u32 base3;
	u16 old_sel;

	ctxt->ops->get_segment(ctxt, &old_sel, &cur_desc, &base3, seg);
	ctxt->ops->set_segment(ctxt, selector, &cur_desc, base3, seg);
}
static int __linearize(struct x86_emulate_ctxt *ctxt, static int __linearize(struct x86_emulate_ctxt *ctxt,
struct segmented_address addr, struct segmented_address addr,
unsigned size, bool write, bool fetch, unsigned size, bool write, bool fetch,
...@@ -563,6 +583,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, ...@@ -563,6 +583,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
bool usable; bool usable;
ulong la; ulong la;
u32 lim; u32 lim;
u16 sel;
unsigned cpl, rpl; unsigned cpl, rpl;
la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea; la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
...@@ -574,8 +595,8 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, ...@@ -574,8 +595,8 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
return emulate_gp(ctxt, 0); return emulate_gp(ctxt, 0);
break; break;
default: default:
usable = ctxt->ops->get_cached_descriptor(ctxt, &desc, NULL, usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
addr.seg); addr.seg);
if (!usable) if (!usable)
goto bad; goto bad;
/* code segment or read-only data segment */ /* code segment or read-only data segment */
...@@ -598,7 +619,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, ...@@ -598,7 +619,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
goto bad; goto bad;
} }
cpl = ctxt->ops->cpl(ctxt); cpl = ctxt->ops->cpl(ctxt);
rpl = ctxt->ops->get_segment_selector(ctxt, addr.seg) & 3; rpl = sel & 3;
cpl = max(cpl, rpl); cpl = max(cpl, rpl);
if (!(desc.type & 8)) { if (!(desc.type & 8)) {
/* data segment */ /* data segment */
...@@ -1142,9 +1163,10 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, ...@@ -1142,9 +1163,10 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
{ {
if (selector & 1 << 2) { if (selector & 1 << 2) {
struct desc_struct desc; struct desc_struct desc;
u16 sel;
memset (dt, 0, sizeof *dt); memset (dt, 0, sizeof *dt);
if (!ops->get_cached_descriptor(ctxt, &desc, NULL, if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
VCPU_SREG_LDTR))
return; return;
dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
...@@ -1305,8 +1327,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, ...@@ -1305,8 +1327,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
return ret; return ret;
} }
load: load:
ops->set_segment_selector(ctxt, selector, seg); ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
ops->set_cached_descriptor(ctxt, &seg_desc, 0, seg);
return X86EMUL_CONTINUE; return X86EMUL_CONTINUE;
exception: exception:
emulate_exception(ctxt, err_vec, err_code, true); emulate_exception(ctxt, err_vec, err_code, true);
...@@ -1464,7 +1485,7 @@ static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, ...@@ -1464,7 +1485,7 @@ static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
{ {
struct decode_cache *c = &ctxt->decode; struct decode_cache *c = &ctxt->decode;
c->src.val = ops->get_segment_selector(ctxt, seg); c->src.val = get_segment_selector(ctxt, seg);
return em_push(ctxt); return em_push(ctxt);
} }
...@@ -1552,7 +1573,7 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt, ...@@ -1552,7 +1573,7 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
c->src.val = ops->get_segment_selector(ctxt, VCPU_SREG_CS); c->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
rc = em_push(ctxt); rc = em_push(ctxt);
if (rc != X86EMUL_CONTINUE) if (rc != X86EMUL_CONTINUE)
return rc; return rc;
...@@ -1838,8 +1859,10 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, ...@@ -1838,8 +1859,10 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops, struct desc_struct *cs, struct x86_emulate_ops *ops, struct desc_struct *cs,
struct desc_struct *ss) struct desc_struct *ss)
{ {
u16 selector;
memset(cs, 0, sizeof(struct desc_struct)); memset(cs, 0, sizeof(struct desc_struct));
ops->get_cached_descriptor(ctxt, cs, NULL, VCPU_SREG_CS); ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
memset(ss, 0, sizeof(struct desc_struct)); memset(ss, 0, sizeof(struct desc_struct));
cs->l = 0; /* will be adjusted later */ cs->l = 0; /* will be adjusted later */
...@@ -1888,10 +1911,8 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -1888,10 +1911,8 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
cs.d = 0; cs.d = 0;
cs.l = 1; cs.l = 1;
} }
ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
c->regs[VCPU_REGS_RCX] = c->eip; c->regs[VCPU_REGS_RCX] = c->eip;
if (efer & EFER_LMA) { if (efer & EFER_LMA) {
...@@ -1961,10 +1982,8 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -1961,10 +1982,8 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
cs.l = 1; cs.l = 1;
} }
ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
c->eip = msr_data; c->eip = msr_data;
...@@ -2018,10 +2037,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -2018,10 +2037,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
cs_sel |= SELECTOR_RPL_MASK; cs_sel |= SELECTOR_RPL_MASK;
ss_sel |= SELECTOR_RPL_MASK; ss_sel |= SELECTOR_RPL_MASK;
ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
c->eip = c->regs[VCPU_REGS_RDX]; c->eip = c->regs[VCPU_REGS_RDX];
c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX]; c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
...@@ -2048,11 +2065,11 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, ...@@ -2048,11 +2065,11 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
struct desc_struct tr_seg; struct desc_struct tr_seg;
u32 base3; u32 base3;
int r; int r;
u16 io_bitmap_ptr, perm, bit_idx = port & 0x7; u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
unsigned mask = (1 << len) - 1; unsigned mask = (1 << len) - 1;
unsigned long base; unsigned long base;
ops->get_cached_descriptor(ctxt, &tr_seg, &base3, VCPU_SREG_TR); ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
if (!tr_seg.p) if (!tr_seg.p)
return false; return false;
if (desc_limit_scaled(&tr_seg) < 103) if (desc_limit_scaled(&tr_seg) < 103)
...@@ -2107,11 +2124,11 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, ...@@ -2107,11 +2124,11 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
tss->si = c->regs[VCPU_REGS_RSI]; tss->si = c->regs[VCPU_REGS_RSI];
tss->di = c->regs[VCPU_REGS_RDI]; tss->di = c->regs[VCPU_REGS_RDI];
tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
tss->ldt = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR); tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
} }
static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
...@@ -2136,11 +2153,11 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, ...@@ -2136,11 +2153,11 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
* SDM says that segment selectors are loaded before segment * SDM says that segment selectors are loaded before segment
* descriptors * descriptors
*/ */
ops->set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
/* /*
* Now load segment descriptors. If fault happenes at this stage * Now load segment descriptors. If fault happenes at this stage
...@@ -2227,13 +2244,13 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, ...@@ -2227,13 +2244,13 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
tss->esi = c->regs[VCPU_REGS_RSI]; tss->esi = c->regs[VCPU_REGS_RSI];
tss->edi = c->regs[VCPU_REGS_RDI]; tss->edi = c->regs[VCPU_REGS_RDI];
tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES); tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS); tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS); tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS); tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
tss->fs = ops->get_segment_selector(ctxt, VCPU_SREG_FS); tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
tss->gs = ops->get_segment_selector(ctxt, VCPU_SREG_GS); tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
tss->ldt_selector = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR); tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
} }
static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
...@@ -2260,13 +2277,13 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, ...@@ -2260,13 +2277,13 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
* SDM says that segment selectors are loaded before segment * SDM says that segment selectors are loaded before segment
* descriptors * descriptors
*/ */
ops->set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
ops->set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
ops->set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
/* /*
* Now load segment descriptors. If fault happenes at this stage * Now load segment descriptors. If fault happenes at this stage
...@@ -2348,7 +2365,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, ...@@ -2348,7 +2365,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
{ {
struct desc_struct curr_tss_desc, next_tss_desc; struct desc_struct curr_tss_desc, next_tss_desc;
int ret; int ret;
u16 old_tss_sel = ops->get_segment_selector(ctxt, VCPU_SREG_TR); u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
ulong old_tss_base = ulong old_tss_base =
ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
u32 desc_limit; u32 desc_limit;
...@@ -2411,8 +2428,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, ...@@ -2411,8 +2428,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
} }
ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
ops->set_cached_descriptor(ctxt, &next_tss_desc, 0, VCPU_SREG_TR); ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
ops->set_segment_selector(ctxt, tss_selector, VCPU_SREG_TR);
if (has_error_code) { if (has_error_code) {
struct decode_cache *c = &ctxt->decode; struct decode_cache *c = &ctxt->decode;
...@@ -2503,7 +2519,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt) ...@@ -2503,7 +2519,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
ulong old_eip; ulong old_eip;
int rc; int rc;
old_cs = ctxt->ops->get_segment_selector(ctxt, VCPU_SREG_CS); old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
old_eip = c->eip; old_eip = c->eip;
memcpy(&sel, c->src.valptr + c->op_bytes, 2); memcpy(&sel, c->src.valptr + c->op_bytes, 2);
...@@ -3881,7 +3897,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) ...@@ -3881,7 +3897,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
rc = emulate_ud(ctxt); rc = emulate_ud(ctxt);
goto done; goto done;
} }
c->dst.val = ops->get_segment_selector(ctxt, c->modrm_reg); c->dst.val = get_segment_selector(ctxt, c->modrm_reg);
break; break;
case 0x8d: /* lea r16/r32, m */ case 0x8d: /* lea r16/r32, m */
c->dst.val = c->src.addr.mem.ea; c->dst.val = c->src.addr.mem.ea;
......
...@@ -4304,13 +4304,14 @@ static unsigned long emulator_get_cached_segment_base( ...@@ -4304,13 +4304,14 @@ static unsigned long emulator_get_cached_segment_base(
return get_segment_base(emul_to_vcpu(ctxt), seg); return get_segment_base(emul_to_vcpu(ctxt), seg);
} }
static bool emulator_get_cached_descriptor(struct x86_emulate_ctxt *ctxt, static bool emulator_get_segment(struct x86_emulate_ctxt *ctxt, u16 *selector,
struct desc_struct *desc, u32 *base3, struct desc_struct *desc, u32 *base3,
int seg) int seg)
{ {
struct kvm_segment var; struct kvm_segment var;
kvm_get_segment(emul_to_vcpu(ctxt), &var, seg); kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
*selector = var.selector;
if (var.unusable) if (var.unusable)
return false; return false;
...@@ -4335,16 +4336,14 @@ static bool emulator_get_cached_descriptor(struct x86_emulate_ctxt *ctxt, ...@@ -4335,16 +4336,14 @@ static bool emulator_get_cached_descriptor(struct x86_emulate_ctxt *ctxt,
return true; return true;
} }
static void emulator_set_cached_descriptor(struct x86_emulate_ctxt *ctxt, static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector,
struct desc_struct *desc, u32 base3, struct desc_struct *desc, u32 base3,
int seg) int seg)
{ {
struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
struct kvm_segment var; struct kvm_segment var;
/* needed to preserve selector */ var.selector = selector;
kvm_get_segment(vcpu, &var, seg);
var.base = get_desc_base(desc); var.base = get_desc_base(desc);
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
var.base |= ((u64)base3) << 32; var.base |= ((u64)base3) << 32;
...@@ -4368,24 +4367,6 @@ static void emulator_set_cached_descriptor(struct x86_emulate_ctxt *ctxt, ...@@ -4368,24 +4367,6 @@ static void emulator_set_cached_descriptor(struct x86_emulate_ctxt *ctxt,
return; return;
} }
static u16 emulator_get_segment_selector(struct x86_emulate_ctxt *ctxt, int seg)
{
struct kvm_segment kvm_seg;
kvm_get_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
return kvm_seg.selector;
}
static void emulator_set_segment_selector(struct x86_emulate_ctxt *ctxt,
u16 sel, int seg)
{
struct kvm_segment kvm_seg;
kvm_get_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
kvm_seg.selector = sel;
kvm_set_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
}
static int emulator_get_msr(struct x86_emulate_ctxt *ctxt, static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
u32 msr_index, u64 *pdata) u32 msr_index, u64 *pdata)
{ {
...@@ -4436,10 +4417,8 @@ static struct x86_emulate_ops emulate_ops = { ...@@ -4436,10 +4417,8 @@ static struct x86_emulate_ops emulate_ops = {
.invlpg = emulator_invlpg, .invlpg = emulator_invlpg,
.pio_in_emulated = emulator_pio_in_emulated, .pio_in_emulated = emulator_pio_in_emulated,
.pio_out_emulated = emulator_pio_out_emulated, .pio_out_emulated = emulator_pio_out_emulated,
.get_cached_descriptor = emulator_get_cached_descriptor, .get_segment = emulator_get_segment,
.set_cached_descriptor = emulator_set_cached_descriptor, .set_segment = emulator_set_segment,
.get_segment_selector = emulator_get_segment_selector,
.set_segment_selector = emulator_set_segment_selector,
.get_cached_segment_base = emulator_get_cached_segment_base, .get_cached_segment_base = emulator_get_cached_segment_base,
.get_gdt = emulator_get_gdt, .get_gdt = emulator_get_gdt,
.get_idt = emulator_get_idt, .get_idt = emulator_get_idt,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册