Commit 2dafc6c2 authored by Gleb Natapov, committed by Avi Kivity

KVM: x86 emulator: Provide more callbacks for x86 emulator.

Provide get_cached_descriptor(), set_cached_descriptor(),
get_segment_selector(), set_segment_selector(), get_gdt(),
write_std() callbacks.
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Parent: aca06a83
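For orientation, a minimal sketch (not part of this commit) of how an emulator routine could use the new segment callbacks; the function name reload_segment_for_emulation() and its error handling are illustrative assumptions, only the callback signatures are taken from the patch:

/*
 * Illustrative sketch only: read a segment's selector and cached
 * descriptor through the new callbacks, then write them back.
 */
static int reload_segment_for_emulation(struct x86_emulate_ops *ops,
					int seg, struct kvm_vcpu *vcpu)
{
	struct desc_struct desc;
	u16 sel = ops->get_segment_selector(seg, vcpu);

	if (!ops->get_cached_descriptor(&desc, seg, vcpu))
		return -1;	/* segment register marked unusable */

	/* ... inspect or modify 'desc' here ... */

	ops->set_cached_descriptor(&desc, seg, vcpu);
	ops->set_segment_selector(sel, seg, vcpu);
	return 0;
}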
@@ -62,6 +62,15 @@ struct x86_emulate_ops {
int (*read_std)(unsigned long addr, void *val,
unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
/*
* write_std: Write bytes of standard (non-emulated/special) memory.
* Used for descriptor writing.
* @addr: [IN ] Linear address to which to write.
* @val: [IN ] Data to write to memory.
* @bytes: [IN ] Number of bytes to write to memory.
*/
int (*write_std)(unsigned long addr, void *val,
unsigned int bytes, struct kvm_vcpu *vcpu, u32 *error);
/*
* fetch: Read bytes of standard (non-emulated/special) memory.
* Used for instruction fetch.
@@ -108,6 +117,13 @@ struct x86_emulate_ops {
const void *new,
unsigned int bytes,
struct kvm_vcpu *vcpu);
bool (*get_cached_descriptor)(struct desc_struct *desc,
int seg, struct kvm_vcpu *vcpu);
void (*set_cached_descriptor)(struct desc_struct *desc,
int seg, struct kvm_vcpu *vcpu);
u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
void (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
int (*cpl)(struct kvm_vcpu *vcpu);
...
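Taken together, get_gdt() and write_std() let the emulator modify a descriptor table entry in place. A hedged sketch follows; mark_tss_busy(), the busy-bit constant and the selector arithmetic are illustrative assumptions rather than code from this patch:

/* Sketch only: set the busy bit of the TSS descriptor selected by 'sel'. */
static int mark_tss_busy(struct x86_emulate_ops *ops, u16 sel,
			 struct kvm_vcpu *vcpu)
{
	struct desc_ptr gdt;
	struct desc_struct desc;
	ulong addr;
	u32 err;

	ops->get_gdt(&gdt, vcpu);
	addr = gdt.address + (sel & ~7);	/* selector index * 8 into the GDT */

	if (ops->read_std(addr, &desc, sizeof(desc), vcpu, &err) !=
	    X86EMUL_CONTINUE)
		return -1;
	desc.type |= 1 << 1;			/* TSS busy bit */
	return ops->write_std(addr, &desc, sizeof(desc), vcpu, &err);
}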
@@ -3058,6 +3058,18 @@ static int vcpu_mmio_read(struct kvm_vcpu *vcpu, gpa_t addr, int len, void *v)
return kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, addr, len, v);
}
static void kvm_set_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
kvm_x86_ops->set_segment(vcpu, var, seg);
}
void kvm_get_segment(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg)
{
kvm_x86_ops->get_segment(vcpu, var, seg);
}
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva, u32 *error)
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
@@ -3138,14 +3150,18 @@ static int kvm_read_guest_virt_system(gva_t addr, void *val, unsigned int bytes,
return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
}
-static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
-                                struct kvm_vcpu *vcpu, u32 *error)
+static int kvm_write_guest_virt_helper(gva_t addr, void *val,
+                                       unsigned int bytes,
+                                       struct kvm_vcpu *vcpu, u32 access,
+                                       u32 *error)
{
void *data = val;
int r = X86EMUL_CONTINUE;
access |= PFERR_WRITE_MASK;
while (bytes) {
-		gpa_t gpa = kvm_mmu_gva_to_gpa_write(vcpu, addr, error);
+		gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, addr, access, error);
unsigned offset = addr & (PAGE_SIZE-1);
unsigned towrite = min(bytes, (unsigned)PAGE_SIZE - offset);
int ret;
@@ -3168,6 +3184,19 @@ static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
return r;
}
static int kvm_write_guest_virt(gva_t addr, void *val, unsigned int bytes,
struct kvm_vcpu *vcpu, u32 *error)
{
u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0;
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, access, error);
}
static int kvm_write_guest_virt_system(gva_t addr, void *val,
unsigned int bytes,
struct kvm_vcpu *vcpu, u32 *error)
{
return kvm_write_guest_virt_helper(addr, val, bytes, vcpu, 0, error);
}
static int emulator_read_emulated(unsigned long addr, static int emulator_read_emulated(unsigned long addr,
void *val, void *val,
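The write path now mirrors the read path: kvm_write_guest_virt() translates with the caller's privilege (adding PFERR_USER_MASK at CPL 3), while kvm_write_guest_virt_system() passes access = 0 so emulator-initiated descriptor writes are treated as supervisor accesses. The illustration below restates the code above rather than adding any new API:

/* At CPL 3 the two wrappers reach the helper with different access bits: */
kvm_write_guest_virt(addr, val, bytes, vcpu, &err);
/*   -> kvm_write_guest_virt_helper(..., access = PFERR_USER_MASK, ...)    */
/*      and the helper then ORs in PFERR_WRITE_MASK                        */

kvm_write_guest_virt_system(addr, val, bytes, vcpu, &err);
/*   -> kvm_write_guest_virt_helper(..., access = 0, ...)                  */
/*      a supervisor-mode write, suitable for descriptor updates           */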
@@ -3484,12 +3513,95 @@ static int emulator_get_cpl(struct kvm_vcpu *vcpu)
return kvm_x86_ops->get_cpl(vcpu);
}
static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
{
kvm_x86_ops->get_gdt(vcpu, dt);
}
static bool emulator_get_cached_descriptor(struct desc_struct *desc, int seg,
struct kvm_vcpu *vcpu)
{
struct kvm_segment var;
kvm_get_segment(vcpu, &var, seg);
if (var.unusable)
return false;
if (var.g)
var.limit >>= 12;
set_desc_limit(desc, var.limit);
set_desc_base(desc, (unsigned long)var.base);
desc->type = var.type;
desc->s = var.s;
desc->dpl = var.dpl;
desc->p = var.present;
desc->avl = var.avl;
desc->l = var.l;
desc->d = var.db;
desc->g = var.g;
return true;
}
static void emulator_set_cached_descriptor(struct desc_struct *desc, int seg,
struct kvm_vcpu *vcpu)
{
struct kvm_segment var;
/* needed to preserve selector */
kvm_get_segment(vcpu, &var, seg);
var.base = get_desc_base(desc);
var.limit = get_desc_limit(desc);
if (desc->g)
var.limit = (var.limit << 12) | 0xfff;
var.type = desc->type;
var.present = desc->p;
var.dpl = desc->dpl;
var.db = desc->d;
var.s = desc->s;
var.l = desc->l;
var.g = desc->g;
var.avl = desc->avl;
var.present = desc->p;
var.unusable = !var.present;
var.padding = 0;
kvm_set_segment(vcpu, &var, seg);
return;
}
static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
{
struct kvm_segment kvm_seg;
kvm_get_segment(vcpu, &kvm_seg, seg);
return kvm_seg.selector;
}
static void emulator_set_segment_selector(u16 sel, int seg,
struct kvm_vcpu *vcpu)
{
struct kvm_segment kvm_seg;
kvm_get_segment(vcpu, &kvm_seg, seg);
kvm_seg.selector = sel;
kvm_set_segment(vcpu, &kvm_seg, seg);
}
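
The limit handling in the two descriptor helpers follows the hardware granularity bit: with G = 1 the 20-bit descriptor limit counts 4 KiB pages, so the cached byte limit is shifted down by 12 on the way out and re-expanded with the low 12 bits set on the way back. A small worked example (values chosen for illustration):

/* Worked example of the G-bit limit conversion used above. */
u32 byte_limit = 0xffffffff;                 /* flat 4 GiB segment, G = 1 */
u32 desc_limit = byte_limit >> 12;           /* 0x000fffff, fits in 20 bits */
u32 restored   = (desc_limit << 12) | 0xfff; /* 0xffffffff again */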
static struct x86_emulate_ops emulate_ops = {
.read_std = kvm_read_guest_virt_system,
.write_std = kvm_write_guest_virt_system,
.fetch = kvm_fetch_guest_virt,
.read_emulated = emulator_read_emulated,
.write_emulated = emulator_write_emulated,
.cmpxchg_emulated = emulator_cmpxchg_emulated,
.get_cached_descriptor = emulator_get_cached_descriptor,
.set_cached_descriptor = emulator_set_cached_descriptor,
.get_segment_selector = emulator_get_segment_selector,
.set_segment_selector = emulator_set_segment_selector,
.get_gdt = emulator_get_gdt,
.get_cr = emulator_get_cr,
.set_cr = emulator_set_cr,
.cpl = emulator_get_cpl,
@@ -4649,12 +4761,6 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
return 0;
}
-void kvm_get_segment(struct kvm_vcpu *vcpu,
-                     struct kvm_segment *var, int seg)
-{
-	kvm_x86_ops->get_segment(vcpu, var, seg);
-}
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
{
struct kvm_segment cs;
@@ -4726,12 +4832,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
return 0;
}
-static void kvm_set_segment(struct kvm_vcpu *vcpu,
-                            struct kvm_segment *var, int seg)
-{
-	kvm_x86_ops->set_segment(vcpu, var, seg);
-}
static void seg_desct_to_kvm_desct(struct desc_struct *seg_desc, u16 selector,
struct kvm_segment *kvm_desct)
{
...