Commit ad312c7c authored by Zhang Xiantao, committed by Avi Kivity

KVM: Portability: Introduce kvm_vcpu_arch

Move all the architecture-specific fields in kvm_vcpu into a new struct
kvm_vcpu_arch.
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Acked-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Parent 682c59a3
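The shape of the change, condensed: every architecture-specific field moves out of struct kvm_vcpu into the new struct kvm_vcpu_arch, which kvm_vcpu embeds as arch, so call sites switch from vcpu->foo to vcpu->arch.foo. A minimal sketch of the pattern follows (field list abbreviated to members that appear in the hunks below; not the full definitions):

struct kvm_vcpu_arch {                      /* x86-specific state only */
    u64 host_tsc;
    u64 apic_base;
    struct kvm_lapic *apic;                 /* in-kernel local APIC */
    unsigned long cr0, cr2, cr3, cr4;
    struct kvm_mmu mmu;                     /* shadow MMU context */
    /* ... remaining architecture-specific fields ... */
};

struct kvm_vcpu {
    KVM_VCPU_COMM;                          /* architecture-neutral members */
    struct kvm_vcpu_arch arch;              /* all x86-specific state */
};

/* Call sites are converted mechanically, e.g.: */
static int is_write_protection(struct kvm_vcpu *vcpu)
{
    return vcpu->arch.cr0 & X86_CR0_WP;     /* was: vcpu->cr0 */
}

Because vcpu_svm embeds struct kvm_vcpu by value, offsetof-based accesses (for example in the svm_vcpu_run inline assembly below) pick up the extra arch level as well.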
@@ -158,7 +158,7 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
     if (dest_mode == 0) { /* Physical mode. */
         if (dest == 0xFF) { /* Broadcast. */
             for (i = 0; i < KVM_MAX_VCPUS; ++i)
-                if (kvm->vcpus[i] && kvm->vcpus[i]->apic)
+                if (kvm->vcpus[i] && kvm->vcpus[i]->arch.apic)
                     mask |= 1 << i;
             return mask;
         }
@@ -166,8 +166,8 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
             vcpu = kvm->vcpus[i];
             if (!vcpu)
                 continue;
-            if (kvm_apic_match_physical_addr(vcpu->apic, dest)) {
-                if (vcpu->apic)
+            if (kvm_apic_match_physical_addr(vcpu->arch.apic, dest)) {
+                if (vcpu->arch.apic)
                     mask = 1 << i;
                 break;
             }
@@ -177,8 +177,8 @@ static u32 ioapic_get_delivery_bitmask(struct kvm_ioapic *ioapic, u8 dest,
             vcpu = kvm->vcpus[i];
             if (!vcpu)
                 continue;
-            if (vcpu->apic &&
-                kvm_apic_match_logical_addr(vcpu->apic, dest))
+            if (vcpu->arch.apic &&
+                kvm_apic_match_logical_addr(vcpu->arch.apic, dest))
                 mask |= 1 << vcpu->vcpu_id;
         }
     ioapic_debug("mask %x\n", mask);
......
@@ -670,7 +670,7 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
     if (vmf->pgoff == 0)
         page = virt_to_page(vcpu->run);
     else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
-        page = virt_to_page(vcpu->pio_data);
+        page = virt_to_page(vcpu->arch.pio_data);
     else
         return VM_FAULT_SIGBUS;
     get_page(page);
......
@@ -58,6 +58,7 @@
 #define VEC_POS(v) ((v) & (32 - 1))
 #define REG_POS(v) (((v) >> 5) << 4)
 static inline u32 apic_get_reg(struct kvm_lapic *apic, int reg_off)
 {
     return *((u32 *) (apic->regs + reg_off));
@@ -90,7 +91,7 @@ static inline void apic_clear_vector(int vec, void *bitmap)
 static inline int apic_hw_enabled(struct kvm_lapic *apic)
 {
-    return (apic)->vcpu->apic_base & MSR_IA32_APICBASE_ENABLE;
+    return (apic)->vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE;
 }
 static inline int apic_sw_enabled(struct kvm_lapic *apic)
@@ -174,7 +175,7 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
 {
-    struct kvm_lapic *apic = vcpu->apic;
+    struct kvm_lapic *apic = vcpu->arch.apic;
     int highest_irr;
     if (!apic)
@@ -187,7 +188,7 @@ EXPORT_SYMBOL_GPL(kvm_lapic_find_highest_irr);
 int kvm_apic_set_irq(struct kvm_vcpu *vcpu, u8 vec, u8 trig)
 {
-    struct kvm_lapic *apic = vcpu->apic;
+    struct kvm_lapic *apic = vcpu->arch.apic;
     if (!apic_test_and_set_irr(vec, apic)) {
         /* a new pending irq is set in IRR */
@@ -272,7 +273,7 @@ static int apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
                int short_hand, int dest, int dest_mode)
 {
     int result = 0;
-    struct kvm_lapic *target = vcpu->apic;
+    struct kvm_lapic *target = vcpu->arch.apic;
     apic_debug("target %p, source %p, dest 0x%x, "
            "dest_mode 0x%x, short_hand 0x%x",
@@ -339,10 +340,10 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
         } else
             apic_clear_vector(vector, apic->regs + APIC_TMR);
-        if (vcpu->mp_state == VCPU_MP_STATE_RUNNABLE)
+        if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
             kvm_vcpu_kick(vcpu);
-        else if (vcpu->mp_state == VCPU_MP_STATE_HALTED) {
-            vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+        else if (vcpu->arch.mp_state == VCPU_MP_STATE_HALTED) {
+            vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
             if (waitqueue_active(&vcpu->wq))
                 wake_up_interruptible(&vcpu->wq);
         }
@@ -363,11 +364,11 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
     case APIC_DM_INIT:
         if (level) {
-            if (vcpu->mp_state == VCPU_MP_STATE_RUNNABLE)
+            if (vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE)
                 printk(KERN_DEBUG
                        "INIT on a runnable vcpu %d\n",
                        vcpu->vcpu_id);
-            vcpu->mp_state = VCPU_MP_STATE_INIT_RECEIVED;
+            vcpu->arch.mp_state = VCPU_MP_STATE_INIT_RECEIVED;
             kvm_vcpu_kick(vcpu);
         } else {
             printk(KERN_DEBUG
@@ -380,9 +381,9 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
     case APIC_DM_STARTUP:
         printk(KERN_DEBUG "SIPI to vcpu %d vector 0x%02x\n",
                vcpu->vcpu_id, vector);
-        if (vcpu->mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
-            vcpu->sipi_vector = vector;
-            vcpu->mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
+        if (vcpu->arch.mp_state == VCPU_MP_STATE_INIT_RECEIVED) {
+            vcpu->arch.sipi_vector = vector;
+            vcpu->arch.mp_state = VCPU_MP_STATE_SIPI_RECEIVED;
             if (waitqueue_active(&vcpu->wq))
                 wake_up_interruptible(&vcpu->wq);
         }
@@ -411,7 +412,7 @@ static struct kvm_lapic *kvm_apic_round_robin(struct kvm *kvm, u8 vector,
             next = 0;
         if (kvm->vcpus[next] == NULL || !test_bit(next, &bitmap))
             continue;
-        apic = kvm->vcpus[next]->apic;
+        apic = kvm->vcpus[next]->arch.apic;
         if (apic && apic_enabled(apic))
             break;
         apic = NULL;
@@ -482,12 +483,12 @@ static void apic_send_ipi(struct kvm_lapic *apic)
         if (!vcpu)
             continue;
-        if (vcpu->apic &&
+        if (vcpu->arch.apic &&
             apic_match_dest(vcpu, apic, short_hand, dest, dest_mode)) {
             if (delivery_mode == APIC_DM_LOWEST)
                 set_bit(vcpu->vcpu_id, &lpr_map);
             else
-                __apic_accept_irq(vcpu->apic, delivery_mode,
+                __apic_accept_irq(vcpu->arch.apic, delivery_mode,
                           vector, level, trig_mode);
         }
     }
@@ -495,7 +496,7 @@ static void apic_send_ipi(struct kvm_lapic *apic)
     if (delivery_mode == APIC_DM_LOWEST) {
         target = kvm_get_lowest_prio_vcpu(vcpu->kvm, vector, lpr_map);
         if (target != NULL)
-            __apic_accept_irq(target->apic, delivery_mode,
+            __apic_accept_irq(target->arch.apic, delivery_mode,
                       vector, level, trig_mode);
     }
 }
@@ -772,15 +773,15 @@ static int apic_mmio_range(struct kvm_io_device *this, gpa_t addr)
 void kvm_free_lapic(struct kvm_vcpu *vcpu)
 {
-    if (!vcpu->apic)
+    if (!vcpu->arch.apic)
         return;
-    hrtimer_cancel(&vcpu->apic->timer.dev);
+    hrtimer_cancel(&vcpu->arch.apic->timer.dev);
-    if (vcpu->apic->regs_page)
-        __free_page(vcpu->apic->regs_page);
+    if (vcpu->arch.apic->regs_page)
+        __free_page(vcpu->arch.apic->regs_page);
-    kfree(vcpu->apic);
+    kfree(vcpu->arch.apic);
 }
 /*
@@ -791,7 +792,7 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
-    struct kvm_lapic *apic = vcpu->apic;
+    struct kvm_lapic *apic = vcpu->arch.apic;
     if (!apic)
         return;
@@ -800,7 +801,7 @@ void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
 u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
 {
-    struct kvm_lapic *apic = vcpu->apic;
+    struct kvm_lapic *apic = vcpu->arch.apic;
     u64 tpr;
     if (!apic)
@@ -813,29 +814,29 @@ EXPORT_SYMBOL_GPL(kvm_lapic_get_cr8);
 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 {
-    struct kvm_lapic *apic = vcpu->apic;
+    struct kvm_lapic *apic = vcpu->arch.apic;
     if (!apic) {
         value |= MSR_IA32_APICBASE_BSP;
-        vcpu->apic_base = value;
+        vcpu->arch.apic_base = value;
         return;
     }
     if (apic->vcpu->vcpu_id)
         value &= ~MSR_IA32_APICBASE_BSP;
-    vcpu->apic_base = value;
-    apic->base_address = apic->vcpu->apic_base &
+    vcpu->arch.apic_base = value;
+    apic->base_address = apic->vcpu->arch.apic_base &
                  MSR_IA32_APICBASE_BASE;
     /* with FSB delivery interrupt, we can restart APIC functionality */
     apic_debug("apic base msr is 0x%016" PRIx64 ", and base address is "
-           "0x%lx.\n", apic->vcpu->apic_base, apic->base_address);
+           "0x%lx.\n", apic->vcpu->arch.apic_base, apic->base_address);
 }
 u64 kvm_lapic_get_base(struct kvm_vcpu *vcpu)
 {
-    return vcpu->apic_base;
+    return vcpu->arch.apic_base;
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_get_base);
@@ -847,7 +848,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
     apic_debug("%s\n", __FUNCTION__);
     ASSERT(vcpu);
-    apic = vcpu->apic;
+    apic = vcpu->arch.apic;
     ASSERT(apic != NULL);
     /* Stop the timer in case it's a reset to an active apic */
@@ -878,19 +879,19 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
     update_divide_count(apic);
     atomic_set(&apic->timer.pending, 0);
     if (vcpu->vcpu_id == 0)
-        vcpu->apic_base |= MSR_IA32_APICBASE_BSP;
+        vcpu->arch.apic_base |= MSR_IA32_APICBASE_BSP;
     apic_update_ppr(apic);
     apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
            "0x%016" PRIx64 ", base_address=0x%0lx.\n", __FUNCTION__,
            vcpu, kvm_apic_id(apic),
-           vcpu->apic_base, apic->base_address);
+           vcpu->arch.apic_base, apic->base_address);
 }
 EXPORT_SYMBOL_GPL(kvm_lapic_reset);
 int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
 {
-    struct kvm_lapic *apic = vcpu->apic;
+    struct kvm_lapic *apic = vcpu->arch.apic;
     int ret = 0;
     if (!apic)
@@ -915,7 +916,7 @@ static int __apic_timer_fn(struct kvm_lapic *apic)
     atomic_inc(&apic->timer.pending);
     if (waitqueue_active(q)) {
-        apic->vcpu->mp_state = VCPU_MP_STATE_RUNNABLE;
+        apic->vcpu->arch.mp_state = VCPU_MP_STATE_RUNNABLE;
         wake_up_interruptible(q);
     }
     if (apic_lvtt_period(apic)) {
@@ -961,7 +962,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
     if (!apic)
         goto nomem;
-    vcpu->apic = apic;
+    vcpu->arch.apic = apic;
     apic->regs_page = alloc_page(GFP_KERNEL);
     if (apic->regs_page == NULL) {
@@ -976,7 +977,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu)
     hrtimer_init(&apic->timer.dev, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
     apic->timer.dev.function = apic_timer_fn;
     apic->base_address = APIC_DEFAULT_PHYS_BASE;
-    vcpu->apic_base = APIC_DEFAULT_PHYS_BASE;
+    vcpu->arch.apic_base = APIC_DEFAULT_PHYS_BASE;
     kvm_lapic_reset(vcpu);
     apic->dev.read = apic_mmio_read;
@@ -994,7 +995,7 @@ EXPORT_SYMBOL_GPL(kvm_create_lapic);
 int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 {
-    struct kvm_lapic *apic = vcpu->apic;
+    struct kvm_lapic *apic = vcpu->arch.apic;
     int highest_irr;
     if (!apic || !apic_enabled(apic))
@@ -1010,11 +1011,11 @@ int kvm_apic_has_interrupt(struct kvm_vcpu *vcpu)
 int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 {
-    u32 lvt0 = apic_get_reg(vcpu->apic, APIC_LVT0);
+    u32 lvt0 = apic_get_reg(vcpu->arch.apic, APIC_LVT0);
     int r = 0;
     if (vcpu->vcpu_id == 0) {
-        if (!apic_hw_enabled(vcpu->apic))
+        if (!apic_hw_enabled(vcpu->arch.apic))
             r = 1;
         if ((lvt0 & APIC_LVT_MASKED) == 0 &&
             GET_APIC_DELIVERY_MODE(lvt0) == APIC_MODE_EXTINT)
@@ -1025,7 +1026,7 @@ int kvm_apic_accept_pic_intr(struct kvm_vcpu *vcpu)
 void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 {
-    struct kvm_lapic *apic = vcpu->apic;
+    struct kvm_lapic *apic = vcpu->arch.apic;
     if (apic && apic_lvt_enabled(apic, APIC_LVTT) &&
         atomic_read(&apic->timer.pending) > 0) {
@@ -1036,7 +1037,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 {
-    struct kvm_lapic *apic = vcpu->apic;
+    struct kvm_lapic *apic = vcpu->arch.apic;
     if (apic && apic_lvt_vector(apic, APIC_LVTT) == vec)
         apic->timer.last_update = ktime_add_ns(
@@ -1047,7 +1048,7 @@ void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
 int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
 {
     int vector = kvm_apic_has_interrupt(vcpu);
-    struct kvm_lapic *apic = vcpu->apic;
+    struct kvm_lapic *apic = vcpu->arch.apic;
     if (vector == -1)
         return -1;
@@ -1060,9 +1061,9 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
 void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 {
-    struct kvm_lapic *apic = vcpu->apic;
+    struct kvm_lapic *apic = vcpu->arch.apic;
-    apic->base_address = vcpu->apic_base &
+    apic->base_address = vcpu->arch.apic_base &
                  MSR_IA32_APICBASE_BASE;
     apic_set_reg(apic, APIC_LVR, APIC_VERSION);
     apic_update_ppr(apic);
@@ -1073,7 +1074,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
 {
-    struct kvm_lapic *apic = vcpu->apic;
+    struct kvm_lapic *apic = vcpu->arch.apic;
     struct hrtimer *timer;
     if (!apic)
......
@@ -180,7 +180,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
-    return vcpu->cr0 & X86_CR0_WP;
+    return vcpu->arch.cr0 & X86_CR0_WP;
 }
 static int is_cpuid_PSE36(void)
@@ -190,7 +190,7 @@ static int is_cpuid_PSE36(void)
 static int is_nx(struct kvm_vcpu *vcpu)
 {
-    return vcpu->shadow_efer & EFER_NX;
+    return vcpu->arch.shadow_efer & EFER_NX;
 }
 static int is_present_pte(unsigned long pte)
@@ -292,18 +292,18 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
     int r;
     kvm_mmu_free_some_pages(vcpu);
-    r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
+    r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
                    pte_chain_cache, 4);
     if (r)
         goto out;
-    r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
+    r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
                    rmap_desc_cache, 1);
     if (r)
         goto out;
-    r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
+    r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
     if (r)
         goto out;
-    r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
+    r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
                    mmu_page_header_cache, 4);
 out:
     return r;
@@ -311,10 +311,10 @@ out:
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-    mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
-    mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
-    mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
-    mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
+    mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
+    mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
+    mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
+    mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
 }
 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
@@ -330,7 +330,7 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
 {
-    return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
+    return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
                       sizeof(struct kvm_pte_chain));
 }
@@ -341,7 +341,7 @@ static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
 static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
 {
-    return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
+    return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
                       sizeof(struct kvm_rmap_desc));
 }
@@ -568,9 +568,9 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
     if (!vcpu->kvm->n_free_mmu_pages)
         return NULL;
-    sp = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache, sizeof *sp);
-    sp->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
-    sp->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
+    sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
+    sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+    sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
     set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
     list_add(&sp->link, &vcpu->kvm->active_mmu_pages);
     ASSERT(is_empty_shadow_page(sp->spt));
@@ -692,11 +692,11 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
     struct hlist_node *node;
     role.word = 0;
-    role.glevels = vcpu->mmu.root_level;
+    role.glevels = vcpu->arch.mmu.root_level;
     role.level = level;
     role.metaphysical = metaphysical;
     role.access = access;
-    if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
+    if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
         quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
         quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
         role.quadrant = quadrant;
@@ -718,7 +718,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
     sp->gfn = gfn;
     sp->role = role;
     hlist_add_head(&sp->hash_link, bucket);
-    vcpu->mmu.prefetch_page(vcpu, sp);
+    vcpu->arch.mmu.prefetch_page(vcpu, sp);
     if (!metaphysical)
         rmap_write_protect(vcpu->kvm, gfn);
     if (new_page)
@@ -768,7 +768,7 @@ static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
     for (i = 0; i < KVM_MAX_VCPUS; ++i)
         if (kvm->vcpus[i])
-            kvm->vcpus[i]->last_pte_updated = NULL;
+            kvm->vcpus[i]->arch.last_pte_updated = NULL;
 }
 static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
@@ -875,7 +875,7 @@ static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
 struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 {
-    gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+    gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
     if (gpa == UNMAPPED_GVA)
         return NULL;
@@ -962,7 +962,7 @@ unshadowed:
     else
         kvm_release_page_clean(page);
     if (!ptwrite || !*ptwrite)
-        vcpu->last_pte_updated = shadow_pte;
+        vcpu->arch.last_pte_updated = shadow_pte;
 }
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -972,7 +972,7 @@ static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
 static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 {
     int level = PT32E_ROOT_LEVEL;
-    hpa_t table_addr = vcpu->mmu.root_hpa;
+    hpa_t table_addr = vcpu->arch.mmu.root_hpa;
     int pt_write = 0;
     for (; ; level--) {
@@ -1024,29 +1024,29 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
     int i;
     struct kvm_mmu_page *sp;
-    if (!VALID_PAGE(vcpu->mmu.root_hpa))
+    if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
         return;
 #ifdef CONFIG_X86_64
-    if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-        hpa_t root = vcpu->mmu.root_hpa;
+    if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+        hpa_t root = vcpu->arch.mmu.root_hpa;
         sp = page_header(root);
         --sp->root_count;
-        vcpu->mmu.root_hpa = INVALID_PAGE;
+        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
         return;
     }
 #endif
     for (i = 0; i < 4; ++i) {
-        hpa_t root = vcpu->mmu.pae_root[i];
+        hpa_t root = vcpu->arch.mmu.pae_root[i];
         if (root) {
             root &= PT64_BASE_ADDR_MASK;
             sp = page_header(root);
             --sp->root_count;
         }
-        vcpu->mmu.pae_root[i] = INVALID_PAGE;
+        vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
     }
-    vcpu->mmu.root_hpa = INVALID_PAGE;
+    vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }
 static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
@@ -1055,41 +1055,41 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
     gfn_t root_gfn;
     struct kvm_mmu_page *sp;
-    root_gfn = vcpu->cr3 >> PAGE_SHIFT;
+    root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
 #ifdef CONFIG_X86_64
-    if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
-        hpa_t root = vcpu->mmu.root_hpa;
+    if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
+        hpa_t root = vcpu->arch.mmu.root_hpa;
         ASSERT(!VALID_PAGE(root));
         sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
                       PT64_ROOT_LEVEL, 0, ACC_ALL, NULL, NULL);
         root = __pa(sp->spt);
         ++sp->root_count;
-        vcpu->mmu.root_hpa = root;
+        vcpu->arch.mmu.root_hpa = root;
         return;
     }
 #endif
     for (i = 0; i < 4; ++i) {
-        hpa_t root = vcpu->mmu.pae_root[i];
+        hpa_t root = vcpu->arch.mmu.pae_root[i];
         ASSERT(!VALID_PAGE(root));
-        if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
-            if (!is_present_pte(vcpu->pdptrs[i])) {
-                vcpu->mmu.pae_root[i] = 0;
+        if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
+            if (!is_present_pte(vcpu->arch.pdptrs[i])) {
+                vcpu->arch.mmu.pae_root[i] = 0;
                 continue;
             }
-            root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
-        } else if (vcpu->mmu.root_level == 0)
+            root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
+        } else if (vcpu->arch.mmu.root_level == 0)
             root_gfn = 0;
         sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                       PT32_ROOT_LEVEL, !is_paging(vcpu),
                       ACC_ALL, NULL, NULL);
         root = __pa(sp->spt);
         ++sp->root_count;
-        vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
+        vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
     }
-    vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
+    vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
 }
 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
@@ -1109,7 +1109,7 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
         return r;
     ASSERT(vcpu);
-    ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));
+    ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
     gfn = gva >> PAGE_SHIFT;
@@ -1124,7 +1124,7 @@ static void nonpaging_free(struct kvm_vcpu *vcpu)
 static int nonpaging_init_context(struct kvm_vcpu *vcpu)
 {
-    struct kvm_mmu *context = &vcpu->mmu;
+    struct kvm_mmu *context = &vcpu->arch.mmu;
     context->new_cr3 = nonpaging_new_cr3;
     context->page_fault = nonpaging_page_fault;
@@ -1171,7 +1171,7 @@ static void paging_free(struct kvm_vcpu *vcpu)
 static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
 {
-    struct kvm_mmu *context = &vcpu->mmu;
+    struct kvm_mmu *context = &vcpu->arch.mmu;
     ASSERT(is_pae(vcpu));
     context->new_cr3 = paging_new_cr3;
@@ -1192,7 +1192,7 @@ static int paging64_init_context(struct kvm_vcpu *vcpu)
 static int paging32_init_context(struct kvm_vcpu *vcpu)
 {
-    struct kvm_mmu *context = &vcpu->mmu;
+    struct kvm_mmu *context = &vcpu->arch.mmu;
     context->new_cr3 = paging_new_cr3;
     context->page_fault = paging32_page_fault;
@@ -1213,7 +1213,7 @@ static int paging32E_init_context(struct kvm_vcpu *vcpu)
 static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 {
     ASSERT(vcpu);
-    ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+    ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
     if (!is_paging(vcpu))
         return nonpaging_init_context(vcpu);
@@ -1228,9 +1228,9 @@ static int init_kvm_mmu(struct kvm_vcpu *vcpu)
 static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
 {
     ASSERT(vcpu);
-    if (VALID_PAGE(vcpu->mmu.root_hpa)) {
-        vcpu->mmu.free(vcpu);
-        vcpu->mmu.root_hpa = INVALID_PAGE;
+    if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
+        vcpu->arch.mmu.free(vcpu);
+        vcpu->arch.mmu.root_hpa = INVALID_PAGE;
     }
 }
@@ -1250,7 +1250,7 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
     if (r)
         goto out;
     mmu_alloc_roots(vcpu);
-    kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
+    kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
     kvm_mmu_flush_tlb(vcpu);
 out:
     mutex_unlock(&vcpu->kvm->lock);
@@ -1323,7 +1323,7 @@ static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
 static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
-    u64 *spte = vcpu->last_pte_updated;
+    u64 *spte = vcpu->arch.last_pte_updated;
     return !!(spte && (*spte & PT_ACCESSED_MASK));
 }
@@ -1350,15 +1350,15 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
     pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
     ++vcpu->kvm->stat.mmu_pte_write;
     kvm_mmu_audit(vcpu, "pre pte write");
-    if (gfn == vcpu->last_pt_write_gfn
+    if (gfn == vcpu->arch.last_pt_write_gfn
         && !last_updated_pte_accessed(vcpu)) {
-        ++vcpu->last_pt_write_count;
-        if (vcpu->last_pt_write_count >= 3)
+        ++vcpu->arch.last_pt_write_count;
+        if (vcpu->arch.last_pt_write_count >= 3)
             flooded = 1;
     } else {
-        vcpu->last_pt_write_gfn = gfn;
-        vcpu->last_pt_write_count = 1;
-        vcpu->last_pte_updated = NULL;
+        vcpu->arch.last_pt_write_gfn = gfn;
+        vcpu->arch.last_pt_write_count = 1;
+        vcpu->arch.last_pte_updated = NULL;
     }
     index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
     bucket = &vcpu->kvm->mmu_page_hash[index];
@@ -1420,7 +1420,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 {
-    gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);
+    gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
     return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
 }
@@ -1443,7 +1443,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
     enum emulation_result er;
     mutex_lock(&vcpu->kvm->lock);
-    r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
+    r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
     if (r < 0)
         goto out;
@@ -1486,7 +1486,7 @@ static void free_mmu_pages(struct kvm_vcpu *vcpu)
                   struct kvm_mmu_page, link);
         kvm_mmu_zap_page(vcpu->kvm, sp);
     }
-    free_page((unsigned long)vcpu->mmu.pae_root);
+    free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }
 static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
@@ -1508,9 +1508,9 @@ static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
     page = alloc_page(GFP_KERNEL | __GFP_DMA32);
     if (!page)
         goto error_1;
-    vcpu->mmu.pae_root = page_address(page);
+    vcpu->arch.mmu.pae_root = page_address(page);
     for (i = 0; i < 4; ++i)
-        vcpu->mmu.pae_root[i] = INVALID_PAGE;
+        vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
     return 0;
@@ -1522,7 +1522,7 @@ error_1:
 int kvm_mmu_create(struct kvm_vcpu *vcpu)
 {
     ASSERT(vcpu);
-    ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+    ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
     return alloc_mmu_pages(vcpu);
 }
@@ -1530,7 +1530,7 @@ int kvm_mmu_create(struct kvm_vcpu *vcpu)
 int kvm_mmu_setup(struct kvm_vcpu *vcpu)
 {
     ASSERT(vcpu);
-    ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));
+    ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
     return init_kvm_mmu(vcpu);
 }
@@ -1659,11 +1659,11 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
             printk(KERN_ERR "audit: (%s) nontrapping pte"
                    " in nonleaf level: levels %d gva %lx"
                    " level %d pte %llx\n", audit_msg,
-                   vcpu->mmu.root_level, va, level, ent);
+                   vcpu->arch.mmu.root_level, va, level, ent);
             audit_mappings_page(vcpu, ent, va, level - 1);
         } else {
-            gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
+            gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
             struct page *page = gpa_to_page(vcpu, gpa);
             hpa_t hpa = page_to_phys(page);
@@ -1671,7 +1671,7 @@ static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
             && (ent & PT64_BASE_ADDR_MASK) != hpa)
                 printk(KERN_ERR "xx audit error: (%s) levels %d"
                        " gva %lx gpa %llx hpa %llx ent %llx %d\n",
-                       audit_msg, vcpu->mmu.root_level,
+                       audit_msg, vcpu->arch.mmu.root_level,
                        va, gpa, hpa, ent,
                        is_shadow_present_pte(ent));
             else if (ent == shadow_notrap_nonpresent_pte
@@ -1688,13 +1688,13 @@ static void audit_mappings(struct kvm_vcpu *vcpu)
 {
     unsigned i;
-    if (vcpu->mmu.root_level == 4)
-        audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
+    if (vcpu->arch.mmu.root_level == 4)
+        audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
     else
         for (i = 0; i < 4; ++i)
-            if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
+            if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
                 audit_mappings_page(vcpu,
-                            vcpu->mmu.pae_root[i],
+                            vcpu->arch.mmu.pae_root[i],
                             i << 30,
                             2);
 }
......
@@ -129,11 +129,11 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
     pgprintk("%s: addr %lx\n", __FUNCTION__, addr);
 walk:
-    walker->level = vcpu->mmu.root_level;
-    pte = vcpu->cr3;
+    walker->level = vcpu->arch.mmu.root_level;
+    pte = vcpu->arch.cr3;
 #if PTTYPE == 64
     if (!is_long_mode(vcpu)) {
-        pte = vcpu->pdptrs[(addr >> 30) & 3];
+        pte = vcpu->arch.pdptrs[(addr >> 30) & 3];
         if (!is_present_pte(pte))
             goto not_present;
         --walker->level;
@@ -275,10 +275,10 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
     if (!is_present_pte(walker->ptes[walker->level - 1]))
         return NULL;
-    shadow_addr = vcpu->mmu.root_hpa;
-    level = vcpu->mmu.shadow_root_level;
+    shadow_addr = vcpu->arch.mmu.root_hpa;
+    level = vcpu->arch.mmu.shadow_root_level;
     if (level == PT32E_ROOT_LEVEL) {
-        shadow_addr = vcpu->mmu.pae_root[(addr >> 30) & 3];
+        shadow_addr = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
         shadow_addr &= PT64_BASE_ADDR_MASK;
         --level;
     }
@@ -380,7 +380,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
     if (!r) {
         pgprintk("%s: guest page fault\n", __FUNCTION__);
         inject_page_fault(vcpu, addr, walker.error_code);
-        vcpu->last_pt_write_count = 0; /* reset fork detector */
+        vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
         return 0;
     }
@@ -390,7 +390,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
          shadow_pte, *shadow_pte, write_pt);
     if (!write_pt)
-        vcpu->last_pt_write_count = 0; /* reset fork detector */
+        vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
     /*
      * mmio: emulate if accessible, otherwise its a guest fault.
......
@@ -99,20 +99,20 @@ static inline u32 svm_has(u32 feat)
 static inline u8 pop_irq(struct kvm_vcpu *vcpu)
 {
-    int word_index = __ffs(vcpu->irq_summary);
-    int bit_index = __ffs(vcpu->irq_pending[word_index]);
+    int word_index = __ffs(vcpu->arch.irq_summary);
+    int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
     int irq = word_index * BITS_PER_LONG + bit_index;
-    clear_bit(bit_index, &vcpu->irq_pending[word_index]);
-    if (!vcpu->irq_pending[word_index])
-        clear_bit(word_index, &vcpu->irq_summary);
+    clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
+    if (!vcpu->arch.irq_pending[word_index])
+        clear_bit(word_index, &vcpu->arch.irq_summary);
     return irq;
 }
 static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
 {
-    set_bit(irq, vcpu->irq_pending);
-    set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
+    set_bit(irq, vcpu->arch.irq_pending);
+    set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
 }
 static inline void clgi(void)
@@ -185,7 +185,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
         efer &= ~EFER_LME;
     to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
-    vcpu->shadow_efer = efer;
+    vcpu->arch.shadow_efer = efer;
 }
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
@@ -227,10 +227,10 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
            svm->vmcb->save.rip,
            svm->next_rip);
-    vcpu->rip = svm->vmcb->save.rip = svm->next_rip;
+    vcpu->arch.rip = svm->vmcb->save.rip = svm->next_rip;
     svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
-    vcpu->interrupt_window_open = 1;
+    vcpu->arch.interrupt_window_open = 1;
 }
 static int has_svm(void)
@@ -559,8 +559,8 @@ static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
     if (vcpu->vcpu_id != 0) {
         svm->vmcb->save.rip = 0;
-        svm->vmcb->save.cs.base = svm->vcpu.sipi_vector << 12;
-        svm->vmcb->save.cs.selector = svm->vcpu.sipi_vector << 8;
+        svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
+        svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
     }
     return 0;
@@ -597,9 +597,9 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
     fx_init(&svm->vcpu);
     svm->vcpu.fpu_active = 1;
-    svm->vcpu.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
+    svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
     if (svm->vcpu.vcpu_id == 0)
-        svm->vcpu.apic_base |= MSR_IA32_APICBASE_BSP;
+        svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
     return &svm->vcpu;
@@ -633,7 +633,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
      * increasing TSC.
      */
     rdtscll(tsc_this);
-    delta = vcpu->host_tsc - tsc_this;
+    delta = vcpu->arch.host_tsc - tsc_this;
     svm->vmcb->control.tsc_offset += delta;
     vcpu->cpu = cpu;
     kvm_migrate_apic_timer(vcpu);
@@ -652,7 +652,7 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
     for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
         wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
-    rdtscll(vcpu->host_tsc);
+    rdtscll(vcpu->arch.host_tsc);
 }
 static void svm_vcpu_decache(struct kvm_vcpu *vcpu)
@@ -663,17 +663,17 @@ static void svm_cache_regs(struct kvm_vcpu *vcpu)
 {
     struct vcpu_svm *svm = to_svm(vcpu);
-    vcpu->regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
-    vcpu->regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
-    vcpu->rip = svm->vmcb->save.rip;
+    vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
+    vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
+    vcpu->arch.rip = svm->vmcb->save.rip;
 }
 static void svm_decache_regs(struct kvm_vcpu *vcpu)
 {
     struct vcpu_svm *svm = to_svm(vcpu);
-    svm->vmcb->save.rax = vcpu->regs[VCPU_REGS_RAX];
-    svm->vmcb->save.rsp = vcpu->regs[VCPU_REGS_RSP];
-    svm->vmcb->save.rip = vcpu->rip;
+    svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+    svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+    svm->vmcb->save.rip = vcpu->arch.rip;
 }
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
@@ -771,24 +771,24 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
     struct vcpu_svm *svm = to_svm(vcpu);
 #ifdef CONFIG_X86_64
-    if (vcpu->shadow_efer & EFER_LME) {
+    if (vcpu->arch.shadow_efer & EFER_LME) {
         if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
-            vcpu->shadow_efer |= EFER_LMA;
+            vcpu->arch.shadow_efer |= EFER_LMA;
             svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
         }
         if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
-            vcpu->shadow_efer &= ~EFER_LMA;
+            vcpu->arch.shadow_efer &= ~EFER_LMA;
             svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
         }
     }
 #endif
-    if ((vcpu->cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
+    if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
         svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
         vcpu->fpu_active = 1;
     }
-    vcpu->cr0 = cr0;
+    vcpu->arch.cr0 = cr0;
     cr0 |= X86_CR0_PG | X86_CR0_WP;
     cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
     svm->vmcb->save.cr0 = cr0;
@@ -796,7 +796,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
-    vcpu->cr4 = cr4;
+    vcpu->arch.cr4 = cr4;
     to_svm(vcpu)->vmcb->save.cr4 = cr4 | X86_CR4_PAE;
 }
@@ -901,7 +901,7 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
         svm->db_regs[dr] = value;
         return;
     case 4 ... 5:
-        if (vcpu->cr4 & X86_CR4_DE) {
+        if (vcpu->arch.cr4 & X86_CR4_DE) {
             *exception = UD_VECTOR;
             return;
         }
@@ -950,7 +950,7 @@ static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
     svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-    if (!(svm->vcpu.cr0 & X86_CR0_TS))
+    if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
         svm->vmcb->save.cr0 &= ~X86_CR0_TS;
     svm->vcpu.fpu_active = 1;
@@ -1103,14 +1103,14 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-    u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
+    u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
     u64 data;
     if (svm_get_msr(&svm->vcpu, ecx, &data))
         kvm_inject_gp(&svm->vcpu, 0);
     else {
         svm->vmcb->save.rax = data & 0xffffffff;
-        svm->vcpu.regs[VCPU_REGS_RDX] = data >> 32;
+        svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
         svm->next_rip = svm->vmcb->save.rip + 2;
         skip_emulated_instruction(&svm->vcpu);
     }
@@ -1176,9 +1176,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-    u32 ecx = svm->vcpu.regs[VCPU_REGS_RCX];
+    u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
     u64 data = (svm->vmcb->save.rax & -1u)
-        | ((u64)(svm->vcpu.regs[VCPU_REGS_RDX] & -1u) << 32);
+        | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
     svm->next_rip = svm->vmcb->save.rip + 2;
     if (svm_set_msr(&svm->vcpu, ecx, data))
         kvm_inject_gp(&svm->vcpu, 0);
@@ -1205,7 +1205,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
      * possible
      */
     if (kvm_run->request_interrupt_window &&
-        !svm->vcpu.irq_summary) {
+        !svm->vcpu.arch.irq_summary) {
         ++svm->vcpu.stat.irq_window_exits;
         kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
         return 0;
@@ -1382,20 +1382,20 @@ static void kvm_reput_irq(struct vcpu_svm *svm)
         push_irq(&svm->vcpu, control->int_vector);
     }
-    svm->vcpu.interrupt_window_open =
+    svm->vcpu.arch.interrupt_window_open =
         !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
 }
 static void svm_do_inject_vector(struct vcpu_svm *svm)
 {
     struct kvm_vcpu *vcpu = &svm->vcpu;
-    int word_index = __ffs(vcpu->irq_summary);
-    int bit_index = __ffs(vcpu->irq_pending[word_index]);
+    int word_index = __ffs(vcpu->arch.irq_summary);
+    int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
     int irq = word_index * BITS_PER_LONG + bit_index;
-    clear_bit(bit_index, &vcpu->irq_pending[word_index]);
-    if (!vcpu->irq_pending[word_index])
-        clear_bit(word_index, &vcpu->irq_summary);
+    clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
+    if (!vcpu->arch.irq_pending[word_index])
+        clear_bit(word_index, &vcpu->arch.irq_summary);
     svm_inject_irq(svm, irq);
 }
@@ -1405,11 +1405,11 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
     struct vcpu_svm *svm = to_svm(vcpu);
     struct vmcb_control_area *control = &svm->vmcb->control;
-    svm->vcpu.interrupt_window_open =
+    svm->vcpu.arch.interrupt_window_open =
         (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
          (svm->vmcb->save.rflags & X86_EFLAGS_IF));
-    if (svm->vcpu.interrupt_window_open && svm->vcpu.irq_summary)
+    if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
         /*
          * If interrupts enabled, and not blocked by sti or mov ss. Good.
          */
@@ -1418,8 +1418,8 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
     /*
      * Interrupts blocked. Wait for unblock.
      */
-    if (!svm->vcpu.interrupt_window_open &&
-        (svm->vcpu.irq_summary || kvm_run->request_interrupt_window))
+    if (!svm->vcpu.arch.interrupt_window_open &&
+        (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
         control->intercept |= 1ULL << INTERCEPT_VINTR;
     else
         control->intercept &= ~(1ULL << INTERCEPT_VINTR);
@@ -1471,7 +1471,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
     svm->host_cr2 = kvm_read_cr2();
     svm->host_dr6 = read_dr6();
     svm->host_dr7 = read_dr7();
-    svm->vmcb->save.cr2 = vcpu->cr2;
+    svm->vmcb->save.cr2 = vcpu->arch.cr2;
     if (svm->vmcb->save.dr7 & 0xff) {
         write_dr7(0);
@@ -1563,21 +1563,21 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         :
         : [svm]"a"(svm),
           [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
-          [rbx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBX])),
-          [rcx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RCX])),
-          [rdx]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDX])),
-          [rsi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RSI])),
-          [rdi]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RDI])),
-          [rbp]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_RBP]))
+          [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
+          [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
+          [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
+          [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
+          [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
+          [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
 #ifdef CONFIG_X86_64
-          , [r8]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R8])),
-          [r9]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R9])),
-          [r10]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R10])),
-          [r11]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R11])),
-          [r12]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R12])),
-          [r13]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R13])),
-          [r14]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R14])),
-          [r15]"i"(offsetof(struct vcpu_svm, vcpu.regs[VCPU_REGS_R15]))
+          , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
+          [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
+          [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
+          [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
+          [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
+          [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
+          [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
+          [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
 #endif
         : "cc", "memory"
 #ifdef CONFIG_X86_64
@@ -1591,7 +1591,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
     if ((svm->vmcb->save.dr7 & 0xff))
         load_db_regs(svm->host_db_regs);
-    vcpu->cr2 = svm->vmcb->save.cr2;
+    vcpu->arch.cr2 = svm->vmcb->save.cr2;
     write_dr6(svm->host_dr6);
     write_dr7(svm->host_dr7);
......
This diff is collapsed.
This diff is collapsed.
...@@ -92,8 +92,7 @@ enum { ...@@ -92,8 +92,7 @@ enum {
#include "x86_emulate.h" #include "x86_emulate.h"
struct kvm_vcpu { struct kvm_vcpu_arch {
KVM_VCPU_COMM;
u64 host_tsc; u64 host_tsc;
int interrupt_window_open; int interrupt_window_open;
unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */ unsigned long irq_summary; /* bit vector: 1 per word in irq_pending */
...@@ -130,7 +129,6 @@ struct kvm_vcpu { ...@@ -130,7 +129,6 @@ struct kvm_vcpu {
int last_pt_write_count; int last_pt_write_count;
u64 *last_pte_updated; u64 *last_pte_updated;
struct i387_fxsave_struct host_fx_image; struct i387_fxsave_struct host_fx_image;
struct i387_fxsave_struct guest_fx_image; struct i387_fxsave_struct guest_fx_image;
...@@ -159,12 +157,17 @@ struct kvm_vcpu { ...@@ -159,12 +157,17 @@ struct kvm_vcpu {
int cpuid_nent; int cpuid_nent;
struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES]; struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
/* emulate context */ /* emulate context */
struct x86_emulate_ctxt emulate_ctxt; struct x86_emulate_ctxt emulate_ctxt;
}; };
struct kvm_vcpu {
KVM_VCPU_COMM;
struct kvm_vcpu_arch arch;
};
struct descriptor_table { struct descriptor_table {
u16 limit; u16 limit;
unsigned long base; unsigned long base;
...@@ -339,7 +342,7 @@ static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu) ...@@ -339,7 +342,7 @@ static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{ {
if (likely(vcpu->mmu.root_hpa != INVALID_PAGE)) if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
return 0; return 0;
return kvm_mmu_load(vcpu); return kvm_mmu_load(vcpu);
...@@ -348,7 +351,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu) ...@@ -348,7 +351,7 @@ static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
static inline int is_long_mode(struct kvm_vcpu *vcpu) static inline int is_long_mode(struct kvm_vcpu *vcpu)
{ {
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
return vcpu->shadow_efer & EFER_LME; return vcpu->arch.shadow_efer & EFER_LME;
#else #else
return 0; return 0;
#endif #endif
...@@ -356,17 +359,17 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu) ...@@ -356,17 +359,17 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
static inline int is_pae(struct kvm_vcpu *vcpu) static inline int is_pae(struct kvm_vcpu *vcpu)
{ {
return vcpu->cr4 & X86_CR4_PAE; return vcpu->arch.cr4 & X86_CR4_PAE;
} }
static inline int is_pse(struct kvm_vcpu *vcpu) static inline int is_pse(struct kvm_vcpu *vcpu)
{ {
return vcpu->cr4 & X86_CR4_PSE; return vcpu->arch.cr4 & X86_CR4_PSE;
} }
static inline int is_paging(struct kvm_vcpu *vcpu) static inline int is_paging(struct kvm_vcpu *vcpu)
{ {
return vcpu->cr0 & X86_CR0_PG; return vcpu->arch.cr0 & X86_CR0_PG;
} }
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3); int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
...@@ -489,8 +492,8 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code) ...@@ -489,8 +492,8 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
static inline int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) static inline int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{ {
return vcpu->mp_state == VCPU_MP_STATE_RUNNABLE return vcpu->arch.mp_state == VCPU_MP_STATE_RUNNABLE
|| vcpu->mp_state == VCPU_MP_STATE_SIPI_RECEIVED; || vcpu->arch.mp_state == VCPU_MP_STATE_SIPI_RECEIVED;
} }
#endif #endif
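The kvm.h hunks above boil down to one structural change: the x86-specific fields move out of struct kvm_vcpu into the new struct kvm_vcpu_arch, and struct kvm_vcpu keeps only the common part (KVM_VCPU_COMM) plus an embedded arch member. A minimal sketch of the resulting layout, reproducing only fields that appear in this diff (the real structure carries many more):

struct kvm_vcpu_arch {
	u64 host_tsc;
	int interrupt_window_open;
	unsigned long irq_summary;	/* bit vector: 1 per word in irq_pending */
	/* ... remaining x86-specific state elided in this sketch ... */
	int cpuid_nent;
	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
	/* emulate context */
	struct x86_emulate_ctxt emulate_ctxt;
};

struct kvm_vcpu {
	KVM_VCPU_COMM;			/* architecture-independent fields */
	struct kvm_vcpu_arch arch;	/* x86-specific state */
};

Every accessor therefore gains one level of indirection, as in the is_pae() helper shown in the hunk above:

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr4 & X86_CR4_PAE;
}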
...@@ -769,8 +769,8 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -769,8 +769,8 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
/* Shadow copy of register state. Committed on successful emulation. */ /* Shadow copy of register state. Committed on successful emulation. */
memset(c, 0, sizeof(struct decode_cache)); memset(c, 0, sizeof(struct decode_cache));
c->eip = ctxt->vcpu->rip; c->eip = ctxt->vcpu->arch.rip;
memcpy(c->regs, ctxt->vcpu->regs, sizeof c->regs); memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
switch (mode) { switch (mode) {
case X86EMUL_MODE_REAL: case X86EMUL_MODE_REAL:
...@@ -1226,7 +1226,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -1226,7 +1226,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
* modify them. * modify them.
*/ */
memcpy(c->regs, ctxt->vcpu->regs, sizeof c->regs); memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
saved_eip = c->eip; saved_eip = c->eip;
if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs)) if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs))
...@@ -1235,7 +1235,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -1235,7 +1235,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
if (c->rep_prefix && (c->d & String)) { if (c->rep_prefix && (c->d & String)) {
/* All REP prefixes have the same first termination condition */ /* All REP prefixes have the same first termination condition */
if (c->regs[VCPU_REGS_RCX] == 0) { if (c->regs[VCPU_REGS_RCX] == 0) {
ctxt->vcpu->rip = c->eip; ctxt->vcpu->arch.rip = c->eip;
goto done; goto done;
} }
/* The second termination condition only applies for REPE /* The second termination condition only applies for REPE
...@@ -1249,17 +1249,17 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -1249,17 +1249,17 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
(c->b == 0xae) || (c->b == 0xaf)) { (c->b == 0xae) || (c->b == 0xaf)) {
if ((c->rep_prefix == REPE_PREFIX) && if ((c->rep_prefix == REPE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == 0)) { ((ctxt->eflags & EFLG_ZF) == 0)) {
ctxt->vcpu->rip = c->eip; ctxt->vcpu->arch.rip = c->eip;
goto done; goto done;
} }
if ((c->rep_prefix == REPNE_PREFIX) && if ((c->rep_prefix == REPNE_PREFIX) &&
((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) { ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) {
ctxt->vcpu->rip = c->eip; ctxt->vcpu->arch.rip = c->eip;
goto done; goto done;
} }
} }
c->regs[VCPU_REGS_RCX]--; c->regs[VCPU_REGS_RCX]--;
c->eip = ctxt->vcpu->rip; c->eip = ctxt->vcpu->arch.rip;
} }
if (c->src.type == OP_MEM) { if (c->src.type == OP_MEM) {
...@@ -1628,7 +1628,7 @@ special_insn: ...@@ -1628,7 +1628,7 @@ special_insn:
c->dst.type = OP_NONE; /* Disable writeback. */ c->dst.type = OP_NONE; /* Disable writeback. */
break; break;
case 0xf4: /* hlt */ case 0xf4: /* hlt */
ctxt->vcpu->halt_request = 1; ctxt->vcpu->arch.halt_request = 1;
goto done; goto done;
case 0xf5: /* cmc */ case 0xf5: /* cmc */
/* complement carry flag from eflags reg */ /* complement carry flag from eflags reg */
...@@ -1665,8 +1665,8 @@ writeback: ...@@ -1665,8 +1665,8 @@ writeback:
goto done; goto done;
/* Commit shadow register state. */ /* Commit shadow register state. */
memcpy(ctxt->vcpu->regs, c->regs, sizeof c->regs); memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
ctxt->vcpu->rip = c->eip; ctxt->vcpu->arch.rip = c->eip;
done: done:
if (rc == X86EMUL_UNHANDLEABLE) { if (rc == X86EMUL_UNHANDLEABLE) {
...@@ -1783,7 +1783,7 @@ twobyte_insn: ...@@ -1783,7 +1783,7 @@ twobyte_insn:
rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data); rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data);
if (rc) { if (rc) {
kvm_inject_gp(ctxt->vcpu, 0); kvm_inject_gp(ctxt->vcpu, 0);
c->eip = ctxt->vcpu->rip; c->eip = ctxt->vcpu->arch.rip;
} }
rc = X86EMUL_CONTINUE; rc = X86EMUL_CONTINUE;
c->dst.type = OP_NONE; c->dst.type = OP_NONE;
...@@ -1793,7 +1793,7 @@ twobyte_insn: ...@@ -1793,7 +1793,7 @@ twobyte_insn:
rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data); rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data);
if (rc) { if (rc) {
kvm_inject_gp(ctxt->vcpu, 0); kvm_inject_gp(ctxt->vcpu, 0);
c->eip = ctxt->vcpu->rip; c->eip = ctxt->vcpu->arch.rip;
} else { } else {
c->regs[VCPU_REGS_RAX] = (u32)msr_data; c->regs[VCPU_REGS_RAX] = (u32)msr_data;
c->regs[VCPU_REGS_RDX] = msr_data >> 32; c->regs[VCPU_REGS_RDX] = msr_data >> 32;
......
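The x86_emulate.c hunks all touch the same pattern: the emulator decodes and executes against a shadow copy of the guest registers and rip, and only writes that copy back into vcpu->arch once emulation succeeds. A hedged sketch of the flow after this patch (simplified; the real decode/execute loop and error paths are omitted, and the helper name below is invented for illustration):

/* Not the actual emulator entry point -- just the commit pattern
 * visible in the hunks above, with the new vcpu->arch indirection. */
static int emulate_one_insn_sketch(struct x86_emulate_ctxt *ctxt)
{
	struct decode_cache *c = &ctxt->decode;

	/* Shadow copy of register state, so a failed emulation never
	 * corrupts the guest-visible registers. */
	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
	c->eip = ctxt->vcpu->arch.rip;

	/* ... decode and execute one instruction against c->regs ... */

	/* Committed on successful emulation. */
	memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
	ctxt->vcpu->arch.rip = c->eip;
	return 0;
}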