Commit f419b321 authored by bellard

sysret fix - better cpuid support - lcall support for x86_64 - efer access in i386 emulation


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1373 c046a42c-6fe2-441c-8c8c-71466251a162
Parent 8d9bfc2b
@@ -933,6 +933,7 @@ static void do_interrupt64(int intno, int is_int, int error_code,
     }
     env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
 }
+#endif

 void helper_syscall(int next_eip_addend)
 {
@@ -942,6 +943,7 @@ void helper_syscall(int next_eip_addend)
         raise_exception_err(EXCP06_ILLOP, 0);
     }
     selector = (env->star >> 32) & 0xffff;
+#ifdef TARGET_X86_64
     if (env->hflags & HF_LMA_MASK) {
         ECX = env->eip + next_eip_addend;
         env->regs[11] = compute_eflags();
@@ -962,7 +964,9 @@ void helper_syscall(int next_eip_addend)
             env->eip = env->lstar;
         else
             env->eip = env->cstar;
-    } else {
+    } else
+#endif
+    {
         ECX = (uint32_t)(env->eip + next_eip_addend);

         cpu_x86_set_cpl(env, 0);
@@ -985,11 +989,15 @@ void helper_sysret(int dflag)
 {
     int cpl, selector;

+    if (!(env->efer & MSR_EFER_SCE)) {
+        raise_exception_err(EXCP06_ILLOP, 0);
+    }
     cpl = env->hflags & HF_CPL_MASK;
     if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
         raise_exception_err(EXCP0D_GPF, 0);
     }
     selector = (env->star >> 48) & 0xffff;
+#ifdef TARGET_X86_64
     if (env->hflags & HF_LMA_MASK) {
         if (dflag == 2) {
             cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
@@ -1015,7 +1023,9 @@ void helper_sysret(int dflag)
         load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                     IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
         cpu_x86_set_cpl(env, 3);
-    } else {
+    } else
+#endif
+    {
         cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                0, 0xffffffff,
                                DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
@@ -1030,8 +1040,15 @@ void helper_sysret(int dflag)
         env->eflags |= IF_MASK;
         cpu_x86_set_cpl(env, 3);
     }
-}
+#ifdef USE_KQEMU
+    if (kqemu_is_ok(env)) {
+        if (env->hflags & HF_LMA_MASK)
+            CC_OP = CC_OP_EFLAGS;
+        env->exception_index = -1;
+        cpu_loop_exit();
+    }
 #endif
+}

 /* real mode interrupt */
 static void do_interrupt_real(int intno, int is_int, int error_code,
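Note on the two selector extractions above ((env->star >> 32) in helper_syscall and (env->star >> 48) in helper_sysret): the STAR MSR packs the SYSCALL CS base selector into bits 47:32 and the SYSRET CS base selector into bits 63:48, with the SS selectors derived from the same fields. A minimal standalone sketch of that layout, with hypothetical helper names that are not part of the patch:

    #include <stdint.h>

    /* Illustrative only: how the STAR MSR is unpacked for SYSCALL/SYSRET. */
    static inline uint16_t star_syscall_cs(uint64_t star)
    {
        return (star >> 32) & 0xffff;   /* SYSCALL CS; SS = this selector + 8 */
    }

    static inline uint16_t star_sysret_cs(uint64_t star)
    {
        return (star >> 48) & 0xffff;   /* SYSRET CS (legacy); 64-bit CS = this + 16 */
    }

The new EFER.SCE test at the top of helper_sysret mirrors the existing one in helper_syscall, so both instructions now raise #UD when the syscall/sysret extension is disabled.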
@@ -1265,9 +1282,21 @@ void helper_cmpxchg8b(void)

 void helper_cpuid(void)
 {
-    switch((uint32_t)EAX) {
+    uint32_t index;
+    index = (uint32_t)EAX;
+
+    /* test if maximum index reached */
+    if (index & 0x80000000) {
+        if (index > env->cpuid_xlevel)
+            index = env->cpuid_level;
+    } else {
+        if (index > env->cpuid_level)
+            index = env->cpuid_level;
+    }
+
+    switch(index) {
     case 0:
-        EAX = 2; /* max EAX index supported */
+        EAX = env->cpuid_level;
         EBX = env->cpuid_vendor1;
         EDX = env->cpuid_vendor2;
         ECX = env->cpuid_vendor3;
@@ -1278,16 +1307,15 @@ void helper_cpuid(void)
         ECX = env->cpuid_ext_features;
         EDX = env->cpuid_features;
         break;
-    default:
+    case 2:
         /* cache info: needed for Pentium Pro compatibility */
         EAX = 0x410601;
         EBX = 0;
         ECX = 0;
         EDX = 0;
         break;
-#ifdef TARGET_X86_64
     case 0x80000000:
-        EAX = 0x80000008;
+        EAX = env->cpuid_xlevel;
         EBX = env->cpuid_vendor1;
         EDX = env->cpuid_vendor2;
         ECX = env->cpuid_vendor3;
@@ -1296,8 +1324,15 @@ void helper_cpuid(void)
         EAX = env->cpuid_features;
         EBX = 0;
         ECX = 0;
-        /* long mode + syscall/sysret features */
-        EDX = (env->cpuid_features & 0x0183F3FF) | (1 << 29) | (1 << 11);
+        EDX = env->cpuid_ext2_features;
         break;
+    case 0x80000002:
+    case 0x80000003:
+    case 0x80000004:
+        EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
+        EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
+        ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
+        EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
+        break;
     case 0x80000008:
         /* virtual & phys address size in low 2 bytes. */
@@ -1306,7 +1341,13 @@ void helper_cpuid(void)
         ECX = 0;
         EDX = 0;
         break;
-#endif
+    default:
+        /* reserved values: zero */
+        EAX = 0;
+        EBX = 0;
+        ECX = 0;
+        EDX = 0;
+        break;
     }
 }
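The index clamping added at the top of helper_cpuid can be read in isolation as follows; this is a sketch with illustrative parameter names, not the emulator's env state:

    #include <stdint.h>

    /* Sketch of the new behaviour: a requested leaf above the announced
       maximum (basic or extended range) falls back to the basic maximum
       leaf, which is what the patch does before dispatching the switch. */
    static uint32_t clamp_cpuid_index(uint32_t index,
                                      uint32_t level,   /* basic max, e.g. 2 */
                                      uint32_t xlevel)  /* extended max, e.g. 0x80000008 */
    {
        if (index & 0x80000000) {
            if (index > xlevel)
                index = level;
        } else {
            if (index > level)
                index = level;
        }
        return index;
    }

Note that an out-of-range extended leaf also falls back to cpuid_level (the basic maximum), exactly as in the hunk above.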
@@ -1523,11 +1564,11 @@ void load_seg(int seg_reg, int selector)
 }

 /* protected mode jump */
-void helper_ljmp_protected_T0_T1(int next_eip)
+void helper_ljmp_protected_T0_T1(int next_eip_addend)
 {
     int new_cs, gate_cs, type;
     uint32_t e1, e2, cpl, dpl, rpl, limit;
-    target_ulong new_eip;
+    target_ulong new_eip, next_eip;

     new_cs = T0;
     new_eip = T1;
@@ -1573,6 +1614,7 @@ void helper_ljmp_protected_T0_T1(int next_eip)
     case 5: /* task gate */
         if (dpl < cpl || dpl < rpl)
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+        next_eip = env->eip + next_eip_addend;
         switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
         break;
     case 4: /* 286 call gate */
@@ -1638,16 +1680,17 @@ void helper_lcall_real_T0_T1(int shift, int next_eip)
 }

 /* protected mode call */
-void helper_lcall_protected_T0_T1(int shift, int next_eip)
+void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
 {
     int new_cs, new_eip, new_stack, i;
     uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
     uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
     uint32_t val, limit, old_sp_mask;
-    target_ulong ssp, old_ssp;
+    target_ulong ssp, old_ssp, next_eip;

     new_cs = T0;
     new_eip = T1;
+    next_eip = env->eip + next_eip_addend;
 #ifdef DEBUG_PCALL
     if (loglevel & CPU_LOG_PCALL) {
         fprintf(logfile, "lcall %04x:%08x s=%d\n",
@@ -1684,25 +1727,43 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
         if (!(e2 & DESC_P_MASK))
             raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

-        sp = ESP;
-        sp_mask = get_sp_mask(env->segs[R_SS].flags);
-        ssp = env->segs[R_SS].base;
-        if (shift) {
-            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
-            PUSHL(ssp, sp, sp_mask, next_eip);
-        } else {
-            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
-            PUSHW(ssp, sp, sp_mask, next_eip);
-        }
-        limit = get_seg_limit(e1, e2);
-        if (new_eip > limit)
-            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-        /* from this point, not restartable */
-        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
-        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
-                       get_seg_base(e1, e2), limit, e2);
-        EIP = new_eip;
+#ifdef TARGET_X86_64
+        /* XXX: check 16/32 bit cases in long mode */
+        if (shift == 2) {
+            target_ulong rsp;
+            /* 64 bit case */
+            rsp = ESP;
+            PUSHQ(rsp, env->segs[R_CS].selector);
+            PUSHQ(rsp, next_eip);
+            /* from this point, not restartable */
+            ESP = rsp;
+            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+                                   get_seg_base(e1, e2),
+                                   get_seg_limit(e1, e2), e2);
+            EIP = new_eip;
+        } else
+#endif
+        {
+            sp = ESP;
+            sp_mask = get_sp_mask(env->segs[R_SS].flags);
+            ssp = env->segs[R_SS].base;
+            if (shift) {
+                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
+                PUSHL(ssp, sp, sp_mask, next_eip);
+            } else {
+                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
+                PUSHW(ssp, sp, sp_mask, next_eip);
+            }
+            limit = get_seg_limit(e1, e2);
+            if (new_eip > limit)
+                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
+            /* from this point, not restartable */
+            ESP = (ESP & ~sp_mask) | (sp & sp_mask);
+            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
+            EIP = new_eip;
+        }
     } else {
         /* check gate type */
         type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
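The signature change from next_eip to next_eip_addend in the two helpers above is presumably what lets far transfers carry a full-width return address on x86_64: only the small instruction-length addend crosses the helper boundary as an int, and the return address is rebuilt inside the helper from env->eip. A trivial sketch of the idea, assuming a 64-bit target_ulong:

    #include <stdint.h>

    typedef uint64_t target_ulong;   /* assumption: 64-bit guest build */

    /* The addend (a few bytes of instruction length) fits in an int, while
       the sum is formed at guest width, so a return address above 4 GB is
       not truncated the way a 32-bit next_eip argument would truncate it. */
    static target_ulong return_address(target_ulong eip, int next_eip_addend)
    {
        return eip + next_eip_addend;
    }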
@@ -2245,16 +2306,26 @@ void helper_wrmsr(void)
     case MSR_IA32_APICBASE:
         cpu_set_apic_base(env, val);
         break;
-#ifdef TARGET_X86_64
     case MSR_EFER:
-#define MSR_EFER_UPDATE_MASK (MSR_EFER_SCE | MSR_EFER_LME | \
-                              MSR_EFER_NXE | MSR_EFER_FFXSR)
-        env->efer = (env->efer & ~MSR_EFER_UPDATE_MASK) |
-            (val & MSR_EFER_UPDATE_MASK);
+        {
+            uint64_t update_mask;
+            update_mask = 0;
+            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
+                update_mask |= MSR_EFER_SCE;
+            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
+                update_mask |= MSR_EFER_LME;
+            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
+                update_mask |= MSR_EFER_FFXSR;
+            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
+                update_mask |= MSR_EFER_NXE;
+            env->efer = (env->efer & ~update_mask) |
+                (val & update_mask);
+        }
         break;
     case MSR_STAR:
         env->star = val;
         break;
+#ifdef TARGET_X86_64
     case MSR_LSTAR:
         env->lstar = val;
         break;
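The rewritten MSR_EFER case makes the set of writable EFER bits depend on the CPUID extended features the virtual CPU advertises, instead of a fixed compile-time mask. A standalone sketch of that gating; the bit positions follow the architectural EFER and CPUID 0x80000001 EDX definitions, and every name below is illustrative rather than taken from the source:

    #include <stdint.h>

    /* Architectural bit positions (assumed here). */
    #define EFER_SCE     (1u << 0)
    #define EFER_LME     (1u << 8)
    #define EFER_NXE     (1u << 11)
    #define EFER_FFXSR   (1u << 14)

    #define EXT2_SYSCALL (1u << 11)
    #define EXT2_NX      (1u << 20)
    #define EXT2_FFXSR   (1u << 25)
    #define EXT2_LM      (1u << 29)

    /* Only bits backed by an advertised feature may change; everything else
       in EFER is preserved across the write. */
    static uint64_t write_efer(uint64_t efer, uint64_t val, uint32_t ext2_features)
    {
        uint64_t update_mask = 0;
        if (ext2_features & EXT2_SYSCALL)
            update_mask |= EFER_SCE;
        if (ext2_features & EXT2_LM)
            update_mask |= EFER_LME;
        if (ext2_features & EXT2_FFXSR)
            update_mask |= EFER_FFXSR;
        if (ext2_features & EXT2_NX)
            update_mask |= EFER_NXE;
        return (efer & ~update_mask) | (val & update_mask);
    }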
@@ -2296,13 +2367,13 @@ void helper_rdmsr(void)
     case MSR_IA32_APICBASE:
         val = cpu_get_apic_base(env);
         break;
-#ifdef TARGET_X86_64
     case MSR_EFER:
         val = env->efer;
         break;
     case MSR_STAR:
         val = env->star;
         break;
+#ifdef TARGET_X86_64
     case MSR_LSTAR:
         val = env->lstar;
         break;
...