Commit 3f337316 authored by bellard

pop ss, mov ss, x and sti disable irqs for the next instruction - began dispatch optimization by adding new x86 cpu 'hidden' flags


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@372 c046a42c-6fe2-441c-8c8c-71466251a162
Parent d05e66d2
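The behaviour modeled by this commit is the x86 "interrupt shadow": after pop ss, mov ss or sti, hardware interrupts are held off for exactly one more instruction, so SS and ESP can be loaded as a pair and sti; hlt works as expected. A minimal standalone sketch of that rule (hypothetical names, not QEMU code):

    #include <stdio.h>

    enum { OP_STI, OP_NOP, OP_HLT };

    int main(void)
    {
        int ops[] = { OP_STI, OP_NOP, OP_HLT };
        int if_flag = 0;     /* models eflags.IF */
        int inhibit = 0;     /* models HF_INHIBIT_IRQ_MASK */
        int irq_pending = 1; /* a device keeps an irq line asserted */

        for (int i = 0; i < 3; i++) {
            /* an irq is taken only when IF is set AND the shadow left
               by the previous instruction has expired */
            if (irq_pending && if_flag && !inhibit) {
                printf("irq taken before insn %d\n", i);
                irq_pending = 0;
            }
            inhibit = 0; /* the shadow lasts exactly one instruction */
            if (ops[i] == OP_STI) {
                if_flag = 1;
                inhibit = 1; /* delay irqs until after the next insn */
            } else if (ops[i] == OP_HLT) {
                break;
            }
        }
        return 0; /* prints: irq taken before insn 2 */
    }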
@@ -186,7 +186,8 @@ int cpu_exec(CPUState *env1)
 #if defined(TARGET_I386)
             /* if hardware interrupt pending, we execute it */
             if ((interrupt_request & CPU_INTERRUPT_HARD) &&
-                (env->eflags & IF_MASK)) {
+                (env->eflags & IF_MASK) &&
+                !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                 int intno;
                 intno = cpu_x86_get_pic_interrupt(env);
                 if (loglevel) {
@@ -233,21 +234,20 @@ int cpu_exec(CPUState *env1)
 #endif
             }
 #endif
-            /* we compute the CPU state. We assume it will not
-               change during the whole generated block. */
+            /* we record a subset of the CPU state. It will
+               always be the same before a given translated block
+               is executed. */
 #if defined(TARGET_I386)
             flags = (env->segs[R_CS].flags & DESC_B_MASK)
-                >> (DESC_B_SHIFT - GEN_FLAG_CODE32_SHIFT);
+                >> (DESC_B_SHIFT - HF_CS32_SHIFT);
             flags |= (env->segs[R_SS].flags & DESC_B_MASK)
-                >> (DESC_B_SHIFT - GEN_FLAG_SS32_SHIFT);
+                >> (DESC_B_SHIFT - HF_SS32_SHIFT);
             flags |= (((unsigned long)env->segs[R_DS].base |
                        (unsigned long)env->segs[R_ES].base |
                        (unsigned long)env->segs[R_SS].base) != 0) <<
-                GEN_FLAG_ADDSEG_SHIFT;
-            flags |= env->cpl << GEN_FLAG_CPL_SHIFT;
-            flags |= env->soft_mmu << GEN_FLAG_SOFT_MMU_SHIFT;
-            flags |= (env->eflags & VM_MASK) >> (17 - GEN_FLAG_VM_SHIFT);
-            flags |= (env->eflags & (IOPL_MASK | TF_MASK));
+                HF_ADDSEG_SHIFT;
+            flags |= env->hflags;
+            flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
             cs_base = env->segs[R_CS].base;
             pc = cs_base + env->eip;
 #elif defined(TARGET_ARM)
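The `flags` word built here becomes part of the translated-block lookup key, so blocks compiled under different CPL/segment assumptions are never reused wrongly. A standalone sketch of the composition, with made-up register values:

    #include <stdio.h>
    #include <stdint.h>

    #define TF_MASK   0x00000100
    #define IOPL_MASK 0x00003000
    #define VM_MASK   0x00020000

    int main(void)
    {
        uint32_t hflags = 0x53;       /* e.g. CPL 3 | CS32 | ADDSEG (made up) */
        uint32_t eflags = 0x00003202; /* IF | IOPL=3 | reserved bit 1 */
        uint32_t flags;

        /* mirror of the cpu_exec code above: a plain OR suffices
           because HF_* bits avoid the IOPL/TF/VM positions */
        flags = hflags | (eflags & (IOPL_MASK | TF_MASK | VM_MASK));

        /* IF (0x200) is deliberately left out of the key: it affects
           irq delivery in cpu_exec, not how code is translated */
        printf("tb flags = 0x%08x\n", flags); /* 0x00003053 */
        return 0;
    }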
@@ -337,8 +337,8 @@ int cpu_exec(CPUState *env1)
             /* reset soft MMU for next block (it can currently
                only be set by a memory fault) */
 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
-            if (env->soft_mmu) {
-                env->soft_mmu = 0;
+            if (env->hflags & HF_SOFTMMU_MASK) {
+                env->hflags &= ~HF_SOFTMMU_MASK;
                 /* do not allow linking to another block */
                 T0 = 0;
             }
@@ -499,7 +499,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
             raise_exception_err(EXCP0E_PAGE, env->error_code);
         } else {
             /* activate soft MMU for this block */
-            env->soft_mmu = 1;
+            env->hflags |= HF_SOFTMMU_MASK;
             sigprocmask(SIG_SETMASK, old_set, NULL);
             cpu_loop_exit();
         }
......
@@ -73,6 +73,10 @@
 #define CC_S    0x0080
 #define CC_O    0x0800

+#define TF_SHIFT   8
+#define IOPL_SHIFT 12
+#define VM_SHIFT   17
+
 #define TF_MASK 0x00000100
 #define IF_MASK 0x00000200
 #define DF_MASK 0x00000400
@@ -85,6 +89,29 @@
 #define VIP_MASK                0x00100000
 #define ID_MASK                 0x00200000

+/* hidden flags - used internally by qemu to represent additional cpu
+   states. Only the CPL and INHIBIT_IRQ are not redundant. We avoid
+   using the IOPL_MASK, TF_MASK and VM_MASK bit positions to ease
+   ORing with eflags. */
+/* current cpl */
+#define HF_CPL_SHIFT         0
+/* true if soft mmu is being used */
+#define HF_SOFTMMU_SHIFT     2
+/* true if hardware interrupts must be disabled for next instruction */
+#define HF_INHIBIT_IRQ_SHIFT 3
+/* 16 or 32 bit segments */
+#define HF_CS32_SHIFT        4
+#define HF_SS32_SHIFT        5
+/* zero base for DS, ES and SS */
+#define HF_ADDSEG_SHIFT      6
+
+#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
+#define HF_SOFTMMU_MASK      (1 << HF_SOFTMMU_SHIFT)
+#define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)
+#define HF_CS32_MASK         (1 << HF_CS32_SHIFT)
+#define HF_SS32_MASK         (1 << HF_SS32_SHIFT)
+#define HF_ADDSEG_MASK       (1 << HF_ADDSEG_SHIFT)
+
 #define CR0_PE_MASK  (1 << 0)
 #define CR0_TS_MASK  (1 << 3)
 #define CR0_WP_MASK  (1 << 16)
@@ -226,6 +253,7 @@ typedef struct CPUX86State {
     uint32_t cc_dst;
     uint32_t cc_op;
     int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
+    uint32_t hflags; /* hidden flags, see HF_xxx constants */

     /* FPU state */
     unsigned int fpstt; /* top of stack index */
@@ -249,8 +277,6 @@ typedef struct CPUX86State {
     SegmentCache tr;
     SegmentCache gdt; /* only base and limit are used */
     SegmentCache idt; /* only base and limit are used */
-    int cpl; /* current cpl */
-    int soft_mmu; /* TRUE if soft mmu is being used */

     /* sysenter registers */
     uint32_t sysenter_cs;
@@ -303,7 +329,11 @@ void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
 /* wrapper, just in case memory mappings must be changed */
 static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
 {
-    s->cpl = cpl;
+#if HF_CPL_MASK == 3
+    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
+#else
+#error HF_CPL_MASK is hardcoded
+#endif
 }

 /* simulate fsave/frstor */
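The `#if`/`#error` guard documents that the new body ORs `cpl` in without shifting, which is only valid while HF_CPL_SHIFT is 0 (so HF_CPL_MASK == 3). A hypothetical shift-safe variant, standalone with a stand-in state type, shows what the guard is protecting against:

    #include <stdint.h>

    #define HF_CPL_SHIFT 0
    #define HF_CPL_MASK  (3 << HF_CPL_SHIFT)

    /* stand-in for CPUX86State, just enough for the sketch */
    typedef struct { uint32_t hflags; } StateSketch;

    /* same update without assuming HF_CPL_SHIFT == 0; the commit
       prefers the shift-free form and guards the assumption instead */
    static inline void set_cpl_generic(StateSketch *s, int cpl)
    {
        s->hflags = (s->hflags & ~HF_CPL_MASK) |
                    (((uint32_t)cpl << HF_CPL_SHIFT) & HF_CPL_MASK);
    }

    int main(void)
    {
        StateSketch s = { 0 };
        set_cpl_generic(&s, 3);
        return (s.hflags & HF_CPL_MASK) == 3 ? 0 : 1;
    }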
......
@@ -61,16 +61,6 @@ extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
 #if defined(TARGET_I386)

-#define GEN_FLAG_CODE32_SHIFT   0
-#define GEN_FLAG_ADDSEG_SHIFT   1
-#define GEN_FLAG_SS32_SHIFT     2
-#define GEN_FLAG_VM_SHIFT       3
-#define GEN_FLAG_ST_SHIFT       4
-#define GEN_FLAG_TF_SHIFT       8 /* same position as eflags */
-#define GEN_FLAG_CPL_SHIFT      9
-#define GEN_FLAG_SOFT_MMU_SHIFT 11
-#define GEN_FLAG_IOPL_SHIFT     12 /* same position as eflags */
-
 void optimize_flags_init(void);
 #endif
......
@@ -189,7 +189,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
 {
     SegmentCache *dt;
     uint8_t *ptr, *ssp;
-    int type, dpl, selector, ss_dpl;
+    int type, dpl, selector, ss_dpl, cpl;
     int has_error_code, new_stack, shift;
     uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
     uint32_t old_cs, old_ss, old_esp, old_eip;
...@@ -216,8 +216,9 @@ static void do_interrupt_protected(int intno, int is_int, int error_code, ...@@ -216,8 +216,9 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
break; break;
} }
dpl = (e2 >> DESC_DPL_SHIFT) & 3; dpl = (e2 >> DESC_DPL_SHIFT) & 3;
cpl = env->hflags & HF_CPL_MASK;
/* check privledge if software int */ /* check privledge if software int */
if (is_int && dpl < env->cpl) if (is_int && dpl < cpl)
raise_exception_err(EXCP0D_GPF, intno * 8 + 2); raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
/* check valid bit */ /* check valid bit */
if (!(e2 & DESC_P_MASK)) if (!(e2 & DESC_P_MASK))
@@ -232,11 +233,11 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
-    if (dpl > env->cpl)
+    if (dpl > cpl)
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     if (!(e2 & DESC_P_MASK))
         raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-    if (!(e2 & DESC_C_MASK) && dpl < env->cpl) {
+    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
         /* to inner privilege */
         get_ss_esp_from_tss(&ss, &esp, dpl);
         if ((ss & 0xfffc) == 0)
@@ -255,7 +256,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         if (!(ss_e2 & DESC_P_MASK))
             raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
         new_stack = 1;
-    } else if ((e2 & DESC_C_MASK) || dpl == env->cpl) {
+    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
         /* to same privilege */
         new_stack = 0;
     } else {
@@ -402,7 +403,7 @@ void do_interrupt_user(int intno, int is_int, int error_code,
 {
     SegmentCache *dt;
     uint8_t *ptr;
-    int dpl;
+    int dpl, cpl;
     uint32_t e2;

     dt = &env->idt;
@@ -410,8 +411,9 @@ void do_interrupt_user(int intno, int is_int, int error_code,
     e2 = ldl(ptr + 4);
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
     /* check privilege if software int */
-    if (is_int && dpl < env->cpl)
+    if (is_int && dpl < cpl)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
     /* Since we emulate only user space, we cannot do more than
@@ -742,7 +744,7 @@ void helper_ljmp_protected_T0_T1(void)
         raise_exception_err(EXCP0D_GPF, 0);
     if (load_segment(&e1, &e2, new_cs) != 0)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->cpl;
+    cpl = env->hflags & HF_CPL_MASK;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -826,7 +828,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
         raise_exception_err(EXCP0D_GPF, 0);
     if (load_segment(&e1, &e2, new_cs) != 0)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->cpl;
+    cpl = env->hflags & HF_CPL_MASK;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -1079,7 +1081,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
     if (!(e2 & DESC_S_MASK) ||
         !(e2 & DESC_CS_MASK))
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->cpl;
+    cpl = env->hflags & HF_CPL_MASK;
     rpl = new_cs & 3;
     if (rpl < cpl)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
......
@@ -52,7 +52,7 @@ CPUX86State *cpu_x86_init(void)
     tlb_flush(env);

 #ifdef CONFIG_SOFTMMU
-    env->soft_mmu = 1;
+    env->hflags |= HF_SOFTMMU_MASK;
 #endif
     /* init various static tables */
     if (!inited) {
@@ -228,7 +228,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
     int cpl, error_code, is_dirty, is_user, prot, page_size, ret;
     unsigned long pd;

-    cpl = env->cpl;
+    cpl = env->hflags & HF_CPL_MASK;
     is_user = (cpl == 3);

 #ifdef DEBUG_MMU
...@@ -325,7 +325,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write) ...@@ -325,7 +325,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
} }
do_mapping: do_mapping:
if (env->soft_mmu) { if (env->hflags & HF_SOFTMMU_MASK) {
unsigned long paddr, vaddr, address, addend, page_offset; unsigned long paddr, vaddr, address, addend, page_offset;
int index; int index;
@@ -359,7 +359,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
     if ((pd & 0xfff) != 0) {
         /* IO access: no mapping is done as it will be handled by the
            soft MMU */
-        if (!env->soft_mmu)
+        if (!(env->hflags & HF_SOFTMMU_MASK))
             ret = 2;
     } else {
         void *map_addr;
......
@@ -457,6 +457,16 @@ void OPPROTO op_sti(void)
     env->eflags |= IF_MASK;
 }

+void OPPROTO op_set_inhibit_irq(void)
+{
+    env->hflags |= HF_INHIBIT_IRQ_MASK;
+}
+
+void OPPROTO op_reset_inhibit_irq(void)
+{
+    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+}
+
 #if 0
 /* vm86plus instructions */
 void OPPROTO op_cli_vm(void)
......
@@ -90,7 +90,7 @@ DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr)
     /* test if there is match for unaligned or IO access */
     /* XXX: could be done more in the memory macro in a non portable way */
-    is_user = (env->cpl == 3);
+    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_read[is_user][index].address;
@@ -126,7 +126,7 @@ static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr)
     int is_user, index, shift;
     unsigned long physaddr, tlb_addr, addr1, addr2;

-    is_user = (env->cpl == 3);
+    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_read[is_user][index].address;
@@ -169,7 +169,7 @@ void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE val
     void *retaddr;
     int is_user, index;

-    is_user = (env->cpl == 3);
+    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_write[is_user][index].address;
@@ -203,7 +203,7 @@ static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
     unsigned long physaddr, tlb_addr;
     int is_user, index, i;

-    is_user = (env->cpl == 3);
+    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
  redo:
     tlb_addr = env->tlb_write[is_user][index].address;
......
@@ -1552,7 +1552,9 @@ static void gen_movl_seg_T0(DisasContext *s, int seg_reg, unsigned int cur_eip)
     else
         gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[seg_reg]));
     /* abort translation because the register may have a non zero base
-       or because ss32 may change */
+       or because ss32 may change. For R_SS, translation must always
+       stop because special handling must be done to disable hardware
+       interrupts for the next instruction */
     if (seg_reg == R_SS || (!s->addseg && seg_reg < R_FS))
         s->is_jmp = 2;
 }
@@ -2356,10 +2358,14 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
     case 0x07: /* pop es */
     case 0x17: /* pop ss */
     case 0x1f: /* pop ds */
+        reg = b >> 3;
         gen_pop_T0(s);
-        gen_movl_seg_T0(s, b >> 3, pc_start - s->cs_base);
+        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
         gen_pop_update(s);
-        /* XXX: if reg == SS, inhibit interrupts/trace */
+        if (reg == R_SS) {
+            /* if reg == SS, inhibit interrupts/trace */
+            gen_op_set_inhibit_irq();
+        }
         break;
     case 0x1a1: /* pop fs */
     case 0x1a9: /* pop gs */
...@@ -2418,7 +2424,10 @@ long disas_insn(DisasContext *s, uint8_t *pc_start) ...@@ -2418,7 +2424,10 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
goto illegal_op; goto illegal_op;
gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0); gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
gen_movl_seg_T0(s, reg, pc_start - s->cs_base); gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
/* XXX: if reg == SS, inhibit interrupts/trace */ if (reg == R_SS) {
/* if reg == SS, inhibit interrupts/trace */
gen_op_set_inhibit_irq();
}
break; break;
case 0x8c: /* mov Gv, seg */ case 0x8c: /* mov Gv, seg */
modrm = ldub(s->pc++); modrm = ldub(s->pc++);
@@ -3704,6 +3713,8 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
         if (!s->vm86) {
             if (s->cpl <= s->iopl) {
                 gen_op_sti();
+                /* interrupts are enabled only from the first insn after sti */
+                gen_op_set_inhibit_irq();
                 s->is_jmp = 2; /* give a chance to handle pending irqs */
             } else {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
@@ -3711,12 +3722,13 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
         } else {
             if (s->iopl == 3) {
                 gen_op_sti();
+                /* interrupts are enabled only from the first insn after sti */
+                gen_op_set_inhibit_irq();
                 s->is_jmp = 2; /* give a chance to handle pending irqs */
             } else {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
             }
         }
-        /* XXX: interruptions are enabled only the first insn after sti */
         break;
     case 0x62: /* bound */
         ot = dflag ? OT_LONG : OT_WORD;
@@ -4380,21 +4392,21 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     flags = tb->flags;
     dc->pe = env->cr[0] & CR0_PE_MASK;
-    dc->code32 = (flags >> GEN_FLAG_CODE32_SHIFT) & 1;
-    dc->ss32 = (flags >> GEN_FLAG_SS32_SHIFT) & 1;
-    dc->addseg = (flags >> GEN_FLAG_ADDSEG_SHIFT) & 1;
-    dc->f_st = (flags >> GEN_FLAG_ST_SHIFT) & 7;
-    dc->vm86 = (flags >> GEN_FLAG_VM_SHIFT) & 1;
-    dc->cpl = (flags >> GEN_FLAG_CPL_SHIFT) & 3;
-    dc->iopl = (flags >> GEN_FLAG_IOPL_SHIFT) & 3;
-    dc->tf = (flags >> GEN_FLAG_TF_SHIFT) & 1;
+    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
+    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
+    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
+    dc->f_st = 0;
+    dc->vm86 = (flags >> VM_SHIFT) & 1;
+    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
+    dc->iopl = (flags >> IOPL_SHIFT) & 3;
+    dc->tf = (flags >> TF_SHIFT) & 1;
     dc->cc_op = CC_OP_DYNAMIC;
     dc->cs_base = cs_base;
     dc->tb = tb;
     dc->popl_esp_hack = 0;
     /* select memory access functions */
     dc->mem_index = 0;
-    if ((flags >> GEN_FLAG_SOFT_MMU_SHIFT) & 1) {
+    if (flags & HF_SOFTMMU_MASK) {
         if (dc->cpl == 3)
             dc->mem_index = 6;
         else
@@ -4408,6 +4420,13 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     dc->is_jmp = DISAS_NEXT;
     pc_ptr = pc_start;
     lj = -1;
+
+    /* if irqs were inhibited for the next instruction, we can disable
+       them here as it is simpler (otherwise jumps would have to be
+       handled as a special case) */
+    if (flags & HF_INHIBIT_IRQ_MASK) {
+        gen_op_reset_inhibit_irq();
+    }
+
     do {
         if (env->nb_breakpoints > 0) {
             for(j = 0; j < env->nb_breakpoints; j++) {
......
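Taken together, the hunks give the flag a three-step life cycle: op_set_inhibit_irq at the end of the block containing sti/mov ss, the extra check in cpu_exec that refuses to deliver CPU_INTERRUPT_HARD while the flag is set, and gen_op_reset_inhibit_irq in the prologue of the next translated block. A standalone end-to-end sketch of that cycle (a simplified model, not QEMU code):

    #include <stdio.h>

    #define HF_INHIBIT_IRQ_MASK (1 << 3)

    static unsigned hflags;
    static int if_flag;
    static int irq_pending = 1;

    static void exec_block(const char *name, int ends_with_sti)
    {
        /* prologue generated when the block was translated with the
           flag set (gen_op_reset_inhibit_irq in the last hunk) */
        if (hflags & HF_INHIBIT_IRQ_MASK)
            hflags &= ~HF_INHIBIT_IRQ_MASK;
        printf("exec %s\n", name);
        if (ends_with_sti) {
            if_flag = 1;                   /* op_sti */
            hflags |= HF_INHIBIT_IRQ_MASK; /* op_set_inhibit_irq */
        }
    }

    int main(void)
    {
        const char *blocks[] = { "block ending in sti",
                                 "block 1", "block 2" };
        for (int i = 0; i < 3; i++) {
            /* cpu_exec: deliver a hard irq only if IF is set and the
               inhibit flag is clear */
            if (irq_pending && if_flag &&
                !(hflags & HF_INHIBIT_IRQ_MASK)) {
                printf("irq delivered before %s\n", blocks[i]);
                irq_pending = 0;
            }
            exec_block(blocks[i], i == 0);
        }
        return 0; /* irq is delivered before "block 2", not "block 1" */
    }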