Commit fd479c60 authored by Peter Maydell

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20170603' into staging

Queued TCG patches

# gpg: Signature made Fri 30 Jun 2017 20:03:53 BST
# gpg:                using RSA key 0xAD1270CC4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"
# Primary key fingerprint: 9CB1 8DDA F8E8 49AD 2AFC  16A4 AD12 70CC 4DD0 279B

* remotes/rth/tags/pull-tcg-20170603:
  tcg: consistently access cpu->tb_jmp_cache atomically
  gen-icount: use tcg_ctx.tcg_env instead of cpu_env
  gen-icount: add missing inline to gen_tb_end
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
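The heart of this pull is the first patch: cpu->tb_jmp_cache is read lock-free by the CPU execution loop while other threads may be flushing it concurrently (notably under MTTCG), so every access must go through the atomic accessors. The diff below accordingly replaces the open-coded memset()/loop variants with one shared helper. As a rough illustration of the pattern being enforced, here is a minimal model in C11 atomics; the names and constants are made up, and stdatomic stands in for QEMU's atomic_read()/atomic_set():

#include <stdatomic.h>
#include <stddef.h>

#define CACHE_SIZE 4096
struct tb;

static _Atomic(struct tb *) jmp_cache[CACHE_SIZE];

/* Reader side (the execution loop): a lock-free relaxed load. */
static struct tb *lookup_tb(unsigned int hash)
{
    return atomic_load_explicit(&jmp_cache[hash], memory_order_relaxed);
}

/* Writer side (a flush): each slot is stored atomically; a plain
 * memset() over the array would be a data race with lookup_tb(). */
static void invalidate_all(void)
{
    for (size_t i = 0; i < CACHE_SIZE; i++) {
        atomic_store_explicit(&jmp_cache[i], NULL, memory_order_relaxed);
    }
}

A plain memset() racing such a reader is undefined behaviour even where it happens to work on x86.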
@@ -118,7 +118,7 @@ static void tlb_flush_nocheck(CPUState *cpu)
 
     memset(env->tlb_table, -1, sizeof(env->tlb_table));
     memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
-    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+    cpu_tb_jmp_cache_clear(cpu);
 
     env->vtlb_index = 0;
     env->tlb_flush_addr = -1;
@@ -183,7 +183,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
         }
     }
 
-    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
+    cpu_tb_jmp_cache_clear(cpu);
 
     tlb_debug("done\n");
......
@@ -928,11 +928,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)
     }
 
     CPU_FOREACH(cpu) {
-        int i;
-
-        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
-            atomic_set(&cpu->tb_jmp_cache[i], NULL);
-        }
+        cpu_tb_jmp_cache_clear(cpu);
     }
 
     tcg_ctx.tb_ctx.nb_tbs = 0;
@@ -1813,19 +1809,21 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     cpu_loop_exit_noexc(cpu);
 }
 
+static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
+{
+    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);
+
+    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
+        atomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
+    }
+}
+
 void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
 {
-    unsigned int i;
-
     /* Discard jump cache entries for any tb which might potentially
        overlap the flushed page. */
-    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
-    memset(&cpu->tb_jmp_cache[i], 0,
-           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
-
-    i = tb_jmp_cache_hash_page(addr);
-    memset(&cpu->tb_jmp_cache[i], 0,
-           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
+    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
+    tb_jmp_cache_clear_page(cpu, addr);
 }
 
 static void print_qht_statistics(FILE *f, fprintf_function cpu_fprintf,
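Note why tb_flush_jmp_cache() clears two hash buckets: QEMU allows a translated block to cross at most one page boundary, and the jump cache is indexed by the TB's start address, so a TB starting near the end of the page below addr can still overlap the flushed page. A toy, self-contained demonstration of that overlap (constants and addresses are illustrative, not QEMU's):

#include <stdio.h>

#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1UL << TARGET_PAGE_BITS)

int main(void)
{
    unsigned long flushed = 0x40002000;   /* page being flushed */
    unsigned long tb_start = flushed - 8; /* TB begins on the prior page */
    unsigned long tb_end = tb_start + 32; /* ...and extends into the flushed one */

    printf("TB start page: 0x%lx\n", tb_start & ~(TARGET_PAGE_SIZE - 1));
    printf("TB end page:   0x%lx\n", (tb_end - 1) & ~(TARGET_PAGE_SIZE - 1));
    /* The cache is indexed by TB start address, so the buckets for both
     * the flushed page and the page before it must be dropped. */
    return 0;
}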
......
@@ -19,7 +19,7 @@ static inline void gen_tb_start(TranslationBlock *tb)
         count = tcg_temp_new_i32();
     }
 
-    tcg_gen_ld_i32(count, cpu_env,
+    tcg_gen_ld_i32(count, tcg_ctx.tcg_env,
                    -ENV_OFFSET + offsetof(CPUState, icount_decr.u32));
 
     if (tb->cflags & CF_USE_ICOUNT) {
@@ -37,14 +37,14 @@ static inline void gen_tb_start(TranslationBlock *tb)
     tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, exitreq_label);
 
     if (tb->cflags & CF_USE_ICOUNT) {
-        tcg_gen_st16_i32(count, cpu_env,
+        tcg_gen_st16_i32(count, tcg_ctx.tcg_env,
                          -ENV_OFFSET + offsetof(CPUState, icount_decr.u16.low));
     }
 
     tcg_temp_free_i32(count);
 }
 
-static void gen_tb_end(TranslationBlock *tb, int num_insns)
+static inline void gen_tb_end(TranslationBlock *tb, int num_insns)
 {
     if (tb->cflags & CF_USE_ICOUNT) {
         /* Update the num_insn immediate parameter now that we know
@@ -62,14 +62,16 @@ static void gen_tb_end(TranslationBlock *tb, int num_insns)
 static inline void gen_io_start(void)
 {
     TCGv_i32 tmp = tcg_const_i32(1);
-    tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
+    tcg_gen_st_i32(tmp, tcg_ctx.tcg_env,
+                   -ENV_OFFSET + offsetof(CPUState, can_do_io));
     tcg_temp_free_i32(tmp);
 }
 
 static inline void gen_io_end(void)
 {
     TCGv_i32 tmp = tcg_const_i32(0);
-    tcg_gen_st_i32(tmp, cpu_env, -ENV_OFFSET + offsetof(CPUState, can_do_io));
+    tcg_gen_st_i32(tmp, tcg_ctx.tcg_env,
+                   -ENV_OFFSET + offsetof(CPUState, can_do_io));
    tcg_temp_free_i32(tmp);
 }
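The two gen-icount patches are mechanical by comparison: the helpers in gen-icount.h now reach the env pointer through tcg_ctx.tcg_env instead of each target's cpu_env global, and gen_tb_end gains the inline qualifier its sibling helpers in the header already have. For context, here is a sketch of how a target translator typically brackets an instruction that can touch an I/O device under icount; the pattern is modeled on QEMU's translators, but gen_helper_my_mmio_load is hypothetical and val/addr are TCGv temporaries from the surrounding code:

/* Sketch only: bracketing for an I/O-capable instruction under icount. */
if (tb->cflags & CF_USE_ICOUNT) {
    gen_io_start();    /* stores 1 into cpu->can_do_io */
}
gen_helper_my_mmio_load(val, cpu_env, addr);
if (tb->cflags & CF_USE_ICOUNT) {
    gen_io_end();      /* stores 0 into cpu->can_do_io */
}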
......
@@ -346,7 +346,7 @@ struct CPUState {
 
     void *env_ptr; /* CPUArchState */
 
-    /* Writes protected by tb_lock, reads not thread-safe */
+    /* Accessed in parallel; all accesses must be atomic */
     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
 
     struct GDBRegisterState *gdb_regs;
@@ -422,6 +422,15 @@ extern struct CPUTailQ cpus;
 
 extern __thread CPUState *current_cpu;
 
+static inline void cpu_tb_jmp_cache_clear(CPUState *cpu)
+{
+    unsigned int i;
+
+    for (i = 0; i < TB_JMP_CACHE_SIZE; i++) {
+        atomic_set(&cpu->tb_jmp_cache[i], NULL);
+    }
+}
+
 /**
  * qemu_tcg_mttcg_enabled:
  * Check whether we are running MultiThread TCG or not.
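The new helper is a static inline in the CPUState header so that the TLB code, translate-all.c, and qom/cpu.c all share one definition. On compilers with the __atomic builtins, QEMU's atomic_set() (include/qemu/atomic.h) is a relaxed atomic store, so each loop iteration is approximately:

/* Approximate expansion of atomic_set() on GCC/Clang: */
__atomic_store_n(&cpu->tb_jmp_cache[i], NULL, __ATOMIC_RELAXED);

Relaxed ordering suffices here because a racing reader only needs to observe either a valid TB pointer or NULL, never a torn value.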
......
@@ -274,7 +274,6 @@ void cpu_reset(CPUState *cpu)
 static void cpu_common_reset(CPUState *cpu)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
-    int i;
 
     if (qemu_loglevel_mask(CPU_LOG_RESET)) {
         qemu_log("CPU Reset (CPU %d)\n", cpu->cpu_index);
@@ -292,9 +291,7 @@ static void cpu_common_reset(CPUState *cpu)
     cpu->crash_occurred = false;
 
     if (tcg_enabled()) {
-        for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
-            atomic_set(&cpu->tb_jmp_cache[i], NULL);
-        }
+        cpu_tb_jmp_cache_clear(cpu);
 
 #ifdef CONFIG_SOFTMMU
         tlb_flush(cpu, 0);
......