Commit 60126df9 authored by Peter Maydell

Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20181031' into staging

Track mmu_idx for which the TLB is clean and need not be flushed again.

# gpg: Signature made Wed 31 Oct 2018 12:19:31 GMT
# gpg:                using RSA key 64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A  05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20181031:
  cputlb: Remove tlb_c.pending_flushes
  cputlb: Filter flushes on already clean tlbs
  cputlb: Count "partial" and "elided" tlb flushes
  cputlb: Merge tlb_flush_page into tlb_flush_page_by_mmuidx
  cputlb: Merge tlb_flush_nocheck into tlb_flush_by_mmuidx_async_work
  cputlb: Move env->vtlb_index to env->tlb_d.vindex
  cputlb: Split large page tracking per mmu_idx
  cputlb: Move cpu->pending_tlb_flush to env->tlb_c.pending_flush
  cputlb: Remove tcg_enabled hack from tlb_flush_nocheck
  cputlb: Move tlb_lock to CPUTLBCommon
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This diff is collapsed.
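Since the cputlb.c diff itself is collapsed above, the headline mechanism ("Filter flushes on already clean tlbs") is easiest to see as a sketch. Per the series, the async flush worker intersects the requested mmu_idx bitmap with tlb_c.dirty and only walks the indexes that have actually been modified since their last flush. The body below is a simplified reconstruction assuming QEMU's cputlb.c context, not the verbatim patch:

/* Simplified reconstruction of tlb_flush_by_mmuidx_async_work after this
 * series (sketch, not the verbatim patch).  "asked" is the mmu_idx bitmap
 * the caller wants flushed; only bits still marked dirty do any work.
 */
static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t to_clean, work;

    qemu_spin_lock(&env->tlb_c.lock);

    /* A clean mmu_idx (dirty bit 0) has seen no updates since its last
     * flush, so flushing it again is elided.  */
    to_clean = asked & env->tlb_c.dirty;
    env->tlb_c.dirty &= ~to_clean;

    /* Clear the lowest set bit each iteration: one flush per dirty index.  */
    for (work = to_clean; work != 0; work &= work - 1) {
        tlb_flush_one_mmuidx_locked(env, ctz32(work));
    }

    qemu_spin_unlock(&env->tlb_c.lock);

    /* Stats are written atomically so the monitor can read a snapshot.  */
    if (to_clean == ALL_MMUIDX_BITS) {
        atomic_set(&env->tlb_c.full_flush_count,
                   env->tlb_c.full_flush_count + 1);
    } else {
        atomic_set(&env->tlb_c.part_flush_count,
                   env->tlb_c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            atomic_set(&env->tlb_c.elide_flush_count,
                       env->tlb_c.elide_flush_count
                       + ctpop16(asked & ~to_clean));
        }
    }
}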
......@@ -2290,7 +2290,7 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
 {
     struct tb_tree_stats tst = {};
     struct qht_stats hst;
-    size_t nb_tbs;
+    size_t nb_tbs, flush_full, flush_part, flush_elide;
 
     tcg_tb_foreach(tb_tree_stats_iter, &tst);
     nb_tbs = tst.nb_tbs;
......@@ -2326,7 +2326,11 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
     cpu_fprintf(f, "TB flush count      %u\n",
                 atomic_read(&tb_ctx.tb_flush_count));
     cpu_fprintf(f, "TB invalidate count %zu\n", tcg_tb_phys_invalidate_count());
-    cpu_fprintf(f, "TLB flush count     %zu\n", tlb_flush_count());
+
+    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
+    cpu_fprintf(f, "TLB full flushes    %zu\n", flush_full);
+    cpu_fprintf(f, "TLB partial flushes %zu\n", flush_part);
+    cpu_fprintf(f, "TLB elided flushes  %zu\n", flush_elide);
     tcg_dump_info(f, cpu_fprintf);
 }
......
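dump_exec_info now reports three TLB counters instead of one. The cputlb.c counterpart is in the collapsed diff above; a sketch of what tlb_flush_counts plausibly does, summing the per-vCPU statistics introduced in CPUTLBCommon:

/* Sketch of the cputlb.c side (that diff is collapsed above): sum the
 * per-vCPU counters.  Each read is atomic, but the three totals are not
 * one consistent snapshot; this is monitoring data, not synchronization.
 */
void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += atomic_read(&env->tlb_c.full_flush_count);
        part += atomic_read(&env->tlb_c.part_flush_count);
        elide += atomic_read(&env->tlb_c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}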
......@@ -141,18 +141,53 @@ typedef struct CPUIOTLBEntry {
     MemTxAttrs attrs;
 } CPUIOTLBEntry;
 
+typedef struct CPUTLBDesc {
+    /*
+     * Describe a region covering all of the large pages allocated
+     * into the tlb.  When any page within this region is flushed,
+     * we must flush the entire tlb.  The region is matched if
+     * (addr & large_page_mask) == large_page_addr.
+     */
+    target_ulong large_page_addr;
+    target_ulong large_page_mask;
+    /* The next index to use in the tlb victim table.  */
+    size_t vindex;
+} CPUTLBDesc;
+
+/*
+ * Data elements that are shared between all MMU modes.
+ */
+typedef struct CPUTLBCommon {
+    /* Serialize updates to tlb_table and tlb_v_table, and others as noted.  */
+    QemuSpin lock;
+    /*
+     * Within dirty, for each bit N, modifications have been made to
+     * mmu_idx N since the last time that mmu_idx was flushed.
+     * Protected by tlb_c.lock.
+     */
+    uint16_t dirty;
+    /*
+     * Statistics.  These are not lock protected, but are read and
+     * written atomically.  This allows the monitor to print a snapshot
+     * of the stats without interfering with the cpu.
+     */
+    size_t full_flush_count;
+    size_t part_flush_count;
+    size_t elide_flush_count;
+} CPUTLBCommon;
+
+/*
+ * The meaning of each of the MMU modes is defined in the target code.
+ * Note that NB_MMU_MODES is not yet defined; we can only reference it
+ * within preprocessor defines that will be expanded later.
+ */
 #define CPU_COMMON_TLB \
-    /* The meaning of the MMU modes is defined in the target code. */   \
-    /* tlb_lock serializes updates to tlb_table and tlb_v_table */      \
-    QemuSpin tlb_lock;                                                  \
+    CPUTLBCommon tlb_c;                                                 \
+    CPUTLBDesc tlb_d[NB_MMU_MODES];                                     \
     CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE];                  \
     CPUTLBEntry tlb_v_table[NB_MMU_MODES][CPU_VTLB_SIZE];               \
     CPUIOTLBEntry iotlb[NB_MMU_MODES][CPU_TLB_SIZE];                    \
-    CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];                 \
-    size_t tlb_flush_count;                                             \
-    target_ulong tlb_flush_addr;                                        \
-    target_ulong tlb_flush_mask;                                        \
-    target_ulong vtlb_index;                                            \
+    CPUIOTLBEntry iotlb_v[NB_MMU_MODES][CPU_VTLB_SIZE];
 
 #else
......
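The large_page_addr/large_page_mask pair in CPUTLBDesc records, per mmu_idx, a single region covering every large page installed in that TLB, matched as (addr & large_page_mask) == large_page_addr. A simplified sketch of how a single-page flush consults it, following the "cputlb: Split large page tracking per mmu_idx" patch (helper names as in the series):

/* Sketch: flush one page for one mmu_idx, with tlb_c.lock held.  If the
 * page falls inside the recorded large-page region we cannot cheaply find
 * every entry that maps part of it, so the whole mmu_idx is flushed.
 * Simplified from the series.
 */
static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env->tlb_d[midx].large_page_addr;
    target_ulong lp_mask = env->tlb_d[midx].large_page_mask;

    if ((page & lp_mask) == lp_addr) {
        /* Page is within the large-page region: force a full flush
         * of this mmu_idx only, not of the entire TLB.  */
        tlb_flush_one_mmuidx_locked(env, midx);
    } else {
        /* Evict the direct-mapped entry and any victim-TLB copy.  */
        tlb_flush_entry_locked(tlb_entry(env, midx, page), page);
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}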
......@@ -23,6 +23,6 @@
 /* cputlb.c */
 void tlb_protect_code(ram_addr_t ram_addr);
 void tlb_unprotect_code(ram_addr_t ram_addr);
-size_t tlb_flush_count(void);
+void tlb_flush_counts(size_t *full, size_t *part, size_t *elide);
 #endif
 #endif
......@@ -429,12 +429,6 @@ struct CPUState {
     struct hax_vcpu_state *hax_vcpu;
 
-    /* The pending_tlb_flush flag is set and cleared atomically to
-     * avoid potential races. The aim of the flag is to avoid
-     * unnecessary flushes.
-     */
-    uint16_t pending_tlb_flush;
-
     int hvf_fd;
 
     /* track IOMMUs whose translations we've cached in the TCG TLB */
......
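With cpu->pending_tlb_flush removed, a cross-vCPU flush request no longer accumulates a bitmap on the CPUState: the mmu_idx bitmap rides in the async-work payload, and deduplication happens against tlb_c.dirty on the target side. A simplified sketch of the request path after the series:

/* Sketch of the request side after this series: the idxmap travels in the
 * run_on_cpu payload; filtering against already-clean mmu indexes happens
 * in the worker sketched earlier, under tlb_c.lock, on the target vCPU.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        /* Cross-vCPU: defer the flush to the target vCPU's own context.  */
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}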