/*
 * internal execution defines for qemu
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "qemu-common.h"
#include "exec/tb-context.h"

/* Allow translation results to be seen - the slowdown should be negligible, so we leave it enabled */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type.  */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#else
typedef ram_addr_t tb_page_addr_t;
#endif

/* is_jmp field values */
#define DISAS_NEXT    0 /* next instruction can be analyzed */
#define DISAS_JUMP    1 /* only pc was modified dynamically */
#define DISAS_UPDATE  2 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP 3 /* only pc was modified statically */
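
/* A minimal translator-loop sketch (hypothetical; "dc" and disas_insn()
 * are assumed names, not declared here): translation continues while
 * is_jmp stays DISAS_NEXT and stops once an insn changes pc or CPU state:
 *
 *     dc->is_jmp = DISAS_NEXT;
 *     while (dc->is_jmp == DISAS_NEXT) {
 *         disas_insn(env, dc);   // sets DISAS_JUMP/DISAS_UPDATE/DISAS_TB_JUMP
 *     }
 */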

#include "qemu/log.h"

void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @as: address space to add
 * @asidx: integer index of this address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, AddressSpace *as, int asidx);
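
/* Usage sketch (assumed target code; "secure_memory_as" is a hypothetical
 * AddressSpace): the AS count must be set before the first registration,
 * and index 0 becomes the convenience pointer cpu->as:
 *
 *     cpu->num_ases = 2;
 *     cpu_address_space_init(cpu, &address_space_memory, 0);
 *     cpu_address_space_init(cpu, secure_memory_as, 1);
 */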
/* cputlb.c */
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, ...);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @...: list of MMU indexes to flush, terminated by a negative value
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, ...);
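
/* Example calls (sketch): the MMU index list is variadic and must end
 * with a negative terminator, e.g. flush one page for indexes 0 and 1,
 * then every entry for index 2:
 *
 *     tlb_flush_page_by_mmuidx(cpu, addr, 0, 1, -1);
 *     tlb_flush_by_mmuidx(cpu, 2, -1);
 */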
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @vaddr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @vaddr to physical address @paddr) with the specified memory
 * transaction attributes. This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted. Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, target_ulong size);
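
/* Sketch of the expected call site (hypothetical target code; the page
 * table walk helper get_phys_addr() is an assumed name): tlb_fill() walks
 * the page tables and, on success, installs the translation:
 *
 *     void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType type,
 *                   int mmu_idx, uintptr_t retaddr)
 *     {
 *         hwaddr paddr;
 *         int prot;
 *         MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
 *
 *         if (get_phys_addr(cpu, addr, type, mmu_idx, &paddr, &prot, &attrs)) {
 *             cpu_loop_exit_restore(cpu, retaddr);   // deliver the guest fault
 *         }
 *         tlb_set_page_with_attrs(cpu, addr & TARGET_PAGE_MASK,
 *                                 paddr & TARGET_PAGE_MASK, attrs,
 *                                 prot, mmu_idx, TARGET_PAGE_SIZE);
 *     }
 */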
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
void probe_write(CPUArchState *env, target_ulong addr, int mmu_idx,
                 uintptr_t retaddr);
void tlb_flush_page_all(target_ulong addr);
#else
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}

static inline void tlb_flush(CPUState *cpu)
{
}

static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, ...)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, ...)
{
}
#endif

#define CODE_GEN_ALIGN           16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation.  */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced.  */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

#if defined(__arm__) || defined(_ARCH_PPC) \
    || defined(__x86_64__) || defined(__i386__) \
    || defined(__sparc__) || defined(__aarch64__) \
    || defined(__s390x__) || defined(__mips__) \
    || defined(CONFIG_TCG_INTERPRETER)
/* NOTE: Direct jump patching must be atomic to be thread-safe. */
#define USE_DIRECT_JUMP
#endif

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x10000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x20000
#define CF_IGNORE_ICOUNT 0x40000 /* Do not generate icount code */

    uint16_t invalid;

    void *tc_ptr;    /* pointer to the translated code */
    uint8_t *tc_search;  /* pointer to search data */
    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
#ifdef USE_DIRECT_JUMP
    uint16_t jmp_insn_offset[2]; /* offset of native jump instruction */
#else
    uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
#endif
    /* Each TB has an associated circular list of TBs jumping to this one.
     * jmp_list_first points to the first TB jumping to this one.
     * jmp_list_next is used to point to the next TB in a list.
     * Since each TB can have two jumps, it can participate in two lists.
     * jmp_list_first and jmp_list_next are 4-byte aligned pointers to a
     * TranslationBlock structure, but the two least significant bits of
     * them are used to encode which data field of the pointed TB should
     * be used to traverse the list further from that TB:
     * 0 => jmp_list_next[0], 1 => jmp_list_next[1], 2 => jmp_list_first.
     * In other words, 0/1 tells which jump is used in the pointed TB,
     * and 2 means that this is a pointer back to the target TB of this list.
     */
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_list_first;
};
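
/* Decoding sketch for the tagged jmp_list pointers described above (the
 * helper name is hypothetical): mask off the two low bits to recover the
 * TranslationBlock, and use them to pick the next link field:
 *
 *     static inline TranslationBlock *tb_jmp_decode(uintptr_t entry, int *idx)
 *     {
 *         *idx = entry & 3;     // 0/1: jmp_list_next[idx], 2: jmp_list_first
 *         return (TranslationBlock *)(entry & ~(uintptr_t)3);
 *     }
 */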

void tb_free(TranslationBlock *tb);
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);

#if defined(USE_DIRECT_JUMP)

#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(_ARCH_PPC)
void ppc_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 ppc_tb_set_jmp_target
#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    atomic_set((int32_t *)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
#elif defined(__aarch64__)
void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
#elif defined(__arm__)
void arm_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
#define tb_set_jmp_target1 arm_tb_set_jmp_target
#elif defined(__sparc__) || defined(__mips__)
void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
#else
#error tb_set_jmp_target1 is missing
#endif

static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    uint16_t offset = tb->jmp_insn_offset[n];
    tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
}

#else

/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->jmp_target_addr[n] = addr;
}

#endif

/* Called with tb_lock held.  */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    if (tb->jmp_list_next[n]) {
        /* Another thread has already done this while we were
         * outside of the lock; nothing to do in this case */
        return;
    }
    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc_ptr, tb->pc, n,
                           tb_next->tc_ptr, tb_next->pc);

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

    /* add in TB jmp circular list */
    tb->jmp_list_next[n] = tb_next->jmp_list_first;
    tb_next->jmp_list_first = (uintptr_t)tb | n;
}

/* GETPC is the true target of the return instruction that we'll execute.  */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed-mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that no host ISA contains a call insn smaller than
   4 bytes, so we don't worry about special-casing this.  */
#define GETPC_ADJ   2
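
/* Illustration (sketch): a caller that needs to map a host return address
 * back to the guest insn applies the adjustment before searching, so the
 * resulting address falls inside the call insn rather than after it:
 *
 *     uintptr_t host_pc = GETPC() - GETPC_ADJ;
 */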

#if !defined(CONFIG_USER_ONLY)

struct MemoryRegion *iotlb_to_region(CPUState *cpu,
                                     hwaddr index, MemTxAttrs attrs);

void tlb_fill(CPUState *cpu, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr);

#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
bool memory_region_is_unassigned(MemoryRegion *mr);

#endif

/* vl.c */
extern int singlestep;

#endif