tcg.h 40.9 KB
Newer Older
B
bellard 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23
/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
R
Richard Henderson 已提交
24 25 26 27

#ifndef TCG_H
#define TCG_H

28
#include "qemu-common.h"
29
#include "cpu.h"
P
Paolo Bonzini 已提交
30
#include "exec/tb-context.h"
31
#include "qemu/bitops.h"
32 33
#include "tcg-target.h"

P
Paolo Bonzini 已提交
34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54
/* XXX: make safe guess about sizes */
#define MAX_OP_PER_INSTR 266

#if HOST_LONG_BITS == 32
#define MAX_OPC_PARAM_PER_ARG 2
#else
#define MAX_OPC_PARAM_PER_ARG 1
#endif
#define MAX_OPC_PARAM_IARGS 5
#define MAX_OPC_PARAM_OARGS 1
#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)

/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
 * and up to 4 + N parameters on 64-bit archs
 * (N = number of input arguments + output arguments).  */
#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
#define OPC_BUF_SIZE 640
#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)

#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)

55 56
#define CPU_TEMP_BUF_NLONGS 128

57 58 59 60 61 62 63 64 65
/* Default target word size to pointer size.  */
#ifndef TCG_TARGET_REG_BITS
# if UINTPTR_MAX == UINT32_MAX
#  define TCG_TARGET_REG_BITS 32
# elif UINTPTR_MAX == UINT64_MAX
#  define TCG_TARGET_REG_BITS 64
# else
#  error Unknown pointer size for tcg target
# endif
66 67
#endif

B
bellard 已提交
68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89
#if TCG_TARGET_REG_BITS == 32
typedef int32_t tcg_target_long;
typedef uint32_t tcg_target_ulong;
#define TCG_PRIlx PRIx32
#define TCG_PRIld PRId32
#elif TCG_TARGET_REG_BITS == 64
typedef int64_t tcg_target_long;
typedef uint64_t tcg_target_ulong;
#define TCG_PRIlx PRIx64
#define TCG_PRIld PRId64
#else
#error unsupported
#endif

#if TCG_TARGET_NB_REGS <= 32
typedef uint32_t TCGRegSet;
#elif TCG_TARGET_NB_REGS <= 64
typedef uint64_t TCGRegSet;
#else
#error unsupported
#endif

90
#if TCG_TARGET_REG_BITS == 32
91
/* Turn some undef macros into false macros.  */
92 93
#define TCG_TARGET_HAS_extrl_i64_i32    0
#define TCG_TARGET_HAS_extrh_i64_i32    0
94
#define TCG_TARGET_HAS_div_i64          0
95
#define TCG_TARGET_HAS_rem_i64          0
96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114
#define TCG_TARGET_HAS_div2_i64         0
#define TCG_TARGET_HAS_rot_i64          0
#define TCG_TARGET_HAS_ext8s_i64        0
#define TCG_TARGET_HAS_ext16s_i64       0
#define TCG_TARGET_HAS_ext32s_i64       0
#define TCG_TARGET_HAS_ext8u_i64        0
#define TCG_TARGET_HAS_ext16u_i64       0
#define TCG_TARGET_HAS_ext32u_i64       0
#define TCG_TARGET_HAS_bswap16_i64      0
#define TCG_TARGET_HAS_bswap32_i64      0
#define TCG_TARGET_HAS_bswap64_i64      0
#define TCG_TARGET_HAS_neg_i64          0
#define TCG_TARGET_HAS_not_i64          0
#define TCG_TARGET_HAS_andc_i64         0
#define TCG_TARGET_HAS_orc_i64          0
#define TCG_TARGET_HAS_eqv_i64          0
#define TCG_TARGET_HAS_nand_i64         0
#define TCG_TARGET_HAS_nor_i64          0
#define TCG_TARGET_HAS_deposit_i64      0
R
Richard Henderson 已提交
115
#define TCG_TARGET_HAS_movcond_i64      0
116 117 118
#define TCG_TARGET_HAS_add2_i64         0
#define TCG_TARGET_HAS_sub2_i64         0
#define TCG_TARGET_HAS_mulu2_i64        0
119
#define TCG_TARGET_HAS_muls2_i64        0
120 121
#define TCG_TARGET_HAS_muluh_i64        0
#define TCG_TARGET_HAS_mulsh_i64        0
122 123 124
/* Turn some undef macros into true macros.  */
#define TCG_TARGET_HAS_add2_i32         1
#define TCG_TARGET_HAS_sub2_i32         1
125 126
#endif

127 128 129 130 131 132 133
#ifndef TCG_TARGET_deposit_i32_valid
#define TCG_TARGET_deposit_i32_valid(ofs, len) 1
#endif
#ifndef TCG_TARGET_deposit_i64_valid
#define TCG_TARGET_deposit_i64_valid(ofs, len) 1
#endif

134 135 136 137 138
/* Only one of DIV or DIV2 should be defined.  */
#if defined(TCG_TARGET_HAS_div_i32)
#define TCG_TARGET_HAS_div2_i32         0
#elif defined(TCG_TARGET_HAS_div2_i32)
#define TCG_TARGET_HAS_div_i32          0
139
#define TCG_TARGET_HAS_rem_i32          0
140 141 142 143 144
#endif
#if defined(TCG_TARGET_HAS_div_i64)
#define TCG_TARGET_HAS_div2_i64         0
#elif defined(TCG_TARGET_HAS_div2_i64)
#define TCG_TARGET_HAS_div_i64          0
145
#define TCG_TARGET_HAS_rem_i64          0
146 147
#endif

148 149 150 151 152 153 154
/* For 32-bit targets, some sort of unsigned widening multiply is required.  */
#if TCG_TARGET_REG_BITS == 32 \
    && !(defined(TCG_TARGET_HAS_mulu2_i32) \
         || defined(TCG_TARGET_HAS_muluh_i32))
# error "Missing unsigned widening multiply"
#endif

155 156 157 158 159 160
#ifndef TARGET_INSN_START_EXTRA_WORDS
# define TARGET_INSN_START_WORDS 1
#else
# define TARGET_INSN_START_WORDS (1 + TARGET_INSN_START_EXTRA_WORDS)
#endif

161
typedef enum TCGOpcode {
162
#define DEF(name, oargs, iargs, cargs, flags) INDEX_op_ ## name,
B
bellard 已提交
163 164 165
#include "tcg-opc.h"
#undef DEF
    NB_OPS,
166
} TCGOpcode;
B
bellard 已提交
167 168 169 170

#define tcg_regset_clear(d) (d) = 0
#define tcg_regset_set(d, s) (d) = (s)
#define tcg_regset_set32(d, reg, val32) (d) |= (val32) << (reg)
171 172
#define tcg_regset_set_reg(d, r) (d) |= 1L << (r)
#define tcg_regset_reset_reg(d, r) (d) &= ~(1L << (r))
B
bellard 已提交
173 174 175 176 177 178
#define tcg_regset_test_reg(d, r) (((d) >> (r)) & 1)
#define tcg_regset_or(d, a, b) (d) = (a) | (b)
#define tcg_regset_and(d, a, b) (d) = (a) & (b)
#define tcg_regset_andnot(d, a, b) (d) = (a) & ~(b)
#define tcg_regset_not(d, a) (d) = ~(a)

179
#ifndef TCG_TARGET_INSN_UNIT_SIZE
180 181
# error "Missing TCG_TARGET_INSN_UNIT_SIZE"
#elif TCG_TARGET_INSN_UNIT_SIZE == 1
182 183 184 185 186 187 188 189 190 191 192 193
typedef uint8_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 2
typedef uint16_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 4
typedef uint32_t tcg_insn_unit;
#elif TCG_TARGET_INSN_UNIT_SIZE == 8
typedef uint64_t tcg_insn_unit;
#else
/* The port better have done this.  */
#endif


194
#if defined CONFIG_DEBUG_TCG || defined QEMU_STATIC_ANALYSIS
195 196 197 198 199 200 201 202
# define tcg_debug_assert(X) do { assert(X); } while (0)
#elif QEMU_GNUC_PREREQ(4, 5)
# define tcg_debug_assert(X) \
    do { if (!(X)) { __builtin_unreachable(); } } while (0)
#else
# define tcg_debug_assert(X) do { (void)(X); } while (0)
#endif

B
bellard 已提交
203 204 205
typedef struct TCGRelocation {
    struct TCGRelocation *next;
    int type;
206
    tcg_insn_unit *ptr;
207
    intptr_t addend;
B
bellard 已提交
208 209 210
} TCGRelocation; 

typedef struct TCGLabel {
211 212
    unsigned has_value : 1;
    unsigned id : 31;
B
bellard 已提交
213
    union {
214
        uintptr_t value;
215
        tcg_insn_unit *value_ptr;
B
bellard 已提交
216 217 218 219 220 221
        TCGRelocation *first_reloc;
    } u;
} TCGLabel;

typedef struct TCGPool {
    struct TCGPool *next;
222 223
    int size;
    uint8_t data[0] __attribute__ ((aligned));
B
bellard 已提交
224 225 226 227
} TCGPool;

#define TCG_POOL_CHUNK_SIZE 32768

B
blueswir1 已提交
228
#define TCG_MAX_TEMPS 512
R
Richard Henderson 已提交
229
#define TCG_MAX_INSNS 512
B
bellard 已提交
230

231 232 233 234
/* when the size of the arguments of a called function is smaller than
   this value, they are statically allocated in the TB stack frame */
#define TCG_STATIC_CALL_ARGS_SIZE 128

235 236 237 238
typedef enum TCGType {
    TCG_TYPE_I32,
    TCG_TYPE_I64,
    TCG_TYPE_COUNT, /* number of different types */
B
bellard 已提交
239

240
    /* An alias for the size of the host register.  */
B
bellard 已提交
241
#if TCG_TARGET_REG_BITS == 32
242
    TCG_TYPE_REG = TCG_TYPE_I32,
243
#else
244
    TCG_TYPE_REG = TCG_TYPE_I64,
245
#endif
246

247 248 249 250 251 252
    /* An alias for the size of the native pointer.  */
#if UINTPTR_MAX == UINT32_MAX
    TCG_TYPE_PTR = TCG_TYPE_I32,
#else
    TCG_TYPE_PTR = TCG_TYPE_I64,
#endif
253 254

    /* An alias for the size of the target "long", aka register.  */
255 256
#if TARGET_LONG_BITS == 64
    TCG_TYPE_TL = TCG_TYPE_I64,
B
bellard 已提交
257
#else
258
    TCG_TYPE_TL = TCG_TYPE_I32,
B
bellard 已提交
259
#endif
260
} TCGType;
B
bellard 已提交
261

R
Richard Henderson 已提交
262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285
/* Constants for qemu_ld and qemu_st for the Memory Operation field.  */
typedef enum TCGMemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 3,   /* Mask for the above.  */

    MO_SIGN  = 4,   /* Sign-extended, otherwise zero-extended.  */

    MO_BSWAP = 8,   /* Host reverse endian.  */
#ifdef HOST_WORDS_BIGENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef TARGET_WORDS_BIGENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif

R
Richard Henderson 已提交
286
    /* MO_UNALN accesses are never checked for alignment.
287 288 289
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     * The default depends on whether the target CPU defines ALIGNED_ONLY.
290
     *
291 292
     * Some architectures (e.g. ARMv8) need the address which is aligned
     * to a size more than the size of the memory access.
293 294 295 296 297
     * Some architectures (e.g. SPARCv9) need an address which is aligned,
     * but less strictly than the natural alignment.
     *
     * MO_ALIGN supposes the alignment size is the size of a memory access.
     *
298 299
     * There are three options:
     * - unaligned access permitted (MO_UNALN).
300 301 302
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x where 'x' is a size in bytes);
303 304 305
     */
    MO_ASHIFT = 4,
    MO_AMASK = 7 << MO_ASHIFT,
R
Richard Henderson 已提交
306 307 308 309 310 311 312
#ifdef ALIGNED_ONLY
    MO_ALIGN = 0,
    MO_UNALN = MO_AMASK,
#else
    MO_ALIGN = MO_AMASK,
    MO_UNALN = 0,
#endif
313 314 315 316 317 318
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,
R
Richard Henderson 已提交
319

R
Richard Henderson 已提交
320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349
    /* Combinations of the above, for ease of use.  */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_Q     = MO_64,

    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LEQ   = MO_LE | MO_Q,

    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BEQ   = MO_BE | MO_Q,

    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TEQ   = MO_TE | MO_Q,

    MO_SSIZE = MO_SIZE | MO_SIGN,
} TCGMemOp;

350 351 352 353 354 355
/**
 * get_alignment_bits
 * @memop: TCGMemOp value
 *
 * Extract the alignment size from the memop.
 */
356
static inline unsigned get_alignment_bits(TCGMemOp memop)
357
{
358
    unsigned a = memop & MO_AMASK;
359 360

    if (a == MO_UNALN) {
361 362
        /* No alignment required.  */
        a = 0;
363
    } else if (a == MO_ALIGN) {
364 365
        /* A natural alignment requirement.  */
        a = memop & MO_SIZE;
366
    } else {
367 368
        /* A specific alignment requirement.  */
        a = a >> MO_ASHIFT;
369 370 371
    }
#if defined(CONFIG_SOFTMMU)
    /* The requested alignment cannot overlap the TLB flags.  */
372
    tcg_debug_assert((TLB_FLAGS_MASK & ((1 << a) - 1)) == 0);
373
#endif
374
    return a;
375 376
}

B
bellard 已提交
377 378
typedef tcg_target_ulong TCGArg;

379 380 381 382 383
/* Define a type and accessor macros for variables.  Using pointer types
   is nice because it gives some level of type safely.  Converting to and
   from intptr_t rather than int reduces the number of sign-extension
   instructions that get implied on 64-bit hosts.  Users of tcg_gen_* don't
   need to know about any of this, and should treat TCGv as an opaque type.
384
   In addition we do typechecking for different types of variables.  TCGv_i32
P
pbrook 已提交
385
   and TCGv_i64 are 32/64-bit variables respectively.  TCGv and TCGv_ptr
386
   are aliases for target_ulong and host pointer sized values respectively.  */
P
pbrook 已提交
387

388 389 390
typedef struct TCGv_i32_d *TCGv_i32;
typedef struct TCGv_i64_d *TCGv_i64;
typedef struct TCGv_ptr_d *TCGv_ptr;
391
typedef TCGv_ptr TCGv_env;
392 393 394 395 396 397 398
#if TARGET_LONG_BITS == 32
#define TCGv TCGv_i32
#elif TARGET_LONG_BITS == 64
#define TCGv TCGv_i64
#else
#error Unhandled TARGET_LONG_BITS value
#endif
P
pbrook 已提交
399

400 401 402 403
/* Wrap an integer temp index as an opaque TCGv_i32 handle
   (see the type-safety note on the TCGv_* typedefs above).  */
static inline TCGv_i32 QEMU_ARTIFICIAL MAKE_TCGV_I32(intptr_t i)
{
    return (TCGv_i32)i;
}
P
pbrook 已提交
404

405
/* Wrap an integer temp index as an opaque TCGv_i64 handle.  */
static inline TCGv_i64 QEMU_ARTIFICIAL MAKE_TCGV_I64(intptr_t i)
{
    return (TCGv_i64)i;
}
P
pbrook 已提交
409

410
/* Wrap an integer temp index as an opaque TCGv_ptr handle.  */
static inline TCGv_ptr QEMU_ARTIFICIAL MAKE_TCGV_PTR(intptr_t i)
{
    return (TCGv_ptr)i;
}
P
pbrook 已提交
414

415 416 417 418
/* Recover the integer temp index from a TCGv_i32 handle.  */
static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I32(TCGv_i32 t)
{
    return (intptr_t)t;
}
P
pbrook 已提交
419

420 421 422 423 424 425 426 427 428
/* Recover the integer temp index from a TCGv_i64 handle.  */
static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_I64(TCGv_i64 t)
{
    return (intptr_t)t;
}

/* Recover the integer temp index from a TCGv_ptr handle.  */
static inline intptr_t QEMU_ARTIFICIAL GET_TCGV_PTR(TCGv_ptr t)
{
    return (intptr_t)t;
}
A
aurel32 已提交
429

P
pbrook 已提交
430
#if TCG_TARGET_REG_BITS == 32
431 432
#define TCGV_LOW(t) MAKE_TCGV_I32(GET_TCGV_I64(t))
#define TCGV_HIGH(t) MAKE_TCGV_I32(GET_TCGV_I64(t) + 1)
P
pbrook 已提交
433 434
#endif

A
aurel32 已提交
435 436
#define TCGV_EQUAL_I32(a, b) (GET_TCGV_I32(a) == GET_TCGV_I32(b))
#define TCGV_EQUAL_I64(a, b) (GET_TCGV_I64(a) == GET_TCGV_I64(b))
437
#define TCGV_EQUAL_PTR(a, b) (GET_TCGV_PTR(a) == GET_TCGV_PTR(b))
A
aurel32 已提交
438

P
pbrook 已提交
439
/* Dummy definition to avoid compiler warnings.  */
P
pbrook 已提交
440 441
#define TCGV_UNUSED_I32(x) x = MAKE_TCGV_I32(-1)
#define TCGV_UNUSED_I64(x) x = MAKE_TCGV_I64(-1)
442
#define TCGV_UNUSED_PTR(x) x = MAKE_TCGV_PTR(-1)
P
pbrook 已提交
443

R
Richard Henderson 已提交
444 445
#define TCGV_IS_UNUSED_I32(x) (GET_TCGV_I32(x) == -1)
#define TCGV_IS_UNUSED_I64(x) (GET_TCGV_I64(x) == -1)
446
#define TCGV_IS_UNUSED_PTR(x) (GET_TCGV_PTR(x) == -1)
R
Richard Henderson 已提交
447

B
bellard 已提交
448
/* call flags */
A
Aurelien Jarno 已提交
449 450 451 452 453 454 455 456 457 458 459 460 461 462 463
/* Helper does not read globals (either directly or through an exception). It
   implies TCG_CALL_NO_WRITE_GLOBALS. */
#define TCG_CALL_NO_READ_GLOBALS    0x0010
/* Helper does not write globals */
#define TCG_CALL_NO_WRITE_GLOBALS   0x0020
/* Helper can be safely suppressed if the return value is not used. */
#define TCG_CALL_NO_SIDE_EFFECTS    0x0040

/* convenience version of most used call flags */
#define TCG_CALL_NO_RWG         TCG_CALL_NO_READ_GLOBALS
#define TCG_CALL_NO_WG          TCG_CALL_NO_WRITE_GLOBALS
#define TCG_CALL_NO_SE          TCG_CALL_NO_SIDE_EFFECTS
#define TCG_CALL_NO_RWG_SE      (TCG_CALL_NO_RWG | TCG_CALL_NO_SE)
#define TCG_CALL_NO_WG_SE       (TCG_CALL_NO_WG | TCG_CALL_NO_SE)

B
bellard 已提交
464
/* used to align parameters */
P
pbrook 已提交
465
#define TCG_CALL_DUMMY_TCGV     MAKE_TCGV_I32(-1)
B
bellard 已提交
466 467
#define TCG_CALL_DUMMY_ARG      ((TCGArg)(-1))

468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484
typedef enum {
    /* Used to indicate the type of accesses on which ordering
       is to be ensured.  Modeled after SPARC barriers.  */
    TCG_MO_LD_LD  = 0x01,
    TCG_MO_ST_LD  = 0x02,
    TCG_MO_LD_ST  = 0x04,
    TCG_MO_ST_ST  = 0x08,
    TCG_MO_ALL    = 0x0F,  /* OR of the above */

    /* Used to indicate the kind of ordering which is to be ensured by the
       instruction.  These types are derived from x86/aarch64 instructions.
       It should be noted that these are different from C11 semantics.  */
    TCG_BAR_LDAQ  = 0x10,  /* Following ops will not come forward */
    TCG_BAR_STRL  = 0x20,  /* Previous ops will not be delayed */
    TCG_BAR_SC    = 0x30,  /* No ops cross barrier; OR of the above */
} TCGBar;

485 486
/* Conditions.  Note that these are laid out for easy manipulation by
   the functions below:
     bit 0 is used for inverting;
     bit 1 is signed,
     bit 2 is unsigned,
     bit 3 is used with bit 0 for swapping signed/unsigned.  */
typedef enum {
    /* non-signed */
    TCG_COND_NEVER  = 0 | 0 | 0 | 0,
    TCG_COND_ALWAYS = 0 | 0 | 0 | 1,
    TCG_COND_EQ     = 8 | 0 | 0 | 0,
    TCG_COND_NE     = 8 | 0 | 0 | 1,
    /* signed */
    TCG_COND_LT     = 0 | 0 | 2 | 0,
    TCG_COND_GE     = 0 | 0 | 2 | 1,
    TCG_COND_LE     = 8 | 0 | 2 | 0,
    TCG_COND_GT     = 8 | 0 | 2 | 1,
    /* unsigned */
    TCG_COND_LTU    = 0 | 4 | 0 | 0,
    TCG_COND_GEU    = 0 | 4 | 0 | 1,
    TCG_COND_LEU    = 8 | 4 | 0 | 0,
    TCG_COND_GTU    = 8 | 4 | 0 | 1,
} TCGCond;

R
Richard Henderson 已提交
509
/* Invert the sense of the comparison.  */
R
Richard Henderson 已提交
510 511 512 513 514
static inline TCGCond tcg_invert_cond(TCGCond c)
{
    return (TCGCond)(c ^ 1);
}

R
Richard Henderson 已提交
515 516 517
/* Swap the operands in a comparison.
   Equality tests (and NEVER/ALWAYS) are symmetric and pass through
   unchanged; for the inequalities (bit 1 or 2 set), toggling bits 0
   and 3 exchanges LT<->GT and LE<->GE.  */
static inline TCGCond tcg_swap_cond(TCGCond c)
{
    if (c & 6) {
        return (TCGCond)(c ^ 9);
    }
    return c;
}

R
Richard Henderson 已提交
521
/* Create an "unsigned" version of a "signed" comparison.  */
R
Richard Henderson 已提交
522 523
static inline TCGCond tcg_unsigned_cond(TCGCond c)
{
524
    return c & 2 ? (TCGCond)(c ^ 6) : c;
R
Richard Henderson 已提交
525 526
}

R
Richard Henderson 已提交
527
/* Must a comparison be considered unsigned?  */
R
Richard Henderson 已提交
528 529
static inline bool is_unsigned_cond(TCGCond c)
{
530
    return (c & 4) != 0;
R
Richard Henderson 已提交
531 532
}

R
Richard Henderson 已提交
533 534 535 536 537 538 539 540 541 542 543 544 545 546 547
/* Create a "high" version of a double-word comparison.
   This removes equality from a LTE or GTE comparison.  */
static inline TCGCond tcg_high_cond(TCGCond c)
{
    if (c == TCG_COND_GE || c == TCG_COND_LE
        || c == TCG_COND_GEU || c == TCG_COND_LEU) {
        /* Toggling bit 3 drops the equality part: LE -> LT, GE -> GT.  */
        return (TCGCond)(c ^ 8);
    }
    return c;
}

548 549 550 551 552 553
typedef enum TCGTempVal {
    TEMP_VAL_DEAD,
    TEMP_VAL_REG,
    TEMP_VAL_MEM,
    TEMP_VAL_CONST,
} TCGTempVal;
B
bellard 已提交
554 555

typedef struct TCGTemp {
556
    TCGReg reg:8;
557 558 559
    TCGTempVal val_type:8;
    TCGType base_type:8;
    TCGType type:8;
B
bellard 已提交
560
    unsigned int fixed_reg:1;
561 562
    unsigned int indirect_reg:1;
    unsigned int indirect_base:1;
B
bellard 已提交
563 564
    unsigned int mem_coherent:1;
    unsigned int mem_allocated:1;
565
    unsigned int temp_local:1; /* If true, the temp is saved across
B
bellard 已提交
566
                                  basic blocks. Otherwise, it is not
567
                                  preserved across basic blocks. */
568
    unsigned int temp_allocated:1; /* never used for code gen */
569 570

    tcg_target_long val;
571
    struct TCGTemp *mem_base;
572
    intptr_t mem_offset;
B
bellard 已提交
573 574 575 576 577
    const char *name;
} TCGTemp;

typedef struct TCGContext TCGContext;

578 579 580 581
typedef struct TCGTempSet {
    unsigned long l[BITS_TO_LONGS(TCG_MAX_TEMPS)];
} TCGTempSet;

582 583 584 585 586 587 588 589
/* While we limit helpers to 6 arguments, for 32-bit hosts, with padding,
   this imples a max of 6*2 (64-bit in) + 2 (64-bit out) = 14 operands.
   There are never more than 2 outputs, which means that we can store all
   dead + sync data within 16 bits.  */
#define DEAD_ARG  4
#define SYNC_ARG  1
typedef uint16_t TCGLifeData;

590 591
/* The layout here is designed to avoid crossing of a 32-bit boundary.
   If we do so, gcc adds padding, expanding the size to 12.  */
592
typedef struct TCGOp {
593 594 595 596 597
    TCGOpcode opc   : 8;        /*  8 */

    /* Index of the prev/next op, or 0 for the end of the list.  */
    unsigned prev   : 10;       /* 18 */
    unsigned next   : 10;       /* 28 */
598 599

    /* The number of out and in parameter for a call.  */
600 601
    unsigned calli  : 4;        /* 32 */
    unsigned callo  : 2;        /* 34 */
602

R
Richard Henderson 已提交
603
    /* Index of the arguments for this op, or 0 for zero-operand ops.  */
604
    unsigned args   : 14;       /* 48 */
605

606 607
    /* Lifetime data of the operands.  */
    unsigned life   : 16;       /* 64 */
608 609
} TCGOp;

R
Richard Henderson 已提交
610 611
/* Make sure operands fit in the bitfields above.  */
QEMU_BUILD_BUG_ON(NB_OPS > (1 << 8));
612 613
QEMU_BUILD_BUG_ON(OPC_BUF_SIZE > (1 << 10));
QEMU_BUILD_BUG_ON(OPPARAM_BUF_SIZE > (1 << 14));
R
Richard Henderson 已提交
614 615 616

/* Make sure that we don't overflow 64 bits without noticing.  */
QEMU_BUILD_BUG_ON(sizeof(TCGOp) > 8);
617

B
bellard 已提交
618 619
struct TCGContext {
    uint8_t *pool_cur, *pool_end;
620
    TCGPool *pool_first, *pool_current, *pool_first_large;
B
bellard 已提交
621 622 623
    int nb_labels;
    int nb_globals;
    int nb_temps;
624
    int nb_indirects;
B
bellard 已提交
625 626

    /* goto_tb support */
627
    tcg_insn_unit *code_buf;
628 629 630
    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
    uint16_t *tb_jmp_insn_offset; /* tb->jmp_insn_offset if USE_DIRECT_JUMP */
    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_addr if !USE_DIRECT_JUMP */
B
bellard 已提交
631 632

    TCGRegSet reserved_regs;
633 634 635
    intptr_t current_frame_offset;
    intptr_t frame_start;
    intptr_t frame_end;
636
    TCGTemp *frame_temp;
B
bellard 已提交
637

638
    tcg_insn_unit *code_ptr;
B
bellard 已提交
639

640
    GHashTable *helpers;
B
bellard 已提交
641 642 643 644 645 646 647 648 649 650 651 652

#ifdef CONFIG_PROFILER
    /* profiling info */
    int64_t tb_count1;
    int64_t tb_count;
    int64_t op_count; /* total insn count */
    int op_count_max; /* max insn per TB */
    int64_t temp_count;
    int temp_count_max;
    int64_t del_op_count;
    int64_t code_in_len;
    int64_t code_out_len;
653
    int64_t search_out_len;
B
bellard 已提交
654 655 656
    int64_t interm_time;
    int64_t code_time;
    int64_t la_time;
A
Aurelien Jarno 已提交
657
    int64_t opt_time;
B
bellard 已提交
658 659 660
    int64_t restore_count;
    int64_t restore_time;
#endif
661 662 663

#ifdef CONFIG_DEBUG_TCG
    int temps_in_use;
664
    int goto_tb_issue_mask;
665
#endif
666

667 668
    int gen_next_op_idx;
    int gen_next_parm_idx;
669

670 671 672 673
    /* Code generation.  Note that we specifically do not use tcg_insn_unit
       here, because there's too much arithmetic throughout that relies
       on addition and subtraction working on bytes.  Rely on the GCC
       extension that allows arithmetic on void*.  */
E
Evgeny Voevodin 已提交
674
    int code_gen_max_blocks;
675 676
    void *code_gen_prologue;
    void *code_gen_buffer;
E
Evgeny Voevodin 已提交
677
    size_t code_gen_buffer_size;
678
    void *code_gen_ptr;
E
Evgeny Voevodin 已提交
679

680 681 682
    /* Threshold to flush the translated code buffer.  */
    void *code_gen_highwater;

683 684
    TBContext tb_ctx;

685 686 687 688
    /* Track which vCPU triggers events */
    CPUState *cpu;                      /* *_trans */
    TCGv_env tcg_env;                   /* *_exec  */

689
    /* The TCGBackendData structure is private to tcg-target.inc.c.  */
R
Richard Henderson 已提交
690
    struct TCGBackendData *be;
691 692 693 694

    TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
    TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */

695 696 697
    /* Tells which temporary holds a given register.
       It does not take into account fixed registers */
    TCGTemp *reg_to_temp[TCG_TARGET_NB_REGS];
698 699 700 701

    TCGOp gen_op_buf[OPC_BUF_SIZE];
    TCGArg gen_opparam_buf[OPPARAM_BUF_SIZE];

702 703
    uint16_t gen_insn_end_off[TCG_MAX_INSNS];
    target_ulong gen_insn_data[TCG_MAX_INSNS][TARGET_INSN_START_WORDS];
B
bellard 已提交
704 705 706
};

extern TCGContext tcg_ctx;
R
Richard Henderson 已提交
707
extern bool parallel_cpus;
B
bellard 已提交
708

E
Edgar E. Iglesias 已提交
709 710 711 712 713 714
/* Overwrite argument @arg of the op at index @op_idx in the current
   opcode buffer with value @v.  */
static inline void tcg_set_insn_param(int op_idx, int arg, TCGArg v)
{
    TCGOp *op = &tcg_ctx.gen_op_buf[op_idx];
    tcg_ctx.gen_opparam_buf[op->args + arg] = v;
}

715 716 717
/* The number of opcodes emitted so far.  */
static inline int tcg_op_buf_count(void)
{
718
    return tcg_ctx.gen_next_op_idx;
719 720 721 722 723 724 725 726
}

/* Test for whether to terminate the TB for using too many opcodes.  */
static inline bool tcg_op_buf_full(void)
{
    /* OPC_MAX_SIZE is OPC_BUF_SIZE minus MAX_OP_PER_INSTR of headroom,
       so the buffer cannot overflow mid-instruction.  */
    return tcg_op_buf_count() >= OPC_MAX_SIZE;
}

B
bellard 已提交
727 728 729 730 731
/* pool based memory allocation */

void *tcg_malloc_internal(TCGContext *s, int size);
void tcg_pool_reset(TCGContext *s);

K
KONRAD Frederic 已提交
732 733 734 735
void tb_lock(void);
void tb_unlock(void);
void tb_lock_reset(void);

B
bellard 已提交
736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751
/* Allocate @size bytes from the TCG context's memory pool.  The size is
   rounded up to a multiple of sizeof(long); the fast path bumps the
   current pool pointer, and tcg_malloc_internal() handles the case where
   the current pool chunk is exhausted.  Pool memory is reclaimed in bulk
   via tcg_pool_reset(), never freed individually.  */
static inline void *tcg_malloc(int size)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *ptr, *ptr_end;
    /* Round up to long alignment.  */
    size = (size + sizeof(long) - 1) & ~(sizeof(long) - 1);
    ptr = s->pool_cur;
    ptr_end = ptr + size;
    if (unlikely(ptr_end > s->pool_end)) {
        /* Use the local context pointer rather than re-taking &tcg_ctx.  */
        return tcg_malloc_internal(s, size);
    } else {
        s->pool_cur = ptr_end;
        return ptr;
    }
}

void tcg_context_init(TCGContext *s);
752
void tcg_prologue_init(TCGContext *s);
B
bellard 已提交
753 754
void tcg_func_start(TCGContext *s);

755
int tcg_gen_code(TCGContext *s, TranslationBlock *tb);
B
bellard 已提交
756

757
void tcg_set_frame(TCGContext *s, TCGReg reg, intptr_t start, intptr_t size);
P
pbrook 已提交
758

759 760
int tcg_global_mem_new_internal(TCGType, TCGv_ptr, intptr_t, const char *);

761 762
TCGv_i32 tcg_global_reg_new_i32(TCGReg reg, const char *name);
TCGv_i64 tcg_global_reg_new_i64(TCGReg reg, const char *name);
763

P
pbrook 已提交
764
TCGv_i32 tcg_temp_new_internal_i32(int temp_local);
765 766 767 768 769 770 771 772 773 774 775 776
TCGv_i64 tcg_temp_new_internal_i64(int temp_local);

void tcg_temp_free_i32(TCGv_i32 arg);
void tcg_temp_free_i64(TCGv_i64 arg);

/* Create a 32-bit global that lives in memory at @reg + @offset, with
   name @name; returns the new temp index wrapped as a TCGv_i32.  */
static inline TCGv_i32 tcg_global_mem_new_i32(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    int idx = tcg_global_mem_new_internal(TCG_TYPE_I32, reg, offset, name);
    return MAKE_TCGV_I32(idx);
}

P
pbrook 已提交
777 778 779 780
/* Allocate a new 32-bit temporary (not preserved across basic blocks).  */
static inline TCGv_i32 tcg_temp_new_i32(void)
{
    return tcg_temp_new_internal_i32(0);
}
781

P
pbrook 已提交
782 783 784 785 786
/* Allocate a new 32-bit local temporary (saved across basic blocks).  */
static inline TCGv_i32 tcg_temp_local_new_i32(void)
{
    return tcg_temp_new_internal_i32(1);
}

787 788 789 790 791 792 793
/* Create a 64-bit global that lives in memory at @reg + @offset, with
   name @name; returns the new temp index wrapped as a TCGv_i64.  */
static inline TCGv_i64 tcg_global_mem_new_i64(TCGv_ptr reg, intptr_t offset,
                                              const char *name)
{
    int idx = tcg_global_mem_new_internal(TCG_TYPE_I64, reg, offset, name);
    return MAKE_TCGV_I64(idx);
}

P
pbrook 已提交
794
/* Allocate a new 64-bit temporary (not preserved across basic blocks).  */
static inline TCGv_i64 tcg_temp_new_i64(void)
{
    return tcg_temp_new_internal_i64(0);
}
798

P
pbrook 已提交
799
/* Allocate a new 64-bit local temporary (saved across basic blocks).  */
static inline TCGv_i64 tcg_temp_local_new_i64(void)
{
    return tcg_temp_new_internal_i64(1);
}
P
pbrook 已提交
803

804 805 806 807 808 809 810 811 812 813 814 815 816
#if defined(CONFIG_DEBUG_TCG)
/* If you call tcg_clear_temp_count() at the start of a section of
 * code which is not supposed to leak any TCG temporaries, then
 * calling tcg_check_temp_count() at the end of the section will
 * return 1 if the section did in fact leak a temporary.
 */
void tcg_clear_temp_count(void);
int tcg_check_temp_count(void);
#else
#define tcg_clear_temp_count() do { } while (0)
#define tcg_check_temp_count() 0
#endif

817
void tcg_dump_info(FILE *f, fprintf_function cpu_fprintf);
818
void tcg_dump_op_count(FILE *f, fprintf_function cpu_fprintf);
B
bellard 已提交
819 820 821 822 823 824 825

#define TCG_CT_ALIAS  0x80
#define TCG_CT_IALIAS 0x40
#define TCG_CT_REG    0x01
#define TCG_CT_CONST  0x02 /* any constant of register size */

typedef struct TCGArgConstraint {
826 827
    uint16_t ct;
    uint8_t alias_index;
B
bellard 已提交
828 829 830 831 832 833 834
    union {
        TCGRegSet regs;
    } u;
} TCGArgConstraint;

#define TCG_MAX_OP_ARGS 16

835 836 837 838 839 840
/* Bits for TCGOpDef->flags, 8 bits available.  */
enum {
    /* Instruction defines the end of a basic block.  */
    TCG_OPF_BB_END       = 0x01,
    /* Instruction clobbers call registers and potentially update globals.  */
    TCG_OPF_CALL_CLOBBER = 0x02,
841 842
    /* Instruction has side effects: it cannot be removed if its outputs
       are not used, and might trigger exceptions.  */
843 844 845
    TCG_OPF_SIDE_EFFECTS = 0x04,
    /* Instruction operands are 64-bits (otherwise 32-bits).  */
    TCG_OPF_64BIT        = 0x08,
846 847
    /* Instruction is optional and not implemented by the host, or insn
       is generic and should not be implemened by the host.  */
848
    TCG_OPF_NOT_PRESENT  = 0x10,
849
};
B
bellard 已提交
850 851 852 853 854 855 856

typedef struct TCGOpDef {
    const char *name;
    uint8_t nb_oargs, nb_iargs, nb_cargs, nb_args;
    uint8_t flags;
    TCGArgConstraint *args_ct;
    int *sorted_args;
857 858 859
#if defined(CONFIG_DEBUG_TCG)
    int used;
#endif
B
bellard 已提交
860
} TCGOpDef;
861 862

extern TCGOpDef tcg_op_defs[];
863 864
extern const size_t tcg_op_defs_max;

B
bellard 已提交
865
typedef struct TCGTargetOpDef {
866
    TCGOpcode op;
B
bellard 已提交
867 868 869 870 871 872 873 874 875 876 877
    const char *args_ct_str[TCG_MAX_OP_ARGS];
} TCGTargetOpDef;

#define tcg_abort() \
do {\
    fprintf(stderr, "%s:%d: tcg fatal error\n", __FILE__, __LINE__);\
    abort();\
} while (0)

void tcg_add_target_add_op_defs(const TCGTargetOpDef *tdefs);

/* Map the pointer-sized TCGv_ptr operations onto the 32-bit or 64-bit
   integer variants, depending on the host pointer width.  */
#if UINTPTR_MAX == UINT32_MAX
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I32(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I32(GET_TCGV_PTR(n))

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i32((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
    TCGV_NAT_TO_PTR(tcg_global_reg_new_i32((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i32((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i32())
#define tcg_temp_free_ptr(T) tcg_temp_free_i32(TCGV_PTR_TO_NAT(T))
#else
#define TCGV_NAT_TO_PTR(n) MAKE_TCGV_PTR(GET_TCGV_I64(n))
#define TCGV_PTR_TO_NAT(n) MAKE_TCGV_I64(GET_TCGV_PTR(n))

#define tcg_const_ptr(V) TCGV_NAT_TO_PTR(tcg_const_i64((intptr_t)(V)))
#define tcg_global_reg_new_ptr(R, N) \
    TCGV_NAT_TO_PTR(tcg_global_reg_new_i64((R), (N)))
#define tcg_global_mem_new_ptr(R, O, N) \
    TCGV_NAT_TO_PTR(tcg_global_mem_new_i64((R), (O), (N)))
#define tcg_temp_new_ptr() TCGV_NAT_TO_PTR(tcg_temp_new_i64())
#define tcg_temp_free_ptr(T) tcg_temp_free_i64(TCGV_PTR_TO_NAT(T))
#endif

902 903
void tcg_gen_callN(TCGContext *s, void *func,
                   TCGArg ret, int nargs, TCGArg *args);
P
pbrook 已提交
904

905
void tcg_op_remove(TCGContext *s, TCGOp *op);
906 907 908
TCGOp *tcg_op_insert_before(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);

909
void tcg_optimize(TCGContext *s);
K
Kirill Batuzov 已提交
910

P
pbrook 已提交
911
/* only used for debugging purposes */
B
Blue Swirl 已提交
912
void tcg_dump_ops(TCGContext *s);
P
pbrook 已提交
913 914 915 916 917 918

TCGv_i32 tcg_const_i32(int32_t val);
TCGv_i64 tcg_const_i64(int64_t val);
TCGv_i32 tcg_const_local_i32(int32_t val);
TCGv_i64 tcg_const_local_i64(int64_t val);

TCGLabel *gen_new_label(void);

/**
 * label_arg
 * @l: label
 *
 * Encode a label for storage in the TCG opcode stream.
 */

static inline TCGArg label_arg(TCGLabel *l)
{
    return (uintptr_t)l;
}

/**
 * arg_label
 * @i: value
 *
 * The opposite of label_arg.  Retrieve a label from the
 * encoding of the TCG opcode stream.
 */

941
static inline TCGLabel *arg_label(TCGArg i)
942
{
943
    return (TCGLabel *)(uintptr_t)i;
944 945
}

/**
 * tcg_ptr_byte_diff
 * @a, @b: addresses to be differenced
 *
 * There are many places within the TCG backends where we need a byte
 * difference between two pointers.  While this can be accomplished
 * with local casting, it's easy to get wrong -- especially if one is
 * concerned with the signedness of the result.
 *
 * This version relies on GCC's void pointer arithmetic to get the
 * correct result.
 */

static inline ptrdiff_t tcg_ptr_byte_diff(void *a, void *b)
{
    return a - b;
}

/**
 * tcg_pcrel_diff
 * @s: the tcg context
 * @target: address of the target
 *
 * Compute the displacement from the current output position
 * (s->code_ptr) to @target, for emitting pc-relative operands.
 */

static inline ptrdiff_t tcg_pcrel_diff(TCGContext *s, void *target)
{
    void *cur = s->code_ptr;
    return tcg_ptr_byte_diff(target, cur);
}

/**
 * tcg_current_code_size
 * @s: the tcg context
 *
 * Number of bytes of host code emitted so far in this translation
 * block (distance from code_buf to code_ptr).  This is used to fill
 * in qemu's data structures for goto_tb.
 */

static inline size_t tcg_current_code_size(TCGContext *s)
{
    void *start = s->code_buf;
    return tcg_ptr_byte_diff(s->code_ptr, start);
}

991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028
/* Combine the TCGMemOp and mmu_idx parameters into a single value.  */
typedef uint32_t TCGMemOpIdx;

/**
 * make_memop_idx
 * @op: memory operation
 * @idx: mmu index
 *
 * Encode these values into a single parameter.
 */
static inline TCGMemOpIdx make_memop_idx(TCGMemOp op, unsigned idx)
{
    tcg_debug_assert(idx <= 15);
    return (op << 4) | idx;
}

/**
 * get_memop
 * @oi: combined op/idx parameter
 *
 * Recover the memory-operation half (the upper bits) of a combined value.
 */
static inline TCGMemOp get_memop(TCGMemOpIdx oi)
{
    return (TCGMemOp)(oi >> 4);
}

/**
 * get_mmuidx
 * @oi: combined op/idx parameter
 *
 * Recover the mmu index (the low four bits) of a combined value.
 */
static inline unsigned get_mmuidx(TCGMemOpIdx oi)
{
    return oi % 16u;
}

/**
 * tcg_qemu_tb_exec:
 * @env: pointer to CPUArchState for the CPU
 * @tb_ptr: address of generated code for the TB to execute
 *
 * Start executing code from a given translation block.
 * Where translation blocks have been linked, execution
 * may proceed from the given TB into successive ones.
 * Control eventually returns only when some action is needed
 * from the top-level loop: either control must pass to a TB
 * which has not yet been directly linked, or an asynchronous
 * event such as an interrupt needs handling.
 *
 * Return: The return value is the value passed to the corresponding
 * tcg_gen_exit_tb() at translation time of the last TB attempted to execute.
 * The value is either zero or a 4-byte aligned pointer to that TB combined
 * with additional information in its two least significant bits. The
 * additional information is encoded as follows:
 *  0, 1: the link between this TB and the next is via the specified
 *        TB index (0 or 1). That is, we left the TB via (the equivalent
 *        of) "goto_tb <index>". The main loop uses this to determine
 *        how to link the TB just executed to the next.
 *  2:    we are using instruction counting code generation, and we
 *        did not start executing this TB because the instruction counter
 *        would hit zero midway through it. In this case the pointer
 *        returned is the TB we were about to execute, and the caller must
 *        arrange to execute the remaining count of instructions.
 *  3:    we stopped because the CPU's exit_request flag was set
 *        (usually meaning that there is an interrupt that needs to be
 *        handled). The pointer returned is the TB we were about to execute
 *        when we noticed the pending exit request.
 *
 * If the bottom two bits indicate an exit-via-index then the CPU
 * state is correctly synchronised and ready for execution of the next
 * TB (and in particular the guest PC is the address to execute next).
 * Otherwise, we gave up on execution of this TB before it started, and
 * the caller must fix up the CPU state by calling the CPU's
 * synchronize_from_tb() method with the TB pointer we return (falling
 * back to calling the CPU's set_pc method with tb->pc if no
 * synchronize_from_tb() method exists).
 *
 * Note that TCG targets may use a different definition of tcg_qemu_tb_exec
 * to this default (which just calls the prologue.code emitted by
 * tcg_target_qemu_prologue()).
 */
#define TB_EXIT_MASK 3
#define TB_EXIT_IDX0 0
#define TB_EXIT_IDX1 1
#define TB_EXIT_ICOUNT_EXPIRED 2
#define TB_EXIT_REQUESTED 3

/* Default dispatcher: jump into the generated prologue unless the backend
   supplies its own tcg_qemu_tb_exec implementation.  */
#ifdef HAVE_TCG_QEMU_TB_EXEC
uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr);
#else
# define tcg_qemu_tb_exec(env, tb_ptr) \
    ((uintptr_t (*)(void *, void *))tcg_ctx.code_gen_prologue)(env, tb_ptr)
#endif

/* Register the generated-code buffer with the JIT debug interface.  */
void tcg_register_jit(void *buf, size_t buf_size);

/*
 * Memory helpers that will be used by TCG generated code.
 */
#ifdef CONFIG_SOFTMMU
/* Value zero-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
                           TCGMemOpIdx oi, uintptr_t retaddr);

1109 1110
/* Value sign-extended to tcg register size.  */
tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
1111
                                     TCGMemOpIdx oi, uintptr_t retaddr);
1112
tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
1113
                                    TCGMemOpIdx oi, uintptr_t retaddr);
1114
tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
1115
                                    TCGMemOpIdx oi, uintptr_t retaddr);
1116
tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
1117
                                    TCGMemOpIdx oi, uintptr_t retaddr);
1118
tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
1119
                                    TCGMemOpIdx oi, uintptr_t retaddr);
1120

R
Richard Henderson 已提交
1121
void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
1122
                        TCGMemOpIdx oi, uintptr_t retaddr);
1123
void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1124
                       TCGMemOpIdx oi, uintptr_t retaddr);
1125
void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1126
                       TCGMemOpIdx oi, uintptr_t retaddr);
1127
void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1128
                       TCGMemOpIdx oi, uintptr_t retaddr);
1129
void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
1130
                       TCGMemOpIdx oi, uintptr_t retaddr);
1131
void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
1132
                       TCGMemOpIdx oi, uintptr_t retaddr);
1133
void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
1134
                       TCGMemOpIdx oi, uintptr_t retaddr);
1135

/* NOTE(review): the _cmmu variants appear to be loads for the code
   (translation) path rather than guest data access -- confirm with callers. */
uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr);

/* Temporary aliases until backends are converted.  */
#ifdef TARGET_WORDS_BIGENDIAN
# define helper_ret_ldsw_mmu  helper_be_ldsw_mmu
# define helper_ret_lduw_mmu  helper_be_lduw_mmu
# define helper_ret_ldsl_mmu  helper_be_ldsl_mmu
# define helper_ret_ldul_mmu  helper_be_ldul_mmu
# define helper_ret_ldl_mmu   helper_be_ldul_mmu
# define helper_ret_ldq_mmu   helper_be_ldq_mmu
# define helper_ret_stw_mmu   helper_be_stw_mmu
# define helper_ret_stl_mmu   helper_be_stl_mmu
# define helper_ret_stq_mmu   helper_be_stq_mmu
# define helper_ret_ldw_cmmu  helper_be_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_be_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_be_ldq_cmmu
#else
# define helper_ret_ldsw_mmu  helper_le_ldsw_mmu
# define helper_ret_lduw_mmu  helper_le_lduw_mmu
# define helper_ret_ldsl_mmu  helper_le_ldsl_mmu
# define helper_ret_ldul_mmu  helper_le_ldul_mmu
# define helper_ret_ldl_mmu   helper_le_ldul_mmu
# define helper_ret_ldq_mmu   helper_le_ldq_mmu
# define helper_ret_stw_mmu   helper_le_stw_mmu
# define helper_ret_stl_mmu   helper_le_stl_mmu
# define helper_ret_stq_mmu   helper_le_stq_mmu
# define helper_ret_ldw_cmmu  helper_le_ldw_cmmu
# define helper_ret_ldl_cmmu  helper_le_ldl_cmmu
# define helper_ret_ldq_cmmu  helper_le_ldq_cmmu
#endif

/* Atomic compare-and-swap helpers, one per width and endianness.  */
uint32_t helper_atomic_cmpxchgb_mmu(CPUArchState *env, target_ulong addr,
                                    uint32_t cmpv, uint32_t newv,
                                    TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_le_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgw_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint32_t helper_atomic_cmpxchgl_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint32_t cmpv, uint32_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);
uint64_t helper_atomic_cmpxchgq_be_mmu(CPUArchState *env, target_ulong addr,
                                       uint64_t cmpv, uint64_t newv,
                                       TCGMemOpIdx oi, uintptr_t retaddr);

/* Declare one atomic read-modify-write helper for NAME at width/endian
   SUFFIX, operating on TYPE.  */
#define GEN_ATOMIC_HELPER(NAME, TYPE, SUFFIX)         \
TYPE helper_atomic_ ## NAME ## SUFFIX ## _mmu         \
    (CPUArchState *env, target_ulong addr, TYPE val,  \
     TCGMemOpIdx oi, uintptr_t retaddr);

/* Declare NAME's helpers for every supported width and endianness.  */
#define GEN_ATOMIC_HELPER_ALL(NAME)          \
    GEN_ATOMIC_HELPER(NAME, uint32_t, b)     \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_le)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_le)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, w_be)  \
    GEN_ATOMIC_HELPER(NAME, uint32_t, l_be)  \
    GEN_ATOMIC_HELPER(NAME, uint64_t, q_be)

GEN_ATOMIC_HELPER_ALL(fetch_add)
GEN_ATOMIC_HELPER_ALL(fetch_sub)
GEN_ATOMIC_HELPER_ALL(fetch_and)
GEN_ATOMIC_HELPER_ALL(fetch_or)
GEN_ATOMIC_HELPER_ALL(fetch_xor)

GEN_ATOMIC_HELPER_ALL(add_fetch)
GEN_ATOMIC_HELPER_ALL(sub_fetch)
GEN_ATOMIC_HELPER_ALL(and_fetch)
GEN_ATOMIC_HELPER_ALL(or_fetch)
GEN_ATOMIC_HELPER_ALL(xor_fetch)

GEN_ATOMIC_HELPER_ALL(xchg)

#undef GEN_ATOMIC_HELPER_ALL
#undef GEN_ATOMIC_HELPER

#endif /* CONFIG_SOFTMMU */

R
#ifdef CONFIG_ATOMIC128
#include "qemu/int128.h"

/* These aren't really a "proper" helpers because TCG cannot manage Int128.
   However, use the same format as the others, for use by the backends. */
Int128 helper_atomic_cmpxchgo_le_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_cmpxchgo_be_mmu(CPUArchState *env, target_ulong addr,
                                     Int128 cmpv, Int128 newv,
                                     TCGMemOpIdx oi, uintptr_t retaddr);

Int128 helper_atomic_ldo_le_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
Int128 helper_atomic_ldo_be_mmu(CPUArchState *env, target_ulong addr,
                                TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_le_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);
void helper_atomic_sto_be_mmu(CPUArchState *env, target_ulong addr, Int128 val,
                              TCGMemOpIdx oi, uintptr_t retaddr);

#endif /* CONFIG_ATOMIC128 */

#endif /* TCG_H */