/*
 * defines common to all virtual CPUs
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"
#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "qemu/thread.h"
#include "qom/cpu.h"
#include "qemu/rcu.h"

#define EXCP_INTERRUPT  0x10000 /* async interruption */
#define EXCP_HLT        0x10001 /* hlt instruction reached */
#define EXCP_DEBUG      0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED     0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD      0x10004 /* cpu wants to yield timeslice to another */
#define EXCP_ATOMIC     0x10005 /* stop-the-world and emulate atomic */

/* some important defines:
 *
 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian;
 * otherwise it is little endian.
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
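
/* Illustrative sketch (not part of this header's API): tswap32() converts a
 * value between host byte order and the guest's byte order, and compiles to
 * a plain copy when the two match.  The structure and helper below are
 * hypothetical, showing the usual pattern of fixing up a guest-endian field
 * read from guest memory.
 */
#if 0
struct example_guest_header {
    uint32_t magic;   /* stored in guest byte order */
    uint32_t entry;   /* stored in guest byte order */
};

static inline uint32_t example_read_magic(const struct example_guest_header *hdr)
{
    /* tswap32() yields the host-endian value; it is a no-op when host and
     * guest endianness agree. */
    return tswap32(hdr->magic);
}
#endif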

/* Target-endianness CPU memory access functions. These fit into the
 * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
 */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#define ldn_p(p, sz) ldn_be_p(p, sz)
#define stn_p(p, sz, v) stn_be_p(p, sz, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#define ldn_p(p, sz) ldn_le_p(p, sz)
#define stn_p(p, sz, v) stn_le_p(p, sz, v)
#endif
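
/* Illustrative sketch (not part of this header's API): the *_p accessors
 * read and write target-endian values through a host pointer, so guest
 * visible structures can be filled in without open-coding byte swaps.
 * The buffer layout and values below are made up for the example.
 */
#if 0
static inline uint32_t example_fill_descriptor(uint8_t *buf)
{
    stw_p(buf + 0, 0x1234);        /* 16-bit field, target byte order */
    stl_p(buf + 2, 0xdeadbeef);    /* 32-bit field, target byte order */
    return ldl_p(buf + 2);         /* reads back 0xdeadbeef on any host */
}
#endif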

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include "exec/user/abitypes.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
extern unsigned long guest_base;
extern int have_guest_base;
extern unsigned long reserved_va;

#if HOST_LONG_BITS <= TARGET_VIRT_ADDR_SPACE_BITS
#define GUEST_ADDR_MAX (~0ul)
#else
#define GUEST_ADDR_MAX (reserved_va ? reserved_va - 1 : \
                                    (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
#endif
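
/* Illustrative sketch (not part of this header's API): in user-mode
 * emulation a guest virtual address is turned into a host pointer by adding
 * guest_base (the real helpers for this live in "exec/cpu_ldst.h").  The
 * function below is hypothetical and only shows the intended arithmetic,
 * including the GUEST_ADDR_MAX bound.
 */
#if 0
static inline void *example_guest_to_host(unsigned long guest_addr)
{
    assert(guest_addr <= GUEST_ADDR_MAX);
    return (void *)(guest_addr + guest_base);
}
#endif
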
#else

#include "exec/hwaddr.h"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.inc.h"

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.inc.h"

static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.inc.h"

/* Inline fast path for direct RAM access.  */
#define ENDIANNESS
#include "exec/memory_ldst_cached.inc.h"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.inc.h"
#endif

/* page related stuff */

#ifdef TARGET_PAGE_BITS_VARY
extern bool target_page_bits_decided;
extern int target_page_bits;
#define TARGET_PAGE_BITS ({ assert(target_page_bits_decided); \
                            target_page_bits; })
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#endif

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
 * when intptr_t is 32-bit and we are aligning a long long.
 */
extern uintptr_t qemu_host_page_size;
extern intptr_t qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
#define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
                                    qemu_real_host_page_mask)
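
/* Worked example (illustrative only), assuming TARGET_PAGE_BITS == 12,
 * i.e. 4 KiB target pages: masking with TARGET_PAGE_MASK rounds an address
 * down to the start of its page, while TARGET_PAGE_ALIGN rounds it up to
 * the next page boundary (and is a no-op on aligned addresses).
 */
#if 0
static inline void example_page_align(void)
{
    assert((0x12345 & TARGET_PAGE_MASK) == 0x12000); /* round down */
    assert(TARGET_PAGE_ALIGN(0x12345) == 0x13000);   /* round up */
    assert(TARGET_PAGE_ALIGN(0x12000) == 0x12000);   /* already aligned */
}
#endif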

/* same as PROT_xxx */
#define PAGE_READ      0x0001
#define PAGE_WRITE     0x0002
#define PAGE_EXEC      0x0004
#define PAGE_BITS      (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID     0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
/* Invalidate the TLB entry immediately, helpful for s390x
 * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */
#define PAGE_WRITE_INV 0x0040
#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
/* FIXME: Code that sets/uses this is broken and needs to go away.  */
#define PAGE_RESERVED  0x0020
#endif

#if defined(CONFIG_USER_ONLY)
void page_dump(FILE *f);

typedef int (*walk_memory_regions_fn)(void *, target_ulong,
                                      target_ulong, unsigned long);
int walk_memory_regions(void *, walk_memory_regions_fn);

int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);
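
/* Illustrative sketch (not part of this header's API): user-only code
 * typically validates a guest range with page_check_range() before touching
 * it, e.g. when a syscall needs to write into guest memory.  The wrapper
 * below is hypothetical; page_check_range() returns 0 when every page in
 * the range has the requested protection flags.
 */
#if 0
static inline bool example_guest_range_writable(target_ulong start,
                                                target_ulong len)
{
    return page_check_range(start, len, PAGE_WRITE) == 0;
}
#endif
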
#endif

CPUArchState *cpu_copy(CPUArchState *env);

/* Flags for use in ENV->INTERRUPT_PENDING.

   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */

/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping.  E.g. the a20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Reset signal.  */
#define CPU_INTERRUPT_RESET       0x0400

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0800
#define CPU_INTERRUPT_TGT_INT_2   0x2000

/* First unused bit: 0x4000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
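
/* Illustrative sketch (not part of this header's API): while the debugger
 * is single-stepping, external interrupts in CPU_INTERRUPT_SSTEP_MASK are
 * held off so only instruction-triggered (internal) interrupts are
 * delivered.  The helper below is hypothetical; the real masking happens in
 * the cpu_exec() interrupt-handling path.
 */
#if 0
static inline uint32_t example_deliverable_interrupts(uint32_t interrupt_request,
                                                      bool singlestepping)
{
    if (singlestepping) {
        interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
    }
    return interrupt_request;
}
#endif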

#if !defined(CONFIG_USER_ONLY)

/* Flags stored in the low bits of the TLB virtual address.  These are
 * defined so that fast path ram access is all zeros.
 * The flags all must be between TARGET_PAGE_BITS and
 * maximum address alignment bit.
 */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS - 1))
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS - 2))
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS - 3))

/* Use this mask to check interception with an alignment mask
 * in a TCG backend.
 */
#define TLB_FLAGS_MASK  (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)
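
/* Illustrative sketch (not part of this header's API): on the TCG fast path
 * a TLB comparator only matches when it equals the page-aligned address with
 * every flag bit clear; any bit from TLB_FLAGS_MASK left set in the entry
 * forces the access onto the slow path.  The helper below is hypothetical.
 */
#if 0
static inline bool example_tlb_fast_path_hit(target_ulong tlb_addr,
                                             target_ulong vaddr)
{
    /* Equal only if the pages match and no TLB_* flag bits are set. */
    return tlb_addr == (vaddr & TARGET_PAGE_MASK);
}
#endif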

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
#endif /* !CONFIG_USER_ONLY */

int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

int cpu_exec(CPUState *cpu);

#endif /* CPU_ALL_H */