/*
 *  Software MMU support
 *
 * Generate helpers used by TCG for qemu_ld/st ops and code load
 * functions.
 *
 * Included from target op helpers and exec.c.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#if DATA_SIZE == 8
#define SUFFIX q
#define LSUFFIX q
#define SDATA_TYPE  int64_t
#define DATA_TYPE  uint64_t
#elif DATA_SIZE == 4
#define SUFFIX l
#define LSUFFIX l
#define SDATA_TYPE  int32_t
#define DATA_TYPE  uint32_t
#elif DATA_SIZE == 2
#define SUFFIX w
#define LSUFFIX uw
#define SDATA_TYPE  int16_t
#define DATA_TYPE  uint16_t
#elif DATA_SIZE == 1
#define SUFFIX b
#define LSUFFIX ub
#define SDATA_TYPE  int8_t
#define DATA_TYPE  uint8_t
#else
#error unsupported data size
#endif
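
/* This header is a multiple-inclusion template: the including file is
   expected to define DATA_SIZE (1, 2, 4 or 8) and MMUSUFFIX before each
   #include.  A minimal sketch of one instantiation, assuming the
   conventional MMUSUFFIX of _mmu (the real instantiations live in the
   including translation unit, e.g. cputlb.c):

       #define MMUSUFFIX _mmu
       #define DATA_SIZE 4
       #include "softmmu_template.h"

   which generates helper_le_ldul_mmu(), helper_be_ldul_mmu(),
   helper_le_stl_mmu() and friends for 4-byte accesses.  */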


/* For the benefit of TCG generated code, we want to avoid the complication
   of ABI-specific return type promotion and always return a value extended
   to the register size of the host.  This is tcg_target_long, except in the
   case of a 32-bit host and 64-bit data, and for that we always have
   uint64_t.  Don't bother with this widened value for SOFTMMU_CODE_ACCESS.  */
#if defined(SOFTMMU_CODE_ACCESS) || DATA_SIZE == 8
# define WORD_TYPE  DATA_TYPE
# define USUFFIX    SUFFIX
#else
# define WORD_TYPE  tcg_target_ulong
# define USUFFIX    glue(u, SUFFIX)
# define SSUFFIX    glue(s, SUFFIX)
#endif
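
/* For example (illustrative): on a 64-bit host with DATA_SIZE == 2,
   WORD_TYPE is the 64-bit tcg_target_ulong, USUFFIX is uw and SSUFFIX
   is sw, so the unsigned load helper returns its 16-bit result
   zero-extended to the full host register width.  */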

#ifdef SOFTMMU_CODE_ACCESS
#define READ_ACCESS_TYPE MMU_INST_FETCH
#define ADDR_READ addr_code
#else
#define READ_ACCESS_TYPE MMU_DATA_LOAD
#define ADDR_READ addr_read
#endif

#if DATA_SIZE == 8
# define BSWAP(X)  bswap64(X)
#elif DATA_SIZE == 4
# define BSWAP(X)  bswap32(X)
#elif DATA_SIZE == 2
# define BSWAP(X)  bswap16(X)
#else
# define BSWAP(X)  (X)
#endif

#if DATA_SIZE == 1
# define helper_le_ld_name  glue(glue(helper_ret_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  helper_le_ld_name
# define helper_le_lds_name glue(glue(helper_ret_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name helper_le_lds_name
# define helper_le_st_name  glue(glue(helper_ret_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  helper_le_st_name
#else
# define helper_le_ld_name  glue(glue(helper_le_ld, USUFFIX), MMUSUFFIX)
# define helper_be_ld_name  glue(glue(helper_be_ld, USUFFIX), MMUSUFFIX)
# define helper_le_lds_name glue(glue(helper_le_ld, SSUFFIX), MMUSUFFIX)
# define helper_be_lds_name glue(glue(helper_be_ld, SSUFFIX), MMUSUFFIX)
# define helper_le_st_name  glue(glue(helper_le_st, SUFFIX), MMUSUFFIX)
# define helper_be_st_name  glue(glue(helper_be_st, SUFFIX), MMUSUFFIX)
#endif
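
/* With the conventional MMUSUFFIX of _mmu, the macros above expand to
   names such as helper_le_lduw_mmu and helper_be_ldsw_mmu; for
   DATA_SIZE == 1, where byte order is irrelevant, the single
   helper_ret_ldub_mmu serves both endiannesses.  */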

#ifndef SOFTMMU_CODE_ACCESS
static inline DATA_TYPE glue(io_read, SUFFIX)(CPUArchState *env,
                                              size_t mmu_idx, size_t index,
                                              target_ulong addr,
                                              uintptr_t retaddr)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, DATA_SIZE);
}
#endif

WORD_TYPE helper_le_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
        res = TGT_LE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_le_ld_name(env, addr1, oi, retaddr);
        res2 = helper_le_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Little-endian combine.  */
        res = (res1 >> shift) | (res2 << ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    res = glue(glue(ld, LSUFFIX), _p)((uint8_t *)haddr);
#else
    res = glue(glue(ld, LSUFFIX), _le_p)((uint8_t *)haddr);
#endif
    return res;
}
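
/* Worked example of the little-endian combine above (illustrative):
   with DATA_SIZE == 4 and addr % 4 == 1, addr1 selects the aligned word
   covering bytes addr - 1 .. addr + 2 and addr2 the word after it;
   shift is 8, so res1 >> 8 moves bytes addr .. addr + 2 into the low
   three byte positions and res2 << 24 contributes the final byte.
   helper_be_ld_name below mirrors this with the shift directions
   swapped.  */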

#if DATA_SIZE > 1
WORD_TYPE helper_be_ld_name(CPUArchState *env, target_ulong addr,
                            TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;
    DATA_TYPE res;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
         != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(ADDR_READ, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, READ_ACCESS_TYPE,
                     mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].ADDR_READ;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always read data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        res = glue(io_read, SUFFIX)(env, mmu_idx, index, addr, retaddr);
        res = TGT_BE(res);
        return res;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                    >= TARGET_PAGE_SIZE)) {
        target_ulong addr1, addr2;
        DATA_TYPE res1, res2;
        unsigned shift;
    do_unaligned_access:
        addr1 = addr & ~(DATA_SIZE - 1);
        addr2 = addr1 + DATA_SIZE;
        res1 = helper_be_ld_name(env, addr1, oi, retaddr);
        res2 = helper_be_ld_name(env, addr2, oi, retaddr);
        shift = (addr & (DATA_SIZE - 1)) * 8;

        /* Big-endian combine.  */
        res = (res1 << shift) | (res2 >> ((DATA_SIZE * 8) - shift));
        return res;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    res = glue(glue(ld, LSUFFIX), _be_p)((uint8_t *)haddr);
    return res;
}
#endif /* DATA_SIZE > 1 */

#ifndef SOFTMMU_CODE_ACCESS

/* Provide signed versions of the load routines as well.  We can of course
   avoid this for 64-bit data, or for 32-bit data on 32-bit host.  */
#if DATA_SIZE * 8 < TCG_TARGET_REG_BITS
WORD_TYPE helper_le_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_le_ld_name(env, addr, oi, retaddr);
}

# if DATA_SIZE > 1
WORD_TYPE helper_be_lds_name(CPUArchState *env, target_ulong addr,
                             TCGMemOpIdx oi, uintptr_t retaddr)
{
    return (SDATA_TYPE)helper_be_ld_name(env, addr, oi, retaddr);
}
# endif
#endif
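
/* Illustrative: with DATA_SIZE == 2 on a 64-bit host, the signed helper
   generated here is helper_le_ldsw_mmu; the (SDATA_TYPE) cast narrows
   the result to int16_t, and the implicit conversion back to WORD_TYPE
   sign-extends it across the full return register.  */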

static inline void glue(io_write, SUFFIX)(CPUArchState *env,
                                          size_t mmu_idx, size_t index,
                                          DATA_TYPE val,
                                          target_ulong addr,
                                          uintptr_t retaddr)
{
    CPUIOTLBEntry *iotlbentry = &env->iotlb[mmu_idx][index];
    return io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, DATA_SIZE);
}

void helper_le_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_LE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code in Windows 64-bit.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Little-endian extract.  */
            uint8_t val8 = val >> (i * 8);
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
#if DATA_SIZE == 1
    glue(glue(st, SUFFIX), _p)((uint8_t *)haddr, val);
#else
    glue(glue(st, SUFFIX), _le_p)((uint8_t *)haddr, val);
#endif
}
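
/* Worked example of the little-endian extract loop above (illustrative):
   storing val == 0x11223344 with DATA_SIZE == 4 issues four byte stores
   of 0x44, 0x33, 0x22 and 0x11 at addr, addr + 1, addr + 2 and addr + 3,
   i.e. exactly the little-endian memory image of val.  The big-endian
   variant below extracts starting from the most significant byte.  */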

#if DATA_SIZE > 1
void helper_be_st_name(CPUArchState *env, target_ulong addr, DATA_TYPE val,
                       TCGMemOpIdx oi, uintptr_t retaddr)
{
    unsigned mmu_idx = get_mmuidx(oi);
    int index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    target_ulong tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    unsigned a_bits = get_alignment_bits(get_memop(oi));
    uintptr_t haddr;

    if (addr & ((1 << a_bits) - 1)) {
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* If the TLB entry is for a different page, reload and try again.  */
    if ((addr & TARGET_PAGE_MASK)
        != (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, MMU_DATA_STORE, mmu_idx, retaddr);
        }
        tlb_addr = env->tlb_table[mmu_idx][index].addr_write & ~TLB_INVALID_MASK;
    }

    /* Handle an IO access.  */
    if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
        if ((addr & (DATA_SIZE - 1)) != 0) {
            goto do_unaligned_access;
        }

        /* ??? Note that the io helpers always write data in the target
           byte ordering.  We should push the LE/BE request down into io.  */
        val = TGT_BE(val);
        glue(io_write, SUFFIX)(env, mmu_idx, index, val, addr, retaddr);
        return;
    }

    /* Handle slow unaligned access (it spans two pages or IO).  */
    if (DATA_SIZE > 1
        && unlikely((addr & ~TARGET_PAGE_MASK) + DATA_SIZE - 1
                     >= TARGET_PAGE_SIZE)) {
        int i, index2;
        target_ulong page2, tlb_addr2;
    do_unaligned_access:
        /* Ensure the second page is in the TLB.  Note that the first page
           is already guaranteed to be filled, and that the second page
           cannot evict the first.  */
        page2 = (addr + DATA_SIZE) & TARGET_PAGE_MASK;
        index2 = (page2 >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
        tlb_addr2 = env->tlb_table[mmu_idx][index2].addr_write;
        if (page2 != (tlb_addr2 & (TARGET_PAGE_MASK | TLB_INVALID_MASK))
            && !VICTIM_TLB_HIT(addr_write, page2)) {
            tlb_fill(ENV_GET_CPU(env), page2, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }

        /* XXX: not efficient, but simple.  */
        /* This loop must go in the forward direction to avoid issues
           with self-modifying code.  */
        for (i = 0; i < DATA_SIZE; ++i) {
            /* Big-endian extract.  */
            uint8_t val8 = val >> (((DATA_SIZE - 1) * 8) - (i * 8));
            glue(helper_ret_stb, MMUSUFFIX)(env, addr + i, val8,
                                            oi, retaddr);
        }
        return;
    }

    haddr = addr + env->tlb_table[mmu_idx][index].addend;
    glue(glue(st, SUFFIX), _be_p)((uint8_t *)haddr, val);
}
#endif /* DATA_SIZE > 1 */
#endif /* !defined(SOFTMMU_CODE_ACCESS) */

#undef READ_ACCESS_TYPE
#undef DATA_TYPE
#undef SUFFIX
#undef LSUFFIX
#undef DATA_SIZE
#undef ADDR_READ
#undef WORD_TYPE
#undef SDATA_TYPE
#undef USUFFIX
#undef SSUFFIX
#undef BSWAP
#undef helper_le_ld_name
#undef helper_be_ld_name
#undef helper_le_lds_name
#undef helper_be_lds_name
#undef helper_le_st_name
#undef helper_be_st_name