/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT ((uint32_t)0x80000000)
#define SIGNBIT64 ((uint64_t)1 << 63)

static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
                                    uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;

    return cs;
}

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit_restore(cs, ra);
}

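/*
 * Helper for the Neon VTBL/VTBX table lookup: each byte of ireg
 * selects one byte of the table pointed to by vn, and indexes of
 * maxindex or above take the corresponding byte of def instead.
 * For example, with maxindex = 8 (a one-register table) and
 * ireg = 0x00010203, the result holds table bytes 3,2,1,0 in byte
 * lanes 0-3, i.e. the low word of the table byte-reversed.
 */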
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
                          uint32_t maxindex)
{
    uint32_t val, shift;
    uint64_t *table = vn;

    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        uint32_t index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = env_cpu(env);

        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use cpu_restore_state() to
         * get them right before raising the exception.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}

uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
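        /* Saturate: 0x7fffffff if a was non-negative, else 0x80000000. */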
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
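/*
 * do_ssat() saturates val to the range of a (shift + 1)-bit signed
 * integer and sets QF on overflow: e.g. do_ssat(env, 300, 7) returns
 * 127 (the 8-bit signed maximum) and sets QF.
 */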
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
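/*
 * do_usat() saturates val to the unsigned range [0, (1 << shift) - 1]
 * and sets QF on overflow: e.g. do_usat(env, -1, 8) returns 0 and
 * sets QF.
 */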
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
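/* Each halfword of x is saturated independently; QF accumulates the
 * saturation status of both halves.
 */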
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
    arm_rebuild_hflags(env);
}

/* Check whether WFx (WFI/WFE) instructions are set up to be trapped.
 * Returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise returns 0, indicating that it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist, the
     * bits will be zero, indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = is_wfe ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
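        /*
         * Wind the PC back so that the exception is taken with the
         * WFI insn itself as the preferred return address; the
         * translator has already advanced the PC past the insn.
         */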
        if (env->aarch64) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }

        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
    /* TODO: Not all cpsr bits are relevant to hflags.  */
    arm_rebuild_hflags(env);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
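    /* The EL change hooks may touch device state, so they must run
     * with the iothread (big QEMU) lock held.
     */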
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);
    arm_rebuild_hflags(env);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    /*
     * Check for an EL2 trap due to HSTR_EL2. We expect EL0 accesses
     * to sysregs that are not accessible at EL0 to have UNDEF-ed already.
     */
    if (!is_a64(env) && arm_current_el(env) < 2 && ri->cp == 15 &&
        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
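        /*
         * HSTR_EL2.Tn traps CP15 accesses to the register group with
         * CRn == n (CRm == n for 64-bit MCRR/MRRC accesses).
         */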
        uint32_t mask = 1 << ri->crn;

        if (ri->type & ARM_CP_64BIT) {
            mask = 1 << ri->crm;
        }

        /* T4 and T14 are RES0 */
        mask &= ~((1 << 4) | (1 << 14));

        if (env->cp15.hstr_el2 & mask) {
            target_el = 2;
            goto except;
        }
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

except:
    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     *  extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     *    - PSCI conduit is SMC but we don't have a valid PSCI call,
     *    - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

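/*
 * For example, shl_cc(env, 0x80000001, 1) returns 0x00000002 with
 * CF = 1: bit 31 is the last bit shifted out, matching the flag
 * behaviour of LSLS with a register-specified shift count.
 */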
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
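    /*
     * A rotate amount that is a non-zero multiple of 32 leaves x
     * unchanged but still sets CF to bit 31.
     */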
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /*
     * Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = env_archcpu(env);
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /*
         * Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable. So in practice the hostaddr[] array has
         * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        assert(maxidx <= ARRAY_SIZE(hostaddr));

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /*
                 * If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /*
             * OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /*
         * Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}