op_helper.c 27.5 KB
Newer Older
B
bellard 已提交
1 2
/*
 *  ARM helper routines
3
 *
P
pbrook 已提交
4
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
B
bellard 已提交
5 6 7 8 9 10 11 12 13 14 15 16
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
17
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
B
bellard 已提交
18
 */
B
Blue Swirl 已提交
19
#include "cpu.h"
20
#include "exec/helper-proto.h"
21
#include "internals.h"
P
Paolo Bonzini 已提交
22
#include "exec/cpu_ldst.h"
B
bellard 已提交
23

P
pbrook 已提交
24 25 26
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

27 28
/* Raise an architectural (guest-visible) exception: record the syndrome
 * and target EL in the CPU state, then longjmp back to the main loop.
 * Does not return.
 */
static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    /* Internal-to-QEMU exceptions must go via HELPER(exception_internal);
     * they carry no syndrome/target-EL information.
     */
    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

39 40 41 42 43 44 45 46 47 48 49 50 51 52
/* Return the exception level that an exception raised from the current
 * state should be taken to (at least EL1; never EL0).
 */
static int exception_target_el(CPUARMState *env)
{
    int el = arm_current_el(env);
    int target = (el > 1) ? el : 1;

    /* There is no Secure EL1 when EL3 is AArch32, so such exceptions
     * must be routed to EL3 instead.
     */
    if (target == 1 && arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
        target = 3;
    }
    return target;
}

53
/* Neon VTBL/VTBX byte table lookup: for each byte of ireg, fetch the
 * indexed byte from the table registers starting at rn, or take the
 * corresponding byte of def when the index is out of range.
 */
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint64_t *table = (uint64_t *)&env->vfp.regs[rn];
    uint32_t result = 0;
    int shift;

    for (shift = 0; shift < 32; shift += 8) {
        int index = (ireg >> shift) & 0xff;

        if (index < maxindex) {
            /* Select byte (index & 7) of 64-bit table word (index >> 3). */
            uint32_t byte = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            result |= byte << shift;
        } else {
            /* Out-of-range index: keep the default byte. */
            result |= def & (0xff << shift);
        }
    }
    return result;
}

B
bellard 已提交
75 76 77
#if !defined(CONFIG_USER_ONLY)

/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 *
 * is_write: 0 = data read, 1 = data write, 2 = instruction fetch
 * (as seen from the is_write == 2 / is_write == 1 checks below).
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;

    ret = arm_tlb_fill(cs, addr, is_write, mmu_idx, &fsr);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        uint32_t syn, exc;
        /* NOTE(review): same_el is "fault not taken from EL0" here; it is
         * passed straight into the syndrome builders — confirm this matches
         * their expectations for faults taken to EL1.
         */
        bool same_el = (arm_current_el(env) != 0);

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        /* AArch64 syndrome does not have an LPAE bit */
        syn = fsr & ~(1 << 9);

        /* For insn and data aborts we assume there is no instruction syndrome
         * information; this is always true for exceptions reported to EL1.
         */
        if (is_write == 2) {
            syn = syn_insn_abort(same_el, 0, 0, syn);
            exc = EXCP_PREFETCH_ABORT;
        } else {
            syn = syn_data_abort(same_el, 0, 0, 0, is_write == 1, syn);
            if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
                /* Presumably the WnR bit of the short-format FSR on v6+
                 * — TODO confirm against the FSR encoding.
                 */
                fsr |= (1 << 11);
            }
            exc = EXCP_DATA_ABORT;
        }

        env->exception.vaddress = addr;
        env->exception.fsr = fsr;
        /* Does not return. */
        raise_exception(env, exc, syn, exception_target_el(env));
    }
}
#endif
P
pbrook 已提交
122

123
/* Add two 32-bit values; set the sticky Q flag on signed overflow,
 * but return the wrapped result unchanged.
 */
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t sum = a + b;

    /* Overflow iff operands agree in sign and the result differs. */
    if (!((a ^ b) & SIGNBIT) && ((sum ^ a) & SIGNBIT)) {
        env->QF = 1;
    }
    return sum;
}

131
/* Signed saturating 32-bit addition: on overflow, set Q and clamp to
 * INT32_MAX or INT32_MIN depending on the sign of the first operand.
 */
uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t sum = a + b;

    if (!((a ^ b) & SIGNBIT) && ((sum ^ a) & SIGNBIT)) {
        env->QF = 1;
        /* 0x7fffffff if a was non-negative, 0x80000000 otherwise. */
        sum = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return sum;
}

141
/* Signed saturating 32-bit subtraction: on overflow, set Q and clamp to
 * INT32_MAX or INT32_MIN depending on the sign of the first operand.
 */
uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t diff = a - b;

    /* Overflow iff operands differ in sign and the result's sign
     * differs from the minuend's.
     */
    if (((a ^ b) & SIGNBIT) && ((diff ^ a) & SIGNBIT)) {
        env->QF = 1;
        diff = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return diff;
}

151
/* Saturating doubling of a signed 32-bit value (used by QDADD/QDSUB):
 * clamp to INT32_MAX/INT32_MIN and set Q when 2*val does not fit.
 */
uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    if (val >= 0x40000000) {
        env->QF = 1;
        return ~SIGNBIT;                /* 0x7fffffff */
    }
    if (val <= (int32_t)0xc0000000) {
        env->QF = 1;
        return SIGNBIT;                 /* 0x80000000 */
    }
    return val << 1;
}

166
/* Unsigned saturating 32-bit addition: clamp to all-ones and set Q
 * when the sum wraps around.
 */
uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t sum = a + b;

    if (sum < a) {              /* unsigned wrap-around occurred */
        env->QF = 1;
        sum = ~0;
    }
    return sum;
}

176
/* Unsigned saturating 32-bit subtraction: clamp to zero and set Q
 * when a borrow occurs.
 */
uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t diff = a - b;

    if (diff > a) {             /* borrow occurred */
        env->QF = 1;
        diff = 0;
    }
    return diff;
}

P
pbrook 已提交
186
/* Signed saturation.  */
187
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
P
pbrook 已提交
188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
205
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
P
pbrook 已提交
206 207 208 209 210 211 212 213 214 215 216 217 218 219 220
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  Thin helper wrapper around do_ssat for the SSAT
 * instruction; may set the Q flag.
 */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
227
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
P
pbrook 已提交
228 229 230
{
    uint32_t res;

231 232
    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
P
pbrook 已提交
233 234 235 236
    return res;
}

/* Unsigned saturate.  Thin helper wrapper around do_usat for the USAT
 * instruction; may set the Q flag.
 */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
243
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
P
pbrook 已提交
244 245 246
{
    uint32_t res;

247 248
    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
P
pbrook 已提交
249 250
    return res;
}
P
pbrook 已提交
251

252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301
/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        /* nTWE/nTWI are "no trap" bits: a clear bit means trap. */
        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

B
Blue Swirl 已提交
302
/* WFI (wait for interrupt): either trap to a higher EL if so configured,
 * or halt the CPU until an interrupt arrives.  Does not return when it
 * halts or traps.
 */
void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         *
         * NOTE(review): this early return also skips a configured WFx
         * trap when there is pending work — confirm that giving
         * "has work" priority over the trap is the intended behaviour.
         */
        return;
    }

    if (target_el) {
        /* Rewind the PC so the trapped WFI itself is the preferred
         * return address (presumably the PC was already advanced past
         * the insn at translate time — TODO confirm).
         */
        env->pc -= 4;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

324 325
/* WFE (wait for event): currently implemented as a plain yield. */
void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

/* YIELD hint: drop back to the top-level loop so another VCPU can run. */
void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365
/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).  Does not return.
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value.
 * Thin helper wrapper around raise_exception; does not return.
 */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

371
/* Read the CPSR with the execution-state and reserved bits masked out. */
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

B
Blue Swirl 已提交
376
/* Write the CPSR bits selected by mask; delegates to cpsr_write(). */
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}
P
pbrook 已提交
380 381

/* Access to user mode registers from privileged modes.  */
382
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
P
pbrook 已提交
383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

B
Blue Swirl 已提交
399
/* Write the User-mode view of register regno (for STM(user) etc.);
 * mirror image of get_user_reg.
 */
void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[0] = val;
        return;
    }
    if (regno == 14) {
        env->banked_r14[0] = val;
        return;
    }
    if (regno >= 8 && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
        return;
    }
    env->regs[regno] = val;
}
412

413
/* Run the runtime access checks for a coprocessor/system register and
 * raise the appropriate exception if access is denied.  Returns normally
 * only when the access is allowed.
 */
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    /* XScale CPAR gates access to coprocessors 0..13. */
    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    /* No accessfn means the register has no runtime checks. */
    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        /* "Uncategorized" traps replace the syndrome with the generic
         * unknown-reason encoding.
         */
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

462 463 464
/* Write a 32-bit coprocessor register via its registered writefn. */
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

/* Read a 32-bit coprocessor register via its registered readfn. */
uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

/* Write a 64-bit coprocessor register via its registered writefn. */
void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

/* Read a 64-bit coprocessor register via its registered readfn. */
uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}
P
pbrook 已提交
489

490 491 492 493 494 495
/* Handle MSR (immediate) writes to PSTATE fields (SPSel/DAIFSet/DAIFClear). */
void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        /* Does not return. */
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        /* imm holds the DAIF bits at bit 0; shift into PSTATE position. */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

518 519 520 521 522
/* Clear the PSTATE software-step (SS) bit. */
void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

523 524
/* Pre-execution checks for the HVC instruction: UNDEF it when EL2 is
 * unavailable or disabled, unless the call is claimed by PSCI.
 * Returns normally when HVC should proceed.
 */
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        /* Does not return. */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

563 564
/* Pre-execution checks for the SMC instruction: route to EL2 when HCR
 * traps it, UNDEF when EL3 is absent or SMD disables it, unless the
 * call is claimed by PSCI.  Returns normally when SMC should proceed.
 */
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 AArch32, SMD only applies to NS state.
     * On ARMv7 SMD only applies to NS state and only if EL2 is available.
     * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check
     * the EL2 condition here.
     */
    bool undef = is_a64(env) ? smd : (!secure && smd);

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * raise_exception does not return.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    if (undef) {
        /* Does not return. */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

597 598
/* Implement the AArch64 ERET instruction: restore PSTATE/CPSR and the PC
 * from the current EL's SPSR/ELR, switching execution state and stack
 * pointer as required.  Illegal returns set PSTATE.IL and stay at the
 * current EL.
 */
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;

    /* Save the current SP into its banked slot before switching ELs. */
    aarch64_save_sp(env, cur_el);

    /* ERET clears any exclusive monitor. */
    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    if (spsr & PSTATE_nRW) {
        /* TODO: We currently assume EL1/2/3 are running in AArch64.  */
        /* Returning to AArch32. */
        env->aarch64 = 0;
        new_el = 0;
        env->uncached_cpsr = 0x10;   /* User mode */
        cpsr_write(env, spsr, ~0);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        /* NOTE(review): ELR is taken from elr_el[1] here, consistent with
         * the TODO above assuming the return comes from EL1 — confirm.
         */
        env->regs[15] = env->elr_el[1] & ~0x1;
    } else {
        /* Returning to AArch64; M[3:2] of the SPSR encodes the target EL. */
        new_el = extract32(spsr, 2, 2);
        if (new_el > cur_el
            || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
            /* Disallow return to an EL which is unimplemented or higher
             * than the current one.
             */
            goto illegal_return;
        }
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            goto illegal_return;
        }
        if (new_el == 0 && (spsr & PSTATE_SP)) {
            /* Return to EL0 with M[0] bit set */
            goto illegal_return;
        }
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}

677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709
/* Return true if the linked breakpoint entry lbn passes its checks.
 *
 * Fixes over the previous version: dbgbcr[lbn] was loaded twice (once in
 * the initializer and again redundantly below), and an unreachable
 * "return false;" followed a switch in which every path returns.
 */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }
}

732
/* Common match logic for breakpoint and watchpoint debug events.
 * Returns true if slot n (a watchpoint when is_wp, else a breakpoint)
 * should fire given the current CPU state.
 */
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    /* Security-state check: SSC selects which security state may fire. */
    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    /* Privilege check: HMC gates EL2/EL3, PAC bits gate EL1/EL0. */
    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    /* Linked break/watchpoints must also have their linked entry match. */
    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

/* Return true if any architectural watchpoint slot should fire. */
static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int slot;

    /* Watchpoint firings are ignored if debug is globally disabled
     * (MDSCR_EL1 bit 15 clear) or debug exceptions cannot currently
     * be generated.
     */
    if (!extract32(env->cp15.mdscr_el1, 15, 1)
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (slot = 0; slot < ARRAY_SIZE(env->cpu_watchpoint); slot++) {
        if (bp_wp_matches(cpu, slot, true)) {
            return true;
        }
    }
    return false;
}

/* Return true if any architectural breakpoint slot should fire. */
static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int slot;

    /* Breakpoint firings are ignored if debug is globally disabled
     * (MDSCR_EL1 bit 15 clear) or debug exceptions cannot currently
     * be generated.
     */
    if (!extract32(env->cp15.mdscr_el1, 15, 1)
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (slot = 0; slot < ARRAY_SIZE(env->cpu_breakpoint); slot++) {
        if (bp_wp_matches(cpu, slot, false)) {
            return true;
        }
    }
    return false;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_watchpoints(cpu)) {
                bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
                bool same_el = arm_debug_target_el(env) == arm_current_el(env);

                /* FSR value: presumably the LPAE long-format debug fault
                 * status (bit 9 set, code 0x22) vs short-format 0x2 —
                 * TODO confirm against the FSR encodings.
                 */
                if (extended_addresses_enabled(env)) {
                    env->exception.fsr = (1 << 9) | 0x22;
                } else {
                    env->exception.fsr = 0x2;
                }
                env->exception.vaddress = wp_hit->hitaddr;
                /* Does not return. */
                raise_exception(env, EXCP_DATA_ABORT,
                                syn_watchpoint(same_el, 0, wnr),
                                arm_debug_target_el(env));
            } else {
                /* Watchpoint did not pass the architectural checks:
                 * resume execution as if nothing happened.
                 */
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        if (check_breakpoints(cpu)) {
            bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            /* FAR is UNKNOWN, so doesn't need setting */
            /* Does not return. */
            raise_exception(env, EXCP_PREFETCH_ABORT,
                            syn_breakpoint(same_el),
                            arm_debug_target_el(env));
        }
    }
}

P
pbrook 已提交
915 916 917 918 919 920
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

921
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
P
pbrook 已提交
922 923 924 925 926 927 928 929 930 931 932 933 934 935 936
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

937
/* Logical shift right by a variable amount, updating the carry flag
 * (CF unchanged when the shift amount is zero).
 */
uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;

    if (shift == 0) {
        return x;
    }
    if (shift < 32) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    /* Shifts of 32 or more produce zero; only a shift of exactly 32
     * moves a real bit (bit 31) into the carry.
     */
    env->CF = (shift == 32) ? ((x >> 31) & 1) : 0;
    return 0;
}

953
/* Arithmetic shift right by a variable amount, updating the carry flag
 * (CF unchanged when the shift amount is zero).
 */
uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;

    if (shift == 0) {
        return x;
    }
    if (shift < 32) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    /* Shifts >= 32 replicate the sign bit into result and carry. */
    env->CF = (x >> 31) & 1;
    return (int32_t)x >> 31;
}

966
/* Rotate right by a variable amount, updating the carry flag
 * (CF unchanged only when the rotate amount is zero).
 */
uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int count = i & 0xff;
    int shift = count & 0x1f;

    if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (x >> shift) | (x << (32 - shift));
    }
    /* A multiple of 32 rotates back to the original value, but a
     * non-zero count still copies bit 31 into the carry flag.
     */
    if (count != 0) {
        env->CF = (x >> 31) & 1;
    }
    return x;
}