/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

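/* Pend the given exception on the CPU and unwind back out to the main loop. */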
static void raise_exception(CPUARMState *env, int tt)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = tt;
    cpu_loop_exit(cs);
}

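/* Byte-wise table lookup as used by the NEON VTBL/VTBX instructions: each
 * byte of ireg indexes into the table of D registers starting at rn, and
 * out-of-range indices (>= maxindex) take the corresponding byte of def.
 */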
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;
    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

/* Try to fill the TLB and raise an exception on failure. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        raise_exception(env, cs->exception_index);
    }
}
#endif

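/* Saturating arithmetic helpers: each one sets the sticky Q flag (env->QF)
 * when the operation overflows or saturates.
 */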
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

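/* Wait-for-interrupt: halt this CPU and return control to the main loop. */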
void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    /* Don't actually halt the CPU, just yield back to top
     * level loop
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    cpu_loop_exit(cs);
}

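/* Read/write the AArch32 CPSR; the read masks out the execution state and
 * reserved bits.
 */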
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[0] = val;
    } else if (regno == 14) {
        env->banked_r14[0] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

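/* Check the access permissions for a coprocessor/system register access and
 * raise EXCP_UDEF with a suitable syndrome value if it is denied.
 */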
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
{
    const ARMCPRegInfo *ri = rip;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        env->exception.syndrome = syndrome;
        raise_exception(env, EXCP_UDEF);
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        env->exception.syndrome = syndrome;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        env->exception.syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }
    raise_exception(env, EXCP_UDEF);
}

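/* Thin wrappers that dispatch to a register's writefn/readfn hooks from
 * generated code.
 */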
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_pl(env) == 0 && !(env->cp15.c1_sys & SCTLR_UMA)) {
        raise_exception(env, EXCP_UDEF);
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    int cur_el = arm_current_pl(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    /* We've already checked that EL2 exists at translation time.
     * EL3.HCE has priority over EL2.HCD.
     */
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        env->exception.syndrome = syn_uncategorized();
        raise_exception(env, EXCP_UDEF);
    }
}

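/* Handle an exception return (ERET) executed in AArch64: restore PSTATE (or
 * the CPSR for a return to AArch32) from the banked SPSR and the PC from ELR.
 */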
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_pl(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el, i;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    if (spsr & PSTATE_nRW) {
        /* TODO: We currently assume EL1/2/3 are running in AArch64.  */
        env->aarch64 = 0;
        new_el = 0;
        env->uncached_cpsr = 0x10;
        cpsr_write(env, spsr, ~0);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        for (i = 0; i < 15; i++) {
            env->regs[i] = env->xregs[i];
        }

        env->regs[15] = env->elr_el[1] & ~0x1;
    } else {
        new_el = extract32(spsr, 2, 2);
        if (new_el > cur_el
            || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
            /* Disallow return to an EL which is unimplemented or higher
             * than the current one.
             */
            goto illegal_return;
        }
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            goto illegal_return;
        }
        if (new_el == 0 && (spsr & PSTATE_SP)) {
            /* Return to EL0 with M[0] bit set */
            goto illegal_return;
        }
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el1, 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_pl(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }

    return false;
}

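/* Return true if breakpoint/watchpoint n should fire in the current CPU
 * state; is_wp selects a watchpoint (true) or a breakpoint (false).
 */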
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* TODO: check against CPU security state when we implement TrustZone */
    bool is_secure = false;

    if (is_wp) {
        if (!env->cpu_watchpoint[n]
            || !(env->cpu_watchpoint[n]->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    /* TODO: this is not strictly correct because the LDRT/STRT/LDT/STT
     * "unprivileged access" instructions should match watchpoints as if
     * they were accesses done at EL0, even if the CPU is at EL1 or higher.
     * Implementing this would require reworking the core watchpoint code
     * to plumb the mmu_idx through to this point. Luckily Linux does not
     * rely on this behaviour currently.
     * For breakpoints we do want to use the current CPU state.
     */
    switch (arm_current_pl(env)) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_watchpoints(cpu)) {
                bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
                bool same_el = arm_debug_target_el(env) == arm_current_pl(env);

                env->exception.syndrome = syn_watchpoint(same_el, 0, wnr);
                if (extended_addresses_enabled(env)) {
                    env->exception.fsr = (1 << 9) | 0x22;
                } else {
                    env->exception.fsr = 0x2;
                }
                env->exception.vaddress = wp_hit->hitaddr;
                raise_exception(env, EXCP_DATA_ABORT);
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        if (check_breakpoints(cpu)) {
            bool same_el = (arm_debug_target_el(env) == arm_current_pl(env));
            env->exception.syndrome = syn_breakpoint(same_el);
            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            /* FAR is UNKNOWN, so doesn't need setting */
            raise_exception(env, EXCP_PREFETCH_ABORT);
        }
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

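/* Each of these returns the shifted result and leaves the shifter carry-out
 * in env->CF.
 */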
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}