/*
 * QEMU ARM CPU
 *
 * Copyright (c) 2012 SUSE LINUX Products GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>
 */

#include "qemu/osdep.h"
#include "target/arm/idau.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/visitor.h"
#include "cpu.h"
#include "internals.h"
#include "qemu-common.h"
#include "exec/exec-all.h"
#include "hw/qdev-properties.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/loader.h"
#endif
#include "hw/arm/arm.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "kvm_arm.h"
#include "disas/capstone.h"
#include "fpu/softfloat.h"

static void arm_cpu_set_pc(CPUState *cs, vaddr value)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (is_a64(env)) {
        env->pc = value;
        env->thumb = 0;
    } else {
        env->regs[15] = value & ~1;
        env->thumb = value & 1;
    }
}
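/*
 * Example: in AArch32, arm_cpu_set_pc(cs, 0x8001) leaves regs[15] == 0x8000
 * with env->thumb set (Thumb state), while 0x8000 selects ARM state; in
 * AArch64 the value is written to env->pc unchanged.
 */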

static void arm_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /*
     * It's OK to look at env for the current mode here, because it's
     * never possible for an AArch64 TB to chain to an AArch32 TB.
     */
    if (is_a64(env)) {
        env->pc = tb->pc;
    } else {
        env->regs[15] = tb->pc;
    }
}

static bool arm_cpu_has_work(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);

    return (cpu->power_state != PSCI_OFF)
        && cs->interrupt_request &
        (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD
         | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ
         | CPU_INTERRUPT_EXITTB);
}

void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
}

void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                 void *opaque)
{
    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);

    entry->hook = hook;
    entry->opaque = opaque;

    QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
}
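/*
 * These hook lists are walked when the CPU changes exception level; for
 * instance, arm_cpu_realizefn() below registers pmu_pre_el_change and
 * pmu_post_el_change here when the PMU is present and KVM is not in use.
 */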

static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Reset a single ARMCPRegInfo register */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;

    if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS)) {
        return;
    }

    if (ri->resetfn) {
        ri->resetfn(&cpu->env, ri);
        return;
    }

    /* A zero offset is never possible as it would be regs[0]
     * so we use it to indicate that reset is being handled elsewhere.
     * This is basically only used for fields in non-core coprocessors
     * (like the pxa2xx ones).
     */
    if (!ri->fieldoffset) {
        return;
    }

    if (cpreg_field_is_64bit(ri)) {
        CPREG_FIELD64(&cpu->env, ri) = ri->resetvalue;
    } else {
        CPREG_FIELD32(&cpu->env, ri) = ri->resetvalue;
    }
}

static void cp_reg_check_reset(gpointer key, gpointer value, gpointer opaque)
{
    /* Purely an assertion check: we've already done reset once,
     * so now check that running the reset for the cpreg doesn't
     * change its value. This traps bugs where two different cpregs
     * both try to reset the same state field but to different values.
     */
    ARMCPRegInfo *ri = value;
    ARMCPU *cpu = opaque;
    uint64_t oldvalue, newvalue;

    if (ri->type & (ARM_CP_SPECIAL | ARM_CP_ALIAS | ARM_CP_NO_RAW)) {
        return;
    }

    oldvalue = read_raw_cp_reg(&cpu->env, ri);
    cp_reg_reset(key, value, opaque);
    newvalue = read_raw_cp_reg(&cpu->env, ri);
    assert(oldvalue == newvalue);
}

/* CPUClass::reset() */
static void arm_cpu_reset(CPUState *s)
{
    ARMCPU *cpu = ARM_CPU(s);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(cpu);
    CPUARMState *env = &cpu->env;

    acc->parent_reset(s);

    memset(env, 0, offsetof(CPUARMState, end_reset_fields));

    g_hash_table_foreach(cpu->cp_regs, cp_reg_reset, cpu);
    g_hash_table_foreach(cpu->cp_regs, cp_reg_check_reset, cpu);

    env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid;
    env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0;
    env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1;
    env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2;

    cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON;
    s->halted = cpu->start_powered_off;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->iwmmxt.cregs[ARM_IWMMXT_wCID] = 0x69051000 | 'Q';
    }

    if (arm_feature(env, ARM_FEATURE_AARCH64)) {
        /* 64 bit CPUs always start in 64 bit mode */
        env->aarch64 = 1;
#if defined(CONFIG_USER_ONLY)
        env->pstate = PSTATE_MODE_EL0t;
        /* Userspace expects access to DC ZVA, CTL_EL0 and the cache ops */
        env->cp15.sctlr_el[1] |= SCTLR_UCT | SCTLR_UCI | SCTLR_DZE;
        /* Enable all PAC keys.  */
        env->cp15.sctlr_el[1] |= (SCTLR_EnIA | SCTLR_EnIB |
                                  SCTLR_EnDA | SCTLR_EnDB);
        /* Enable all PAC instructions */
        env->cp15.hcr_el2 |= HCR_API;
        env->cp15.scr_el3 |= SCR_API;
        /* and to the FP/Neon instructions */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
        /* and to the SVE instructions */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
        env->cp15.cptr_el[3] |= CPTR_EZ;
        /* with maximum vector length */
        env->vfp.zcr_el[1] = cpu->sve_max_vq - 1;
        env->vfp.zcr_el[2] = env->vfp.zcr_el[1];
        env->vfp.zcr_el[3] = env->vfp.zcr_el[1];
        /*
         * Enable TBI0 and TBI1.  While the real kernel only enables TBI0,
         * turning on both here will produce smaller code and otherwise
         * make no difference to the user-level emulation.
         */
        env->cp15.tcr_el[1].raw_tcr = (3ULL << 37);
#else
        /* Reset into the highest available EL */
        if (arm_feature(env, ARM_FEATURE_EL3)) {
            env->pstate = PSTATE_MODE_EL3h;
        } else if (arm_feature(env, ARM_FEATURE_EL2)) {
            env->pstate = PSTATE_MODE_EL2h;
        } else {
            env->pstate = PSTATE_MODE_EL1h;
        }
        env->pc = cpu->rvbar;
#endif
    } else {
#if defined(CONFIG_USER_ONLY)
        /* Userspace expects access to cp10 and cp11 for FP/Neon */
        env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
#endif
    }

#if defined(CONFIG_USER_ONLY)
    env->uncached_cpsr = ARM_CPU_MODE_USR;
    /* For user mode we must enable access to coprocessors */
    env->vfp.xregs[ARM_VFP_FPEXC] = 1 << 30;
    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        env->cp15.c15_cpar = 3;
    } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
        env->cp15.c15_cpar = 1;
    }
#else

    /*
     * If the highest available EL is EL2, AArch32 will start in Hyp
     * mode; otherwise it starts in SVC. Note that if we start in
     * AArch64 then these values in the uncached_cpsr will be ignored.
     */
    if (arm_feature(env, ARM_FEATURE_EL2) &&
        !arm_feature(env, ARM_FEATURE_EL3)) {
        env->uncached_cpsr = ARM_CPU_MODE_HYP;
    } else {
        env->uncached_cpsr = ARM_CPU_MODE_SVC;
    }
    env->daif = PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F;

    if (arm_feature(env, ARM_FEATURE_M)) {
        uint32_t initial_msp; /* Loaded from 0x0 */
        uint32_t initial_pc; /* Loaded from 0x4 */
        uint8_t *rom;
        uint32_t vecbase;

        if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
            env->v7m.secure = true;
        } else {
            /* This bit resets to 0 if security is supported, but 1 if
             * it is not. The bit is not present in v7M, but we set it
             * here so we can avoid having to make checks on it conditional
             * on ARM_FEATURE_V8 (we don't let the guest see the bit).
             */
            env->v7m.aircr = R_V7M_AIRCR_BFHFNMINS_MASK;
267 268
        }

        /* In v7M the reset value of this bit is IMPDEF, but ARM recommends
         * that it resets to 1, so QEMU always does that rather than making
         * it dependent on CPU model. In v8M it is RES1.
         */
        env->v7m.ccr[M_REG_NS] = R_V7M_CCR_STKALIGN_MASK;
        env->v7m.ccr[M_REG_S] = R_V7M_CCR_STKALIGN_MASK;
        if (arm_feature(env, ARM_FEATURE_V8)) {
            /* in v8M the NONBASETHRDENA bit [0] is RES1 */
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_NONBASETHRDENA_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_NONBASETHRDENA_MASK;
        }
        if (!arm_feature(env, ARM_FEATURE_M_MAIN)) {
            env->v7m.ccr[M_REG_NS] |= R_V7M_CCR_UNALIGN_TRP_MASK;
            env->v7m.ccr[M_REG_S] |= R_V7M_CCR_UNALIGN_TRP_MASK;
        }

        /* Unlike A/R profile, M profile defines the reset LR value */
        env->regs[14] = 0xffffffff;

        env->v7m.vecbase[M_REG_S] = cpu->init_svtor & 0xffffff80;

        /* Load the initial SP and PC from offset 0 and 4 in the vector table */
        vecbase = env->v7m.vecbase[env->v7m.secure];
        rom = rom_ptr(vecbase, 8);
        if (rom) {
            /* Address zero is covered by ROM which hasn't yet been
             * copied into physical memory.
             */
            initial_msp = ldl_p(rom);
            initial_pc = ldl_p(rom + 4);
        } else {
            /* Address zero not covered by a ROM blob, or the ROM blob
             * is in non-modifiable memory and this is a second reset after
             * it got copied into memory. In the latter case, rom_ptr
             * will return a NULL pointer and we should use ldl_phys instead.
             */
            initial_msp = ldl_phys(s->as, vecbase);
            initial_pc = ldl_phys(s->as, vecbase + 4);
        }

        env->regs[13] = initial_msp & 0xFFFFFFFC;
        env->regs[15] = initial_pc & ~1;
        env->thumb = initial_pc & 1;
    }

    /* AArch32 has a hard highvec setting of 0xFFFF0000.  If we are currently
     * executing as AArch32 then check if highvecs are enabled and
     * adjust the PC accordingly.
     */
    if (A32_BANKED_CURRENT_REG_GET(env, sctlr) & SCTLR_V) {
        env->regs[15] = 0xFFFF0000;
    }

    /* M profile requires that reset clears the exclusive monitor;
     * A profile does not, but clearing it makes more sense than having it
     * set with an exclusive access on address zero.
     */
    arm_clear_exclusive(env);

    env->vfp.xregs[ARM_VFP_FPEXC] = 0;
#endif

    if (arm_feature(env, ARM_FEATURE_PMSA)) {
        if (cpu->pmsav7_dregion > 0) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                memset(env->pmsav8.rbar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rbar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                memset(env->pmsav8.rlar[M_REG_NS], 0,
                       sizeof(*env->pmsav8.rlar[M_REG_NS])
                       * cpu->pmsav7_dregion);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    memset(env->pmsav8.rbar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rbar[M_REG_S])
                           * cpu->pmsav7_dregion);
                    memset(env->pmsav8.rlar[M_REG_S], 0,
                           sizeof(*env->pmsav8.rlar[M_REG_S])
                           * cpu->pmsav7_dregion);
                }
            } else if (arm_feature(env, ARM_FEATURE_V7)) {
                memset(env->pmsav7.drbar, 0,
                       sizeof(*env->pmsav7.drbar) * cpu->pmsav7_dregion);
                memset(env->pmsav7.drsr, 0,
                       sizeof(*env->pmsav7.drsr) * cpu->pmsav7_dregion);
                memset(env->pmsav7.dracr, 0,
                       sizeof(*env->pmsav7.dracr) * cpu->pmsav7_dregion);
            }
        }
        env->pmsav7.rnr[M_REG_NS] = 0;
        env->pmsav7.rnr[M_REG_S] = 0;
        env->pmsav8.mair0[M_REG_NS] = 0;
        env->pmsav8.mair0[M_REG_S] = 0;
        env->pmsav8.mair1[M_REG_NS] = 0;
        env->pmsav8.mair1[M_REG_S] = 0;
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        if (cpu->sau_sregion > 0) {
            memset(env->sau.rbar, 0, sizeof(*env->sau.rbar) * cpu->sau_sregion);
            memset(env->sau.rlar, 0, sizeof(*env->sau.rlar) * cpu->sau_sregion);
        }
        env->sau.rnr = 0;
        /* SAU_CTRL reset value is IMPDEF; we choose 0, which is what
         * the Cortex-M33 does.
         */
        env->sau.ctrl = 0;
    }

    set_flush_to_zero(1, &env->vfp.standard_fp_status);
    set_flush_inputs_to_zero(1, &env->vfp.standard_fp_status);
    set_default_nan_mode(1, &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.standard_fp_status);
    set_float_detect_tininess(float_tininess_before_rounding,
                              &env->vfp.fp_status_f16);
#ifndef CONFIG_USER_ONLY
    if (kvm_enabled()) {
        kvm_arm_reset_vcpu(cpu);
    }
#endif

    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);
}

bool arm_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    CPUARMState *env = cs->env_ptr;
    uint32_t cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    uint32_t target_el;
    uint32_t excp_idx;
    bool ret = false;

    if (interrupt_request & CPU_INTERRUPT_FIQ) {
        excp_idx = EXCP_FIQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
            cs->exception_index = excp_idx;
            env->exception.target_el = target_el;
            cc->do_interrupt(cs);
            ret = true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        excp_idx = EXCP_IRQ;
        target_el = arm_phys_excp_target_el(cs, excp_idx, cur_el, secure);
        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
            cs->exception_index = excp_idx;
            env->exception.target_el = target_el;
            cc->do_interrupt(cs);
            ret = true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VIRQ) {
        excp_idx = EXCP_VIRQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
            cs->exception_index = excp_idx;
            env->exception.target_el = target_el;
            cc->do_interrupt(cs);
            ret = true;
        }
    }
    if (interrupt_request & CPU_INTERRUPT_VFIQ) {
        excp_idx = EXCP_VFIQ;
        target_el = 1;
        if (arm_excp_unmasked(cs, excp_idx, target_el)) {
            cs->exception_index = excp_idx;
            env->exception.target_el = target_el;
            cc->do_interrupt(cs);
            ret = true;
        }
    }

    return ret;
}

#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
static bool arm_v7m_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUClass *cc = CPU_GET_CLASS(cs);
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    bool ret = false;

    /* ARMv7-M interrupt masking works differently from the -A or -R profiles.
     * There is no FIQ/IRQ distinction. Instead of I and F bits
     * masking FIQ and IRQ interrupts, an exception is taken only
     * if it is higher priority than the current execution priority
     * (which depends on state like BASEPRI, FAULTMASK and the
     * currently active exception).
     */
    if (interrupt_request & CPU_INTERRUPT_HARD
        && (armv7m_nvic_can_take_pending_exception(env->nvic))) {
        cs->exception_index = EXCP_IRQ;
        cc->do_interrupt(cs);
        ret = true;
    }
    return ret;
}
#endif

void arm_cpu_update_virq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VIRQ, which is the logical OR of
     * the HCR_EL2.VI bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VI) ||
        (env->irq_line_state & CPU_INTERRUPT_VIRQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VIRQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VIRQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VIRQ);
        }
    }
}

void arm_cpu_update_vfiq(ARMCPU *cpu)
{
    /*
     * Update the interrupt level for VFIQ, which is the logical OR of
     * the HCR_EL2.VF bit and the input line level from the GIC.
     */
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);

    bool new_state = (env->cp15.hcr_el2 & HCR_VF) ||
        (env->irq_line_state & CPU_INTERRUPT_VFIQ);

    if (new_state != ((cs->interrupt_request & CPU_INTERRUPT_VFIQ) != 0)) {
        if (new_state) {
            cpu_interrupt(cs, CPU_INTERRUPT_VFIQ);
        } else {
            cpu_reset_interrupt(cs, CPU_INTERRUPT_VFIQ);
        }
    }
}
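/*
 * Note: arm_cpu_set_irq() below calls these two helpers whenever the
 * VIRQ/VFIQ input lines change level, so the pending virtual interrupt
 * state always reflects the OR of the line level and the corresponding
 * HCR_EL2 bit; the other caller is the HCR_EL2 write path outside this file.
 */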

#ifndef CONFIG_USER_ONLY
static void arm_cpu_set_irq(void *opaque, int irq, int level)
{
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    static const int mask[] = {
        [ARM_CPU_IRQ] = CPU_INTERRUPT_HARD,
        [ARM_CPU_FIQ] = CPU_INTERRUPT_FIQ,
        [ARM_CPU_VIRQ] = CPU_INTERRUPT_VIRQ,
        [ARM_CPU_VFIQ] = CPU_INTERRUPT_VFIQ
    };

    if (level) {
        env->irq_line_state |= mask[irq];
    } else {
        env->irq_line_state &= ~mask[irq];
    }

    switch (irq) {
    case ARM_CPU_VIRQ:
        assert(arm_feature(env, ARM_FEATURE_EL2));
        arm_cpu_update_virq(cpu);
        break;
    case ARM_CPU_VFIQ:
        assert(arm_feature(env, ARM_FEATURE_EL2));
        arm_cpu_update_vfiq(cpu);
        break;
    case ARM_CPU_IRQ:
    case ARM_CPU_FIQ:
        if (level) {
            cpu_interrupt(cs, mask[irq]);
        } else {
            cpu_reset_interrupt(cs, mask[irq]);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void arm_cpu_kvm_set_irq(void *opaque, int irq, int level)
{
#ifdef CONFIG_KVM
    ARMCPU *cpu = opaque;
    CPUARMState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    int kvm_irq = KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT;
    uint32_t linestate_bit;

    switch (irq) {
    case ARM_CPU_IRQ:
        kvm_irq |= KVM_ARM_IRQ_CPU_IRQ;
        linestate_bit = CPU_INTERRUPT_HARD;
        break;
    case ARM_CPU_FIQ:
        kvm_irq |= KVM_ARM_IRQ_CPU_FIQ;
        linestate_bit = CPU_INTERRUPT_FIQ;
        break;
    default:
        g_assert_not_reached();
    }

    if (level) {
        env->irq_line_state |= linestate_bit;
    } else {
        env->irq_line_state &= ~linestate_bit;
    }

586 587 588 589
    kvm_irq |= cs->cpu_index << KVM_ARM_IRQ_VCPU_SHIFT;
    kvm_set_irq(kvm_state, kvm_irq, level ? 1 : 0);
#endif
}

static bool arm_cpu_virtio_is_big_endian(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    cpu_synchronize_state(cs);
    return arm_cpu_data_is_big_endian(env);
}

#endif

static inline void set_feature(CPUARMState *env, int feature)
{
    env->features |= 1ULL << feature;
}

static inline void unset_feature(CPUARMState *env, int feature)
{
    env->features &= ~(1ULL << feature);
}
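/*
 * set_feature()/unset_feature() just toggle bits in env->features; the
 * feature-implication cascades in arm_cpu_realizefn() below (for example
 * V7VE implying V7, which in turn cascades down through V6K, V6, V5 and
 * V4T) are built from repeated calls to set_feature().
 */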

static int
print_insn_thumb1(bfd_vma pc, disassemble_info *info)
{
  return print_insn_arm(pc | 1, info);
}

static void arm_disas_set_info(CPUState *cpu, disassemble_info *info)
{
    ARMCPU *ac = ARM_CPU(cpu);
    CPUARMState *env = &ac->env;
    bool sctlr_b;

    if (is_a64(env)) {
        /* We might not be compiled with the A64 disassembler
         * because it needs a C++ compiler. Leave print_insn
         * unset in this case to use the caller default behaviour.
         */
#if defined(CONFIG_ARM_A64_DIS)
        info->print_insn = print_insn_arm_a64;
#endif
        info->cap_arch = CS_ARCH_ARM64;
        info->cap_insn_unit = 4;
        info->cap_insn_split = 4;
    } else {
        int cap_mode;
        if (env->thumb) {
            info->print_insn = print_insn_thumb1;
            info->cap_insn_unit = 2;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_THUMB;
        } else {
            info->print_insn = print_insn_arm;
            info->cap_insn_unit = 4;
            info->cap_insn_split = 4;
            cap_mode = CS_MODE_ARM;
        }
        if (arm_feature(env, ARM_FEATURE_V8)) {
            cap_mode |= CS_MODE_V8;
        }
        if (arm_feature(env, ARM_FEATURE_M)) {
            cap_mode |= CS_MODE_MCLASS;
        }
        info->cap_arch = CS_ARCH_ARM;
        info->cap_mode = cap_mode;
    }

    sctlr_b = arm_sctlr_b(env);
    if (bswap_code(sctlr_b)) {
#ifdef TARGET_WORDS_BIGENDIAN
        info->endian = BFD_ENDIAN_LITTLE;
#else
        info->endian = BFD_ENDIAN_BIG;
#endif
    }
    info->flags &= ~INSN_ARM_BE32;
#ifndef CONFIG_USER_ONLY
    if (sctlr_b) {
        info->flags |= INSN_ARM_BE32;
    }
#endif
}

uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz)
{
    uint32_t Aff1 = idx / clustersz;
    uint32_t Aff0 = idx % clustersz;
    return (Aff1 << ARM_AFF1_SHIFT) | Aff0;
}
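/*
 * For example, with clustersz == 8, cpu index 10 yields Aff1 == 1 and
 * Aff0 == 2, i.e. an affinity value of 0x102 (Aff1 lives in bits [15:8]).
 */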

static void cpreg_hashtable_data_destroy(gpointer data)
{
    /*
     * Destroy function for cpu->cp_regs hashtable data entries.
     * We must free the name string because it was g_strdup()ed in
     * add_cpreg_to_hashtable(). It's OK to cast away the 'const'
     * from r->name because we know we definitely allocated it.
     */
    ARMCPRegInfo *r = data;

    g_free((void *)r->name);
    g_free(r);
}

static void arm_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    ARMCPU *cpu = ARM_CPU(obj);

    cs->env_ptr = &cpu->env;
    cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
                                         g_free, cpreg_hashtable_data_destroy);

    QLIST_INIT(&cpu->pre_el_change_hooks);
    QLIST_INIT(&cpu->el_change_hooks);

#ifndef CONFIG_USER_ONLY
    /* Our inbound IRQ and FIQ lines */
    if (kvm_enabled()) {
        /* VIRQ and VFIQ are unused with KVM but we add them to maintain
         * the same interface as non-KVM CPUs.
         */
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 4);
    } else {
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 4);
    }

    qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
                       ARRAY_SIZE(cpu->gt_timer_outputs));

    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->gicv3_maintenance_interrupt,
                             "gicv3-maintenance-interrupt", 1);
    qdev_init_gpio_out_named(DEVICE(cpu), &cpu->pmu_interrupt,
                             "pmu-interrupt", 1);
#endif

    /* DTB consumers generally don't in fact care what the 'compatible'
     * string is, so always provide some string and trust that a hypothetical
     * picky DTB consumer will also provide a helpful error message.
     */
    cpu->dtb_compatible = "qemu,unknown";
    cpu->psci_version = 1; /* By default assume PSCI v0.1 */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;

    if (tcg_enabled()) {
        cpu->psci_version = 2; /* TCG implements PSCI 0.2 */
    }
}

static Property arm_cpu_reset_cbar_property =
            DEFINE_PROP_UINT64("reset-cbar", ARMCPU, reset_cbar, 0);

static Property arm_cpu_reset_hivecs_property =
            DEFINE_PROP_BOOL("reset-hivecs", ARMCPU, reset_hivecs, false);

static Property arm_cpu_rvbar_property =
            DEFINE_PROP_UINT64("rvbar", ARMCPU, rvbar, 0);

static Property arm_cpu_has_el2_property =
            DEFINE_PROP_BOOL("has_el2", ARMCPU, has_el2, true);

static Property arm_cpu_has_el3_property =
            DEFINE_PROP_BOOL("has_el3", ARMCPU, has_el3, true);

static Property arm_cpu_cfgend_property =
            DEFINE_PROP_BOOL("cfgend", ARMCPU, cfgend, false);

/* use property name "pmu" to match other archs and virt tools */
static Property arm_cpu_has_pmu_property =
            DEFINE_PROP_BOOL("pmu", ARMCPU, has_pmu, true);

static Property arm_cpu_has_mpu_property =
            DEFINE_PROP_BOOL("has-mpu", ARMCPU, has_mpu, true);

/* This is like DEFINE_PROP_UINT32 but it doesn't set the default value,
 * because the CPU initfn will have already set cpu->pmsav7_dregion to
 * the right value for that particular CPU type, and we don't want
 * to override that with an incorrect constant value.
 */
static Property arm_cpu_pmsav7_dregion_property =
            DEFINE_PROP_UNSIGNED_NODEFAULT("pmsav7-dregion", ARMCPU,
                                           pmsav7_dregion,
                                           qdev_prop_uint32, uint32_t);

static void arm_get_init_svtor(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    visit_type_uint32(v, name, &cpu->init_svtor, errp);
}

static void arm_set_init_svtor(Object *obj, Visitor *v, const char *name,
                               void *opaque, Error **errp)
{
    ARMCPU *cpu = ARM_CPU(obj);

    visit_type_uint32(v, name, &cpu->init_svtor, errp);
}
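/*
 * The getter and setter can share the same body because visit_type_uint32()
 * either reads or writes cpu->init_svtor depending on whether the visitor
 * passed in is an output or an input visitor.
 */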

void arm_cpu_post_init(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    /* M profile implies PMSA. We have to do this here rather than
     * in realize with the other feature-implication checks because
     * we look at the PMSA bit to see if we should add some properties.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
        set_feature(&cpu->env, ARM_FEATURE_PMSA);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
        arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_cbar_property,
                                 &error_abort);
    }

    if (!arm_feature(&cpu->env, ARM_FEATURE_M)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_reset_hivecs_property,
                                 &error_abort);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_rvbar_property,
                                 &error_abort);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
        /* Add the has_el3 state CPU property only if EL3 is allowed.  This will
         * prevent "has_el3" from existing on CPUs which cannot support EL3.
         */
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property,
                                 &error_abort);

#ifndef CONFIG_USER_ONLY
        object_property_add_link(obj, "secure-memory",
                                 TYPE_MEMORY_REGION,
                                 (Object **)&cpu->secure_memory,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG,
                                 &error_abort);
#endif
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el2_property,
                                 &error_abort);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_PMU)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_pmu_property,
                                 &error_abort);
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_PMSA)) {
        qdev_property_add_static(DEVICE(obj), &arm_cpu_has_mpu_property,
                                 &error_abort);
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            qdev_property_add_static(DEVICE(obj),
                                     &arm_cpu_pmsav7_dregion_property,
                                     &error_abort);
        }
    }

    if (arm_feature(&cpu->env, ARM_FEATURE_M_SECURITY)) {
        object_property_add_link(obj, "idau", TYPE_IDAU_INTERFACE, &cpu->idau,
                                 qdev_prop_allow_set_link_before_realize,
                                 OBJ_PROP_LINK_STRONG,
                                 &error_abort);
        /*
         * M profile: initial value of the Secure VTOR. We can't just use
         * a simple DEFINE_PROP_UINT32 for this because we want to permit
         * the property to be set after realize.
         */
        object_property_add(obj, "init-svtor", "uint32",
                            arm_get_init_svtor, arm_set_init_svtor,
                            NULL, NULL, &error_abort);
    }

    qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property,
                             &error_abort);
}

static void arm_cpu_finalizefn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    ARMELChangeHook *hook, *next;

    g_hash_table_destroy(cpu->cp_regs);

    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
        QLIST_REMOVE(hook, node);
        g_free(hook);
    }
#ifndef CONFIG_USER_ONLY
    if (cpu->pmu_timer) {
        timer_del(cpu->pmu_timer);
        timer_deinit(cpu->pmu_timer);
        timer_free(cpu->pmu_timer);
    }
#endif
}

static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
{
    CPUState *cs = CPU(dev);
    ARMCPU *cpu = ARM_CPU(dev);
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(dev);
    CPUARMState *env = &cpu->env;
    int pagebits;
    Error *local_err = NULL;
    bool no_aa32 = false;

    /* If we needed to query the host kernel for the CPU features
     * then it's possible that might have failed in the initfn, but
     * this is the first point where we can report it.
     */
    if (cpu->host_cpu_probe_failed) {
        if (!kvm_enabled()) {
            error_setg(errp, "The 'host' CPU type can only be used with KVM");
        } else {
            error_setg(errp, "Failed to retrieve host CPU features");
        }
        return;
    }

#ifndef CONFIG_USER_ONLY
    /* The NVIC and M-profile CPU are two halves of a single piece of
     * hardware; trying to use one without the other is a command line
     * error and will result in segfaults if not caught here.
     */
    if (arm_feature(env, ARM_FEATURE_M)) {
        if (!env->nvic) {
            error_setg(errp, "This board cannot be used with Cortex-M CPUs");
            return;
        }
    } else {
        if (env->nvic) {
            error_setg(errp, "This board can only be used with Cortex-M CPUs");
            return;
        }
    }
938 939 940 941 942 943 944 945 946

    cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
                                           arm_gt_ptimer_cb, cpu);
    cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
                                           arm_gt_vtimer_cb, cpu);
    cpu->gt_timer[GTIMER_HYP] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
                                          arm_gt_htimer_cb, cpu);
    cpu->gt_timer[GTIMER_SEC] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
                                          arm_gt_stimer_cb, cpu);
#endif

    cpu_exec_realizefn(cs, &local_err);
    if (local_err != NULL) {
        error_propagate(errp, local_err);
        return;
    }

    /* Some features automatically imply others: */
    if (arm_feature(env, ARM_FEATURE_V8)) {
        if (arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_V7);
        } else {
            set_feature(env, ARM_FEATURE_V7VE);
        }
    }

    /*
     * There exist AArch64 cpus without AArch32 support.  When KVM
     * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
     * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
        no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
    }

    if (arm_feature(env, ARM_FEATURE_V7VE)) {
        /* v7 Virtualization Extensions. In real hardware this implies
         * EL2 and also the presence of the Security Extensions.
         * For QEMU, for backwards-compatibility we implement some
         * CPUs or CPU configs which have no actual EL2 or EL3 but do
         * include the various other features that V7VE implies.
         * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
         * Security Extensions is ARM_FEATURE_EL3.
         */
        assert(no_aa32 || cpu_isar_feature(arm_div, cpu));
        set_feature(env, ARM_FEATURE_LPAE);
        set_feature(env, ARM_FEATURE_V7);
    }
    if (arm_feature(env, ARM_FEATURE_V7)) {
        set_feature(env, ARM_FEATURE_VAPA);
        set_feature(env, ARM_FEATURE_THUMB2);
        set_feature(env, ARM_FEATURE_MPIDR);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            set_feature(env, ARM_FEATURE_V6K);
        } else {
            set_feature(env, ARM_FEATURE_V6);
        }

        /* Always define VBAR for V7 CPUs even if it doesn't exist in
         * non-EL3 configs. This is needed by some legacy boards.
         */
        set_feature(env, ARM_FEATURE_VBAR);
    }
    if (arm_feature(env, ARM_FEATURE_V6K)) {
        set_feature(env, ARM_FEATURE_V6);
        set_feature(env, ARM_FEATURE_MVFR);
    }
    if (arm_feature(env, ARM_FEATURE_V6)) {
        set_feature(env, ARM_FEATURE_V5);
        if (!arm_feature(env, ARM_FEATURE_M)) {
            assert(no_aa32 || cpu_isar_feature(jazelle, cpu));
            set_feature(env, ARM_FEATURE_AUXCR);
        }
    }
    if (arm_feature(env, ARM_FEATURE_V5)) {
        set_feature(env, ARM_FEATURE_V4T);
    }
    if (arm_feature(env, ARM_FEATURE_VFP4)) {
        set_feature(env, ARM_FEATURE_VFP3);
    }
    if (arm_feature(env, ARM_FEATURE_VFP3)) {
        set_feature(env, ARM_FEATURE_VFP);
    }
    if (arm_feature(env, ARM_FEATURE_LPAE)) {
        set_feature(env, ARM_FEATURE_V7MP);
        set_feature(env, ARM_FEATURE_PXN);
    }
    if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
        set_feature(env, ARM_FEATURE_CBAR);
    }
    if (arm_feature(env, ARM_FEATURE_THUMB2) &&
        !arm_feature(env, ARM_FEATURE_M)) {
        set_feature(env, ARM_FEATURE_THUMB_DSP);
    }

    if (arm_feature(env, ARM_FEATURE_V7) &&
        !arm_feature(env, ARM_FEATURE_M) &&
        !arm_feature(env, ARM_FEATURE_PMSA)) {
        /* v7VMSA drops support for the old ARMv5 tiny pages, so we
         * can use 4K pages.
         */
        pagebits = 12;
    } else {
        /* For CPUs which might have tiny 1K pages, or which have an
         * MPU and might have small region sizes, stick with 1K pages.
         */
        pagebits = 10;
    }
    if (!set_preferred_target_page_bits(pagebits)) {
        /* This can only ever happen for hotplugging a CPU, or if
         * the board code incorrectly creates a CPU which it has
         * promised via minimum_page_size that it will not.
         */
        error_setg(errp, "This CPU requires a smaller page size than the "
                   "system is using");
        return;
    }

    /* This cpu-id-to-MPIDR affinity is used only for TCG; KVM will override it.
     * We don't support setting cluster ID ([16..23]) (known as Aff2
     * in later ARM ARM versions), or any of the higher affinity level fields,
     * so these bits always RAZ.
     */
    if (cpu->mp_affinity == ARM64_AFFINITY_INVALID) {
        cpu->mp_affinity = arm_cpu_mp_affinity(cs->cpu_index,
                                               ARM_DEFAULT_CPUS_PER_CLUSTER);
    }

    if (cpu->reset_hivecs) {
        cpu->reset_sctlr |= (1 << 13);
    }

    if (cpu->cfgend) {
        if (arm_feature(&cpu->env, ARM_FEATURE_V7)) {
            cpu->reset_sctlr |= SCTLR_EE;
        } else {
            cpu->reset_sctlr |= SCTLR_B;
        }
    }

    if (!cpu->has_el3) {
        /* If the has_el3 CPU property is disabled then we need to disable the
         * feature.
         */
        unset_feature(env, ARM_FEATURE_EL3);

        /* Disable the security extension feature bits in the processor feature
         * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12].
         */
        cpu->id_pfr1 &= ~0xf0;
        cpu->isar.id_aa64pfr0 &= ~0xf000;
    }

    if (!cpu->has_el2) {
        unset_feature(env, ARM_FEATURE_EL2);
    }

    if (!cpu->has_pmu) {
        unset_feature(env, ARM_FEATURE_PMU);
    }
    if (arm_feature(env, ARM_FEATURE_PMU)) {
        pmu_init(cpu);

        if (!kvm_enabled()) {
            arm_register_pre_el_change_hook(cpu, &pmu_pre_el_change, 0);
            arm_register_el_change_hook(cpu, &pmu_post_el_change, 0);
        }

#ifndef CONFIG_USER_ONLY
        cpu->pmu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, arm_pmu_timer_cb,
                cpu);
#endif
    } else {
        cpu->id_aa64dfr0 &= ~0xf00;
        cpu->id_dfr0 &= ~(0xf << 24);
        cpu->pmceid0 = 0;
        cpu->pmceid1 = 0;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* Disable the hypervisor feature bits in the processor feature
         * registers if we don't have EL2. These are id_pfr1[15:12] and
         * id_aa64pfr0_el1[11:8].
         */
        cpu->isar.id_aa64pfr0 &= ~0xf00;
        cpu->id_pfr1 &= ~0xf000;
    }

    /* MPU can be configured out of a PMSA CPU either by setting has-mpu
     * to false or by setting pmsav7-dregion to 0.
     */
    if (!cpu->has_mpu) {
        cpu->pmsav7_dregion = 0;
    }
    if (cpu->pmsav7_dregion == 0) {
        cpu->has_mpu = false;
    }

    if (arm_feature(env, ARM_FEATURE_PMSA) &&
        arm_feature(env, ARM_FEATURE_V7)) {
        uint32_t nr = cpu->pmsav7_dregion;

        if (nr > 0xff) {
            error_setg(errp, "PMSAv7 MPU #regions invalid %" PRIu32, nr);
            return;
        }

        if (nr) {
            if (arm_feature(env, ARM_FEATURE_V8)) {
                /* PMSAv8 */
                env->pmsav8.rbar[M_REG_NS] = g_new0(uint32_t, nr);
                env->pmsav8.rlar[M_REG_NS] = g_new0(uint32_t, nr);
                if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
                    env->pmsav8.rbar[M_REG_S] = g_new0(uint32_t, nr);
                    env->pmsav8.rlar[M_REG_S] = g_new0(uint32_t, nr);
                }
            } else {
                env->pmsav7.drbar = g_new0(uint32_t, nr);
                env->pmsav7.drsr = g_new0(uint32_t, nr);
                env->pmsav7.dracr = g_new0(uint32_t, nr);
            }
        }
    }

    if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        uint32_t nr = cpu->sau_sregion;

        if (nr > 0xff) {
            error_setg(errp, "v8M SAU #regions invalid %" PRIu32, nr);
            return;
        }

        if (nr) {
            env->sau.rbar = g_new0(uint32_t, nr);
            env->sau.rlar = g_new0(uint32_t, nr);
        }
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        set_feature(env, ARM_FEATURE_VBAR);
    }

    register_cp_regs_for_features(cpu);
    arm_cpu_register_gdb_regs_for_features(cpu);

    init_cpreg_list(cpu);

#ifndef CONFIG_USER_ONLY
    if (cpu->has_el3 || arm_feature(env, ARM_FEATURE_M_SECURITY)) {
        cs->num_ases = 2;

        if (!cpu->secure_memory) {
            cpu->secure_memory = cs->memory;
        }
        cpu_address_space_init(cs, ARMASIdx_S, "cpu-secure-memory",
                               cpu->secure_memory);
    } else {
        cs->num_ases = 1;
    }
    cpu_address_space_init(cs, ARMASIdx_NS, "cpu-memory", cs->memory);

    /* No core_count specified, default to smp_cpus. */
    if (cpu->core_count == -1) {
        cpu->core_count = smp_cpus;
    }
#endif

    qemu_init_vcpu(cs);
    cpu_reset(cs);

    acc->parent_realize(dev, errp);
}

static ObjectClass *arm_cpu_class_by_name(const char *cpu_model)
{
    ObjectClass *oc;
    char *typename;
    char **cpuname;
    const char *cpunamestr;

    cpuname = g_strsplit(cpu_model, ",", 1);
    cpunamestr = cpuname[0];
#ifdef CONFIG_USER_ONLY
    /* For backwards compatibility usermode emulation allows "-cpu any",
     * which has the same semantics as "-cpu max".
     */
    if (!strcmp(cpunamestr, "any")) {
        cpunamestr = "max";
    }
#endif
    typename = g_strdup_printf(ARM_CPU_TYPE_NAME("%s"), cpunamestr);
    oc = object_class_by_name(typename);
    g_strfreev(cpuname);
    g_free(typename);
    if (!oc || !object_class_dynamic_cast(oc, TYPE_ARM_CPU) ||
        object_class_is_abstract(oc)) {
        return NULL;
    }
    return oc;
}

/* CPU models. These are not needed for the AArch64 linux-user build. */
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)

static void arm926_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,arm926";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_VFP);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
    cpu->midr = 0x41069265;
    cpu->reset_fpsid = 0x41011090;
    cpu->ctr = 0x1dd20d2;
    cpu->reset_sctlr = 0x00090078;

    /*
     * ARMv5 does not have the ID_ISAR registers, but we can still
     * set the field to indicate Jazelle support within QEMU.
     */
    cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);
}

static void arm946_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,arm946";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_PMSA);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    cpu->midr = 0x41059461;
    cpu->ctr = 0x0f004006;
    cpu->reset_sctlr = 0x00000078;
}

static void arm1026_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,arm1026";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_VFP);
    set_feature(&cpu->env, ARM_FEATURE_AUXCR);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_TEST_CLEAN);
    cpu->midr = 0x4106a262;
    cpu->reset_fpsid = 0x410110a0;
    cpu->ctr = 0x1dd20d2;
    cpu->reset_sctlr = 0x00090078;
    cpu->reset_auxcr = 1;

    /*
     * ARMv5 does not have the ID_ISAR registers, but we can still
     * set the field to indicate Jazelle support within QEMU.
     */
    cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1);

    {
        /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */
        ARMCPRegInfo ifar = {
            .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
            .access = PL1_RW,
            .fieldoffset = offsetof(CPUARMState, cp15.ifar_ns),
            .resetvalue = 0
        };
        define_one_arm_cp_reg(cpu, &ifar);
    }
}

static void arm1136_r2_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    /* What QEMU calls "arm1136_r2" is actually the 1136 r0p2, i.e. an
     * older core than plain "arm1136". In particular this does not
     * have the v6K features.
     * These ID register values are correct for 1136 but may be wrong
     * for 1136_r2 (in particular r0p2 does not actually implement most
     * of the ID registers).
     */

    cpu->dtb_compatible = "arm,arm1136";
    set_feature(&cpu->env, ARM_FEATURE_V6);
    set_feature(&cpu->env, ARM_FEATURE_VFP);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
    cpu->midr = 0x4107b362;
    cpu->reset_fpsid = 0x410120b4;
    cpu->isar.mvfr0 = 0x11111111;
    cpu->isar.mvfr1 = 0x00000000;
    cpu->ctr = 0x1dd20d2;
    cpu->reset_sctlr = 0x00050078;
    cpu->id_pfr0 = 0x111;
    cpu->id_pfr1 = 0x1;
    cpu->id_dfr0 = 0x2;
    cpu->id_afr0 = 0x3;
    cpu->id_mmfr0 = 0x01130003;
    cpu->id_mmfr1 = 0x10030302;
    cpu->id_mmfr2 = 0x01222110;
    cpu->isar.id_isar0 = 0x00140011;
    cpu->isar.id_isar1 = 0x12002111;
    cpu->isar.id_isar2 = 0x11231111;
    cpu->isar.id_isar3 = 0x01102131;
    cpu->isar.id_isar4 = 0x141;
    cpu->reset_auxcr = 7;
}

static void arm1136_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,arm1136";
    set_feature(&cpu->env, ARM_FEATURE_V6K);
    set_feature(&cpu->env, ARM_FEATURE_V6);
    set_feature(&cpu->env, ARM_FEATURE_VFP);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
    cpu->midr = 0x4117b363;
    cpu->reset_fpsid = 0x410120b4;
    cpu->isar.mvfr0 = 0x11111111;
    cpu->isar.mvfr1 = 0x00000000;
    cpu->ctr = 0x1dd20d2;
    cpu->reset_sctlr = 0x00050078;
    cpu->id_pfr0 = 0x111;
    cpu->id_pfr1 = 0x1;
    cpu->id_dfr0 = 0x2;
    cpu->id_afr0 = 0x3;
    cpu->id_mmfr0 = 0x01130003;
    cpu->id_mmfr1 = 0x10030302;
    cpu->id_mmfr2 = 0x01222110;
    cpu->isar.id_isar0 = 0x00140011;
    cpu->isar.id_isar1 = 0x12002111;
    cpu->isar.id_isar2 = 0x11231111;
    cpu->isar.id_isar3 = 0x01102131;
    cpu->isar.id_isar4 = 0x141;
    cpu->reset_auxcr = 7;
}

static void arm1176_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,arm1176";
    set_feature(&cpu->env, ARM_FEATURE_V6K);
    set_feature(&cpu->env, ARM_FEATURE_VFP);
    set_feature(&cpu->env, ARM_FEATURE_VAPA);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_DIRTY_REG);
    set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    cpu->midr = 0x410fb767;
    cpu->reset_fpsid = 0x410120b5;
    cpu->isar.mvfr0 = 0x11111111;
    cpu->isar.mvfr1 = 0x00000000;
    cpu->ctr = 0x1dd20d2;
    cpu->reset_sctlr = 0x00050078;
    cpu->id_pfr0 = 0x111;
    cpu->id_pfr1 = 0x11;
    cpu->id_dfr0 = 0x33;
    cpu->id_afr0 = 0;
    cpu->id_mmfr0 = 0x01130003;
    cpu->id_mmfr1 = 0x10030302;
    cpu->id_mmfr2 = 0x01222100;
    cpu->isar.id_isar0 = 0x0140011;
    cpu->isar.id_isar1 = 0x12002111;
    cpu->isar.id_isar2 = 0x11231121;
    cpu->isar.id_isar3 = 0x01102131;
    cpu->isar.id_isar4 = 0x01141;
    cpu->reset_auxcr = 7;
}

static void arm11mpcore_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,arm11mpcore";
    set_feature(&cpu->env, ARM_FEATURE_V6K);
    set_feature(&cpu->env, ARM_FEATURE_VFP);
    set_feature(&cpu->env, ARM_FEATURE_VAPA);
    set_feature(&cpu->env, ARM_FEATURE_MPIDR);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    cpu->midr = 0x410fb022;
    cpu->reset_fpsid = 0x410120b4;
    cpu->isar.mvfr0 = 0x11111111;
    cpu->isar.mvfr1 = 0x00000000;
    cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */
    cpu->id_pfr0 = 0x111;
    cpu->id_pfr1 = 0x1;
    cpu->id_dfr0 = 0;
    cpu->id_afr0 = 0x2;
    cpu->id_mmfr0 = 0x01100103;
    cpu->id_mmfr1 = 0x10020302;
    cpu->id_mmfr2 = 0x01222000;
    cpu->isar.id_isar0 = 0x00100011;
    cpu->isar.id_isar1 = 0x12002111;
    cpu->isar.id_isar2 = 0x11221011;
    cpu->isar.id_isar3 = 0x01102131;
    cpu->isar.id_isar4 = 0x141;
    cpu->reset_auxcr = 1;
}

static void cortex_m0_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    set_feature(&cpu->env, ARM_FEATURE_V6);
    set_feature(&cpu->env, ARM_FEATURE_M);

    cpu->midr = 0x410cc200;
}

static void cortex_m3_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    set_feature(&cpu->env, ARM_FEATURE_V7);
    set_feature(&cpu->env, ARM_FEATURE_M);
    set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
    cpu->midr = 0x410fc231;
    cpu->pmsav7_dregion = 8;
    cpu->id_pfr0 = 0x00000030;
    cpu->id_pfr1 = 0x00000200;
    cpu->id_dfr0 = 0x00100000;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x00000030;
    cpu->id_mmfr1 = 0x00000000;
    cpu->id_mmfr2 = 0x00000000;
    cpu->id_mmfr3 = 0x00000000;
    cpu->isar.id_isar0 = 0x01141110;
    cpu->isar.id_isar1 = 0x02111000;
    cpu->isar.id_isar2 = 0x21112231;
    cpu->isar.id_isar3 = 0x01111110;
    cpu->isar.id_isar4 = 0x01310102;
    cpu->isar.id_isar5 = 0x00000000;
    cpu->isar.id_isar6 = 0x00000000;
}

static void cortex_m4_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    set_feature(&cpu->env, ARM_FEATURE_V7);
    set_feature(&cpu->env, ARM_FEATURE_M);
    set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
    set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
    cpu->midr = 0x410fc240; /* r0p0 */
    cpu->pmsav7_dregion = 8;
    cpu->id_pfr0 = 0x00000030;
    cpu->id_pfr1 = 0x00000200;
    cpu->id_dfr0 = 0x00100000;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x00000030;
    cpu->id_mmfr1 = 0x00000000;
    cpu->id_mmfr2 = 0x00000000;
    cpu->id_mmfr3 = 0x00000000;
    cpu->isar.id_isar0 = 0x01141110;
    cpu->isar.id_isar1 = 0x02111000;
    cpu->isar.id_isar2 = 0x21112231;
    cpu->isar.id_isar3 = 0x01111110;
    cpu->isar.id_isar4 = 0x01310102;
    cpu->isar.id_isar5 = 0x00000000;
    cpu->isar.id_isar6 = 0x00000000;
}

static void cortex_m33_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    set_feature(&cpu->env, ARM_FEATURE_V8);
    set_feature(&cpu->env, ARM_FEATURE_M);
    set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
    set_feature(&cpu->env, ARM_FEATURE_M_SECURITY);
    set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
    cpu->midr = 0x410fd213; /* r0p3 */
    cpu->pmsav7_dregion = 16;
    cpu->sau_sregion = 8;
    cpu->id_pfr0 = 0x00000030;
    cpu->id_pfr1 = 0x00000210;
    cpu->id_dfr0 = 0x00200000;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x00101F40;
    cpu->id_mmfr1 = 0x00000000;
    cpu->id_mmfr2 = 0x01000000;
    cpu->id_mmfr3 = 0x00000000;
    cpu->isar.id_isar0 = 0x01101110;
    cpu->isar.id_isar1 = 0x02212000;
    cpu->isar.id_isar2 = 0x20232232;
    cpu->isar.id_isar3 = 0x01111131;
    cpu->isar.id_isar4 = 0x01310132;
    cpu->isar.id_isar5 = 0x00000000;
    cpu->isar.id_isar6 = 0x00000000;
P
Peter Maydell 已提交
1531 1532 1533 1534
    cpu->clidr = 0x00000000;
    cpu->ctr = 0x8000c000;
}

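/* Class init shared by all M-profile models: route interrupts and
 * exceptions through the v7M-specific handlers.
 */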
static void arm_v7m_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(oc);

    acc->info = data;
#ifndef CONFIG_USER_ONLY
    cc->do_interrupt = arm_v7m_cpu_do_interrupt;
#endif

    cc->cpu_exec_interrupt = arm_v7m_cpu_exec_interrupt;
}

static const ARMCPRegInfo cortexr5_cp_reginfo[] = {
    /* Dummy the TCM region regs for the moment */
    { .name = "ATCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST },
    { .name = "BTCM", .cp = 15, .opc1 = 0, .crn = 9, .crm = 1, .opc2 = 1,
      .access = PL1_RW, .type = ARM_CP_CONST },
    { .name = "DCACHE_INVAL", .cp = 15, .opc1 = 0, .crn = 15, .crm = 5,
      .opc2 = 0, .access = PL1_W, .type = ARM_CP_NOP },
    REGINFO_SENTINEL
};

static void cortex_r5_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    set_feature(&cpu->env, ARM_FEATURE_V7);
    set_feature(&cpu->env, ARM_FEATURE_V7MP);
    set_feature(&cpu->env, ARM_FEATURE_PMSA);
    cpu->midr = 0x411fc153; /* r1p3 */
    cpu->id_pfr0 = 0x0131;
    cpu->id_pfr1 = 0x001;
    cpu->id_dfr0 = 0x010400;
    cpu->id_afr0 = 0x0;
    cpu->id_mmfr0 = 0x0210030;
    cpu->id_mmfr1 = 0x00000000;
    cpu->id_mmfr2 = 0x01200000;
    cpu->id_mmfr3 = 0x0211;
    cpu->isar.id_isar0 = 0x02101111;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232141;
    cpu->isar.id_isar3 = 0x01112131;
    cpu->isar.id_isar4 = 0x0010142;
    cpu->isar.id_isar5 = 0x0;
    cpu->isar.id_isar6 = 0x0;
    cpu->mp_is_up = true;
    cpu->pmsav7_dregion = 16;
    define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
}

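/* Cortex-R5F: a Cortex-R5 with the optional VFP3 floating point unit. */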
static void cortex_r5f_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cortex_r5_initfn(obj);
    set_feature(&cpu->env, ARM_FEATURE_VFP3);
}

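/* Implementation-defined Cortex-A8 L2 control registers, modelled as
 * constant zero.
 */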
static const ARMCPRegInfo cortexa8_cp_reginfo[] = {
    { .name = "L2LOCKDOWN", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 0,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    { .name = "L2AUXCR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void cortex_a8_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a8";
    set_feature(&cpu->env, ARM_FEATURE_V7);
    set_feature(&cpu->env, ARM_FEATURE_VFP3);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    cpu->midr = 0x410fc080;
    cpu->reset_fpsid = 0x410330c0;
    cpu->isar.mvfr0 = 0x11110222;
    cpu->isar.mvfr1 = 0x00011111;
    cpu->ctr = 0x82048004;
    cpu->reset_sctlr = 0x00c50078;
    cpu->id_pfr0 = 0x1031;
    cpu->id_pfr1 = 0x11;
    cpu->id_dfr0 = 0x400;
    cpu->id_afr0 = 0;
    cpu->id_mmfr0 = 0x31100003;
    cpu->id_mmfr1 = 0x20000000;
    cpu->id_mmfr2 = 0x01202000;
    cpu->id_mmfr3 = 0x11;
    cpu->isar.id_isar0 = 0x00101111;
    cpu->isar.id_isar1 = 0x12112111;
    cpu->isar.id_isar2 = 0x21232031;
    cpu->isar.id_isar3 = 0x11112131;
    cpu->isar.id_isar4 = 0x00111142;
    cpu->dbgdidr = 0x15141000;
    cpu->clidr = (1 << 27) | (2 << 24) | 3;
    cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */
    cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
    cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
    cpu->reset_auxcr = 2;
    define_arm_cp_regs(cpu, cortexa8_cp_reginfo);
}

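/* Implementation-defined Cortex-A9 power, diagnostic and TLB lockdown
 * registers, modelled as plain state, constants or write-ignored no-ops.
 */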
static const ARMCPRegInfo cortexa9_cp_reginfo[] = {
    /* power_control should be set to maximum latency. Again,
     * default to 0 and set by private hook
     */
    { .name = "A9_PWRCTL", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_power_control) },
    { .name = "A9_DIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 1,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_diagnostic) },
    { .name = "A9_PWRDIAG", .cp = 15, .crn = 15, .crm = 0, .opc1 = 0, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0,
      .fieldoffset = offsetof(CPUARMState, cp15.c15_power_diagnostic) },
    { .name = "NEONBUSY", .cp = 15, .crn = 15, .crm = 1, .opc1 = 0, .opc2 = 0,
      .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
    /* TLB lockdown control */
    { .name = "TLB_LOCKR", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 2,
      .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
    { .name = "TLB_LOCKW", .cp = 15, .crn = 15, .crm = 4, .opc1 = 5, .opc2 = 4,
      .access = PL1_W, .resetvalue = 0, .type = ARM_CP_NOP },
    { .name = "TLB_VA", .cp = 15, .crn = 15, .crm = 5, .opc1 = 5, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
    { .name = "TLB_PA", .cp = 15, .crn = 15, .crm = 6, .opc1 = 5, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
    { .name = "TLB_ATTR", .cp = 15, .crn = 15, .crm = 7, .opc1 = 5, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0, .type = ARM_CP_CONST },
    REGINFO_SENTINEL
};

static void cortex_a9_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a9";
    set_feature(&cpu->env, ARM_FEATURE_V7);
    set_feature(&cpu->env, ARM_FEATURE_VFP3);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    /* Note that A9 supports the MP extensions even for
     * A9UP and single-core A9MP (which are both different
     * and valid configurations; we don't model A9UP).
     */
    set_feature(&cpu->env, ARM_FEATURE_V7MP);
    set_feature(&cpu->env, ARM_FEATURE_CBAR);
    cpu->midr = 0x410fc090;
    cpu->reset_fpsid = 0x41033090;
    cpu->isar.mvfr0 = 0x11110222;
    cpu->isar.mvfr1 = 0x01111111;
    cpu->ctr = 0x80038003;
    cpu->reset_sctlr = 0x00c50078;
    cpu->id_pfr0 = 0x1031;
    cpu->id_pfr1 = 0x11;
    cpu->id_dfr0 = 0x000;
    cpu->id_afr0 = 0;
    cpu->id_mmfr0 = 0x00100103;
    cpu->id_mmfr1 = 0x20000000;
    cpu->id_mmfr2 = 0x01230000;
    cpu->id_mmfr3 = 0x00002111;
    cpu->isar.id_isar0 = 0x00101111;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232041;
    cpu->isar.id_isar3 = 0x11112131;
    cpu->isar.id_isar4 = 0x00111142;
    cpu->dbgdidr = 0x35141000;
    cpu->clidr = (1 << 27) | (1 << 24) | 3;
    cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
    cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
    define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
}

#ifndef CONFIG_USER_ONLY
static uint64_t a15_l2ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
{
    /* Linux wants the number of processors from here.
     * Might as well set the interrupt-controller bit too.
     */
    return ((smp_cpus - 1) << 24) | (1 << 23);
}
#endif

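/* Implementation-defined L2 control registers shared by the A7 and A15
 * models: L2CTLR reports the core count (system emulation only),
 * L2ECTLR reads as zero.
 */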
static const ARMCPRegInfo cortexa15_cp_reginfo[] = {
#ifndef CONFIG_USER_ONLY
    { .name = "L2CTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 2,
      .access = PL1_RW, .resetvalue = 0, .readfn = a15_l2ctlr_read,
      .writefn = arm_cp_write_ignore, },
#endif
    { .name = "L2ECTLR", .cp = 15, .crn = 9, .crm = 0, .opc1 = 1, .opc2 = 3,
      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
    REGINFO_SENTINEL
};

static void cortex_a7_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a7";
    set_feature(&cpu->env, ARM_FEATURE_V7VE);
    set_feature(&cpu->env, ARM_FEATURE_VFP4);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7;
    cpu->midr = 0x410fc075;
    cpu->reset_fpsid = 0x41023075;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x11111111;
    cpu->ctr = 0x84448003;
    cpu->reset_sctlr = 0x00c50078;
    cpu->id_pfr0 = 0x00001131;
    cpu->id_pfr1 = 0x00011011;
    cpu->id_dfr0 = 0x02010555;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x10101105;
    cpu->id_mmfr1 = 0x40000000;
    cpu->id_mmfr2 = 0x01240000;
    cpu->id_mmfr3 = 0x02102211;
    /* a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but
     * table 4-41 gives 0x02101110, which includes the arm div insns.
     */
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232041;
    cpu->isar.id_isar3 = 0x11112131;
    cpu->isar.id_isar4 = 0x10011142;
    cpu->dbgdidr = 0x3515f005;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
    cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
    define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */
}

static void cortex_a15_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "arm,cortex-a15";
    set_feature(&cpu->env, ARM_FEATURE_V7VE);
    set_feature(&cpu->env, ARM_FEATURE_VFP4);
    set_feature(&cpu->env, ARM_FEATURE_NEON);
    set_feature(&cpu->env, ARM_FEATURE_THUMB2EE);
    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
    set_feature(&cpu->env, ARM_FEATURE_EL2);
    set_feature(&cpu->env, ARM_FEATURE_EL3);
    set_feature(&cpu->env, ARM_FEATURE_PMU);
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15;
    cpu->midr = 0x412fc0f1;
    cpu->reset_fpsid = 0x410430f0;
    cpu->isar.mvfr0 = 0x10110222;
    cpu->isar.mvfr1 = 0x11111111;
    cpu->ctr = 0x8444c004;
    cpu->reset_sctlr = 0x00c50078;
    cpu->id_pfr0 = 0x00001131;
    cpu->id_pfr1 = 0x00011011;
    cpu->id_dfr0 = 0x02010555;
    cpu->id_afr0 = 0x00000000;
    cpu->id_mmfr0 = 0x10201105;
    cpu->id_mmfr1 = 0x20000000;
    cpu->id_mmfr2 = 0x01240000;
    cpu->id_mmfr3 = 0x02102211;
    cpu->isar.id_isar0 = 0x02101110;
    cpu->isar.id_isar1 = 0x13112111;
    cpu->isar.id_isar2 = 0x21232041;
    cpu->isar.id_isar3 = 0x11112131;
    cpu->isar.id_isar4 = 0x10011142;
    cpu->dbgdidr = 0x3515f021;
    cpu->clidr = 0x0a200023;
    cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
    cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
    cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
    define_arm_cp_regs(cpu, cortexa15_cp_reginfo);
}

static void ti925t_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    set_feature(&cpu->env, ARM_FEATURE_V4T);
    set_feature(&cpu->env, ARM_FEATURE_OMAPCP);
    cpu->midr = ARM_CPUID_TI925T;
    cpu->ctr = 0x5109149;
    cpu->reset_sctlr = 0x00000070;
}

static void sa1100_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "intel,sa1100";
    set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    cpu->midr = 0x4401A11B;
    cpu->reset_sctlr = 0x00000070;
}

static void sa1110_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);
    set_feature(&cpu->env, ARM_FEATURE_STRONGARM);
    set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
    cpu->midr = 0x6901B119;
    cpu->reset_sctlr = 0x00000070;
}

static void pxa250_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052100;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa255_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052d00;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa260_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052903;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa261_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052d05;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa262_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    cpu->midr = 0x69052d06;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270a0_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054110;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270a1_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054111;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270b0_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054112;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270b1_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054113;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270c0_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054114;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

static void pxa270c5_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    cpu->dtb_compatible = "marvell,xscale";
    set_feature(&cpu->env, ARM_FEATURE_V5);
    set_feature(&cpu->env, ARM_FEATURE_XSCALE);
    set_feature(&cpu->env, ARM_FEATURE_IWMMXT);
    cpu->midr = 0x69054117;
    cpu->ctr = 0xd172172;
    cpu->reset_sctlr = 0x00000078;
}

#ifndef TARGET_AARCH64
/* -cpu max: if KVM is enabled, like -cpu host (best possible with this host);
 * otherwise, a CPU with as many features enabled as our emulation supports.
 * The version of '-cpu max' for qemu-system-aarch64 is defined in cpu64.c;
 * this only needs to handle 32 bits.
 */
static void arm_max_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    if (kvm_enabled()) {
        kvm_arm_set_cpu_features_from_host(cpu);
    } else {
        cortex_a15_initfn(obj);
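        /* The linux-user-only block below uses FIELD_DP32() to set
         * individual ID register fields, advertising the v8 crypto, CRC32,
         * RDM and FCMA additions plus the ID_ISAR6, MVFR2 and ID_MMFR4
         * extensions that the emulation supports.
         */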
#ifdef CONFIG_USER_ONLY
        /* We don't set these in system emulation mode for the moment,
         * since we don't correctly set (all of) the ID registers to
         * advertise them.
         */
        set_feature(&cpu->env, ARM_FEATURE_V8);
        {
            uint32_t t;

            t = cpu->isar.id_isar5;
            t = FIELD_DP32(t, ID_ISAR5, AES, 2);
            t = FIELD_DP32(t, ID_ISAR5, SHA1, 1);
            t = FIELD_DP32(t, ID_ISAR5, SHA2, 1);
            t = FIELD_DP32(t, ID_ISAR5, CRC32, 1);
            t = FIELD_DP32(t, ID_ISAR5, RDM, 1);
            t = FIELD_DP32(t, ID_ISAR5, VCMA, 1);
            cpu->isar.id_isar5 = t;

            t = cpu->isar.id_isar6;
            t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1);
            t = FIELD_DP32(t, ID_ISAR6, DP, 1);
            t = FIELD_DP32(t, ID_ISAR6, FHM, 1);
            t = FIELD_DP32(t, ID_ISAR6, SB, 1);
            t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1);
            cpu->isar.id_isar6 = t;

            t = cpu->isar.mvfr2;
            t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */
            t = FIELD_DP32(t, MVFR2, FPMISC, 4);   /* FP MaxNum */
            cpu->isar.mvfr2 = t;

            t = cpu->id_mmfr4;
            t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */
            cpu->id_mmfr4 = t;
        }
#endif
    }
}
#endif

#endif /* !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64) */

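/* One entry per CPU model: the -cpu name plus the QOM instance and
 * (optional) class init hooks used when registering its type.
 */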
struct ARMCPUInfo {
    const char *name;
    void (*initfn)(Object *obj);
    void (*class_init)(ObjectClass *oc, void *data);
};

static const ARMCPUInfo arm_cpus[] = {
#if !defined(CONFIG_USER_ONLY) || !defined(TARGET_AARCH64)
    { .name = "arm926",      .initfn = arm926_initfn },
    { .name = "arm946",      .initfn = arm946_initfn },
    { .name = "arm1026",     .initfn = arm1026_initfn },
    /* What QEMU calls "arm1136-r2" is actually the 1136 r0p2, i.e. an
     * older core than plain "arm1136". In particular this does not
     * have the v6K features.
     */
    { .name = "arm1136-r2",  .initfn = arm1136_r2_initfn },
    { .name = "arm1136",     .initfn = arm1136_initfn },
    { .name = "arm1176",     .initfn = arm1176_initfn },
    { .name = "arm11mpcore", .initfn = arm11mpcore_initfn },
    { .name = "cortex-m0",   .initfn = cortex_m0_initfn,
                             .class_init = arm_v7m_class_init },
    { .name = "cortex-m3",   .initfn = cortex_m3_initfn,
                             .class_init = arm_v7m_class_init },
    { .name = "cortex-m4",   .initfn = cortex_m4_initfn,
                             .class_init = arm_v7m_class_init },
    { .name = "cortex-m33",  .initfn = cortex_m33_initfn,
                             .class_init = arm_v7m_class_init },
    { .name = "cortex-r5",   .initfn = cortex_r5_initfn },
    { .name = "cortex-r5f",  .initfn = cortex_r5f_initfn },
    { .name = "cortex-a7",   .initfn = cortex_a7_initfn },
    { .name = "cortex-a8",   .initfn = cortex_a8_initfn },
    { .name = "cortex-a9",   .initfn = cortex_a9_initfn },
    { .name = "cortex-a15",  .initfn = cortex_a15_initfn },
    { .name = "ti925t",      .initfn = ti925t_initfn },
    { .name = "sa1100",      .initfn = sa1100_initfn },
    { .name = "sa1110",      .initfn = sa1110_initfn },
    { .name = "pxa250",      .initfn = pxa250_initfn },
    { .name = "pxa255",      .initfn = pxa255_initfn },
    { .name = "pxa260",      .initfn = pxa260_initfn },
    { .name = "pxa261",      .initfn = pxa261_initfn },
    { .name = "pxa262",      .initfn = pxa262_initfn },
    /* "pxa270" is an alias for "pxa270-a0" */
    { .name = "pxa270",      .initfn = pxa270a0_initfn },
    { .name = "pxa270-a0",   .initfn = pxa270a0_initfn },
    { .name = "pxa270-a1",   .initfn = pxa270a1_initfn },
    { .name = "pxa270-b0",   .initfn = pxa270b0_initfn },
    { .name = "pxa270-b1",   .initfn = pxa270b1_initfn },
    { .name = "pxa270-c0",   .initfn = pxa270c0_initfn },
    { .name = "pxa270-c5",   .initfn = pxa270c5_initfn },
#ifndef TARGET_AARCH64
    { .name = "max",         .initfn = arm_max_initfn },
#endif
#ifdef CONFIG_USER_ONLY
    { .name = "any",         .initfn = arm_max_initfn },
#endif
#endif
    { .name = NULL }
};

static Property arm_cpu_properties[] = {
    DEFINE_PROP_BOOL("start-powered-off", ARMCPU, start_powered_off, false),
    DEFINE_PROP_UINT32("psci-conduit", ARMCPU, psci_conduit, 0),
    DEFINE_PROP_UINT32("midr", ARMCPU, midr, 0),
    DEFINE_PROP_UINT64("mp-affinity", ARMCPU,
                        mp_affinity, ARM64_AFFINITY_INVALID),
    DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID),
    DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1),
    DEFINE_PROP_END_OF_LIST()
};

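/* User-mode emulation has no MMU model: record the faulting address and
 * report a prefetch or data abort for the caller to deliver as a signal.
 */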
#ifdef CONFIG_USER_ONLY
static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
                                    int rw, int mmu_idx)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    env->exception.vaddress = address;
    if (rw == 2) {
        cs->exception_index = EXCP_PREFETCH_ABORT;
    } else {
        cs->exception_index = EXCP_DATA_ABORT;
    }
    return 1;
}
#endif

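/* Report "iwmmxt" to gdb for XScale cores with iwMMXt, plain "arm"
 * otherwise, so gdb picks the matching architecture description.
 */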
static gchar *arm_gdb_arch_name(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
        return g_strdup("iwmmxt");
    }
    return g_strdup("arm");
}

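/* Hook up the DeviceClass and CPUClass callbacks shared by every ARM CPU
 * model.
 */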
static void arm_cpu_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);
    CPUClass *cc = CPU_CLASS(acc);
    DeviceClass *dc = DEVICE_CLASS(oc);

    device_class_set_parent_realize(dc, arm_cpu_realizefn,
                                    &acc->parent_realize);
    dc->props = arm_cpu_properties;

    acc->parent_reset = cc->reset;
    cc->reset = arm_cpu_reset;

    cc->class_by_name = arm_cpu_class_by_name;
    cc->has_work = arm_cpu_has_work;
    cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
    cc->dump_state = arm_cpu_dump_state;
    cc->set_pc = arm_cpu_set_pc;
    cc->synchronize_from_tb = arm_cpu_synchronize_from_tb;
    cc->gdb_read_register = arm_cpu_gdb_read_register;
    cc->gdb_write_register = arm_cpu_gdb_write_register;
#ifdef CONFIG_USER_ONLY
    cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
#else
    cc->do_interrupt = arm_cpu_do_interrupt;
    cc->do_unaligned_access = arm_cpu_do_unaligned_access;
    cc->do_transaction_failed = arm_cpu_do_transaction_failed;
    cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
    cc->asidx_from_attrs = arm_asidx_from_attrs;
    cc->vmsd = &vmstate_arm_cpu;
    cc->virtio_is_big_endian = arm_cpu_virtio_is_big_endian;
    cc->write_elf64_note = arm_cpu_write_elf64_note;
    cc->write_elf32_note = arm_cpu_write_elf32_note;
#endif
    cc->gdb_num_core_regs = 26;
    cc->gdb_core_xml_file = "arm-core.xml";
    cc->gdb_arch_name = arm_gdb_arch_name;
    cc->gdb_get_dynamic_xml = arm_gdb_get_dynamic_xml;
    cc->gdb_stop_before_watchpoint = true;
    cc->debug_excp_handler = arm_debug_excp_handler;
    cc->debug_check_watchpoint = arm_debug_check_watchpoint;
#if !defined(CONFIG_USER_ONLY)
    cc->adjust_watchpoint_address = arm_adjust_watchpoint_address;
#endif

    cc->disas_set_info = arm_disas_set_info;
#ifdef CONFIG_TCG
    cc->tcg_initialize = arm_translate_init;
#endif
}

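/* The "host" CPU model is only available with KVM: its feature set is
 * probed from the host CPU via the kernel.
 */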
#ifdef CONFIG_KVM
static void arm_host_initfn(Object *obj)
{
    ARMCPU *cpu = ARM_CPU(obj);

    kvm_arm_set_cpu_features_from_host(cpu);
    arm_cpu_post_init(obj);
}

static const TypeInfo host_arm_cpu_type_info = {
    .name = TYPE_ARM_HOST_CPU,
#ifdef TARGET_AARCH64
    .parent = TYPE_AARCH64_CPU,
#else
    .parent = TYPE_ARM_CPU,
#endif
    .instance_init = arm_host_initfn,
};

#endif

static void arm_cpu_instance_init(Object *obj)
{
    ARMCPUClass *acc = ARM_CPU_GET_CLASS(obj);

    acc->info->initfn(obj);
    arm_cpu_post_init(obj);
}

static void cpu_register_class_init(ObjectClass *oc, void *data)
{
    ARMCPUClass *acc = ARM_CPU_CLASS(oc);

    acc->info = data;
}

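/* Register one QOM type per ARMCPUInfo entry; the type name is the model
 * name suffixed with TYPE_ARM_CPU.
 */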
static void cpu_register(const ARMCPUInfo *info)
{
    TypeInfo type_info = {
        .parent = TYPE_ARM_CPU,
        .instance_size = sizeof(ARMCPU),
        .instance_init = arm_cpu_instance_init,
        .class_size = sizeof(ARMCPUClass),
        .class_init = info->class_init ?: cpu_register_class_init,
        .class_data = (void *)info,
    };

    type_info.name = g_strdup_printf("%s-" TYPE_ARM_CPU, info->name);
    type_register(&type_info);
    g_free((void *)type_info.name);
}

static const TypeInfo arm_cpu_type_info = {
    .name = TYPE_ARM_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(ARMCPU),
    .instance_init = arm_cpu_initfn,
    .instance_finalize = arm_cpu_finalizefn,
    .abstract = true,
    .class_size = sizeof(ARMCPUClass),
    .class_init = arm_cpu_class_init,
};

static const TypeInfo idau_interface_type_info = {
    .name = TYPE_IDAU_INTERFACE,
    .parent = TYPE_INTERFACE,
    .class_size = sizeof(IDAUInterfaceClass),
};

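/* Register the abstract base CPU type, the IDAU interface, every entry in
 * arm_cpus[] and, when KVM is built in, the "host" CPU type.
 */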
static void arm_cpu_register_types(void)
{
    const ARMCPUInfo *info = arm_cpus;

    type_register_static(&arm_cpu_type_info);
    type_register_static(&idau_interface_type_info);

    while (info->name) {
        cpu_register(info);
        info++;
    }

#ifdef CONFIG_KVM
    type_register_static(&host_arm_cpu_type_info);
#endif
}

type_init(arm_cpu_register_types)