/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "helper.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"

//#define DEBUG_MMU
//#define DEBUG_SLB

#ifdef DEBUG_MMU
#  define LOG_MMU(...) qemu_log(__VA_ARGS__)
#  define LOG_MMU_STATE(cpu) log_cpu_state((cpu), 0)
#else
#  define LOG_MMU(...) do { } while (0)
#  define LOG_MMU_STATE(cpu) do { } while (0)
#endif

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * Used to indicate whether we have allocated htab in the
 * host kernel
 */
bool kvmppc_kern_htab;

/*
 * SLB handling
 */

/* Find the valid SLB entry, if any, that translates effective address
 * @eaddr.  Returns NULL when no entry matches. */
static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
{
    uint64_t match_256M, match_1T;
    int i;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    /* Candidate ESID values for each segment size, with the valid bit
     * folded in so a single compare checks both match and validity */
    match_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    match_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (i = 0; i < env->slb_nr; i++) {
        ppc_slb_t *entry = &env->slb[i];
        uint64_t seg_size = entry->vsid & SLB_VSID_B;

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                    PRIx64 "\n", __func__, i, entry->esid, entry->vsid);
        /* 1T matches are checked on every MMU model: slbmte refuses to
         * insert 1T entries on MMUs without 1T segment support, so a
         * match here is always legitimate. */
        if ((entry->esid == match_256M && seg_size == SLB_VSID_B_256M)
            || (entry->esid == match_1T && seg_size == SLB_VSID_B_1T)) {
            return entry;
        }
    }

    return NULL;
}

/* Print all non-empty SLB entries to @f (monitor "info slb" helper). */
void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
{
    int n;

    /* Pull the latest register state from KVM before dumping */
    cpu_synchronize_state(CPU(ppc_env_get_cpu(env)));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (n = 0; n < env->slb_nr; n++) {
        uint64_t esid = env->slb[n].esid;
        uint64_t vsid = env->slb[n].vsid;

        /* Skip slots that were never written */
        if (esid == 0 && vsid == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    n, esid, vsid);
    }
}

/* slbia instruction: invalidate all SLB entries except slot 0. */
void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    bool flushed = false;
    int n;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            flushed = true;
        }
    }
    if (flushed) {
        /* XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        tlb_flush(CPU(cpu), 1);
    }
}

/* slbie instruction: invalidate the single SLB entry, if any, that
 * translates effective address @addr. */
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb = slb_lookup(env, addr);

    /* No matching valid entry -> nothing to do */
    if (slb == NULL || !(slb->esid & SLB_ESID_V)) {
        return;
    }

    slb->esid &= ~SLB_ESID_V;

    /* XXX: given the fact that segment size is 256 MB or 1TB,
     *      and we still don't have a tlb_flush_mask(env, n, mask)
     *      in QEMU, we just invalidate all TLBs
     */
    tlb_flush(CPU(cpu), 1);
}

/* Store an SLB entry (slbmte).  @rb carries the ESID plus the slot
 * number in its low 12 bits; @rs is the VSID word.  Returns 0 on
 * success, -1 on invalid operands (caller raises the program check).
 *
 * Fix: the original formed &env->slb[slot] before validating the slot,
 * which computes an out-of-bounds pointer (slot can be up to 0xfff but
 * the array only has slb_nr entries) — undefined behaviour in C even
 * without a dereference.  Form the pointer only after validation. */
int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    int slot = rb & 0xfff;
    ppc_slb_t *slb;

    /* NOTE(review): this reserved-bits test also rejects out-of-range
     * slots, but only if slb_nr is a power of two — presumably always
     * true for supported CPUs; confirm against CPU models. */
    if (rb & (0x1000 - env->slb_nr)) {
        return -1; /* Reserved bits set or slot too high */
    }
    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    slb = &env->slb[slot];
    /* Mask out the slot number as we store the entry */
    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
    slb->vsid = rs;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, rb, rs,
            slb->esid, slb->vsid);

    return 0;
}

/* Read the ESID word of SLB slot (rb & 0xfff) into *rt (slbmfee).
 * Returns 0 on success, -1 if the slot number is out of range.
 *
 * Fix: index the array only after the bounds check; the original formed
 * &env->slb[slot] first, which is an out-of-bounds pointer computation
 * (UB) for slot >= slb_nr. */
static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = env->slb[slot].esid;
    return 0;
}

/* Read the VSID word of SLB slot (rb & 0xfff) into *rt (slbmfev).
 * Returns 0 on success, -1 if the slot number is out of range.
 *
 * Fix: index the array only after the bounds check; the original formed
 * &env->slb[slot] first, which is an out-of-bounds pointer computation
 * (UB) for slot >= slb_nr. */
static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = env->slb[slot].vsid;
    return 0;
}

/* slbmte helper: store an SLB entry, raising an illegal-instruction
 * program interrupt on invalid operands. */
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    int rc = ppc_store_slb(env, rb, rs);

    if (rc < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}

/* slbmfee helper: read the ESID word of an SLB slot, raising an
 * illegal-instruction program interrupt for a bad slot number. */
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;
    int rc = ppc_load_slb_esid(env, rb, &rt);

    if (rc < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

/* slbmfev helper: read the VSID word of an SLB slot, raising an
 * illegal-instruction program interrupt for a bad slot number. */
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;
    int rc = ppc_load_slb_vsid(env, rb, &rt);

    if (rc < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

/*
 * 64-bit hash table MMU handling
 */

/* Compute the PAGE_* protection implied by the PTE's PP/PP0 bits
 * together with the SLB entry's key selection (Ks/Kp vs. MSR[PR]).
 *
 * Fix: the no-execute condition contradicted its own comment and the
 * Power ISA — it granted PAGE_EXEC whenever the N bit was *clear* OR
 * the G (guarded) bit / SLB N bit was *set*, i.e. guarded pages were
 * incorrectly executable.  Execute is permitted only when none of the
 * PTE N, PTE G, and SLB N bits are set. */
static int ppc_hash64_pte_prot(CPUPPCState *env,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    /* key selects which of the two per-segment protection keys applies:
     * Kp for problem (user) state, Ks for supervisor state */
    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    /* 3-bit PP value: low two bits from HPTE64_R_PP, high bit (PP0)
     * relocated down from bit 61 */
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;
        }
    }

    /* No execute if either noexec or guarded bits set */
    if (!(pte.pte1 & HPTE64_R_N) && !(pte.pte1 & HPTE64_R_G)
        && !(slb->vsid & SLB_VSID_N)) {
        prot |= PAGE_EXEC;
    }

    return prot;
}

/* Apply Virtual Page Class Key Protection: mask read/write permission
 * according to the AMR field selected by the PTE's key. */
static int ppc_hash64_amr_prot(CPUPPCState *env, ppc_hash_pte64_t pte)
{
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    int key, amrbits;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

    /* Each key selects a 2-bit field of AMR; key 0 is the most
     * significant pair */
    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> (2 * (31 - key))) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /* A store is permitted if the AMR bit is 0; a set bit removes
     * write permission */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /* A load is permitted if the AMR bit is 0; a set bit removes
     * read permission */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}

/* Begin access to the PTEG containing @pte_index.  Returns an opaque
 * token for use with the hpte load accessors, or 0 on failure.  Must be
 * paired with ppc_hash64_stop_access(). */
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    hwaddr pte_offset = pte_index * HASH_PTE_SIZE_64;

    if (kvmppc_kern_htab) {
        /* HTAB is controlled by KVM: fetch the PTEG into a fresh
         * buffer.  A zero result means the pteg read failed, even
         * though we have allocated htab via kvmppc_reset_htab. */
        return kvmppc_hash64_read_pteg(cpu, pte_index);
    }

    /* HTAB is controlled by QEMU: the token is simply a pointer/address
     * into the internally accessible PTEG. */
    if (cpu->env.external_htab) {
        return (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
    }
    if (cpu->env.htab_base) {
        return cpu->env.htab_base + pte_offset;
    }
    return 0;
}

/* End a PTEG access started by ppc_hash64_start_access().  Only the
 * KVM path allocates a buffer that needs freeing. */
void ppc_hash64_stop_access(uint64_t token)
{
    if (kvmppc_kern_htab) {
        kvmppc_hash64_free_pteg(token);
    }
}

static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr hash,
360 361 362 363
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte64_t *pte)
{
    int i;
364 365 366
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;
367

368 369 370 371 372
    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
    token = ppc_hash64_start_access(ppc_env_get_cpu(env), pte_index);
    if (!token) {
        return -1;
    }
373
    for (i = 0; i < HPTES_PER_GROUP; i++) {
374 375
        pte0 = ppc_hash64_load_hpte0(env, token, i);
        pte1 = ppc_hash64_load_hpte1(env, token, i);
376 377 378 379 380 381

        if ((pte0 & HPTE64_V_VALID)
            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
            && HPTE64_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
382 383
            ppc_hash64_stop_access(token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
384 385
        }
    }
386 387 388 389
    ppc_hash64_stop_access(token);
    /*
     * We didn't find a valid entry.
     */
390 391 392
    return -1;
}

/* Look up the hash PTE translating @eaddr under SLB entry @slb.
 * Computes the primary hash from the VSID and effective page number,
 * searches the primary PTEG, then the secondary (~hash) PTEG.  On
 * success fills *pte and returns the PTE's byte offset in the hash
 * table; returns -1 if neither PTEG contains a match. */
static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    hwaddr pte_offset;
    hwaddr hash;
    uint64_t vsid, epnshift, epnmask, epn, ptem;

    /* Page size according to the SLB, which we use to generate the
     * EPN for hash table lookup..  When we implement more recent MMU
     * extensions this might be different from the actual page size
     * encoded in the PTE */
    epnshift = (slb->vsid & SLB_VSID_L)
        ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
    epnmask = ~((1ULL << epnshift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> epnshift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> epnshift);
    }
    /* ptem: the partial virtual address the PTE must match — VSID bits
     * from the SLB plus the abbreviated page number (AVPN) */
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);

    /* Page address translation */
    LOG_MMU("htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    LOG_MMU("0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, vsid, ptem,  hash);
    pte_offset = ppc_hash64_pteg_search(env, hash, 0, ptem, pte);

    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        LOG_MMU("1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                env->htab_mask, vsid, ptem, ~hash);

        pte_offset = ppc_hash64_pteg_search(env, ~hash, 1, ptem, pte);
    }

    return pte_offset;
}
/* Combine the real page number from the PTE with the in-page offset
 * from the effective address to form the full real address. */
static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
                                   target_ulong eaddr)
{
    /* FIXME: Add support for SLLP extended page sizes */
    int page_bits = (slb->vsid & SLB_VSID_L)
        ? TARGET_PAGE_BITS_16M : TARGET_PAGE_BITS;
    hwaddr offset_mask = (1ULL << page_bits) - 1;
    hwaddr rpn = pte.pte1 & HPTE64_R_RPN;

    return (rpn & ~offset_mask) | (eaddr & offset_mask);
}

/* Handle a TLB miss for @eaddr with access type @rwx (0 = load,
 * 1 = store, 2 = instruction fetch).  On success installs a TLB entry
 * via tlb_set_page() and returns 0; on failure sets up the appropriate
 * interrupt state (exception_index, error_code, DAR/DSISR) and
 * returns 1 so the caller delivers the exception. */
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    /* Permission required for each rwx value, indexed by rwx */
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(env, eaddr);

    if (!slb) {
        /* Segment fault: ISEG for fetches, DSEG (with DAR) for data */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(env, slb, eaddr, &pte);
    if (pte_offset == -1) {
        /* Page fault: ISI for fetches, DSI (DAR + DSISR) for data;
         * DSISR bit 0x02000000 marks a store access */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    LOG_MMU("found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 5. Check access permissions */

    /* Effective protection is the intersection of PP-bit protection
     * and AMR (virtual page class key) protection */
    pp_prot = ppc_hash64_pte_prot(env, slb, pte);
    amr_prot = ppc_hash64_amr_prot(env, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        LOG_MMU("PTE access rejected\n");
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            /* Distinguish PP-bit violations (0x08000000) from AMR key
             * violations (0x00200000) in DSISR */
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    LOG_MMU("PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(env, pte_offset / HASH_PTE_SIZE_64,
                              pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
/* Debugger address translation: translate @addr without touching TLB
 * state or raising exceptions.  Returns the real page address, or -1
 * if the address does not translate. */
hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
{
    ppc_slb_t *slb;
    ppc_hash_pte64_t pte;
    hwaddr pte_offset;

    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    slb = slb_lookup(env, addr);
    if (slb == NULL) {
        return -1;
    }

    pte_offset = ppc_hash64_htab_lookup(env, slb, addr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
}
/* Write both words of hash PTE @pte_index, dispatching to whichever
 * backend owns the hash table (KVM, an external htab buffer, or guest
 * physical memory). */
void ppc_hash64_store_hpte(CPUPPCState *env,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong offset;

    if (kvmppc_kern_htab) {
        /* KVM owns the htab; let it perform the update */
        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
        return;
    }

    offset = pte_index * HASH_PTE_SIZE_64;
    if (env->external_htab) {
        stq_p(env->external_htab + offset, pte0);
        stq_p(env->external_htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
    } else {
        stq_phys(cs->as, env->htab_base + offset, pte0);
        stq_phys(cs->as, env->htab_base + offset + HASH_PTE_SIZE_64 / 2,
                 pte1);
    }
}