/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"

//#define DEBUG_SLB

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * Used to indicate whether we have allocated htab in the
 * host kernel
 */
bool kvmppc_kern_htab;
/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                    PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    int n, do_invalidate;

    do_invalidate = 0;
    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             *      and we still don't have a tlb_flush_mask(env, n, mask)
             *      in QEMU, we just invalidate all TLBs
             */
            do_invalidate = 1;
        }
    }
    if (do_invalidate) {
        tlb_flush(CPU(cpu), 1);
    }
}

void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        tlb_flush(CPU(cpu), 1);
    }
}

int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const struct ppc_one_seg_page_size *sps = NULL;
    int i;

    if (slot >= env->slb_nr) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
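    /* For slbmte, rb carries the ESID in its upper bits and the SLB
     * slot number in its low 12 bits, while rs supplies the VSID and
     * flags. */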
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

/*
 * 64-bit hash table MMU handling
 */

static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
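    /*
     * The switches below implement the PP/key protection table: with
     * key == 0, pp 0/1/2 grant read/write and pp 3/6 read-only; with
     * key == 1, pp 0/6 grant no access, pp 1/3 read-only, and pp 2
     * read/write.
     */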

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
279 280 281
            prot = PAGE_READ | PAGE_WRITE;
            break;

282 283
        case 0x3:
        case 0x6:
284
            prot = PAGE_READ;
285 286 287 288 289 290
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
291
            prot = 0;
292
            break;
293

294 295
        case 0x1:
        case 0x3:
296
            prot = PAGE_READ;
297
            break;
298

299
        case 0x2:
300
            prot = PAGE_READ | PAGE_WRITE;
301 302 303 304
            break;
        }
    }

    /* No execute if either noexec or guarded bits set */
    if (!(pte.pte1 & HPTE64_R_N) && !(pte.pte1 & HPTE64_R_G)
        && !(slb->vsid & SLB_VSID_N)) {
        prot |= PAGE_EXEC;
    }

    return prot;
}

static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3;
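    /*
     * Each of the 32 storage keys owns two adjacent AMR bits: the 0x2
     * bit of the pair forbids stores, the 0x1 bit forbids loads.
     */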

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}

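/*
 * ppc_hash64_start_access() returns an opaque token through which the
 * HPTEs of one PTEG can be read with ppc_hash64_load_hpte0/1(); the
 * caller must release it with ppc_hash64_stop_access() when done.
 */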
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    uint64_t token = 0;
    hwaddr pte_offset;

    pte_offset = pte_index * HASH_PTE_SIZE_64;
    if (kvmppc_kern_htab) {
        /*
         * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
         */
        token = kvmppc_hash64_read_pteg(cpu, pte_index);
        if (token) {
            return token;
        }
        /*
         * pteg read failed, even though we have allocated htab via
         * kvmppc_reset_htab.
         */
        return 0;
    }
    /*
     * HTAB is controlled by QEMU. Just point to the internally
     * accessible PTEG.
     */
    if (cpu->env.external_htab) {
        token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
    } else if (cpu->env.htab_base) {
        token = cpu->env.htab_base + pte_offset;
    }
    return token;
}

void ppc_hash64_stop_access(uint64_t token)
{
    if (kvmppc_kern_htab) {
        kvmppc_hash64_free_pteg(token);
    }
}

static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;

    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
    token = ppc_hash64_start_access(cpu, pte_index);
    if (!token) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(cpu, token, i);
        pte1 = ppc_hash64_load_hpte1(cpu, token, i);

        if ((pte0 & HPTE64_V_VALID)
            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
            && HPTE64_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_stop_access(token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
        }
    }
    ppc_hash64_stop_access(token);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    hwaddr pte_offset;
    hwaddr hash;
    uint64_t vsid, epnmask, epn, ptem;

    /* The SLB store path should prevent any bad page size encodings
     * getting in there, so: */
    assert(slb->sps);

    epnmask = ~((1ULL << slb->sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> slb->sps->page_shift);
    }
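    /*
     * This is the architected hash into the page table: the VSID
     * folded with the page number within the segment (plus an extra
     * vsid ^ (vsid << 25) fold for 1T segments, as above).  The
     * secondary PTEG, probed on a primary miss, lives at the bitwise
     * complement of this hash.
     */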
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
            "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
            "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, vsid, ptem, hash);
    pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);

    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU,
                "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                env->htab_mask, vsid, ptem, ~hash);

        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
    }

    return pte_offset;
}

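/*
 * Work out the actual page size of an HPTE: a PTE without the L bit
 * set is always a 4kiB page, while for large pages the low-order RPN
 * bits select one of the page sizes supported by the segment.
 */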
static unsigned hpte_page_shift(const struct ppc_one_seg_page_size *sps,
    uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_page_size *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == (ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
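    /*
     * rwx encodes the access type: 0 for a load, 1 for a store, 2 for
     * an instruction fetch; need_prot[] maps it to the permission bit
     * the page must grant.
     */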
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);

    if (!slb) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
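    /*
     * No matching HPTE means an ISI for instruction fetches, or a DSI
     * with DSISR 0x40000000 (translation not found), plus 0x02000000
     * when the faulting access was a store, for data accesses.
     */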
    if (pte_offset == -1) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* Validate page size encoding */
    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
    if (!apshift) {
        error_report("Bad page size encoding in HPTE 0x%"PRIx64" - 0x%"PRIx64
                     " @ 0x%"HWADDR_PRIx, pte.pte0, pte.pte1, pte_offset);
        /* Not entirely sure what the right action is here, but machine
         * check seems reasonable */
        cs->exception_index = POWERPC_EXCP_MCHECK;
        env->error_code = 0;
        return 1;
    }

    /* 5. Check access permissions */

    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
                              pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}

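/*
 * Debug (e.g. gdbstub) translation: the same SLB and hash table walk
 * as the fault path above, but failures are reported as -1 rather
 * than raised as exceptions.
 */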
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    unsigned apshift;

    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return -1;
    }

    pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    apshift = hpte_page_shift(slb->sps, pte.pte0, pte.pte1);
    if (!apshift) {
        return -1;
    }

    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}

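/*
 * Update an HPTE in whichever store backs the hash table: the
 * kernel-managed KVM HTAB, a QEMU-internal external_htab buffer, or
 * guest memory at htab_base.
 */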
void ppc_hash64_store_hpte(PowerPCCPU *cpu,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUPPCState *env = &cpu->env;

    if (kvmppc_kern_htab) {
        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
        return;
    }

    pte_index *= HASH_PTE_SIZE_64;
    if (env->external_htab) {
        stq_p(env->external_htab + pte_index, pte0);
        stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    } else {
        stq_phys(CPU(cpu)->as, env->htab_base + pte_index, pte0);
        stq_phys(CPU(cpu)->as,
                 env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    }
}

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                               target_ulong pte_index,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    tlb_flush(CPU(cpu), 1);
}