/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"

//#define DEBUG_SLB

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * Used to indicate whether we have allocated htab in the
 * host kernel
 */
bool kvmppc_kern_htab;
/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;
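    /* SLB_ESID_V is folded into both search keys, so only valid SLB
     * entries can ever compare equal in the loop below. */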

    for (n = 0; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                    PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

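    /* Under KVM this pulls the current register state back from the
     * kernel so we dump an up-to-date SLB; under TCG it is a no-op. */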
    cpu_synchronize_state(CPU(cpu));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < env->slb_nr; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    int n, do_invalidate;

    do_invalidate = 0;
    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             *      and we still don't have a tlb_flush_mask(env, n, mask)
             *      in QEMU, we just invalidate all TLBs
             */
            do_invalidate = 1;
        }
    }
    if (do_invalidate) {
        tlb_flush(CPU(cpu), 1);
    }
}

void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        tlb_flush(CPU(cpu), 1);
    }
}

int ppc_store_slb(PowerPCCPU *cpu, target_ulong rb, target_ulong rs)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

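    /* The slot number lives in the low 12 bits of RB; with slb_nr a
     * power of two, (0x1000 - slb_nr) masks exactly the reserved bits
     * plus any slot index beyond the implemented SLB size. */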
    if (rb & (0x1000 - env->slb_nr)) {
        return -1; /* Reserved bits set or slot too high */
    }
    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    /* Mask out the slot number as we store the entry */
    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
    slb->vsid = rs;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, rb, rs,
            slb->esid, slb->vsid);

    return 0;
}

static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

/*
 * 64-bit hash table MMU handling
 */

static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

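    /* Select Kp or Ks from the SLBE depending on whether we are in
     * problem (user) or privileged state; the key, together with the
     * PP bits, picks the row in the protection tables below. */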
    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;
        }
    }

    /* No execute if any of the noexec, guarded or segment no-execute
     * bits is set */
    if (!((pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G)
          || (slb->vsid & SLB_VSID_N))) {
        prot |= PAGE_EXEC;
    }

    return prot;
}

static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

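    /* The AMR holds a two-bit field per key class, key 0 in the two
     * most significant bits; within each pair, 0x2 disables writes
     * and 0x1 disables reads (a set bit denies the access). */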
    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2*(31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}

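/*
 * Return a "token" through which the caller can read a PTEG: either a
 * temporary buffer filled from the KVM-owned HTAB, or a pointer
 * directly into QEMU's own copy.  It must be released again with
 * ppc_hash64_stop_access().
 */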
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    uint64_t token = 0;
    hwaddr pte_offset;

    pte_offset = pte_index * HASH_PTE_SIZE_64;
    if (kvmppc_kern_htab) {
        /*
         * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
         */
        token = kvmppc_hash64_read_pteg(cpu, pte_index);
        if (token) {
            return token;
        }
        /*
         * pteg read failed, even though we have allocated htab via
         * kvmppc_reset_htab.
         */
        return 0;
    }
    /*
     * HTAB is controlled by QEMU. Just point to the internally
     * accessible PTEG.
     */
    if (cpu->env.external_htab) {
        token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
    } else if (cpu->env.htab_base) {
        token = cpu->env.htab_base + pte_offset;
    }
    return token;
}

void ppc_hash64_stop_access(uint64_t token)
{
    if (kvmppc_kern_htab) {
        kvmppc_hash64_free_pteg(token);
    }
}

static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;

    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
    token = ppc_hash64_start_access(cpu, pte_index);
    if (!token) {
        return -1;
    }
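
    /* A PTEG holds HPTES_PER_GROUP (8) HPTEs; scan them for a valid
     * entry whose AVPN and secondary-hash bit both match. */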
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(cpu, token, i);
        pte1 = ppc_hash64_load_hpte1(cpu, token, i);

        if ((pte0 & HPTE64_V_VALID)
            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
            && HPTE64_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_stop_access(token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
        }
    }
    ppc_hash64_stop_access(token);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static uint64_t ppc_hash64_page_shift(ppc_slb_t *slb)
{
    uint64_t epnshift;

    /* Page size according to the SLB, which we use to generate the
     * EPN for hash table lookup.  When we implement more recent MMU
     * extensions this might be different from the actual page size
     * encoded in the PTE */
    if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_4K) {
        epnshift = TARGET_PAGE_BITS;
    } else if ((slb->vsid & SLB_VSID_LLP_MASK) == SLB_VSID_64K) {
        epnshift = TARGET_PAGE_BITS_64K;
    } else {
        epnshift = TARGET_PAGE_BITS_16M;
    }
    return epnshift;
}

static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    hwaddr pte_offset;
    hwaddr hash;
    uint64_t vsid, epnshift, epnmask, epn, ptem;

    epnshift = ppc_hash64_page_shift(slb);
    epnmask = ~((1ULL << epnshift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> epnshift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> epnshift);
    }
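    /* Build the value ("ptem") that a matching HPTE's first doubleword
     * must compare equal to: VSID bits taken from the SLBE plus the
     * abbreviated virtual page number from the effective address. */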
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
            "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
            "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, vsid, ptem, hash);
    pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);

    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU,
                "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                env->htab_mask, vsid, ptem, ~hash);

        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
    }

    return pte_offset;
}

static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
                                   target_ulong eaddr)
{
    hwaddr mask;
    int target_page_bits;
    hwaddr rpn = pte.pte1 & HPTE64_R_RPN;
    /*
     * We support 4K, 64K and 16M now
     */
    target_page_bits = ppc_hash64_page_shift(slb);
    mask = (1ULL << target_page_bits) - 1;
    return (rpn & ~mask) | (eaddr & mask);
}

int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;
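    /* rwx encodes the access type (0 = load, 1 = store, 2 = fetch);
     * need_prot maps it to the page permission that access needs. */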
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);

    if (!slb) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
    if (pte_offset == -1) {
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
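            /* DSISR: 0x40000000 flags "no translation found";
             * 0x02000000 is set in addition when the access was a
             * store. */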
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 5. Check access permissions */

    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
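            /* DSISR bits: 0x08000000 = protection violation from the
             * PP bits, 0x02000000 = access was a store, 0x00200000 =
             * violation came from the AMR key check. */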
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
                              pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;

    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return -1;
    }

    pte_offset = ppc_hash64_htab_lookup(cpu, slb, addr, &pte);
    if (pte_offset == -1) {
        return -1;
    }

    return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
}

void ppc_hash64_store_hpte(PowerPCCPU *cpu,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUPPCState *env = &cpu->env;

    if (kvmppc_kern_htab) {
        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
        return;
    }

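    /* The HTAB is QEMU-managed: write either into the user-supplied
     * external_htab buffer directly, or into guest memory through the
     * CPU's address space. */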
    pte_index *= HASH_PTE_SIZE_64;
    if (env->external_htab) {
        stq_p(env->external_htab + pte_index, pte0);
        stq_p(env->external_htab + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    } else {
        stq_phys(CPU(cpu)->as, env->htab_base + pte_index, pte0);
        stq_phys(CPU(cpu)->as,
                 env->htab_base + pte_index + HASH_PTE_SIZE_64 / 2, pte1);
    }
}