/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
21
#include "cpu.h"
22
#include "exec/helper-proto.h"
23
#include "qemu/error-report.h"
24 25 26 27 28 29 30
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"

//#define DEBUG_SLB

#ifdef DEBUG_SLB
P
Paolo Bonzini 已提交
31
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
32 33 34 35
#else
#  define LOG_SLB(...) do { } while (0)
#endif

36 37 38 39 40
/*
 * Used to indicate whether we have allocated htab in the
 * host kernel
 */
bool kvmppc_kern_htab;
41 42 43 44
/*
 * SLB handling
 */

45
/*
 * Find the valid SLB entry covering effective address @eaddr, or NULL
 * if no entry matches.  Both 256MB and 1TB segment entries are probed.
 */
static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t match_256M, match_1T;
    int i;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    /* Candidate ESID values (with the valid bit set) for each segment size */
    match_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    match_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (i = 0; i < env->slb_nr; i++) {
        ppc_slb_t *entry = &env->slb[i];
        uint64_t seg_size = entry->vsid & SLB_VSID_B;

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                    PRIx64 "\n", __func__, i, entry->esid, entry->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (seg_size == SLB_VSID_B_256M && entry->esid == match_256M) {
            return entry;
        }
        if (seg_size == SLB_VSID_B_1T && entry->esid == match_1T) {
            return entry;
        }
    }

    return NULL;
}

75
/*
 * Dump all non-empty SLB entries to @f (monitor "info slb" backend).
 * Syncs register state from KVM first so the dump is current.
 */
void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int n;

    cpu_synchronize_state(CPU(cpu));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (n = 0; n < env->slb_nr; n++) {
        uint64_t esid = env->slb[n].esid;
        uint64_t vsid = env->slb[n].vsid;

        /* Skip slots that have never been written */
        if (esid == 0 && vsid == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    n, esid, vsid);
    }
}

/*
 * slbia: invalidate all SLB entries except slot 0, then flush the
 * QEMU TLB if anything was actually invalidated.
 */
void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    bool flush_needed = false;
    int i;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (i = 1; i < env->slb_nr; i++) {
        ppc_slb_t *entry = &env->slb[i];

        if (!(entry->esid & SLB_ESID_V)) {
            continue;
        }
        entry->esid &= ~SLB_ESID_V;
        /* XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        flush_needed = true;
    }

    if (flush_needed) {
        tlb_flush(CPU(cpu), 1);
    }
}

/*
 * slbie: invalidate the single SLB entry covering @addr (if any), and
 * flush the QEMU TLB when an entry was actually dropped.
 */
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *entry = slb_lookup(cpu, addr);

    /* Nothing to do if no entry matches or it is already invalid */
    if (entry == NULL || !(entry->esid & SLB_ESID_V)) {
        return;
    }

    entry->esid &= ~SLB_ESID_V;

    /* XXX: given the fact that segment size is 256 MB or 1TB,
     *      and we still don't have a tlb_flush_mask(env, n, mask)
     *      in QEMU, we just invalidate all TLBs
     */
    tlb_flush(CPU(cpu), 1);
}

/*
 * Install an SLB entry (slbmte backend, also used by KVM/sPAPR code).
 *
 * Validates the slot number, reserved ESID bits, the segment size
 * encoding and the page size (LLP) encoding before committing the
 * entry.  Returns 0 on success, -1 if any operand is invalid.
 */
int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    const struct ppc_one_seg_page_size *sps = NULL;
    int i;

    if (slot >= env->slb_nr) {
        return -1; /* Bad slot number */
    }
    /* Index only after the bounds check: forming &env->slb[slot] for an
     * out-of-range slot is out-of-bounds pointer arithmetic (UB). */
    slb = &env->slb[slot];
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    /* Match the VSID's LLP bits against the supported page sizes */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const struct ppc_one_seg_page_size *sps1 = &env->sps.sps[i];

        if (!sps1->page_shift) {
            break; /* end of the populated part of the table */
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    /* Cast slot for %d: passing a target_ulong where printf expects an
     * int is undefined behaviour in a varargs call. */
    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, (int)slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

192
/*
 * slbmfee backend: read the ESID half of the SLB entry selected by the
 * low 12 bits of @rb into *rt.  Returns 0 on success, -1 for an
 * out-of-range slot (*rt is left untouched in that case).
 */
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;

    /* Bounds-check before indexing env->slb[]: the previous code formed
     * the element pointer first, which is UB for an invalid slot. */
    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = env->slb[slot].esid;
    return 0;
}

207
/*
 * slbmfev backend: read the VSID half of the SLB entry selected by the
 * low 12 bits of @rb into *rt.  Returns 0 on success, -1 for an
 * out-of-range slot (*rt is left untouched in that case).
 */
static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;

    /* Bounds-check before indexing env->slb[]: the previous code formed
     * the element pointer first, which is UB for an invalid slot. */
    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = env->slb[slot].vsid;
    return 0;
}

/*
 * slbmte helper: RB's low 12 bits select the slot, its upper bits carry
 * the ESID; RS is the VSID word.  Invalid operands raise a program
 * interrupt.
 */
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) != 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}

/*
 * slbmfee helper: return the ESID half of the selected SLB entry.
 * An out-of-range slot raises a program interrupt; rt stays 0.
 */
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) != 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }

    return rt;
}

/*
 * slbmfev helper: return the VSID half of the selected SLB entry.
 * An out-of-range slot raises a program interrupt; rt stays 0.
 */
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) != 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }

    return rt;
}

/*
 * 64-bit hash table MMU handling
 */

260
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
261
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
262
{
263
    CPUPPCState *env = &cpu->env;
264 265 266 267 268 269 270 271
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);
272 273 274 275 276 277

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
278 279 280
            prot = PAGE_READ | PAGE_WRITE;
            break;

281 282
        case 0x3:
        case 0x6:
283
            prot = PAGE_READ;
284 285 286 287 288 289
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
290
            prot = 0;
291
            break;
292

293 294
        case 0x1:
        case 0x3:
295
            prot = PAGE_READ;
296
            break;
297

298
        case 0x2:
299
            prot = PAGE_READ | PAGE_WRITE;
300 301 302 303
            break;
        }
    }

304
    /* No execute if either noexec or guarded bits set */
305 306
    if (!(pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G)
        || (slb->vsid & SLB_VSID_N)) {
307
        prot |= PAGE_EXEC;
308 309
    }

310
    return prot;
311 312
}

313
/*
 * Apply Virtual Page Class Key Protection: mask read/write permission
 * according to the AMR field selected by the PTE's key bits.
 */
static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    int key, amrbits;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

    /* Each page class key selects a 2-bit field of the AMR */
    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* A store is permitted only while the AMR store-disable bit is 0 */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /* A load is permitted only while the AMR load-disable bit is 0 */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}

/*
 * Obtain a "token" through which the PTEG containing @pte_index can be
 * read (see ppc_hash64_load_hpte0/1).  Returns 0 on failure.  A
 * non-zero token must be released with ppc_hash64_stop_access().
 */
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    hwaddr pte_offset = pte_index * HASH_PTE_SIZE_64;

    if (kvmppc_kern_htab) {
        /*
         * HTAB is controlled by KVM.  Fetch the PTEG into a new buffer;
         * a zero return means the pteg read failed, even though we have
         * allocated htab via kvmppc_reset_htab.
         */
        return kvmppc_hash64_read_pteg(cpu, pte_index);
    }

    /*
     * HTAB is controlled by QEMU.  Just point to the internally
     * accessible PTEG.
     */
    if (cpu->env.external_htab) {
        return (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
    }
    if (cpu->env.htab_base) {
        return cpu->env.htab_base + pte_offset;
    }
    return 0;
}

/*
 * Release a token returned by ppc_hash64_start_access().  Only the
 * KVM-controlled HTAB case allocates anything that needs freeing.
 */
void ppc_hash64_stop_access(uint64_t token)
{
    if (!kvmppc_kern_htab) {
        return;
    }
    kvmppc_hash64_free_pteg(token);
}

387
/*
 * Scan one PTE group (PTEG) of the hash table for an entry matching
 * @ptem.  @hash selects the group; @secondary says whether we are
 * probing the secondary hash, and must agree with the entry's
 * HPTE64_V_SECONDARY flag.  On a match, the PTE is copied into *pte
 * and its byte offset within the hash table is returned; -1 means no
 * match, or that the PTEG could not be mapped at all.
 */
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;

    /* Index of the first PTE in the selected group */
    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
    token = ppc_hash64_start_access(cpu, pte_index);
    if (!token) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_load_hpte0(cpu, token, i);
        pte1 = ppc_hash64_load_hpte1(cpu, token, i);

        /* Entry must be valid, on the expected hash, and match the
         * abbreviated virtual page number */
        if ((pte0 & HPTE64_V_VALID)
            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
            && HPTE64_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            /* Release the PTEG mapping before returning the offset */
            ppc_hash64_stop_access(token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
        }
    }
    ppc_hash64_stop_access(token);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

422
/*
 * Translate @eaddr through the hash page table using SLB entry @slb:
 * compute the primary hash, search the primary PTEG, and fall back to
 * the secondary hash (~hash) PTEG.  On success *pte holds the matching
 * PTE and its byte offset in the hash table is returned; -1 if no PTE
 * matched.
 */
static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    CPUPPCState *env = &cpu->env;
    hwaddr pte_offset;
    hwaddr hash;
    uint64_t vsid, epnmask, epn, ptem;

    /* The SLB store path should prevent any bad page size encodings
     * getting in there, so: */
    assert(slb->sps);

    /* Mask that clears the in-page offset bits of the EA */
    epnmask = ~((1ULL << slb->sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> slb->sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> slb->sps->page_shift);
    }
    /* The match value the PTE's pte0 must compare against: the PTEM
     * bits of the VSID plus the abbreviated virtual page number */
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
            "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
            "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, vsid, ptem,  hash);
    pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);

    if (pte_offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU,
                "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                env->htab_mask, vsid, ptem, ~hash);

        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
    }

    return pte_offset;
}
477

478
/*
 * Fault handler for the 64-bit hash MMU.  Translates @eaddr for an
 * access of kind @rwx (0 = data load, 1 = data store, 2 = instruction
 * fetch — see the need_prot[] table) and, on success, installs the
 * mapping in the QEMU TLB via tlb_set_page().
 *
 * Returns 0 when a TLB entry was installed; returns 1 when the
 * appropriate interrupt state (cs->exception_index, error_code and,
 * for data accesses, DAR/DSISR) has been set up instead.
 */
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr pte_offset;
    ppc_hash_pte64_t pte;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    /* Permission each access kind requires, indexed by rwx */
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is off */
        /* In real mode the top 4 effective address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);

    if (!slb) {
        /* No SLB entry: segment interrupt (ISEG for fetch, DSEG for data) */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte);
    if (pte_offset == -1) {
        /* Page fault: no PTE found */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            /* DSISR: page-not-found, plus the store flag for writes */
            if (rwx == 1) {
                env->spr[SPR_DSISR] = 0x42000000;
            } else {
                env->spr[SPR_DSISR] = 0x40000000;
            }
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 5. Check access permissions */

    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            /* Distinguish PP-based from AMR-based denial in DSISR */
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    /* Only write the PTE back when R/C actually changed */
    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, pte_offset / HASH_PTE_SIZE_64,
                              pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, slb->sps->page_shift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
606

607
/*
 * Debugger (gdbstub/monitor) address translation: resolve @addr to a
 * physical page address without touching guest-visible state.
 * Returns -1 if the address does not translate.
 */
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_hash_pte64_t hpte;
    hwaddr offset;
    ppc_slb_t *entry;

    /* With translation off, the top 4 effective address bits are ignored */
    if (msr_dr == 0) {
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    entry = slb_lookup(cpu, addr);
    if (entry == NULL) {
        return -1;
    }

    offset = ppc_hash64_htab_lookup(cpu, entry, addr, &hpte);
    if (offset == -1) {
        return -1;
    }

    /* Combine the real page number with the in-page offset bits */
    return deposit64(hpte.pte1 & HPTE64_R_RPN, 0, entry->sps->page_shift, addr)
        & TARGET_PAGE_MASK;
}
632

633
/*
 * Write a hash PTE back, routing the store to wherever the HTAB lives:
 * the host kernel (KVM), a QEMU-side external HTAB buffer, or guest
 * physical memory at htab_base.
 */
void ppc_hash64_store_hpte(PowerPCCPU *cpu,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUPPCState *env = &cpu->env;
    target_ulong offset;

    if (kvmppc_kern_htab) {
        /* Kernel-managed HTAB: hand the update to KVM */
        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
        return;
    }

    offset = pte_index * HASH_PTE_SIZE_64;
    if (env->external_htab) {
        stq_p(env->external_htab + offset, pte0);
        stq_p(env->external_htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
    } else {
        stq_phys(CPU(cpu)->as, env->htab_base + offset, pte0);
        stq_phys(CPU(cpu)->as,
                 env->htab_base + offset + HASH_PTE_SIZE_64 / 2, pte1);
    }
}