/*
 *  PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 *  Copyright (c) 2003-2007 Jocelyn Mayer
 *  Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"

//#define DEBUG_MMU
//#define DEBUG_SLB

#ifdef DEBUG_MMU
#  define LOG_MMU_STATE(cpu) log_cpu_state((cpu), 0)
#else
#  define LOG_MMU_STATE(cpu) do { } while (0)
#endif

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log(__VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * Used to indicate whether we have allocated htab in the
 * host kernel
 */
bool kvmppc_kern_htab;
/*
 * SLB handling
 */

/* Find the valid SLB entry, if any, covering effective address @eaddr.
 * Returns NULL when no entry matches. */
static ppc_slb_t *slb_lookup(CPUPPCState *env, target_ulong eaddr)
{
    uint64_t match_256M, match_1T;
    int i;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    /* Candidate ESID values for each supported segment size, with the
     * valid bit set so a simple equality test covers both fields */
    match_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    match_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (i = 0; i < env->slb_nr; i++) {
        ppc_slb_t *slb = &env->slb[i];
        uint64_t seg_size = slb->vsid & SLB_VSID_B;

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                    PRIx64 "\n", __func__, i, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if ((seg_size == SLB_VSID_B_256M && slb->esid == match_256M) ||
            (seg_size == SLB_VSID_B_1T && slb->esid == match_1T)) {
            return slb;
        }
    }

    return NULL;
}

/* Dump all non-empty SLB entries to @f (monitor "info slb" helper). */
void dump_slb(FILE *f, fprintf_function cpu_fprintf, CPUPPCState *env)
{
    int n;

    cpu_synchronize_state(CPU(ppc_env_get_cpu(env)));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (n = 0; n < env->slb_nr; n++) {
        uint64_t e = env->slb[n].esid;
        uint64_t v = env->slb[n].vsid;

        /* Skip slots that have never been written */
        if (e == 0 && v == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    n, e, v);
    }
}

/* slbia: invalidate all SLB entries except slot 0, flushing the TLB if
 * anything was actually dropped. */
void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    bool flushed = false;
    int n;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < env->slb_nr; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (!(slb->esid & SLB_ESID_V)) {
            continue;
        }
        slb->esid &= ~SLB_ESID_V;
        /* XXX: given the fact that segment size is 256 MB or 1TB,
         *      and we still don't have a tlb_flush_mask(env, n, mask)
         *      in QEMU, we just invalidate all TLBs
         */
        flushed = true;
    }
    if (flushed) {
        tlb_flush(CPU(cpu), 1);
    }
}

/* slbie: invalidate the single SLB entry covering @addr, if present. */
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb = slb_lookup(env, addr);

    if (!slb || !(slb->esid & SLB_ESID_V)) {
        return;
    }

    slb->esid &= ~SLB_ESID_V;

    /* XXX: given the fact that segment size is 256 MB or 1TB,
     *      and we still don't have a tlb_flush_mask(env, n, mask)
     *      in QEMU, we just invalidate all TLBs
     */
    tlb_flush(CPU(cpu), 1);
}

/* Write an SLB entry from the slbmte operand images: @rb carries the
 * ESID, flags and slot number, @rs the VSID and segment attributes.
 *
 * Returns 0 on success, -1 if the operands are invalid (reserved bits
 * set, slot out of range, or an unsupported segment size).
 */
int ppc_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    int slot = rb & 0xfff;          /* low 12 bits of RB select the slot */
    ppc_slb_t *slb;

    /* slb_nr is a power of two, so this single mask test rejects both
     * reserved ESID bits and slot numbers >= slb_nr */
    if (rb & (0x1000 - env->slb_nr)) {
        return -1; /* Reserved bits set or slot too high */
    }
    if (rs & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((rs & SLB_VSID_B) && !(env->mmu_model & POWERPC_MMU_1TSEG)) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    /* Only index the array once the slot is known to be in range:
     * forming a pointer past the end of env->slb[] is undefined
     * behaviour even if it is never dereferenced. */
    slb = &env->slb[slot];

    /* Mask out the slot number as we store the entry */
    slb->esid = rb & (SLB_ESID_ESID | SLB_ESID_V);
    slb->vsid = rs;

    LOG_SLB("%s: %d " TARGET_FMT_lx " - " TARGET_FMT_lx " => %016" PRIx64
            " %016" PRIx64 "\n", __func__, slot, rb, rs,
            slb->esid, slb->vsid);

    return 0;
}

/* slbmfee: read back the ESID word of the SLB entry selected by @rb.
 *
 * Returns 0 and stores the ESID in *@rt on success; returns -1 without
 * touching *@rt when the slot number is out of range.
 */
static int ppc_load_slb_esid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;      /* low 12 bits of RB select the slot */

    /* Validate the slot before indexing env->slb[]: slot can be as
     * large as 0xfff, and forming an out-of-bounds pointer is
     * undefined behaviour even if it is never dereferenced. */
    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = env->slb[slot].esid;
    return 0;
}

/* slbmfev: read back the VSID word of the SLB entry selected by @rb.
 *
 * Returns 0 and stores the VSID in *@rt on success; returns -1 without
 * touching *@rt when the slot number is out of range.
 */
static int ppc_load_slb_vsid(CPUPPCState *env, target_ulong rb,
                             target_ulong *rt)
{
    int slot = rb & 0xfff;      /* low 12 bits of RB select the slot */

    /* Validate the slot before indexing env->slb[]: slot can be as
     * large as 0xfff, and forming an out-of-bounds pointer is
     * undefined behaviour even if it is never dereferenced. */
    if (slot >= env->slb_nr) {
        return -1;
    }

    *rt = env->slb[slot].vsid;
    return 0;
}

/* slbmte helper: store an SLB entry, raising a program interrupt on
 * invalid operands. */
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    if (ppc_store_slb(env, rb, rs) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
}

/* slbmfee helper: read an SLB entry's ESID word, raising a program
 * interrupt if the slot is invalid.  Returns 0 on the error path (the
 * exception unwinds before the value is used). */
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;

    if (ppc_load_slb_esid(env, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

/* slbmfev helper: read an SLB entry's VSID word, raising a program
 * interrupt if the slot is invalid.  Returns 0 on the error path (the
 * exception unwinds before the value is used). */
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(env, rb, &rt) < 0) {
        helper_raise_exception_err(env, POWERPC_EXCP_PROGRAM,
                                   POWERPC_EXCP_INVAL);
    }
    return rt;
}

/*
 * 64-bit hash table MMU handling
 */

/* Compute the PAGE_READ/PAGE_WRITE/PAGE_EXEC protection implied by a
 * hash PTE's pp/pp0 bits combined with the key (Ks/Kp) bits of the SLB
 * entry through which the page was mapped. */
static int ppc_hash64_pte_prot(CPUPPCState *env,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    /* Select Kp in problem state, Ks otherwise */
    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            prot = 0;
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE;
            break;
        }
    }

    /* No execute if any of the no-execute (N), guarded (G) or segment
     * class no-execute bits is set.  The previous '||' form granted
     * PAGE_EXEC whenever N was clear, or even when G / segment-N were
     * *set* with N also set, contradicting that intent. */
    if (!(pte.pte1 & HPTE64_R_N) && !(pte.pte1 & HPTE64_R_G)
        && !(slb->vsid & SLB_VSID_N)) {
        prot |= PAGE_EXEC;
    }

    return prot;
}

/* Apply Virtual Page Class Key (AMR) protection to a PTE: start from
 * full access and strip read/write rights according to the AMR bit
 * pair selected by the PTE's key field. */
static int ppc_hash64_amr_prot(CPUPPCState *env, ppc_hash_pte64_t pte)
{
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
    int key, amrbits;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!(env->mmu_model & POWERPC_MMU_AMR)) {
        return prot;
    }

    /* Each key selects a 2-bit (store, load) field in the AMR */
    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    return prot;
}

/*
 * Obtain a token through which the PTEG containing @pte_index can be
 * read with ppc_hash64_load_hpte0/1().  Must be paired with a call to
 * ppc_hash64_stop_access() when the caller is done with the PTEG.
 *
 * Returns 0 when the PTEG cannot be accessed.
 */
uint64_t ppc_hash64_start_access(PowerPCCPU *cpu, target_ulong pte_index)
{
    uint64_t token = 0;
    hwaddr pte_offset;

    pte_offset = pte_index * HASH_PTE_SIZE_64;
    if (kvmppc_kern_htab) {
        /*
         * HTAB is controlled by KVM. Fetch the PTEG into a new buffer.
         */
        token = kvmppc_hash64_read_pteg(cpu, pte_index);
        if (token) {
            return token;
        }
        /*
         * pteg read failed, even though we have allocated htab via
         * kvmppc_reset_htab.
         */
        return 0;
    }
    /*
     * HTAB is controlled by QEMU. Just point to the internally
     * accessible PTEG.
     */
    if (cpu->env.external_htab) {
        /* Hash table lives in QEMU (host) memory */
        token = (uint64_t)(uintptr_t) cpu->env.external_htab + pte_offset;
    } else if (cpu->env.htab_base) {
        /* Hash table lives in guest physical memory */
        token = cpu->env.htab_base + pte_offset;
    }
    return token;
}

/* Release a token returned by ppc_hash64_start_access(). */
void ppc_hash64_stop_access(uint64_t token)
{
    /* Only KVM-read PTEGs are backed by a buffer that needs freeing */
    if (!kvmppc_kern_htab) {
        return;
    }
    kvmppc_hash64_free_pteg(token);
}

static hwaddr ppc_hash64_pteg_search(CPUPPCState *env, hwaddr hash,
358 359 360 361
                                     bool secondary, target_ulong ptem,
                                     ppc_hash_pte64_t *pte)
{
    int i;
362 363 364
    uint64_t token;
    target_ulong pte0, pte1;
    target_ulong pte_index;
365

366 367 368 369 370
    pte_index = (hash & env->htab_mask) * HPTES_PER_GROUP;
    token = ppc_hash64_start_access(ppc_env_get_cpu(env), pte_index);
    if (!token) {
        return -1;
    }
371
    for (i = 0; i < HPTES_PER_GROUP; i++) {
372 373
        pte0 = ppc_hash64_load_hpte0(env, token, i);
        pte1 = ppc_hash64_load_hpte1(env, token, i);
374 375 376 377 378 379

        if ((pte0 & HPTE64_V_VALID)
            && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
            && HPTE64_V_COMPARE(pte0, ptem)) {
            pte->pte0 = pte0;
            pte->pte1 = pte1;
380 381
            ppc_hash64_stop_access(token);
            return (pte_index + i) * HASH_PTE_SIZE_64;
382 383
        }
    }
384 385 386 387
    ppc_hash64_stop_access(token);
    /*
     * We didn't find a valid entry.
     */
388 389 390
    return -1;
}

/* Page size according to the SLB, which we use to generate the
 * EPN for hash table lookup..  When we implement more recent MMU
 * extensions this might be different from the actual page size
 * encoded in the PTE */
static uint64_t ppc_hash64_page_shift(ppc_slb_t *slb)
{
    switch (slb->vsid & SLB_VSID_LLP_MASK) {
    case SLB_VSID_4K:
        return TARGET_PAGE_BITS;
    case SLB_VSID_64K:
        return TARGET_PAGE_BITS_64K;
    default:
        /* Anything else is treated as 16M */
        return TARGET_PAGE_BITS_16M;
    }
}

/* Look @eaddr up in the hash page table via the segment described by
 * @slb, trying the primary and then the secondary hash.  On success,
 * fill *@pte and return the PTE's byte offset; return -1 on miss. */
static hwaddr ppc_hash64_htab_lookup(CPUPPCState *env,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte)
{
    uint64_t vsid, page_shift, page_mask, epn, ptem;
    hwaddr hash, offset;

    page_shift = ppc_hash64_page_shift(slb);
    page_mask = ~((1ULL << page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & page_mask;
        hash = vsid ^ (vsid << 25) ^ (epn >> page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & page_mask;
        hash = vsid ^ (epn >> page_shift);
    }
    /* Abbreviated VPN used to match candidate PTEs */
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
            "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
            " hash " TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
            "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
            " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
            " hash=" TARGET_FMT_plx "\n",
            env->htab_base, env->htab_mask, vsid, ptem,  hash);
    offset = ppc_hash64_pteg_search(env, hash, 0, ptem, pte);

    if (offset == -1) {
        /* Secondary PTEG lookup */
        qemu_log_mask(CPU_LOG_MMU,
                "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                " hash=" TARGET_FMT_plx "\n", env->htab_base,
                env->htab_mask, vsid, ptem, ~hash);
        offset = ppc_hash64_pteg_search(env, ~hash, 1, ptem, pte);
    }

    return offset;
}
/* Combine the real page number from @pte with the in-page offset of
 * @eaddr to produce the translated real address. */
static hwaddr ppc_hash64_pte_raddr(ppc_slb_t *slb, ppc_hash_pte64_t pte,
                                   target_ulong eaddr)
{
    /*
     * We support 4K, 64K and 16M now
     */
    int page_bits = ppc_hash64_page_shift(slb);
    hwaddr page_mask = (1ULL << page_bits) - 1;
    hwaddr rpn = pte.pte1 & HPTE64_R_RPN;

    return (rpn & ~page_mask) | (eaddr & page_mask);
}

/* Resolve a TLB miss for the 64-bit hash MMU.
 *
 * @rwx is 0 for a load, 1 for a store, 2 for an instruction fetch.
 * On success the translation is installed with tlb_set_page() and 0 is
 * returned; on failure the appropriate exception state (ISEG/DSEG,
 * ISI/DSI with DAR/DSISR) is set up and 1 is returned.
 */
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, target_ulong eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    static const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    ppc_slb_t *slb;
    ppc_hash_pte64_t pte;
    hwaddr pte_offset;
    hwaddr raddr;
    int pp_prot, amr_prot, prot;
    uint64_t new_pte1;

    assert(rwx == 0 || rwx == 1 || rwx == 2);

    /* 1. Handle real mode accesses */
    if ((rwx == 2) ? (msr_ir == 0) : (msr_dr == 0)) {
        /* Translation is off; in real mode the top 4 effective
         * address bits are ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(env, eaddr);
    if (!slb) {
        /* No segment covers this address: segment interrupt */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = 0x10000000;
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    pte_offset = ppc_hash64_htab_lookup(env, slb, eaddr, &pte);
    if (pte_offset == -1) {
        /* Page fault: no PTE for this address */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x40000000;
        } else {
            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            env->spr[SPR_DSISR] = (rwx == 1) ? 0x42000000 : 0x40000000;
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                "found PTE at offset %08" HWADDR_PRIx "\n", pte_offset);

    /* 5. Check access permissions */
    pp_prot = ppc_hash64_pte_prot(env, slb, pte);
    amr_prot = ppc_hash64_amr_prot(env, pte);
    prot = pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISI;
            env->error_code = 0x08000000;
        } else {
            target_ulong dsisr = 0;

            cs->exception_index = POWERPC_EXCP_DSI;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
            if (need_prot[rwx] & ~pp_prot) {
                /* PP/key protection violation */
                dsisr |= 0x08000000;
            }
            if (rwx == 1) {
                /* The failing access was a store */
                dsisr |= 0x02000000;
            }
            if (need_prot[rwx] & ~amr_prot) {
                /* AMR (virtual page class key) violation */
                dsisr |= 0x00200000;
            }
            env->spr[SPR_DSISR] = dsisr;
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */
    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }
    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(env, pte_offset / HASH_PTE_SIZE_64,
                              pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */
    raddr = ppc_hash64_pte_raddr(slb, pte, eaddr);
    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
603

604 605
/* Debug (gdbstub/monitor) translation of @addr without touching any
 * exception state or R/C bits.  Returns -1 when untranslatable. */
hwaddr ppc_hash64_get_phys_page_debug(CPUPPCState *env, target_ulong addr)
{
    ppc_slb_t *slb;
    ppc_hash_pte64_t pte;
    hwaddr offset;

    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        return addr & 0x0FFFFFFFFFFFFFFFULL;
    }

    slb = slb_lookup(env, addr);
    if (!slb) {
        return -1;
    }

    offset = ppc_hash64_htab_lookup(env, slb, addr, &pte);
    if (offset == -1) {
        return -1;
    }

    return ppc_hash64_pte_raddr(slb, pte, addr) & TARGET_PAGE_MASK;
}

/* Write both words of the hash PTE at @pte_index back to wherever the
 * hash table lives (kernel, QEMU memory, or guest physical memory). */
void ppc_hash64_store_hpte(CPUPPCState *env,
                           target_ulong pte_index,
                           target_ulong pte0, target_ulong pte1)
{
    CPUState *cs = CPU(ppc_env_get_cpu(env));
    target_ulong offset;

    if (kvmppc_kern_htab) {
        /* HTAB is owned by the kernel; let KVM perform the update */
        kvmppc_hash64_write_pte(env, pte_index, pte0, pte1);
        return;
    }

    offset = pte_index * HASH_PTE_SIZE_64;
    if (env->external_htab) {
        /* HTAB in ordinary QEMU (host) memory */
        stq_p(env->external_htab + offset, pte0);
        stq_p(env->external_htab + offset + HASH_PTE_SIZE_64/2, pte1);
    } else {
        /* HTAB in guest physical memory */
        stq_phys(cs->as, env->htab_base + offset, pte0);
        stq_phys(cs->as, env->htab_base + offset + HASH_PTE_SIZE_64/2, pte1);
    }
}