/*
 * Copyright (c) 2006-2018, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 */

#include <board.h>
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include "mm_aspace.h"
#include "mm_page.h"
#include "mmu.h"
#include "tlb.h"

#ifdef RT_USING_SMART
#include "ioremap.h"
#include <lwp_mm.h>
#endif

#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>

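/*
 * Translation layout used below: 4 KiB granule, 4-level walk. The level-0
 * index starts at VA bit 39 (MMU_ADDRESS_BITS) and every level consumes
 * MMU_LEVEL_SHIFT (9) bits, i.e. 512 entries per table.
 */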
#define MMU_LEVEL_MASK   0x1ffUL
#define MMU_LEVEL_SHIFT  9
#define MMU_ADDRESS_BITS 39
#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
#define MMU_ATTRIB_MASK  0xfff0000000000ffcUL

#define MMU_TYPE_MASK  3UL
#define MMU_TYPE_USED  1UL
#define MMU_TYPE_BLOCK 1UL
#define MMU_TYPE_TABLE 3UL
#define MMU_TYPE_PAGE  3UL

#define MMU_TBL_BLOCK_2M_LEVEL 2
#define MMU_TBL_PAGE_4k_LEVEL  3
#define MMU_TBL_LEVEL_NR       4

volatile unsigned long MMUTable[512] __attribute__((aligned(4 * 1024)));

struct mmu_level_info
{
    unsigned long *pos;
    void *page;
};

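/**
 * @brief Remove one 4 KiB mapping from the translation table.
 *
 * Walks all table levels for v_addr, clears the level-3 entry, then drops
 * one reference on every intermediate table page that was traversed; a
 * table page whose last reference is released is unlinked from its parent
 * entry and freed.
 */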
static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
    int level;
    unsigned long va = (unsigned long)v_addr;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    struct mmu_level_info level_info[4];
    int ref;
    int level_shift = MMU_ADDRESS_BITS;
    unsigned long *pos;

    rt_memset(level_info, 0, sizeof level_info);
    for (level = 0; level < MMU_TBL_LEVEL_NR; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        page = cur_lv_tbl[off];
        if (!(page & MMU_TYPE_USED))
        {
            break;
        }
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            break;
        }
        level_info[level].pos = cur_lv_tbl + off;
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_info[level].page = cur_lv_tbl;
        level_shift -= MMU_LEVEL_SHIFT;
    }

    level = MMU_TBL_PAGE_4k_LEVEL;
    pos = level_info[level].pos;
    if (pos)
    {
        *pos = (unsigned long)RT_NULL;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
    }
    level--;

    while (level >= 0)
    {
        pos = level_info[level].pos;
        if (pos)
        {
            void *cur_page = level_info[level].page;
            ref = rt_page_ref_get(cur_page, 0);
            if (ref == 1)
            {
                *pos = (unsigned long)RT_NULL;
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
            }
            rt_pages_free(cur_page, 0);
        }
        level--;
    }

    return;
}

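/**
 * @brief Install one 4 KiB mapping (vaddr -> paddr) into lv0_tbl.
 *
 * Missing intermediate tables are allocated from the page allocator and
 * reference counted, so _kenrel_unmap_4K() can release them later. Returns
 * 0 on success or an MMU_MAP_ERROR_* code; on failure any partially built
 * mapping is undone.
 */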
static int _kenrel_map_4K(unsigned long *lv0_tbl, void *vaddr, void *paddr,
                          unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    intptr_t va = (intptr_t)vaddr;
    intptr_t pa = (intptr_t)paddr;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc(0);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((void *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page,
                                 ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off,
                                 sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level page */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_PAGE); /* page */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa; /* page */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;
err:
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}

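/**
 * @brief Map a physically contiguous region into an address space using
 * 4 KiB pages.
 *
 * @param aspace address space to operate on
 * @param v_addr page-aligned virtual start address
 * @param p_addr page-aligned physical start address
 * @param size   map size in bytes, a multiple of ARCH_PAGE_SIZE
 * @param attr   attribute bits for the new page table entries
 *
 * @return the mapped virtual address on success, NULL on failure (any pages
 *         mapped by this call are unmapped again before returning)
 */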
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                    size_t attr)
{
    int ret = -1;

    void *unmap_va = v_addr;
    size_t npages = size >> ARCH_PAGE_SHIFT;

    // TODO: try mapping with huge pages here
    while (npages--)
    {
        ret = _kenrel_map_4K(aspace->page_table, v_addr, p_addr, attr);
        if (ret != 0)
        {
            /* error, undo map */
            while (unmap_va != v_addr)
            {
                _kenrel_unmap_4K(aspace->page_table, (void *)unmap_va);
                unmap_va += ARCH_PAGE_SIZE;
            }
            break;
        }
        v_addr += ARCH_PAGE_SIZE;
        p_addr += ARCH_PAGE_SIZE;
    }

    if (ret == 0)
    {
        return unmap_va;
    }

    return NULL;
}

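/**
 * @brief Unmap a page-aligned region from an address space, one 4 KiB page
 * at a time.
 *
 * @param aspace address space to operate on
 * @param v_addr page-aligned virtual start address
 * @param size   unmap size in bytes, a multiple of ARCH_PAGE_SIZE
 */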
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
{
    // caller guarantees that v_addr and size are page aligned
    size_t npages = size >> ARCH_PAGE_SHIFT;

    if (!aspace->page_table)
    {
        return;
    }

    while (npages--)
    {
        _kenrel_unmap_4K(aspace->page_table, v_addr);
        v_addr += ARCH_PAGE_SIZE;
    }
}

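/**
 * @brief Switch the current CPU to a (user) address space.
 *
 * Loads the physical address of the page table into TTBR0_EL1, clears
 * TCR_EL1.EPD0 (bit 7) so that TTBR0 translation walks are enabled, and
 * invalidates the local TLB. Nothing is done for rt_kernel_space, whose
 * table is installed via rt_hw_mmu_ktbl_set().
 */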
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    if (aspace != &rt_kernel_space)
    {
        void *pgtbl = aspace->page_table;
        pgtbl = _rt_kmem_v2p(pgtbl);
        uintptr_t tcr;

        __asm__ volatile("msr ttbr0_el1, %0" ::"r"(pgtbl) : "memory");

        __asm__ volatile("mrs %0, tcr_el1" : "=r"(tcr));
        tcr &= ~(1ul << 7);
        __asm__ volatile("msr tcr_el1, %0\n"
                         "isb" ::"r"(tcr)
                         : "memory");

        rt_hw_tlb_invalidate_all_local();
    }
}

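/**
 * @brief Install the kernel translation table and flush TLB and icache.
 *
 * With RT_USING_SMART the kernel runs in the high half, so the table's
 * physical address goes into TTBR1_EL1; otherwise TTBR0_EL1 is used.
 */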
void rt_hw_mmu_ktbl_set(unsigned long tbl)
{
#ifdef RT_USING_SMART
    tbl += PV_OFFSET;
    __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#else
    __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#endif
    __asm__ volatile("tlbi vmalle1\n dsb sy\nisb" ::: "memory");
    __asm__ volatile("ic ialluis\n dsb sy\nisb" ::: "memory");
}

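/*
 * Illustrative BSP-side usage of rt_hw_mmu_setup() (a sketch only: the real
 * platform_mem_desc[] table, and the field order of struct mem_desc, live in
 * the BSP's board code; the addresses below are placeholders):
 *
 *   static struct mem_desc platform_mem_desc[] = {
 *       {KERNEL_VADDR_START, KERNEL_VADDR_START + 0x0fffffff,
 *        (rt_size_t)ARCH_MAP_FAILED, NORMAL_MEM},
 *   };
 *
 *   rt_hw_mmu_setup(&rt_kernel_space, platform_mem_desc,
 *                   sizeof(platform_mem_desc) / sizeof(platform_mem_desc[0]));
 */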
/**
 * @brief Set up the page table for kernel space. This is a fixed map:
 * the mappings cannot be changed after initialization.
 *
 * Each memory region in struct mem_desc must be page aligned,
 * otherwise the mapping fails and no error is reported.
 *
 * @param aspace   kernel address space
 * @param mdesc    array of memory region descriptors
 * @param desc_nr  number of descriptors in mdesc
 */
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
{
    void *err;
    for (size_t i = 0; i < desc_nr; i++)
    {
        size_t attr;
        switch (mdesc->attr)
        {
        case NORMAL_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case NORMAL_NOCACHE_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case DEVICE_MEM:
            attr = MMU_MAP_K_DEVICE;
            break;
        default:
            attr = MMU_MAP_K_DEVICE;
        }

        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                    .limit_start = aspace->start,
                                    .limit_range_size = aspace->size,
                                    .map_size = mdesc->vaddr_end -
                                                mdesc->vaddr_start + 1,
                                    .prefer = (void *)mdesc->vaddr_start};

        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;

        rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                 mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
        mdesc++;
    }

    rt_hw_mmu_ktbl_set((unsigned long)rt_kernel_space.page_table);
    rt_page_cleanup();
}


#ifdef RT_USING_SMART
static inline void _init_region(void *vaddr, size_t size)
{
    rt_ioremap_start = vaddr;
    rt_ioremap_size = size;
    rt_mpr_start = rt_ioremap_start - rt_mpr_size;
}
#else

#define RTOS_VEND ((void *)0xfffffffff000UL)
static inline void _init_region(void *vaddr, size_t size)
{
    rt_mpr_start = RTOS_VEND - rt_mpr_size;
}
#endif

/**
 * This function will initialize the address space structure.
 *
 * @param aspace     address space to initialize
 * @param v_address  start of the managed virtual address range
 * @param size       size of the managed virtual address range
 * @param vtable     level-0 MMU translation table
 * @param pv_off     pv offset in kernel space
 *
 * @return 0 on success and -1 on failure
 */
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size,
                       size_t *vtable, size_t pv_off)
{
    size_t va_s, va_e;

    if (!aspace || !vtable)
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;

    if (va_s == 0)
    {
        return -1;
    }

#ifdef RT_USING_SMART
    rt_aspace_init(aspace, (void *)KERNEL_VADDR_START, 0 - KERNEL_VADDR_START,
                   vtable);
#else
    rt_aspace_init(aspace, (void *)0x1000, RTOS_VEND - (void *)0x1000, vtable);
#endif

    _init_region(v_address, size);

    return 0;
}

/************ setting up EL1 MMU registers ***********
  MAIR_EL1
  index 0 : normal memory, outer write-back, write/read allocate
  index 1 : normal memory, non-cacheable
  index 2 : device nGnRnE
 *****************************************************/
void mmu_tcr_init(void)
{
    unsigned long val64;

    val64 = 0x00447fUL;
    __asm__ volatile("msr MAIR_EL1, %0\n dsb sy\n" ::"r"(val64));

    /* TCR_EL1 */
    val64 = (16UL << 0)      /* t0sz 48bit */
            | (0x0UL << 6)   /* reserved */
            | (0x0UL << 7)   /* epd0 */
            | (0x3UL << 8)   /* t0 inner wb cacheable */
            | (0x3UL << 10)  /* t0 outer wb cacheable */
            | (0x2UL << 12)  /* t0 outer shareable */
            | (0x0UL << 14)  /* t0 4K */
            | (16UL << 16)   /* t1sz 48bit */
            | (0x0UL << 22)  /* define asid use ttbr0.asid */
            | (0x0UL << 23)  /* epd1 */
            | (0x3UL << 24)  /* t1 inner wb cacheable */
            | (0x3UL << 26)  /* t1 outer wb cacheable */
            | (0x2UL << 28)  /* t1 outer shareable */
            | (0x2UL << 30)  /* t1 4k */
            | (0x1UL << 32)  /* 001b 64GB PA */
            | (0x0UL << 35)  /* reserved */
            | (0x1UL << 36)  /* as: 0:8bit 1:16bit */
            | (0x0UL << 37)  /* tbi0 */
            | (0x0UL << 38); /* tbi1 */
    __asm__ volatile("msr TCR_EL1, %0\n" ::"r"(val64));
}

struct page_table
{
    unsigned long page[512];
};

static struct page_table *__init_page_array;
static unsigned long __page_off = 0UL;
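/*
 * Minimal boot-time page allocator used only by the early 2 MiB mapping
 * code below: pages are carved from the 2 MiB section that contains the
 * boot stack (slots 0 and 1 are reserved for the TTBR0/TTBR1 tables) and
 * are never freed.
 */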
unsigned long get_free_page(void)
{
    if (!__init_page_array)
    {
        unsigned long temp_page_start;
        asm volatile("mov %0, sp" : "=r"(temp_page_start));
        __init_page_array =
            (struct page_table *)(temp_page_start & ~(ARCH_SECTION_MASK));
        __page_off = 2; /* 0, 1 for ttbr0, ttbr1 */
    }
    __page_off++;
    return (unsigned long)(__init_page_array[__page_off - 1].page);
}

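/*
 * Early counterpart of _kenrel_map_4K(): installs a single 2 MiB block
 * mapping (va -> pa) into lv0_tbl, allocating intermediate tables with
 * get_free_page(). It runs before the MMU and the page allocator are up,
 * so no cache maintenance or reference counting is performed.
 */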
static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va,
                               unsigned long pa, unsigned long attr)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = get_free_page();
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            return MMU_MAP_ERROR_CONFLICT;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    return 0;
}

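/*
 * Map 'count' consecutive 2 MiB sections starting at va -> pa. Both
 * addresses must be section aligned; returns 0 on success or the first
 * error reported by _map_single_page_2M().
 */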
static int _init_map_2M(unsigned long *lv0_tbl, unsigned long va,
                        unsigned long pa, unsigned long count,
                        unsigned long attr)
{
    unsigned long i;
    int ret;

    if (va & ARCH_SECTION_MASK)
    {
        return -1;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return -1;
    }
    for (i = 0; i < count; i++)
    {
        ret = _map_single_page_2M(lv0_tbl, va, pa, attr);
        va += ARCH_SECTION_SIZE;
        pa += ARCH_SECTION_SIZE;
        if (ret != 0)
        {
            return ret;
        }
    }
    return 0;
}

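/*
 * Walk the page table of 'aspace' for 'vaddr' and return a pointer to the
 * descriptor that maps it (a block or a page entry), or NULL if the address
 * is not mapped. *plvl_shf receives the VA shift of the matching level, so
 * the descriptor covers (1UL << *plvl_shf) bytes.
 */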
static unsigned long *_query(rt_aspace_t aspace, void *vaddr, int *plvl_shf)
{
    int level;
    unsigned long va = (unsigned long)vaddr;
    unsigned long *cur_lv_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    cur_lv_tbl = aspace->page_table;
    RT_ASSERT(cur_lv_tbl);

    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;

        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            return (void *)0;
        }

        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            *plvl_shf = level_shift;
            return &cur_lv_tbl[off];
        }

        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level MMU_TBL_PAGE_4k_LEVEL */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    page = cur_lv_tbl[off];

    if (!(page & MMU_TYPE_USED))
    {
        return (void *)0;
    }
    *plvl_shf = level_shift;
    return &cur_lv_tbl[off];
}

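/**
 * @brief Translate a virtual address to a physical address by walking the
 * page table of the given address space.
 *
 * @return the physical address, or ARCH_MAP_FAILED if v_addr is not mapped
 */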
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
{
    int level_shift;
    unsigned long paddr;
    unsigned long *pte = _query(aspace, v_addr, &level_shift);

    if (pte)
    {
        paddr = *pte & MMU_ADDRESS_MASK;
        paddr |= (uintptr_t)v_addr & ((1ul << level_shift) - 1);
    }
    else
    {
        paddr = (unsigned long)ARCH_MAP_FAILED;
    }
    return (void *)paddr;
}

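/*
 * Attribute helpers for rt_hw_mmu_control(): bits [4:2] of a descriptor
 * hold the MAIR attribute index. _noncache() rewrites a NORMAL_MEM entry as
 * NORMAL_NOCACHE_MEM, _cache() switches it back; any other attribute index
 * is rejected.
 */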
static int _noncache(uintptr_t *pte)
{
    int err = 0;
    const uintptr_t idx_shift = 2;
    const uintptr_t idx_mask = 0x7 << idx_shift;
    uintptr_t entry = *pte;
    if ((entry & idx_mask) == (NORMAL_MEM << idx_shift))
    {
        *pte = (entry & ~idx_mask) | (NORMAL_NOCACHE_MEM << idx_shift);
    }
    else
    {
        // only normal memory is supported for switching to noncache
        err = -RT_ENOSYS;
    }
    return err;
}

static int _cache(uintptr_t *pte)
{
    int err = 0;
    const uintptr_t idx_shift = 2;
    const uintptr_t idx_mask = 0x7 << idx_shift;
    uintptr_t entry = *pte;
    if ((entry & idx_mask) == (NORMAL_NOCACHE_MEM << idx_shift))
    {
        *pte = (entry & ~idx_mask) | (NORMAL_MEM << idx_shift);
    }
    else
    {
        // only noncached normal memory is supported for switching back to cache
        err = -RT_ENOSYS;
    }
    return err;
}

static int (*control_handler[MMU_CNTL_DUMMY_END])(uintptr_t *pte) = {
    [MMU_CNTL_CACHE] = _cache,
    [MMU_CNTL_NONCACHE] = _noncache,
};

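/**
 * @brief Change the attributes of an already mapped region, e.g. toggle it
 * between cached and uncached with MMU_CNTL_CACHE / MMU_CNTL_NONCACHE.
 *
 * The region [vaddr, vaddr + size) is walked mapping by mapping and every
 * existing descriptor is rewritten by the handler for 'cmd'. Note that the
 * TLB is not invalidated here.
 *
 * Illustrative use (buf and buf_size are placeholders for a region that was
 * mapped earlier as normal memory):
 *
 *   rt_hw_mmu_control(&rt_kernel_space, buf, buf_size, MMU_CNTL_NONCACHE);
 *   rt_hw_tlb_invalidate_all_local();
 */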
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
{
    int level_shift;
    int err = -RT_EINVAL;
    void *vend = vaddr + size;

    int (*handler)(uintptr_t *pte);
    if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
    {
        handler = control_handler[cmd];

        while (vaddr < vend)
        {
            uintptr_t *pte = _query(aspace, vaddr, &level_shift);
            void *range_end = vaddr + (1ul << level_shift);
            RT_ASSERT(range_end <= vend);

            if (pte)
            {
                err = handler(pte);
                RT_ASSERT(err == RT_EOK);
            }
            vaddr = range_end;
        }
    }
    else
    {
        err = -RT_ENOSYS;
    }

    return err;
}

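/*
 * Earliest MMU setup, called while the MMU is still off: records the PV
 * offset, then builds two temporary 2 MiB block maps covering 'size' bytes
 * of the kernel image. tbl1 maps KERNEL_VADDR_START to its physical
 * address, while tbl0 identity-maps the physical range so execution can
 * survive the moment the MMU is switched on. On any mapping error the
 * function spins forever.
 */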
void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
                           unsigned long size, unsigned long pv_off)
{
    int ret;

    /* setup pv off */
    rt_kmem_pvoff_set(pv_off);

    unsigned long va = KERNEL_VADDR_START;
    unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM);

    /* clean the first two pages */
    rt_memset((char *)tbl0, 0, ARCH_PAGE_SIZE);
    rt_memset((char *)tbl1, 0, ARCH_PAGE_SIZE);

    ret = _init_map_2M(tbl1, va, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
    ret = _init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
}