/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 * 2021-11-28     GuEe-GUI     first version
 * 2022-12-10     WangXiaoyao  porting to MM
 */
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
B
bigmagic 已提交
17

18 19
#include "mm_aspace.h"
#include "mm_page.h"
S
Shell 已提交
20
#include "mmu.h"
21
#include "tlb.h"
S
Shell 已提交
22 23

#ifdef RT_USING_SMART
24
#include "ioremap.h"
S
Shell 已提交
25 26 27
#include <lwp_mm.h>
#endif

28 29 30
#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>
S
Shell 已提交
31

32 33 34 35 36 37 38 39 40 41 42
/* Each translation level indexes 512 (2^9) entries with a 9-bit field. */
#define MMU_LEVEL_MASK   0x1ffUL
#define MMU_LEVEL_SHIFT  9
/* Top bit position of the virtual-address field consumed by the level-0 walk. */
#define MMU_ADDRESS_BITS 39
/* Output-address field of a descriptor (bits [47:12]). */
#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
/* Upper and lower attribute fields of a descriptor. */
#define MMU_ATTRIB_MASK  0xfff0000000000ffcUL

/* Descriptor type encodings (low two bits of an entry). */
#define MMU_TYPE_MASK  3UL
#define MMU_TYPE_USED  1UL /* valid bit — set for any in-use entry */
#define MMU_TYPE_BLOCK 1UL /* block (large page) mapping */
#define MMU_TYPE_TABLE 3UL /* pointer to the next-level table */
#define MMU_TYPE_PAGE  3UL /* 4K page entry at the last level */

/* Walk depth: level 2 holds 2M blocks, level 3 holds 4K pages. */
#define MMU_TBL_BLOCK_2M_LEVEL 2
#define MMU_TBL_PAGE_4k_LEVEL  3
#define MMU_TBL_LEVEL_NR       4

/* Kernel top-level translation table; must be page (4K) aligned. */
volatile unsigned long MMUTable[512] __attribute__((aligned(4 * 1024)));

/* Bookkeeping for one visited level during an unmap walk:
 * `pos` points at the table entry, `page` at the next-level table (VA). */
struct mmu_level_info
{
    unsigned long *pos;
    void *page;
};
55

S
Shell 已提交
56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81
/* Remove the 4K mapping of `v_addr` from the table rooted at `lv0_tbl`,
 * releasing intermediate tables whose reference count drops to zero.
 * (Name keeps the historical "kenrel" spelling — callers use it as-is.)
 *
 * The walk records each visited level; afterwards the leaf entry is
 * cleared, then tables are freed bottom-up while their page refcount
 * says this was the last user. */
static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
    int level;
    unsigned long va = (unsigned long)v_addr;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    struct mmu_level_info level_info[4];
    int ref;
    int level_shift = MMU_ADDRESS_BITS;
    unsigned long *pos;

    rt_memset(level_info, 0, sizeof level_info);
    /* Walk down, recording entry position and next-level table per level.
     * Stops early at an unused entry or a block mapping. */
    for (level = 0; level < MMU_TBL_LEVEL_NR; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        page = cur_lv_tbl[off];
        if (!(page & MMU_TYPE_USED))
        {
            break;
        }
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            break;
        }
        /* next table entry in current level */
        level_info[level].pos = cur_lv_tbl + off;
        /* descriptor stores a physical address; convert to VA to walk it */
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_info[level].page = cur_lv_tbl;
        level_shift -= MMU_LEVEL_SHIFT;
    }

    /* Clear the leaf (4K) entry first so the translation disappears
     * before any table is torn down. */
    level = MMU_TBL_PAGE_4k_LEVEL;
    pos = level_info[level].pos;
    if (pos)
    {
        *pos = (unsigned long)RT_NULL;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
    }
    level--;

    /* Bottom-up: drop one reference per level; if this walk held the
     * last reference, also clear the parent entry before freeing. */
    while (level >= 0)
    {
        pos = level_info[level].pos;
        if (pos)
        {
            void *cur_page = level_info[level].page;
            ref = rt_page_ref_get(cur_page, 0);
            if (ref == 1)
            {
                *pos = (unsigned long)RT_NULL;
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
            }
            rt_pages_free(cur_page, 0);
        }
        else
        {
            break;
        }
        level--;
    }

    return;
}

123
/* Install a single 4K mapping va -> pa with attributes `attr` in the
 * table rooted at `lv0_tbl`.
 *
 * Intermediate tables are allocated on demand; existing ones get a
 * reference-count increment so _kenrel_unmap_4K() can release them
 * symmetrically. On any failure the partially-taken references are
 * rolled back via _kenrel_unmap_4K().
 *
 * Returns 0 on success or an MMU_MAP_ERROR_* code. */
static int _kernel_map_4K(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    intptr_t va = (intptr_t)vaddr;
    intptr_t pa = (intptr_t)paddr;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    /* Walk/build levels 0..2; level 3 holds the final page entry. */
    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((void *)page, 0, ARCH_PAGE_SIZE);
            /* flush the zeroed table before the walker can see it */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            /* descriptor stores the physical address of the new table */
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            /* another mapping now shares this table — bump its refcount */
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level page */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_PAGE); /* page */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa; /* page */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;
err:
    /* roll back references taken during this partial walk */
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}
G
GuEe-GUI 已提交
190

191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259
/* Install a single 2M block mapping va -> pa with attributes `attr`.
 * Mirrors _kernel_map_4K() but stops the walk at level 2 and writes a
 * block descriptor instead of a page entry.
 *
 * Returns 0 on success or an MMU_MAP_ERROR_* code; partial reference
 * increments are rolled back through _kenrel_unmap_4K() on failure. */
static int _kernel_map_2M(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    unsigned long va = (unsigned long)vaddr;
    unsigned long pa = (unsigned long)paddr;

    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    /* Walk/build levels 0..1; the 2M block entry lives at level 2. */
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            /* flush the zeroed table before publishing it to the walker */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            /* shared intermediate table — take a reference */
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level page */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;
err:
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}

260 261
/**
 * @brief Map a physically contiguous region into an address space.
 *
 * Chooses 2M block mappings when the virtual address, physical address
 * and size are all 2M aligned, otherwise falls back to 4K pages.
 *
 * @param aspace target address space
 * @param v_addr page-aligned virtual start address
 * @param p_addr page-aligned physical start address
 * @param size   length of the region in bytes (page aligned)
 * @param attr   architecture attribute bits for the new entries
 *
 * @return the original v_addr on success; NULL on failure, with any
 *         partially-created mappings undone
 */
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                    size_t attr)
{
    int ret = -1;

    void *unmap_va = v_addr;
    size_t npages;
    size_t stride;
    int (*mapper)(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr);

    /* fix: the PHYSICAL address must also be 2M aligned for the huge-page
     * path — otherwise _kernel_map_2M returns PANOTALIGN, which the
     * assertion below treats as a programming error */
    if (((rt_ubase_t)v_addr & ARCH_SECTION_MASK) ||
        ((rt_ubase_t)p_addr & ARCH_SECTION_MASK) ||
        (size & ARCH_SECTION_MASK))
    {
        /* legacy 4k mapping */
        npages = size >> ARCH_PAGE_SHIFT;
        stride = ARCH_PAGE_SIZE;
        mapper = _kernel_map_4K;
    }
    else
    {
        /* 2m huge page */
        npages = size >> ARCH_SECTION_SHIFT;
        stride = ARCH_SECTION_SIZE;
        mapper = _kernel_map_2M;
    }

    while (npages--)
    {
        MM_PGTBL_LOCK(aspace);
        ret = mapper(aspace->page_table, v_addr, p_addr, attr);
        MM_PGTBL_UNLOCK(aspace);

        if (ret != 0)
        {
            /* other types of return value are taken as programming error */
            RT_ASSERT(ret == MMU_MAP_ERROR_NOPAGE);
            /* error, undo map */
            while (unmap_va != v_addr)
            {
                MM_PGTBL_LOCK(aspace);
                _kenrel_unmap_4K(aspace->page_table, (void *)unmap_va);
                MM_PGTBL_UNLOCK(aspace);
                unmap_va = (char *)unmap_va + stride;
            }
            break;
        }
        v_addr = (char *)v_addr + stride;
        p_addr = (char *)p_addr + stride;
    }

    if (ret == 0)
    {
        /* unmap_va still holds the caller's original v_addr here */
        return unmap_va;
    }

    return NULL;
}

317
/* Tear down every 4K mapping in [v_addr, v_addr + size).
 * The caller guarantees that v_addr and size are page aligned. */
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
{
    size_t remaining = size >> ARCH_PAGE_SHIFT;
    char *cursor = v_addr;

    if (aspace->page_table == RT_NULL)
    {
        return;
    }

    /* take the page-table lock per page so long unmaps stay preemptible */
    for (; remaining > 0; remaining--)
    {
        MM_PGTBL_LOCK(aspace);
        _kenrel_unmap_4K(aspace->page_table, cursor);
        MM_PGTBL_UNLOCK(aspace);
        cursor += ARCH_PAGE_SIZE;
    }
}

336
/* Switch the CPU to a user address space by loading its root table into
 * TTBR0_EL1. The kernel space itself is never switched here. */
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    if (aspace != &rt_kernel_space)
    {
        void *pgtbl = aspace->page_table;
        /* the hardware walker needs the table's physical address */
        pgtbl = rt_kmem_v2p(pgtbl);
        rt_ubase_t tcr;

        __asm__ volatile("msr ttbr0_el1, %0" ::"r"(pgtbl) : "memory");

        /* clear TCR_EL1 bit 7 (EPD0) so TTBR0 translation walks are enabled */
        __asm__ volatile("mrs %0, tcr_el1" : "=r"(tcr));
        tcr &= ~(1ul << 7);
        __asm__ volatile("msr tcr_el1, %0\n"
                         "isb" ::"r"(tcr)
                         : "memory");

        /* drop stale translations belonging to the previous space */
        rt_hw_tlb_invalidate_all_local();
    }
}

356
/* Install the kernel translation table given by `tbl` (a virtual
 * address). With RT_USING_SMART the kernel occupies the high half and
 * the table goes into TTBR1_EL1 (converted to physical via PV_OFFSET);
 * otherwise TTBR0_EL1 is used directly. Afterwards all TLB entries and
 * the shared instruction cache are invalidated. */
void rt_hw_mmu_ktbl_set(unsigned long tbl)
{
#ifdef RT_USING_SMART
    tbl += PV_OFFSET;
    __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#else
    __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#endif
    /* invalidate all EL1 TLB entries, then the instruction cache */
    __asm__ volatile("tlbi vmalle1\n dsb sy\nisb" ::: "memory");
    __asm__ volatile("ic ialluis\n dsb sy\nisb" ::: "memory");
}

368 369 370 371 372 373 374 375 376 377 378 379 380
/**
 * @brief setup Page Table for kernel space. It's a fixed map
 * and all mappings cannot be changed after initialization.
 *
 * Memory region in struct mem_desc must be page aligned,
 * otherwise is a failure and no report will be
 * returned.
 *
 * @param aspace  kernel address space
 * @param mdesc   array of memory region descriptors (consumed in order)
 * @param desc_nr number of entries in mdesc
 */
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
{
    void *err;
    /* fix: use int to match desc_nr and avoid signed/unsigned comparison */
    for (int i = 0; i < desc_nr; i++)
    {
        size_t attr;
        switch (mdesc->attr)
        {
        case NORMAL_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case NORMAL_NOCACHE_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case DEVICE_MEM:
            attr = MMU_MAP_K_DEVICE;
            break;
        default:
            attr = MMU_MAP_K_DEVICE;
        }

        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                    .limit_start = aspace->start,
                                    .limit_range_size = aspace->size,
                                    .map_size = mdesc->vaddr_end -
                                                mdesc->vaddr_start + 1,
                                    .prefer = (void *)mdesc->vaddr_start};

        /* descriptors without an explicit PA use the linear pv offset */
        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;

        int retval;
        retval = rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                 mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
        if (retval)
        {
            /* fix: the format string has two conversions (%s, %d) but only
             * one argument was passed — supply the function name as well */
            LOG_E("%s: map failed with code %d", __func__, retval);
            RT_ASSERT(0);
        }
        mdesc++;
    }

    rt_hw_mmu_ktbl_set((unsigned long)rt_kernel_space.page_table);
    rt_page_cleanup();
}

#ifdef RT_USING_SMART
426
static void _init_region(void *vaddr, size_t size)
427 428 429
{
    rt_ioremap_start = vaddr;
    rt_ioremap_size = size;
430
    rt_mpr_start = (char *)rt_ioremap_start - rt_mpr_size;
S
Shell 已提交
431
}
432
#else
S
Shell 已提交
433

434
#define RTOS_VEND (0xfffffffff000UL)
435
static inline void _init_region(void *vaddr, size_t size)
S
Shell 已提交
436
{
437
    rt_mpr_start = (void *)(RTOS_VEND - rt_mpr_size);
S
Shell 已提交
438
}
439
#endif
S
Shell 已提交
440 441 442 443 444 445 446 447 448 449 450 451

/**
 * This function will initialize rt_mmu_info structure.
 *
 * @param mmu_info   rt_mmu_info structure
 * @param v_address  virtual address
 * @param size       map size
 * @param vtable     mmu table
 * @param pv_off     pv offset in kernel space
 *
 * @return 0 on successful and -1 for fail
 */
452 453
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size,
                       size_t *vtable, size_t pv_off)
S
Shell 已提交
454 455 456
{
    size_t va_s, va_e;

457
    if (!aspace || !vtable)
S
Shell 已提交
458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;

    if (va_s == 0)
    {
        return -1;
    }

478 479 480 481
#ifdef RT_USING_SMART
    rt_aspace_init(aspace, (void *)KERNEL_VADDR_START, 0 - KERNEL_VADDR_START,
                   vtable);
#else
482
    rt_aspace_init(aspace, (void *)0x1000, RTOS_VEND - 0x1000ul, vtable);
483 484 485
#endif

    _init_region(v_address, size);
S
Shell 已提交
486 487 488 489

    return 0;
}

490 491 492 493 494 495 496
/************ setting el1 mmu register**************
  MAIR_EL1
  index 0 : memory outer writeback, write/read alloc
  index 1 : memory nocache
  index 2 : device nGnRnE
 *****************************************************/
void mmu_tcr_init(void)
{
    unsigned long val64;

    /* MAIR_EL1: encodes the three attribute indexes listed above */
    val64 = 0x00447fUL;
    __asm__ volatile("msr MAIR_EL1, %0\n dsb sy\n" ::"r"(val64));

    /* TCR_EL1: 48-bit VA in both halves, 4K granule, write-back
     * shareable walks, 16-bit ASID taken from TTBR0 */
    val64 = (16UL << 0)      /* t0sz 48bit */
            | (0x0UL << 6)   /* reserved */
            | (0x0UL << 7)   /* epd0 */
            | (0x3UL << 8)   /* t0 wb cacheable */
            | (0x3UL << 10)  /* inner shareable */
            | (0x2UL << 12)  /* t0 outer shareable */
            | (0x0UL << 14)  /* t0 4K */
            | (16UL << 16)   /* t1sz 48bit */
            | (0x0UL << 22)  /* define asid use ttbr0.asid */
            | (0x0UL << 23)  /* epd1 */
            | (0x3UL << 24)  /* t1 inner wb cacheable */
            | (0x3UL << 26)  /* t1 outer wb cacheable */
            | (0x2UL << 28)  /* t1 outer shareable */
            | (0x2UL << 30)  /* t1 4k */
            | (0x1UL << 32)  /* 001b 64GB PA */
            | (0x0UL << 35)  /* reserved */
            | (0x1UL << 36)  /* as: 0:8bit 1:16bit */
            | (0x0UL << 37)  /* tbi0 */
            | (0x0UL << 38); /* tbi1 */
    __asm__ volatile("msr TCR_EL1, %0\n" ::"r"(val64));
}

526
/* One translation table: 512 eight-byte descriptors, exactly one 4K page. */
struct page_table
{
    unsigned long page[512];
};

/* Static table pool used at early boot, before the page allocator runs. */
static struct page_table __init_page_array[6] rt_align(0x1000);
static unsigned long __page_off = 2UL; /* 0, 1 for ttbr0, ttbr1 */
/* Base address of the static pool; entries 0 and 1 are the root tables. */
unsigned long get_ttbrn_base(void)
{
    return (unsigned long) __init_page_array;
}

539 540 541
unsigned long get_free_page(void)
{
    __page_off++;
542
    return (unsigned long) (__init_page_array[__page_off - 1].page);
543 544 545 546 547 548 549 550 551 552
}

static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va,
                               unsigned long pa, unsigned long attr)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;
S
Shell 已提交
553

554
    if (va & ARCH_SECTION_MASK)
S
Shell 已提交
555
    {
556
        return MMU_MAP_ERROR_VANOTALIGN;
S
Shell 已提交
557
    }
558
    if (pa & ARCH_SECTION_MASK)
S
Shell 已提交
559
    {
560 561 562 563 564 565 566
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
S
Shell 已提交
567
        {
568 569 570 571 572 573 574
            page = get_free_page();
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
S
Shell 已提交
575
        }
576 577
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
S
Shell 已提交
578
        {
579 580
            /* is block! error! */
            return MMU_MAP_ERROR_CONFLICT;
S
Shell 已提交
581
        }
582 583
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        level_shift -= MMU_LEVEL_SHIFT;
S
Shell 已提交
584
    }
585 586 587 588 589
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
S
Shell 已提交
590 591 592
    return 0;
}

593 594 595
static int _init_map_2M(unsigned long *lv0_tbl, unsigned long va,
                        unsigned long pa, unsigned long count,
                        unsigned long attr)
S
Shell 已提交
596
{
597 598
    unsigned long i;
    int ret;
S
Shell 已提交
599

600
    if (va & ARCH_SECTION_MASK)
S
Shell 已提交
601 602 603
    {
        return -1;
    }
604
    if (pa & ARCH_SECTION_MASK)
S
Shell 已提交
605 606 607
    {
        return -1;
    }
608
    for (i = 0; i < count; i++)
S
Shell 已提交
609
    {
610 611 612 613
        ret = _map_single_page_2M(lv0_tbl, va, pa, attr);
        va += ARCH_SECTION_SIZE;
        pa += ARCH_SECTION_SIZE;
        if (ret != 0)
S
Shell 已提交
614
        {
615
            return ret;
616 617
        }
    }
S
Shell 已提交
618 619 620
    return 0;
}

621
/* Look up the table entry translating `vaddr` in `aspace`.
 * On success returns a pointer to the entry (block or 4K page) and
 * stores the entry's coverage shift (log2 of mapped size) in *plvl_shf.
 * Returns RT_NULL — leaving *plvl_shf untouched — when no mapping exists. */
static unsigned long *_query(rt_aspace_t aspace, void *vaddr, int *plvl_shf)
{
    unsigned long virt = (unsigned long)vaddr;
    unsigned long *table;
    unsigned long entry;
    unsigned long idx;
    int shift = MMU_ADDRESS_BITS;
    int depth;

    table = aspace->page_table;
    RT_ASSERT(table);

    for (depth = 0; depth < MMU_TBL_PAGE_4k_LEVEL; depth++)
    {
        idx = (virt >> shift) & MMU_LEVEL_MASK;

        if (!(table[idx] & MMU_TYPE_USED))
        {
            return RT_NULL;
        }

        entry = table[idx];
        if ((entry & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* block mapping terminates the walk early */
            *plvl_shf = shift;
            return &table[idx];
        }

        /* descend: descriptor holds a physical address, convert to VA */
        table = (unsigned long *)(entry & MMU_ADDRESS_MASK);
        table = (unsigned long *)((unsigned long)table - PV_OFFSET);
        shift -= MMU_LEVEL_SHIFT;
    }

    /* bottom level: 4K page entries */
    idx = (virt >> ARCH_PAGE_SHIFT) & MMU_LEVEL_MASK;
    entry = table[idx];

    if (!(entry & MMU_TYPE_USED))
    {
        return RT_NULL;
    }
    *plvl_shf = shift;
    return &table[idx];
}

667
/* Translate a virtual address to its physical address within `aspace`.
 * Returns ARCH_MAP_FAILED when the address is not mapped. */
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
{
    int shift;
    unsigned long result;

    if (aspace == &rt_kernel_space)
    {
        /* kernel space uses the dedicated fast translation */
        return (void *)(unsigned long)rt_hw_mmu_kernel_v2p(v_addr);
    }

    unsigned long *entry = _query(aspace, v_addr, &shift);
    if (entry == RT_NULL)
    {
        return (void *)(unsigned long)ARCH_MAP_FAILED;
    }

    /* combine the entry's output address with the in-mapping offset */
    result = *entry & MMU_ADDRESS_MASK;
    result |= (rt_ubase_t)v_addr & ((1ul << shift) - 1);
    return (void *)result;
}

694
/* Rewrite a PTE's memory-attribute index field (bits [4:2]) from
 * NORMAL_MEM to NORMAL_NOCACHE_MEM. Returns 0 on success or
 * -RT_ENOSYS when the entry is not normal cacheable memory. */
static int _noncache(rt_ubase_t *pte)
{
    const rt_ubase_t shift = 2;
    const rt_ubase_t mask = 0x7 << shift;
    rt_ubase_t old = *pte;

    if ((old & mask) != (NORMAL_MEM << shift))
    {
        /* only normal cacheable memory may be switched to noncache */
        return -RT_ENOSYS;
    }

    *pte = (old & ~mask) | (NORMAL_NOCACHE_MEM << shift);
    return 0;
}

712
/* Rewrite a PTE's memory-attribute index field (bits [4:2]) from
 * NORMAL_NOCACHE_MEM back to NORMAL_MEM. Returns 0 on success or
 * -RT_ENOSYS when the entry is not noncache normal memory. */
static int _cache(rt_ubase_t *pte)
{
    const rt_ubase_t shift = 2;
    const rt_ubase_t mask = 0x7 << shift;
    rt_ubase_t old = *pte;

    if ((old & mask) != (NORMAL_NOCACHE_MEM << shift))
    {
        /* only noncache normal memory may be switched back to cacheable */
        return -RT_ENOSYS;
    }

    *pte = (old & ~mask) | (NORMAL_MEM << shift);
    return 0;
}

730
static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_ubase_t *pte) = {
731 732 733
    [MMU_CNTL_CACHE] = _cache,
    [MMU_CNTL_NONCACHE] = _noncache,
};
734

735 736
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
737
{
738 739
    int level_shift;
    int err = -RT_EINVAL;
740 741
    rt_ubase_t vstart = (rt_ubase_t)vaddr;
    rt_ubase_t vend = vstart + size;
G
GuEe-GUI 已提交
742

743
    int (*handler)(rt_ubase_t * pte);
744
    if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
745
    {
746
        handler = control_handler[cmd];
B
bigmagic 已提交
747

748
        while (vstart < vend)
S
Shell 已提交
749
        {
750 751
            rt_ubase_t *pte = _query(aspace, (void *)vstart, &level_shift);
            rt_ubase_t range_end = vstart + (1ul << level_shift);
752 753 754 755 756 757 758
            RT_ASSERT(range_end <= vend);

            if (pte)
            {
                err = handler(pte);
                RT_ASSERT(err == RT_EOK);
            }
759
            vstart = range_end;
S
Shell 已提交
760 761
        }
    }
762
    else
S
Shell 已提交
763
    {
764
        err = -RT_ENOSYS;
S
Shell 已提交
765
    }
B
bigmagic 已提交
766

767
    return err;
mysterywolf's avatar
mysterywolf 已提交
768
}
S
Shell 已提交
769

770
void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
771
                           unsigned long size, unsigned long pv_off)
S
Shell 已提交
772 773
{
    int ret;
774 775
    unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM);
776

777
#ifdef RT_USING_SMART
S
Shell 已提交
778
    unsigned long va = KERNEL_VADDR_START;
779
#else
780 781
    extern unsigned char _start;
    unsigned long va = (unsigned long) &_start;
782
    va = RT_ALIGN_DOWN(va, 0x200000);
783 784
#endif

785 786
    /* setup pv off */
    rt_kmem_pvoff_set(pv_off);
S
Shell 已提交
787 788

    /* clean the first two pages */
789 790
    rt_memset((char *)tbl0, 0, ARCH_PAGE_SIZE);
    rt_memset((char *)tbl1, 0, ARCH_PAGE_SIZE);
S
Shell 已提交
791

792
    ret = _init_map_2M(tbl1, va, va + pv_off, count, normal_attr);
S
Shell 已提交
793 794 795 796
    if (ret != 0)
    {
        while (1);
    }
797
    ret = _init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr);
S
Shell 已提交
798 799 800 801 802
    if (ret != 0)
    {
        while (1);
    }
}