/*
 * Copyright (c) 2006-2023, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 * 2021-11-28     GuEe-GUI     first version
 * 2022-12-10     WangXiaoyao  porting to MM
 */
#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
B
bigmagic 已提交
17

18 19
#include "mm_aspace.h"
#include "mm_page.h"
S
Shell 已提交
20
#include "mmu.h"
21
#include "tlb.h"
S
Shell 已提交
22 23

#ifdef RT_USING_SMART
24
#include "ioremap.h"
S
Shell 已提交
25 26 27
#include <lwp_mm.h>
#endif

28 29 30
#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>
S
Shell 已提交
31

32 33 34 35 36 37 38 39 40 41 42
#define MMU_LEVEL_MASK   0x1ffUL
#define MMU_LEVEL_SHIFT  9
#define MMU_ADDRESS_BITS 39
#define MMU_ADDRESS_MASK 0x0000fffffffff000UL
#define MMU_ATTRIB_MASK  0xfff0000000000ffcUL

#define MMU_TYPE_MASK  3UL
#define MMU_TYPE_USED  1UL
#define MMU_TYPE_BLOCK 1UL
#define MMU_TYPE_TABLE 3UL
#define MMU_TYPE_PAGE  3UL
S
Shell 已提交
43 44 45 46 47

#define MMU_TBL_BLOCK_2M_LEVEL 2
#define MMU_TBL_PAGE_4k_LEVEL  3
#define MMU_TBL_LEVEL_NR       4

48
/* Level-0 (root) kernel translation table; hardware requires 4 KiB alignment */
volatile unsigned long MMUTable[512] __attribute__((aligned(4 * 1024)));

/* Record of one visited level during a table walk (used when unmapping) */
struct mmu_level_info
{
    unsigned long *pos;  /* address of the descriptor at this level */
    void *page;          /* kernel VA of the next-level table it points to */
};
55

S
Shell 已提交
56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81
/**
 * @brief Remove the 4 KiB mapping of v_addr and release intermediate tables.
 *
 * Walks all four translation levels, recording each visited descriptor, then
 * clears the leaf entry and drops one reference on each intermediate table
 * (detaching and freeing a table when this walk held its last reference).
 *
 * NOTE(review): the historical "kenrel" spelling is kept — the name is
 * referenced throughout this file.
 *
 * @param lv0_tbl level-0 (root) translation table, kernel virtual address
 * @param v_addr  page-aligned virtual address to unmap
 */
static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
{
    int level;
    unsigned long va = (unsigned long)v_addr;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    struct mmu_level_info level_info[4];
    int ref;
    int level_shift = MMU_ADDRESS_BITS; /* shift of the level-0 index field */
    unsigned long *pos;

    rt_memset(level_info, 0, sizeof level_info);
    for (level = 0; level < MMU_TBL_LEVEL_NR; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        page = cur_lv_tbl[off];
        if (!(page & MMU_TYPE_USED))
        {
            /* hole in the walk: nothing mapped at or below this level */
            break;
        }
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* hit a block descriptor; no deeper tables exist */
            break;
        }
82
        /* next table entry in current level */
        level_info[level].pos = cur_lv_tbl + off;
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        /* descriptors hold physical addresses; convert back to kernel VA */
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_info[level].page = cur_lv_tbl;
        level_shift -= MMU_LEVEL_SHIFT;
    }

    /* clear the leaf (4 KiB) entry first, if the walk reached it */
    level = MMU_TBL_PAGE_4k_LEVEL;
    pos = level_info[level].pos;
    if (pos)
    {
        *pos = (unsigned long)RT_NULL;
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
    }
    level--;

    /* walk back up, dropping one reference per intermediate table */
    while (level >= 0)
    {
        pos = level_info[level].pos;
        if (pos)
        {
            void *cur_page = level_info[level].page;
            ref = rt_page_ref_get(cur_page, 0);
            if (ref == 1)
            {
                /* last user: detach the table from its parent before freeing */
                *pos = (unsigned long)RT_NULL;
                rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, pos, sizeof(void *));
            }
            rt_pages_free(cur_page, 0);
        }
        else
        {
            break;
        }
        level--;
    }

    return;
}

123
/**
 * @brief Install a single 4 KiB page mapping vaddr -> paddr into lv0_tbl.
 *
 * Walks (and creates on demand) the three intermediate table levels, taking
 * one page reference on each existing table so that unmapping can release
 * them symmetrically. On failure the partially built path is rolled back
 * via _kenrel_unmap_4K().
 *
 * @param lv0_tbl level-0 (root) translation table, kernel virtual address
 * @param vaddr   page-aligned virtual address
 * @param paddr   page-aligned physical address
 * @param attr    descriptor attribute bits (masked with MMU_ATTRIB_MASK)
 *
 * @return 0 on success, or MMU_MAP_ERROR_* on failure
 */
static int _kernel_map_4K(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    intptr_t va = (intptr_t)vaddr;
    intptr_t pa = (intptr_t)paddr;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_PAGE_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    /* walk/build the intermediate levels down to the 4 KiB table */
    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            /* allocate a fresh next-level table and link it (as PA) */
            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((void *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            /* existing table gains one more user (this mapping) */
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level page */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_PAGE); /* page */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa; /* page */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;
err:
    /* undo reference counts / tables acquired on the way down */
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}
G
GuEe-GUI 已提交
190

191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259
/**
 * @brief Install a single 2 MiB block mapping vaddr -> paddr into lv0_tbl.
 *
 * Same walk as _kernel_map_4K() but stops one level earlier and writes a
 * block descriptor. Error rollback reuses _kenrel_unmap_4K(), which releases
 * the intermediate-table references taken here.
 *
 * @param lv0_tbl level-0 (root) translation table, kernel virtual address
 * @param vaddr   2 MiB-aligned virtual address
 * @param paddr   2 MiB-aligned physical address
 * @param attr    descriptor attribute bits (masked with MMU_ATTRIB_MASK)
 *
 * @return 0 on success, or MMU_MAP_ERROR_* on failure
 */
static int _kernel_map_2M(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr)
{
    int ret = 0;
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    unsigned long va = (unsigned long)vaddr;
    unsigned long pa = (unsigned long)paddr;

    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    /* walk/build the levels above the 2 MiB block level */
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            page = (unsigned long)rt_pages_alloc_ext(0, PAGE_ANY_AVAILABLE);
            if (!page)
            {
                ret = MMU_MAP_ERROR_NOPAGE;
                goto err;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)page, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = (page + PV_OFFSET) | MMU_TYPE_TABLE;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
        }
        else
        {
            page = cur_lv_tbl[off];
            page &= MMU_ADDRESS_MASK;
            /* page to va */
            page -= PV_OFFSET;
            /* existing table gains one more user (this mapping) */
            rt_page_ref_inc((void *)page, 0);
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            ret = MMU_MAP_ERROR_CONFLICT;
            goto err;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level page */
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, cur_lv_tbl + off, sizeof(void *));
    return ret;
err:
    _kenrel_unmap_4K(lv0_tbl, (void *)va);
    return ret;
}

260 261
/**
 * @brief Map [v_addr, v_addr + size) to [p_addr, p_addr + size).
 *
 * Uses 2 MiB block mappings when both the virtual base and the size are
 * section aligned, otherwise falls back to 4 KiB pages. On an allocation
 * failure everything mapped so far is rolled back and NULL is returned.
 *
 * NOTE(review): p_addr alignment is not part of the section-vs-page choice;
 * an unaligned p_addr with an aligned v_addr/size would trip the assert below
 * — presumably callers guarantee matching alignment. TODO confirm.
 *
 * @return the start virtual address on success, NULL on failure
 */
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                    size_t attr)
{
    int err = -1;
    void *const map_start = v_addr; /* returned on success; rollback anchor */
    size_t count;
    size_t step;
    int (*do_map)(unsigned long *lv0_tbl, void *vaddr, void *paddr, unsigned long attr);

    if (((rt_ubase_t)v_addr & ARCH_SECTION_MASK) || (size & ARCH_SECTION_MASK))
    {
        /* not section aligned: legacy 4 KiB mapping */
        count = size >> ARCH_PAGE_SHIFT;
        step = ARCH_PAGE_SIZE;
        do_map = _kernel_map_4K;
    }
    else
    {
        /* base and size both 2 MiB aligned: use huge blocks */
        count = size >> ARCH_SECTION_SHIFT;
        step = ARCH_SECTION_SIZE;
        do_map = _kernel_map_2M;
    }

    while (count--)
    {
        MM_PGTBL_LOCK(aspace);
        err = do_map(aspace->page_table, v_addr, p_addr, attr);
        MM_PGTBL_UNLOCK(aspace);

        if (err != 0)
        {
            /* other types of return value are taken as programming error */
            RT_ASSERT(err == MMU_MAP_ERROR_NOPAGE);

            /* error, undo everything mapped before this iteration */
            for (void *undo = map_start; undo != v_addr;
                 undo = (char *)undo + step)
            {
                MM_PGTBL_LOCK(aspace);
                _kenrel_unmap_4K(aspace->page_table, undo);
                MM_PGTBL_UNLOCK(aspace);
            }
            return NULL;
        }

        v_addr = (char *)v_addr + step;
        p_addr = (char *)p_addr + step;
    }

    return err == 0 ? map_start : NULL;
}

317
/**
 * @brief Unmap [v_addr, v_addr + size) one 4 KiB page at a time.
 *
 * Caller guarantees that v_addr and size are page aligned. Holes (pages
 * that are not currently mapped) are skipped silently.
 */
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
{
    char *page_va = v_addr;

    if (!aspace->page_table)
    {
        return;
    }

    for (size_t remain = size >> ARCH_PAGE_SHIFT; remain != 0; remain--)
    {
        MM_PGTBL_LOCK(aspace);
        /* only tear down entries that actually resolve to a physical page */
        if (rt_hw_mmu_v2p(aspace, page_va) != ARCH_MAP_FAILED)
        {
            _kenrel_unmap_4K(aspace->page_table, page_va);
        }
        MM_PGTBL_UNLOCK(aspace);
        page_va += ARCH_PAGE_SIZE;
    }
}

337
/**
 * @brief Activate the translation table of a user address space.
 *
 * Loads TTBR0_EL1 with the physical address of the aspace's root table,
 * clears TCR_EL1 bit 7 (EPD0, which re-enables translation walks through
 * TTBR0), then invalidates the local TLB. Switching to the kernel space is
 * a no-op — the kernel mapping lives in TTBR1 and is never changed here.
 *
 * @param aspace address space to switch to
 */
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    if (aspace != &rt_kernel_space)
    {
        void *pgtbl = aspace->page_table;
        pgtbl = rt_kmem_v2p(pgtbl); /* hardware needs the physical address */
        rt_ubase_t tcr;

        __asm__ volatile("msr ttbr0_el1, %0" ::"r"(pgtbl) : "memory");

        /* clear TCR_EL1.EPD0 so TTBR0 walks are enabled */
        __asm__ volatile("mrs %0, tcr_el1" : "=r"(tcr));
        tcr &= ~(1ul << 7);
        __asm__ volatile("msr tcr_el1, %0\n"
                         "isb" ::"r"(tcr)
                         : "memory");

        rt_hw_tlb_invalidate_all_local();
    }
}

357
/**
 * @brief Install the kernel translation table and flush TLB and icache.
 *
 * With RT_USING_SMART the kernel runs in the high half, so the table goes
 * into TTBR1_EL1 (converted to physical via PV_OFFSET); otherwise TTBR0_EL1
 * is used. NOTE(review): the non-SMART path writes @tbl without applying
 * PV_OFFSET — presumably PV_OFFSET is zero in that configuration; confirm.
 *
 * @param tbl kernel virtual address of the root translation table
 */
void rt_hw_mmu_ktbl_set(unsigned long tbl)
{
#ifdef RT_USING_SMART
    tbl += PV_OFFSET; /* VA -> PA for the hardware register */
    __asm__ volatile("msr TTBR1_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#else
    __asm__ volatile("msr TTBR0_EL1, %0\n dsb sy\nisb" ::"r"(tbl) : "memory");
#endif
    /* drop every stale translation, then the instruction cache */
    __asm__ volatile("tlbi vmalle1\n dsb sy\nisb" ::: "memory");
    __asm__ volatile("ic ialluis\n dsb sy\nisb" ::: "memory");
}

369 370 371 372 373 374 375 376 377 378 379 380 381
/**
 * @brief setup Page Table for kernel space. It's a fixed map
 * and all mappings cannot be changed after initialization.
 *
 * Memory region in struct mem_desc must be page aligned,
 * otherwise is a failure and no report will be
 * returned.
 *
 * @param aspace  kernel address space to populate
 * @param mdesc   array of memory region descriptors (consumed in order)
 * @param desc_nr number of entries in mdesc
 */
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
{
    void *err;
    for (int i = 0; i < desc_nr; i++)
    {
        size_t attr;
        switch (mdesc->attr)
        {
        case NORMAL_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case NORMAL_NOCACHE_MEM:
            /* NOTE(review): currently mapped with the same cacheable
             * attribute as NORMAL_MEM — confirm this is intended */
            attr = MMU_MAP_K_RWCB;
            break;
        case DEVICE_MEM:
            attr = MMU_MAP_K_DEVICE;
            break;
        default:
            attr = MMU_MAP_K_DEVICE;
        }

        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                    .limit_start = aspace->start,
                                    .limit_range_size = aspace->size,
                                    .map_size = mdesc->vaddr_end -
                                                mdesc->vaddr_start + 1,
                                    .prefer = (void *)mdesc->vaddr_start};

        /* descriptors without an explicit PA are linearly translated */
        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;

        int retval;
        retval = rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                 mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
        if (retval)
        {
            /* fix: the format string has two conversions (%s, %d) but was
             * given only one argument — pass the function name as well */
            LOG_E("%s: map failed with code %d", __func__, retval);
            RT_ASSERT(0);
        }
        mdesc++;
    }

    rt_hw_mmu_ktbl_set((unsigned long)rt_kernel_space.page_table);
    rt_page_cleanup();
}

#ifdef RT_USING_SMART
/* Smart build: publish the ioremap window and place the MPR just below it */
static void _init_region(void *vaddr, size_t size)
{
    rt_ioremap_start = vaddr;
    rt_ioremap_size = size;
    rt_mpr_start = (char *)rt_ioremap_start - rt_mpr_size;
}
#else

/* top of the kernel virtual address range in the non-smart build */
#define RTOS_VEND (0xfffffffff000UL)
/* Non-smart build: only the MPR is placed, right below the fixed VA top;
 * vaddr/size are unused in this configuration */
static inline void _init_region(void *vaddr, size_t size)
{
    rt_mpr_start = (void *)(RTOS_VEND - rt_mpr_size);
}
#endif
S
Shell 已提交
441 442 443 444 445 446 447 448 449 450 451 452

/**
 * This function will initialize rt_mmu_info structure.
 *
 * @param mmu_info   rt_mmu_info structure
 * @param v_address  virtual address
 * @param size       map size
 * @param vtable     mmu table
 * @param pv_off     pv offset in kernel space
 *
 * @return 0 on successful and -1 for fail
 */
453 454
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, size_t size,
                       size_t *vtable, size_t pv_off)
S
Shell 已提交
455 456 457
{
    size_t va_s, va_e;

458
    if (!aspace || !vtable)
S
Shell 已提交
459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478
    {
        return -1;
    }

    va_s = (size_t)v_address;
    va_e = (size_t)v_address + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    va_s >>= ARCH_SECTION_SHIFT;
    va_e >>= ARCH_SECTION_SHIFT;

    if (va_s == 0)
    {
        return -1;
    }

479 480 481 482
#ifdef RT_USING_SMART
    rt_aspace_init(aspace, (void *)KERNEL_VADDR_START, 0 - KERNEL_VADDR_START,
                   vtable);
#else
483
    rt_aspace_init(aspace, (void *)0x1000, RTOS_VEND - 0x1000ul, vtable);
484 485 486
#endif

    _init_region(v_address, size);
S
Shell 已提交
487 488 489 490

    return 0;
}

491 492 493 494 495 496 497
/************ setting el1 mmu register**************
  MAIR_EL1
  index 0 : memory outer writeback, write/read alloc
  index 1 : memory nocache
  index 2 : device nGnRnE
 *****************************************************/
/* Program MAIR_EL1 and TCR_EL1 for 48-bit VA, 4 KiB granule on both TTBRs.
 * Must run before the MMU is enabled; does not touch SCTLR_EL1. */
void mmu_tcr_init(void)
{
    unsigned long val64;

    /* memory attribute indirection: see the table in the header comment */
    val64 = 0x00447fUL;
    __asm__ volatile("msr MAIR_EL1, %0\n dsb sy\n" ::"r"(val64));

    /* TCR_EL1 */
    val64 = (16UL << 0)      /* t0sz 48bit */
            | (0x0UL << 6)   /* reserved */
            | (0x0UL << 7)   /* epd0 */
            | (0x3UL << 8)   /* t0 wb cacheable */
            | (0x3UL << 10)  /* inner shareable */
            | (0x2UL << 12)  /* t0 outer shareable */
            | (0x0UL << 14)  /* t0 4K */
            | (16UL << 16)   /* t1sz 48bit */
            | (0x0UL << 22)  /* define asid use ttbr0.asid */
            | (0x0UL << 23)  /* epd1 */
            | (0x3UL << 24)  /* t1 inner wb cacheable */
            | (0x3UL << 26)  /* t1 outer wb cacheable */
            | (0x2UL << 28)  /* t1 outer shareable */
            | (0x2UL << 30)  /* t1 4k */
            | (0x1UL << 32)  /* 001b 64GB PA */
            | (0x0UL << 35)  /* reserved */
            | (0x1UL << 36)  /* as: 0:8bit 1:16bit */
            | (0x0UL << 37)  /* tbi0 */
            | (0x0UL << 38); /* tbi1 */
    __asm__ volatile("msr TCR_EL1, %0\n" ::"r"(val64));
}

527
/* One translation table: 512 descriptors of 8 bytes = one 4 KiB page */
struct page_table
{
    unsigned long page[512];
};
S
Shell 已提交
531

532 533 534 535 536 537 538 539
/* static pool of translation tables for early boot (before the allocator) */
static struct page_table __init_page_array[6] rt_align(0x1000);
static unsigned long __page_off = 2UL; /* 0, 1 for ttbr0, ttrb1 */
/* @return base of the pool; entries 0 and 1 are the TTBR0/TTBR1 roots */
unsigned long get_ttbrn_base(void)
{
    return (unsigned long) __init_page_array;
}

540 541 542
unsigned long get_free_page(void)
{
    __page_off++;
543
    return (unsigned long) (__init_page_array[__page_off - 1].page);
544 545 546 547 548 549 550 551 552 553
}

/**
 * @brief Map one 2 MiB section in an early-boot table.
 *
 * Unlike _kernel_map_2M() this walker applies no PV_OFFSET conversion and
 * draws intermediate tables from the static early pool (get_free_page), so
 * it is usable before the MMU and the page allocator are up.
 *
 * @param lv0_tbl root translation table
 * @param va      2 MiB-aligned virtual address
 * @param pa      2 MiB-aligned physical address
 * @param attr    descriptor attribute bits (masked with MMU_ATTRIB_MASK)
 *
 * @return 0 on success, or MMU_MAP_ERROR_* on failure
 */
static int _map_single_page_2M(unsigned long *lv0_tbl, unsigned long va,
                               unsigned long pa, unsigned long attr)
{
    int level;
    unsigned long *cur_lv_tbl = lv0_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    if (va & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_VANOTALIGN;
    }
    if (pa & ARCH_SECTION_MASK)
    {
        return MMU_MAP_ERROR_PANOTALIGN;
    }
    for (level = 0; level < MMU_TBL_BLOCK_2M_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;
        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            /* grab a table from the static pool; no refcounting this early */
            page = get_free_page();
            if (!page)
            {
                return MMU_MAP_ERROR_NOPAGE;
            }
            rt_memset((char *)page, 0, ARCH_PAGE_SIZE);
            cur_lv_tbl[off] = page | MMU_TYPE_TABLE;
        }
        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* is block! error! */
            return MMU_MAP_ERROR_CONFLICT;
        }
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    attr &= MMU_ATTRIB_MASK;
    pa |= (attr | MMU_TYPE_BLOCK); /* block */
    off = (va >> ARCH_SECTION_SHIFT);
    off &= MMU_LEVEL_MASK;
    cur_lv_tbl[off] = pa;
    return 0;
}

G
GUI 已提交
594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618
void *rt_ioremap_early(void *paddr, size_t size)
{
    size_t count;
    static void *tbl = RT_NULL;

    if (!size)
    {
        return RT_NULL;
    }

    if (!tbl)
    {
        tbl = rt_hw_mmu_tbl_get();
    }

    count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;

    while (count --> 0)
    {
        _map_single_page_2M(tbl, (unsigned long)paddr, (unsigned long)paddr, MMU_MAP_K_DEVICE);
    }

    return paddr;
}

619 620 621
static int _init_map_2M(unsigned long *lv0_tbl, unsigned long va,
                        unsigned long pa, unsigned long count,
                        unsigned long attr)
S
Shell 已提交
622
{
623 624
    unsigned long i;
    int ret;
S
Shell 已提交
625

626
    if (va & ARCH_SECTION_MASK)
S
Shell 已提交
627 628 629
    {
        return -1;
    }
630
    if (pa & ARCH_SECTION_MASK)
S
Shell 已提交
631 632 633
    {
        return -1;
    }
634
    for (i = 0; i < count; i++)
S
Shell 已提交
635
    {
636 637 638 639
        ret = _map_single_page_2M(lv0_tbl, va, pa, attr);
        va += ARCH_SECTION_SIZE;
        pa += ARCH_SECTION_SIZE;
        if (ret != 0)
S
Shell 已提交
640
        {
641
            return ret;
642 643
        }
    }
S
Shell 已提交
644 645 646
    return 0;
}

647
/**
 * @brief Look up the translation descriptor covering vaddr.
 *
 * Walks from the root table towards the 4 KiB level, stopping early when a
 * block descriptor covers the address.
 *
 * @param aspace   address space whose table is walked
 * @param vaddr    virtual address to resolve
 * @param plvl_shf out: shift of the entry's coverage (written ONLY when a
 *                 descriptor is found; left untouched on failure — callers
 *                 must not read it after a NULL return)
 *
 * @return pointer to the descriptor (kernel VA), or NULL when unmapped
 */
static unsigned long *_query(rt_aspace_t aspace, void *vaddr, int *plvl_shf)
{
    int level;
    unsigned long va = (unsigned long)vaddr;
    unsigned long *cur_lv_tbl;
    unsigned long page;
    unsigned long off;
    int level_shift = MMU_ADDRESS_BITS;

    cur_lv_tbl = aspace->page_table;
    RT_ASSERT(cur_lv_tbl);

    for (level = 0; level < MMU_TBL_PAGE_4k_LEVEL; level++)
    {
        off = (va >> level_shift);
        off &= MMU_LEVEL_MASK;

        if (!(cur_lv_tbl[off] & MMU_TYPE_USED))
        {
            /* hole: nothing mapped at this level */
            return (void *)0;
        }

        page = cur_lv_tbl[off];
        if ((page & MMU_TYPE_MASK) == MMU_TYPE_BLOCK)
        {
            /* block descriptor covers the address at this level */
            *plvl_shf = level_shift;
            return &cur_lv_tbl[off];
        }

        /* descend: descriptor holds a PA, convert back to kernel VA */
        cur_lv_tbl = (unsigned long *)(page & MMU_ADDRESS_MASK);
        cur_lv_tbl = (unsigned long *)((unsigned long)cur_lv_tbl - PV_OFFSET);
        level_shift -= MMU_LEVEL_SHIFT;
    }
    /* now is level MMU_TBL_PAGE_4k_LEVEL */
    off = (va >> ARCH_PAGE_SHIFT);
    off &= MMU_LEVEL_MASK;
    page = cur_lv_tbl[off];

    if (!(page & MMU_TYPE_USED))
    {
        return (void *)0;
    }
    *plvl_shf = level_shift;
    return &cur_lv_tbl[off];
}

693
/**
 * @brief Translate a virtual address to its physical address.
 *
 * The kernel space uses its linear mapping; any other space is resolved by
 * walking its translation table.
 *
 * @return the physical address, or ARCH_MAP_FAILED when unmapped
 */
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void *v_addr)
{
    int shift;
    unsigned long *entry;
    unsigned long phys;

    /* kernel space is linearly mapped: translate arithmetically */
    if (aspace == &rt_kernel_space)
    {
        return (void *)(unsigned long)rt_hw_mmu_kernel_v2p(v_addr);
    }

    entry = _query(aspace, v_addr, &shift);
    if (entry == RT_NULL)
    {
        return (void *)(unsigned long)ARCH_MAP_FAILED;
    }

    /* frame address plus the offset inside the covering page/block */
    phys = *entry & MMU_ADDRESS_MASK;
    phys |= (rt_ubase_t)v_addr & ((1ul << shift) - 1);
    return (void *)phys;
}

720
/**
 * @brief Switch a PTE's memory-attribute index from cached to non-cached.
 *
 * @param pte descriptor to rewrite in place
 * @return 0 on success, -RT_ENOSYS if the entry is not cached normal memory
 */
static int _noncache(rt_ubase_t *pte)
{
    const rt_ubase_t idx_shift = 2;
    const rt_ubase_t idx_mask = 0x7 << idx_shift;
    rt_ubase_t entry = *pte;

    /* only cached normal memory may be demoted to non-cached */
    if ((entry & idx_mask) != (NORMAL_MEM << idx_shift))
    {
        return -RT_ENOSYS;
    }

    *pte = (entry & ~idx_mask) | (NORMAL_NOCACHE_MEM << idx_shift);
    return 0;
}

738
/**
 * @brief Switch a PTE's memory-attribute index from non-cached to cached.
 *
 * @param pte descriptor to rewrite in place
 * @return 0 on success, -RT_ENOSYS if the entry is not non-cached normal memory
 */
static int _cache(rt_ubase_t *pte)
{
    const rt_ubase_t idx_shift = 2;
    const rt_ubase_t idx_mask = 0x7 << idx_shift;
    rt_ubase_t entry = *pte;

    /* only non-cached normal memory may be promoted back to cached */
    if ((entry & idx_mask) != (NORMAL_NOCACHE_MEM << idx_shift))
    {
        return -RT_ENOSYS;
    }

    *pte = (entry & ~idx_mask) | (NORMAL_MEM << idx_shift);
    return 0;
}

756
/* dispatch table: rt_mmu_cntl command -> PTE attribute rewriter;
 * unlisted commands are implicitly NULL */
static int (*control_handler[MMU_CNTL_DUMMY_END])(rt_ubase_t *pte) = {
    [MMU_CNTL_CACHE] = _cache,
    [MMU_CNTL_NONCACHE] = _noncache,
};
760

761 762
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
763
{
764 765
    int level_shift;
    int err = -RT_EINVAL;
766 767
    rt_ubase_t vstart = (rt_ubase_t)vaddr;
    rt_ubase_t vend = vstart + size;
G
GuEe-GUI 已提交
768

769
    int (*handler)(rt_ubase_t * pte);
770
    if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END)
771
    {
772
        handler = control_handler[cmd];
B
bigmagic 已提交
773

774
        while (vstart < vend)
S
Shell 已提交
775
        {
776 777
            rt_ubase_t *pte = _query(aspace, (void *)vstart, &level_shift);
            rt_ubase_t range_end = vstart + (1ul << level_shift);
778 779 780 781 782 783 784
            RT_ASSERT(range_end <= vend);

            if (pte)
            {
                err = handler(pte);
                RT_ASSERT(err == RT_EOK);
            }
785
            vstart = range_end;
S
Shell 已提交
786 787
        }
    }
788
    else
S
Shell 已提交
789
    {
790
        err = -RT_ENOSYS;
S
Shell 已提交
791
    }
B
bigmagic 已提交
792

793
    return err;
mysterywolf's avatar
mysterywolf 已提交
794
}
S
Shell 已提交
795

796
/**
 * @brief Build the two boot translation tables before the MMU is enabled.
 *
 * tbl1 maps the kernel at its link (virtual) address onto its load address
 * (va -> va + pv_off); tbl0 identity-maps the load address range. Both use
 * 2 MiB sections with normal-memory attributes. Runs before any allocator
 * exists, so a mapping failure can only spin forever.
 *
 * @param tbl0   root table for the identity mapping
 * @param tbl1   root table for the kernel (virtual) mapping
 * @param size   number of bytes of kernel image to map
 * @param pv_off physical-minus-virtual offset of the kernel image
 */
void rt_hw_mem_setup_early(unsigned long *tbl0, unsigned long *tbl1,
                           unsigned long size, unsigned long pv_off)
{
    int ret;
    unsigned long count = (size + ARCH_SECTION_MASK) >> ARCH_SECTION_SHIFT;
    unsigned long normal_attr = MMU_MAP_CUSTOM(MMU_AP_KAUN, NORMAL_MEM);

#ifdef RT_USING_SMART
    unsigned long va = KERNEL_VADDR_START;
#else
    /* non-smart: map from the image start, rounded down to a 2 MiB boundary */
    extern unsigned char _start;
    unsigned long va = (unsigned long) &_start;
    va = RT_ALIGN_DOWN(va, 0x200000);
#endif

    /* setup pv off */
    rt_kmem_pvoff_set(pv_off);

    /* clean the first two pages */
    rt_memset((char *)tbl0, 0, ARCH_PAGE_SIZE);
    rt_memset((char *)tbl1, 0, ARCH_PAGE_SIZE);

    /* kernel view: virtual addresses onto the load (physical) range */
    ret = _init_map_2M(tbl1, va, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
    /* identity view: needed while the MMU is being switched on */
    ret = _init_map_2M(tbl0, va + pv_off, va + pv_off, count, normal_attr);
    if (ret != 0)
    {
        while (1);
    }
}