/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2021-01-30     lizhirui     first version
 * 2022-12-13     WangXiaoyao  Port to new mm
 */

#include <rtthread.h>
#include <stddef.h>
#include <stdint.h>

#define DBG_TAG "hw.mmu"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <cache.h>
#include <mm_aspace.h>
#include <mm_page.h>
#include <mmu.h>
#include <riscv_mmu.h>
#include <tlb.h>

#ifdef RT_USING_SMART
#include <board.h>
#include <ioremap.h>
#include <lwp_user_mm.h>
#endif

#ifndef RT_USING_SMART
#define USER_VADDR_START 0
#endif

static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size);

static void *current_mmu_table = RT_NULL;

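/* root page table of the kernel address space; satp stores its PPN, so the
 * table must be page (4 KiB) aligned */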
volatile __attribute__((aligned(4 * 1024)))
rt_ubase_t MMUTable[__SIZE(VPN2_BIT)];

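/* Switch the hart to `aspace`: satp is programmed with the paging mode and
 * the physical page number of the root page table, then stale translations
 * are flushed from the local TLB. */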
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    uintptr_t page_table = (uintptr_t)_rt_kmem_v2p(aspace->page_table);
    current_mmu_table = aspace->page_table;

    write_csr(satp, (((size_t)SATP_MODE) << SATP_MODE_OFFSET) |
                        ((rt_ubase_t)page_table >> PAGE_OFFSET_BIT));
    rt_hw_tlb_invalidate_all_local();
}

void *rt_hw_mmu_tbl_get(void)
{
    return current_mmu_table;
}

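/* Install a single 4 KiB mapping for va -> pa, creating the intermediate
 * level-2/level-3 tables on demand; every new entry takes a reference on the
 * table page that holds it. Returns 0 on success, -1 if a page-table
 * allocation fails. */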
static int _map_one_page(struct rt_aspace *aspace, void *va, void *pa,
                         size_t attr)
{
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;

    l1_off = GET_L1((size_t)va);
    l2_off = GET_L2((size_t)va);
    l3_off = GET_L3((size_t)va);

    mmu_l1 = ((rt_size_t *)aspace->page_table) + l1_off;

    if (PTE_USED(*mmu_l1))
    {
        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);
    }
    else
    {
        mmu_l2 = (rt_size_t *)rt_pages_alloc(0);

        if (mmu_l2)
        {
            rt_memset(mmu_l2, 0, PAGE_SIZE);
            rt_hw_cpu_dcache_clean(mmu_l2, PAGE_SIZE);
            *mmu_l1 = COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l2, PV_OFFSET),
                                 PAGE_DEFAULT_ATTR_NEXT);
            rt_hw_cpu_dcache_clean(mmu_l1, sizeof(*mmu_l1));
        }
        else
        {
            return -1;
        }
    }

    if (PTE_USED(*(mmu_l2 + l2_off)))
    {
        RT_ASSERT(!PAGE_IS_LEAF(*(mmu_l2 + l2_off)));
        mmu_l3 =
            (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)), PV_OFFSET);
    }
    else
    {
        mmu_l3 = (rt_size_t *)rt_pages_alloc(0);

        if (mmu_l3)
        {
            rt_memset(mmu_l3, 0, PAGE_SIZE);
            rt_hw_cpu_dcache_clean(mmu_l3, PAGE_SIZE);
            *(mmu_l2 + l2_off) =
                COMBINEPTE((rt_size_t)VPN_TO_PPN(mmu_l3, PV_OFFSET),
                           PAGE_DEFAULT_ATTR_NEXT);
            rt_hw_cpu_dcache_clean(mmu_l2 + l2_off, sizeof(*(mmu_l2 + l2_off)));
            // declare a reference on the parent page table
            rt_page_ref_inc((void *)mmu_l2, 0);
        }
        else
        {
            return -1;
        }
    }

    RT_ASSERT(!PTE_USED(*(mmu_l3 + l3_off)));
    // declare a reference on the parent page table
    rt_page_ref_inc((void *)mmu_l3, 0);
    *(mmu_l3 + l3_off) = COMBINEPTE((rt_size_t)pa, attr);
    rt_hw_cpu_dcache_clean(mmu_l3 + l3_off, sizeof(*(mmu_l3 + l3_off)));
    return 0;
}

/** rt_hw_mmu_map never overrides an existing page table entry */
void *rt_hw_mmu_map(struct rt_aspace *aspace, void *v_addr, void *p_addr,
                    size_t size, size_t attr)
{
    int ret = -1;
    void *unmap_va = v_addr;
    size_t npages = size >> ARCH_PAGE_SHIFT;

    // TODO: try mapping with huge pages here
    while (npages--)
    {
        ret = _map_one_page(aspace, v_addr, p_addr, attr);
        if (ret != 0)
        {
            /* error, undo map */
            while (unmap_va != v_addr)
            {
                MM_PGTBL_LOCK(aspace);
                _unmap_area(aspace, unmap_va, ARCH_PAGE_SIZE);
                MM_PGTBL_UNLOCK(aspace);
                unmap_va += ARCH_PAGE_SIZE;
            }
            break;
        }
        v_addr += ARCH_PAGE_SIZE;
        p_addr += ARCH_PAGE_SIZE;
    }

    if (ret == 0)
    {
        return unmap_va;
    }

    return NULL;
}
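
/* A minimal usage sketch (address choices are illustrative; MMU_MAP_K_RWCB
 * is the cached kernel read/write attribute used elsewhere in this file):
 *
 *     void *va = rt_hw_mmu_map(&rt_kernel_space, v_addr, p_addr,
 *                              4 * ARCH_PAGE_SIZE, MMU_MAP_K_RWCB);
 *     if (va == RT_NULL)
 *     {
 *         LOG_E("mapping failed");
 *     }
 */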

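/* Clear a PTE, then walk back up through lvl_entry[]: each live entry holds
 * a reference on the table page containing it, so when a table drops to its
 * last reference it is freed as well and the entry pointing at it one level
 * above is cleared next. */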
static void _unmap_pte(rt_size_t *pentry, rt_size_t *lvl_entry[], int level)
{
    int loop_flag = 1;
    while (loop_flag)
    {
        loop_flag = 0;
        *pentry = 0;
        rt_hw_cpu_dcache_clean(pentry, sizeof(*pentry));

        // level 0 is not handled here; it is maintained by the caller
        if (level > 0)
        {
            void *page = (void *)((rt_ubase_t)pentry & ~ARCH_PAGE_MASK);

            // drop the reference the cleared entry held on this table page
            rt_pages_free(page, 0);

            int free = rt_page_ref_get(page, 0);
            if (free == 1)
            {
                rt_pages_free(page, 0);
                pentry = lvl_entry[--level];
                loop_flag = 1;
            }
        }
    }
}

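/* Unmap whatever single mapping covers v_addr (a 4 KiB page or a superpage)
 * and return the byte span of the entry examined; the walk starts assuming a
 * level-1 span (1 GiB with Sv39 parameters) and shrinks by ARCH_INDEX_WIDTH
 * bits for every level it descends. */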
static size_t _unmap_area(struct rt_aspace *aspace, void *v_addr, size_t size)
{
    rt_size_t loop_va = __UMASKVALUE((rt_size_t)v_addr, PAGE_OFFSET_MASK);
    size_t unmapped = 0;

    int i = 0;
    rt_size_t lvl_off[3];
    rt_size_t *lvl_entry[3];
    lvl_off[0] = (rt_size_t)GET_L1(loop_va);
    lvl_off[1] = (rt_size_t)GET_L2(loop_va);
    lvl_off[2] = (rt_size_t)GET_L3(loop_va);
    unmapped = 1 << (ARCH_PAGE_SHIFT + ARCH_INDEX_WIDTH * 2ul);

    rt_size_t *pentry;
    lvl_entry[i] = ((rt_size_t *)aspace->page_table + lvl_off[i]);
    pentry = lvl_entry[i];

    // find leaf page table entry
    while (PTE_USED(*pentry) && !PAGE_IS_LEAF(*pentry))
    {
        i += 1;
        lvl_entry[i] = ((rt_size_t *)PPN_TO_VPN(GET_PADDR(*pentry), PV_OFFSET) +
                        lvl_off[i]);
        pentry = lvl_entry[i];
        unmapped >>= ARCH_INDEX_WIDTH;
    }

    // clear the leaf PTE and release any page tables left empty
    if (PTE_USED(*pentry))
    {
        _unmap_pte(pentry, lvl_entry, i);
    }

    return unmapped;
}

/** Unlike map, unmap can handle regions spanning multiple pages */
void rt_hw_mmu_unmap(struct rt_aspace *aspace, void *v_addr, size_t size)
{
    // the caller guarantees that v_addr and size are page aligned
    if (!aspace->page_table)
    {
        return;
    }
    size_t unmapped = 0;

    while (size > 0)
    {
        MM_PGTBL_LOCK(aspace);
        unmapped = _unmap_area(aspace, v_addr, size);
        MM_PGTBL_UNLOCK(aspace);

        // when unmapped == 0, the region does not exist in the page table
        if (!unmapped || unmapped > size)
            break;

        size -= unmapped;
        v_addr += unmapped;
    }
}

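/* Lay out the fixed windows relative to the region handed to
 * rt_hw_mmu_map_init: with RT_USING_SMART that region becomes the ioremap
 * window and the rt_mpr window used by the page management code sits
 * directly below it; without Smart only the rt_mpr window is reserved. */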
#ifdef RT_USING_SMART
static inline void _init_region(void *vaddr, size_t size)
{
    rt_ioremap_start = vaddr;
    rt_ioremap_size = size;
    rt_mpr_start = rt_ioremap_start - rt_mpr_size;
    LOG_D("rt_ioremap_start: %p, rt_mpr_start: %p", rt_ioremap_start, rt_mpr_start);
}
#else
static inline void _init_region(void *vaddr, size_t size)
{
    rt_mpr_start = vaddr - rt_mpr_size;
}
#endif

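/* Validate the kernel window (aspace/vtable non-null, the range must not
 * wrap and must not touch level-1 slot 0, and the level-1 entries it needs
 * must still be empty), then initialize rt_kernel_space on `vtable` and
 * reserve the ioremap/rt_mpr windows. Returns 0 on success, -1 otherwise. */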
int rt_hw_mmu_map_init(rt_aspace_t aspace, void *v_address, rt_size_t size,
                       rt_size_t *vtable, rt_size_t pv_off)
{
    size_t l1_off, va_s, va_e;
    rt_base_t level;

    if ((!aspace) || (!vtable))
    {
        return -1;
    }

    va_s = (rt_size_t)v_address;
    va_e = ((rt_size_t)v_address) + size - 1;

    if (va_e < va_s)
    {
        return -1;
    }

    // convert the addresses to their level-1 (VPN[2]) indices
    va_s = GET_L1(va_s);
    va_e = GET_L1(va_e);

    if (va_s == 0)
    {
        return -1;
    }

    // ensure the level-1 entries to be used are still empty
    for (l1_off = va_s; l1_off <= va_e; l1_off++)
    {
        size_t v = vtable[l1_off];

        if (v)
        {
            return -1;
        }
    }

    rt_aspace_init(&rt_kernel_space, (void *)0x1000, USER_VADDR_START - 0x1000,
                   vtable);

    _init_region(v_address, size);
    return 0;
}

static const int max_level =
    (ARCH_VADDR_WIDTH - ARCH_PAGE_SHIFT) / ARCH_INDEX_WIDTH;

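/* Bytes covered by one PTE at `level`; with Sv39 parameters
 * (ARCH_VADDR_WIDTH = 39, ARCH_PAGE_SHIFT = 12, ARCH_INDEX_WIDTH = 9),
 * max_level is 3 and levels 1/2/3 cover 1 GiB / 2 MiB / 4 KiB. */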
static inline uintptr_t _get_level_size(int level)
{
    return 1ul << (ARCH_PAGE_SHIFT + (max_level - level) * ARCH_INDEX_WIDTH);
}

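/* Walk the page table from level 1 downwards and return a pointer to the
 * first leaf PTE (any of X/W/R set) covering vaddr, storing its level in
 * *level; returns RT_NULL when the address is unmapped. */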
static rt_size_t *_query(struct rt_aspace *aspace, void *vaddr, int *level)
{
    rt_size_t l1_off, l2_off, l3_off;
    rt_size_t *mmu_l1, *mmu_l2, *mmu_l3;
    rt_size_t pa;

    l1_off = GET_L1((rt_size_t)vaddr);
    l2_off = GET_L2((rt_size_t)vaddr);
    l3_off = GET_L3((rt_size_t)vaddr);

    if (!aspace)
    {
        LOG_W("%s: no aspace", __func__);
        return RT_NULL;
    }

    mmu_l1 = ((rt_size_t *)aspace->page_table) + l1_off;

    if (PTE_USED(*mmu_l1))
    {
        if (*mmu_l1 & PTE_XWR_MASK)
        {
            *level = 1;
            return mmu_l1;
        }

        mmu_l2 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*mmu_l1), PV_OFFSET);

        if (PTE_USED(*(mmu_l2 + l2_off)))
        {
            if (*(mmu_l2 + l2_off) & PTE_XWR_MASK)
            {
                *level = 2;
                return mmu_l2 + l2_off;
            }

            mmu_l3 = (rt_size_t *)PPN_TO_VPN(GET_PADDR(*(mmu_l2 + l2_off)),
                                             PV_OFFSET);

            if (PTE_USED(*(mmu_l3 + l3_off)))
            {
                *level = 3;
                return mmu_l3 + l3_off;
            }
        }
    }

    return RT_NULL;
}

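/* Translate a virtual address by querying its leaf PTE: the PPN supplies
 * the upper bits and the offset below the leaf's level is OR-ed in, so
 * 2 MiB and 1 GiB superpages translate correctly. Returns ARCH_MAP_FAILED
 * when the address is unmapped. */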
void *rt_hw_mmu_v2p(struct rt_aspace *aspace, void *vaddr)
{
    int level;
    uintptr_t *pte = _query(aspace, vaddr, &level);
    uintptr_t paddr;

    if (pte)
    {
        paddr = GET_PADDR(*pte);
        paddr |= ((intptr_t)vaddr & (_get_level_size(level) - 1));
    }
    else
    {
        paddr = (uintptr_t)ARCH_MAP_FAILED;
    }
    return (void *)paddr;
}
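
/* Base RV64 Sv39 PTEs carry no cacheability bits (that would require an
 * extension such as Svpbmt), so both cache-control handlers are no-ops. */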

static int _noncache(uintptr_t *pte)
{
    return 0;
}

static int _cache(uintptr_t *pte)
{
    return 0;
}

static int (*control_handler[MMU_CNTL_DUMMY_END])(uintptr_t *pte) = {
    [MMU_CNTL_CACHE] = _cache,
    [MMU_CNTL_NONCACHE] = _noncache,
};
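
/* Apply a cache-control command to every leaf PTE covering
 * [vaddr, vaddr + size); unsupported commands yield -RT_ENOSYS. */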

int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
{
    int level;
    int err = -RT_EINVAL;
    void *vend = vaddr + size;

    int (*handler)(uintptr_t *pte);
    if (cmd >= 0 && cmd < MMU_CNTL_DUMMY_END && control_handler[cmd])
    {
        handler = control_handler[cmd];

        while (vaddr < vend)
        {
            uintptr_t *pte = _query(aspace, vaddr, &level);

            if (pte)
            {
                err = handler(pte);
                RT_ASSERT(err == RT_EOK);
            }
            else
            {
                /* unmapped hole: _query left `level` unset, so step by the
                 * smallest page instead of using a stale value */
                level = max_level;
            }

            void *range_end = vaddr + _get_level_size(level);
            RT_ASSERT(range_end <= vend);
            vaddr = range_end;
        }
    }
    else
    {
        err = -RT_ENOSYS;
    }

    return err;
}

/**
 * @brief Set up the page table for kernel space. It is a fixed map:
 * none of the mappings can be changed after initialization.
 *
 * Memory regions in struct mem_desc must be page aligned;
 * otherwise the mapping fails and no error is reported.
 *
 * @param aspace  the kernel address space
 * @param mdesc   array of memory region descriptors
 * @param desc_nr number of descriptors
 */
void rt_hw_mmu_setup(rt_aspace_t aspace, struct mem_desc *mdesc, int desc_nr)
{
    void *err;
    for (int i = 0; i < desc_nr; i++)
    {
        size_t attr;
        switch (mdesc->attr)
        {
        case NORMAL_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
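        /* no distinct non-cached attribute in the base page-table format,
         * so non-cached normal memory falls back to the cached attribute */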
        case NORMAL_NOCACHE_MEM:
            attr = MMU_MAP_K_RWCB;
            break;
        case DEVICE_MEM:
            attr = MMU_MAP_K_DEVICE;
            break;
        default:
            attr = MMU_MAP_K_DEVICE;
            break;
        }

        struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
                                     .limit_start = aspace->start,
                                     .limit_range_size = aspace->size,
                                     .map_size = mdesc->vaddr_end -
                                                 mdesc->vaddr_start + 1,
                                     .prefer = (void *)mdesc->vaddr_start};

        if (mdesc->paddr_start == (rt_size_t)ARCH_MAP_FAILED)
            mdesc->paddr_start = mdesc->vaddr_start + PV_OFFSET;

        rt_aspace_map_phy_static(aspace, &mdesc->varea, &hint, attr,
                                 mdesc->paddr_start >> MM_PAGE_SHIFT, &err);
        mdesc++;
    }

    rt_hw_aspace_switch(&rt_kernel_space);
    rt_page_cleanup();
}