/*
 * Copyright (c) 2006-2021, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2012-01-10     bernard      porting to AM1808
 */

#include <rthw.h>
#include <rtthread.h>

#include <board.h>
#include "cp15.h"
#include "mm_page.h"
#include "mmu.h"
#include <mm_aspace.h>
#include <tlb.h>

#ifdef RT_USING_SMART
#include <lwp_mm.h>
#include <lwp_arch.h>
#include "ioremap.h"
#else
#define KERNEL_VADDR_START 0
#endif

/* level1 page table, each entry for 1MB memory. */
/* 4096 entries cover the full 4 GB address space; ARMv5/v7 short-descriptor
 * format requires the level-1 table to be 16 KB aligned for the TTBR. */
volatile unsigned long MMUTable[4 * 1024] __attribute__((aligned(16 * 1024)));

/* Replace the CP15 domain access control register (DACR, c3) with
 * domain_val and return the previous value so callers can restore it. */
unsigned long rt_hw_set_domain_register(unsigned long domain_val)
{
    unsigned long prev;

    /* read the current DACR, then program the new setting */
    asm volatile ("mrc p15, 0, %0, c3, c0\n" : "=r" (prev));
    asm volatile ("mcr p15, 0, %0, c3, c0\n" : :"r" (domain_val) : "memory");

    return prev;
}

42 43
/* Fill level-1 section entries for the virtual range [vaddrStart, vaddrEnd]
 * with 1 MB mappings starting at paddrStart, all carrying the same attr. */
void rt_hw_mmu_setmtt(rt_uint32_t vaddrStart, rt_uint32_t vaddrEnd,
                      rt_uint32_t paddrStart, rt_uint32_t attr)
{
    volatile rt_uint32_t *entry;
    volatile int idx, last;

    entry = (rt_uint32_t *)MMUTable + (vaddrStart >> 20);
    last  = (vaddrEnd >> 20) - (vaddrStart >> 20);

    /* vaddrEnd is inclusive, hence "<=" */
    for (idx = 0; idx <= last; idx++)
    {
        *entry++ = attr | (((paddrStart >> 20) + idx) << 20);
    }
}

56 57 58 59 60
/* Build the initial level-1 page table from an array of `size` memory
 * descriptors, then flush the table from the data cache so the MMU's
 * table walker sees the up-to-date entries. */
void rt_hw_init_mmu_table(struct mem_desc *mdesc, rt_uint32_t size)
{
    rt_uint32_t n;
    struct mem_desc *desc = mdesc;

    for (n = 0; n < size; n++, desc++)
    {
        /* ARCH_MAP_FAILED marks "no fixed physical address": derive one
         * from the virtual address via the global physical-virtual offset */
        if (desc->paddr_start == (rt_uint32_t)ARCH_MAP_FAILED)
        {
            desc->paddr_start = desc->vaddr_start + PV_OFFSET;
        }

        rt_hw_mmu_setmtt(desc->vaddr_start, desc->vaddr_end,
                         desc->paddr_start, desc->attr);
    }

    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void*)MMUTable, sizeof MMUTable);
}

B
Bernard Xiong 已提交
71 72
/* Bring the MMU up with MMUTable as the translation table.
 * NOTE: the statement order below is load-bearing — caches must be clean
 * and disabled before the translation regime changes, and re-enabled only
 * after the MMU is back on. Do not reorder. */
void rt_hw_mmu_init(void)
{
    /* push dirty data to memory and drop stale instructions first */
    rt_cpu_dcache_clean_flush();
    rt_cpu_icache_flush();
    rt_hw_cpu_dcache_disable();
    rt_hw_cpu_icache_disable();
    rt_cpu_mmu_disable();

    /*rt_hw_cpu_dump_page_table(MMUTable);*/
    /* 0x55555555 = all 16 domains set to "client": accesses are checked
     * against the permission bits of the page-table entries */
    rt_hw_set_domain_register(0x55555555);

    /* point the translation table base at the kernel level-1 table */
    rt_cpu_tlb_set(MMUTable);

    rt_cpu_mmu_enable();

    /* safe to cache again now that translation is active */
    rt_hw_cpu_icache_enable();
    rt_hw_cpu_dcache_enable();
}

90
/* Validate the [v_address, v_address + size) window reserved for kernel
 * mappings and initialize the kernel address space on top of `vtable`
 * (the level-1 table). Returns 0 on success, -1 on invalid arguments or
 * if any level-1 entry in the window is already in use. */
int rt_hw_mmu_map_init(struct rt_aspace *aspace, void* v_address, size_t size, size_t *vtable, size_t pv_off)
{
    size_t idx, sec_first, sec_last;

    if (!aspace || !vtable)
    {
        return -1;
    }

    sec_first = (size_t)v_address;
    sec_last  = (size_t)v_address + size - 1;

    /* reject a window that wraps around the top of the address space */
    if (sec_last < sec_first)
    {
        return -1;
    }

    sec_first >>= ARCH_SECTION_SHIFT;
    sec_last >>= ARCH_SECTION_SHIFT;

    /* the first section (NULL page) must stay unmapped */
    if (sec_first == 0)
    {
        return -1;
    }

    /* every level-1 entry in the window must currently be free */
    for (idx = sec_first; idx <= sec_last; idx++)
    {
        if (vtable[idx] & ARCH_MMU_USED_MASK)
        {
            return -1;
        }
    }

#ifdef RT_USING_SMART
    rt_aspace_init(&rt_kernel_space, (void *)USER_VADDR_TOP, 0 - USER_VADDR_TOP, vtable);
    rt_ioremap_start = v_address;
    rt_ioremap_size = size;
    rt_mpr_start = rt_ioremap_start - rt_mpr_size;
#else
    rt_aspace_init(&rt_kernel_space, (void *)0x1000, 0 - 0x1000, vtable);
    rt_mpr_start = (void *)0 - rt_mpr_size;
#endif

    return 0;
}

138
/* Pre-populate level-2 page tables for the kernel ioremap window so later
 * remaps never need to allocate a level-1 entry. v_address and size must
 * be section (1 MB) aligned. Only active when RT_IOREMAP_LATE is
 * configured; otherwise this is a no-op that returns 0. Returns -1 on
 * bad alignment, an address below the kernel window, or allocation failure. */
int rt_hw_mmu_ioremap_init(rt_aspace_t aspace, void* v_address, size_t size)
{
#ifdef RT_IOREMAP_LATE
    size_t loop_va;
    size_t l1_off;
    size_t *mmu_l1, *mmu_l2;
    size_t sections;

    /* for kernel ioremap */
    if ((size_t)v_address < KERNEL_VADDR_START)
    {
        return -1;
    }
    /* must align to section */
    if ((size_t)v_address & ARCH_SECTION_MASK)
    {
        return -1;
    }
    /* must align to section */
    if (size & ARCH_SECTION_MASK)
    {
        return -1;
    }

    loop_va = (size_t)v_address;
    sections = (size >> ARCH_SECTION_SHIFT);
    while (sections--)
    {
        l1_off = (loop_va >> ARCH_SECTION_SHIFT);
        mmu_l1 =  (size_t*)aspace->page_table + l1_off;

        RT_ASSERT((*mmu_l1 & ARCH_MMU_USED_MASK) == 0);
        mmu_l2 = (size_t*)rt_pages_alloc(0);
        if (mmu_l2)
        {
            /* NOTE(review): clears 2 * ARCH_PAGE_TBL_SIZE but flushes only
             * ARCH_PAGE_TBL_SIZE — presumably the second half is a shadow
             * table not read by hardware; confirm against mm_page layout */
            rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2, ARCH_PAGE_TBL_SIZE);

            /* level-1 entry holds the table's PHYSICAL address; low bit 0x1
             * marks a coarse page-table descriptor */
            *mmu_l1 = (((size_t)mmu_l2 + PV_OFFSET) | 0x1);
            /* cache maintain */
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l1, 4);
        }
        else
        {
            /* error */
            return -1;
        }

        loop_va += ARCH_SECTION_SIZE;
    }
#endif

    return 0;
}




197
static void _kenrel_unmap_4K(unsigned long *lv0_tbl, void *v_addr)
G
guo 已提交
198
{
199
    size_t loop_va = (size_t)v_addr & ~ARCH_PAGE_MASK;
G
guo 已提交
200 201 202
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

203
    l1_off = (loop_va >> ARCH_SECTION_SHIFT);
G
guo 已提交
204

205 206
    l2_off = ((loop_va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
    mmu_l1 = (size_t *)lv0_tbl + l1_off;
G
guo 已提交
207

208
    if (*mmu_l1 & ARCH_MMU_USED_MASK)
G
guo 已提交
209
    {
210
        mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
G
guo 已提交
211
    }
212
    else
G
guo 已提交
213
    {
214
        return;
G
guo 已提交
215 216
    }

217
    if (*(mmu_l2 + l2_off) & ARCH_MMU_USED_MASK)
G
guo 已提交
218
    {
219 220 221
        *(mmu_l2 + l2_off) = 0;
        /* cache maintain */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2 + l2_off, 4);
G
guo 已提交
222

223
        if (rt_pages_free(mmu_l2, 0))
G
guo 已提交
224
        {
225 226
            *mmu_l1 = 0;
            rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l1, 4);
G
guo 已提交
227 228
        }
    }
229
    loop_va += ARCH_PAGE_SIZE;
G
guo 已提交
230 231
}

232 233
/* Install one 4 KB mapping v_addr -> p_addr with the given attr bits into
 * the two-level table rooted at lv0_tbl. Allocates (or takes a reference
 * on) the level-2 table for the section as needed.
 * Returns 0 on success, -1 when the level-2 table cannot be allocated. */
static int _kenrel_map_4K(unsigned long *lv0_tbl, void *v_addr, void *p_addr,
                          size_t attr)
{
    size_t va = (size_t)v_addr & ~ARCH_PAGE_MASK;
    size_t pa = (size_t)p_addr & ~ARCH_PAGE_MASK;
    size_t l1_off, l2_off;
    size_t *mmu_l1, *mmu_l2;

    l1_off = (va >> ARCH_SECTION_SHIFT);
    l2_off = ((va & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
    mmu_l1 = (size_t *)lv0_tbl + l1_off;

    if (*mmu_l1 & ARCH_MMU_USED_MASK)
    {
        /* level-2 table already exists: take one more reference on it
         * (released by _kenrel_unmap_4K) */
        mmu_l2 = (size_t *)((*mmu_l1 & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
        rt_page_ref_inc(mmu_l2, 0);
    }
    else
    {
        mmu_l2 = (size_t *)rt_pages_alloc(0);
        if (!mmu_l2)
        {
            /* error, quit */
            return -1;
        }

        /* NOTE(review): clears 2 * ARCH_PAGE_TBL_SIZE but flushes only
         * ARCH_PAGE_TBL_SIZE — presumably a shadow table occupies the
         * second half; confirm against mm_page layout */
        rt_memset(mmu_l2, 0, ARCH_PAGE_TBL_SIZE * 2);
        /* cache maintain */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2, ARCH_PAGE_TBL_SIZE);

        /* level-1 entry holds the PHYSICAL table address; 0x1 = coarse
         * page-table descriptor */
        *mmu_l1 = (((size_t)mmu_l2 + PV_OFFSET) | 0x1);
        /* cache maintain */
        rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l1, 4);
    }

    *(mmu_l2 + l2_off) = (pa | attr);
    /* cache maintain */
    rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, mmu_l2 + l2_off, 4);

    /* dead trailing "loop_va/loop_pa += ARCH_PAGE_SIZE" removed: this
     * function maps exactly one page */
    return 0;
}

279 280
/* Map `size` bytes (page-granular) starting at v_addr to p_addr with attr.
 * On success returns the BASE virtual address of the mapping; on failure
 * unmaps any pages already installed and returns NULL.
 * Note: size < ARCH_PAGE_SIZE maps nothing and returns NULL (ret stays -1). */
void *rt_hw_mmu_map(rt_aspace_t aspace, void *v_addr, void *p_addr, size_t size,
                    size_t attr)
{
    int ret = -1;
    void *unmap_va = v_addr;  /* remember the requested base for undo/return */
    size_t npages = size >> ARCH_PAGE_SHIFT;

    // TODO trying with HUGEPAGE here
    while (npages--)
    {
        ret = _kenrel_map_4K(aspace->page_table, v_addr, p_addr, attr);
        if (ret != 0)
        {
            /* error, undo every page mapped so far */
            while (unmap_va != v_addr)
            {
                rt_enter_critical();
                _kenrel_unmap_4K(aspace->page_table, (void *)unmap_va);
                rt_exit_critical();

                unmap_va += ARCH_PAGE_SIZE;
            }
            break;
        }
        v_addr += ARCH_PAGE_SIZE;
        p_addr += ARCH_PAGE_SIZE;
    }

    if (ret == 0)
    {
        /* bug fix: return the base of the mapped region; the original
         * returned v_addr, which the loop had already advanced one page
         * past the end of the mapping */
        return unmap_va;
    }

    return NULL;
}

315
/* Tear down `size` bytes of 4 KB mappings starting at v_addr.
 * Caller guarantees that v_addr and size are page aligned. */
void rt_hw_mmu_unmap(rt_aspace_t aspace, void *v_addr, size_t size)
{
    size_t remaining = size >> ARCH_PAGE_SHIFT;
    void *cursor = v_addr;

    if (!aspace->page_table)
    {
        return;
    }

    for (; remaining; remaining--, cursor += ARCH_PAGE_SIZE)
    {
        /* serialize each single-page teardown against other walkers */
        rt_enter_critical();
        _kenrel_unmap_4K(aspace->page_table, cursor);
        rt_exit_critical();
    }
}

335
/* Activate the given address space on the current CPU. The kernel space
 * is already resident, so switching to it is a no-op. */
void rt_hw_aspace_switch(rt_aspace_t aspace)
{
    void *table_pa;

    if (aspace == &rt_kernel_space)
    {
        return;
    }

    /* the hardware needs the PHYSICAL address of the level-1 table */
    table_pa = _rt_kmem_v2p(aspace->page_table);

    rt_hw_mmu_switch(table_pa);

    /* stale translations from the previous space must not survive */
    rt_hw_tlb_invalidate_all_local();
}

348
void init_mm_setup(unsigned int *mtbl, unsigned int size, unsigned int pv_off)
G
guo 已提交
349
{
350
    unsigned int va;
G
guo 已提交
351

352
    for (va = 0; va < 0x1000; va++)
G
guo 已提交
353
    {
354
        unsigned int vaddr = (va << 20);
G
guo 已提交
355

356
        if (vaddr >= KERNEL_VADDR_START && vaddr - KERNEL_VADDR_START < size)
G
guo 已提交
357
        {
358
            mtbl[va] = ((va << 20) + pv_off) | NORMAL_MEM;
G
guo 已提交
359
        }
360
        else if (vaddr >= (KERNEL_VADDR_START + pv_off) && vaddr - (KERNEL_VADDR_START + pv_off) < size)
G
guo 已提交
361
        {
362 363 364 365 366
            mtbl[va] = (va << 20) | NORMAL_MEM;
        }
        else
        {
            mtbl[va] = 0;
G
guo 已提交
367 368 369 370
        }
    }
}

371
/* Walk aspace's two-level table and translate v_addr to a physical
 * address. Returns ARCH_MAP_FAILED when the address is unmapped or the
 * entry type (large page, supersection) is not supported by this port. */
void *rt_hw_mmu_v2p(rt_aspace_t aspace, void* v_addr)
{
    size_t l1_idx, l2_idx;
    size_t *l1_slot, *l2_base;
    size_t l1_entry, l2_entry;

    RT_ASSERT(aspace);

    l1_idx = (size_t)v_addr >> ARCH_SECTION_SHIFT;
    l1_slot = (size_t*)aspace->page_table + l1_idx;
    l1_entry = *l1_slot;

    switch (l1_entry & ARCH_MMU_USED_MASK)
    {
        case 1: /* coarse level-2 page table */
            /* descriptor holds the table's physical address; convert back */
            l2_base = (size_t *)((l1_entry & ~ARCH_PAGE_TBL_MASK) - PV_OFFSET);
            l2_idx = (((size_t)v_addr & ARCH_SECTION_MASK) >> ARCH_PAGE_SHIFT);
            l2_entry = *(l2_base + l2_idx);
            if (l2_entry & ARCH_MMU_USED_MASK)
            {
                if ((l2_entry & ARCH_MMU_USED_MASK) == 1)
                {
                    /* large page, not support */
                    break;
                }
                return (void*)((l2_entry & ~(ARCH_PAGE_MASK))
                               + ((size_t)v_addr & ARCH_PAGE_MASK));
            }
            break;
        case 2:
        case 3:
            /* section */
            if (l1_entry & ARCH_TYPE_SUPERSECTION)
            {
                /* super section, not support */
                break;
            }
            return (void*)((l1_entry & ~ARCH_SECTION_MASK)
                           + ((size_t)v_addr & ARCH_SECTION_MASK));
        default: /* 0: not used */
            break;
    }

    return ARCH_MAP_FAILED;
}

421 422
/* Change attributes (cache policy / permissions) of an existing mapping.
 * Not implemented on this port: always reports "not supported". */
int rt_hw_mmu_control(struct rt_aspace *aspace, void *vaddr, size_t size,
                      enum rt_mmu_cntl cmd)
{
    return -RT_ENOSYS;
}