/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-14     WangXiaoyao  the first version
 */
#ifndef __MM_ASPACE_H__
#define __MM_ASPACE_H__

#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>

#include "avl_adpt.h"
#include "mm_fault.h"
#include "mm_flag.h"

#define MM_PAGE_SHIFT    12
#define MM_PA_TO_OFF(pa) ((uintptr_t)(pa) >> MM_PAGE_SHIFT)
#define PV_OFFSET        (rt_kmem_pvoff())

#ifndef RT_USING_SMP
typedef rt_spinlock_t mm_spinlock;

#define MM_PGTBL_LOCK_INIT(aspace)
#define MM_PGTBL_LOCK(aspace)      (rt_hw_spin_lock(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_UNLOCK(aspace)    (rt_hw_spin_unlock(&((aspace)->pgtbl_lock)))

#else
typedef rt_hw_spinlock_t mm_spinlock;

#define MM_PGTBL_LOCK_INIT(aspace) (rt_hw_spin_lock_init(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_LOCK(aspace)      (rt_hw_spin_lock(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_UNLOCK(aspace)    (rt_hw_spin_unlock(&((aspace)->pgtbl_lock)))

#endif /* RT_USING_SMP */
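
/*
 * Usage sketch (illustrative, not part of the public API): the mm code wraps
 * direct page-table manipulation with the macros above. rt_hw_mmu_map() is
 * only an assumed architecture-layer helper here.
 *
 *     MM_PGTBL_LOCK(aspace);
 *     // e.g. rt_hw_mmu_map(aspace, vaddr, paddr, size, attr);
 *     MM_PGTBL_UNLOCK(aspace);
 *
 * On UP builds rt_hw_spin_lock()/rt_hw_spin_unlock() typically reduce to
 * interrupt disable/enable, so the same code works in both configurations.
 */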

struct rt_aspace;
struct rt_varea;
struct rt_mem_obj;

extern struct rt_aspace rt_kernel_space;

typedef struct rt_aspace
{
    void *start;
    rt_size_t size;

    void *page_table;
    mm_spinlock pgtbl_lock;

    struct _aspace_tree tree;
    struct rt_mutex bst_lock;

    rt_uint64_t asid;
} *rt_aspace_t;

typedef struct rt_varea
{
    void *start;
    rt_size_t size;
    rt_size_t offset;

    rt_size_t attr;
    rt_size_t flag;

    struct rt_aspace *aspace;
    struct rt_mem_obj *mem_obj;

    struct _aspace_node node;

    struct rt_page *frames;
    void *data;
} *rt_varea_t;

typedef struct rt_mm_va_hint
{
    void *limit_start;
    rt_size_t limit_range_size;

    void *prefer;
    const rt_size_t map_size;

    mm_flag_t flags;
} *rt_mm_va_hint_t;

typedef struct rt_mem_obj
{
    void (*hint_free)(rt_mm_va_hint_t hint);
    void (*on_page_fault)(struct rt_varea *varea, struct rt_mm_fault_msg *msg);

    /* do pre-open business, e.g. increase a reference count */
    void (*on_varea_open)(struct rt_varea *varea);
    /* do post-close business, e.g. decrease a reference count */
    void (*on_varea_close)(struct rt_varea *varea);

    void (*on_page_offload)(struct rt_varea *varea, void *vaddr, rt_size_t size);

    const char *(*get_name)(rt_varea_t varea);
} *rt_mem_obj_t;

extern struct rt_mem_obj rt_mm_dummy_mapper;
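
/*
 * Minimal sketch of a custom backing object (illustrative only; the demo_*
 * names are hypothetical). rt_mm_dummy_mapper above is the in-tree reference
 * implementation for anonymous mappings.
 *
 *     static const char *demo_get_name(rt_varea_t varea)
 *     {
 *         return "demo";
 *     }
 *
 *     static void demo_on_page_fault(struct rt_varea *varea,
 *                                    struct rt_mm_fault_msg *msg)
 *     {
 *         // provide a backing page for the faulting address here;
 *         // the message layout is defined in mm_fault.h
 *     }
 *
 *     static struct rt_mem_obj demo_obj = {
 *         .get_name      = demo_get_name,
 *         .on_page_fault = demo_on_page_fault,
 *     };
 */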

enum rt_mmu_cntl
{
    MMU_CNTL_NONCACHE,
    MMU_CNTL_CACHE,
    MMU_CNTL_DUMMY_END,
};

/**
 * @brief Lock to access the varea tree (bst_lock) of an address space
 */
#define WR_LOCK(aspace)                                                        \
    rt_thread_self() ? rt_mutex_take(&(aspace)->bst_lock, RT_WAITING_FOREVER)  \
                     : 0
#define WR_UNLOCK(aspace)                                                      \
    rt_thread_self() ? rt_mutex_release(&(aspace)->bst_lock) : 0

#define RD_LOCK(aspace)   WR_LOCK(aspace)
#define RD_UNLOCK(aspace) WR_UNLOCK(aspace)
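
/*
 * Usage sketch (internal convention, illustrative): take the tree lock
 * around direct lookups or modifications of aspace->tree.
 *
 *     WR_LOCK(aspace);
 *     // ... search or modify the varea tree ...
 *     WR_UNLOCK(aspace);
 *
 * The rt_thread_self() check lets the macros degrade to a no-op in early
 * boot, before the scheduler provides a thread context to own the mutex.
 */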

rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);

rt_aspace_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length,
                           void *pgtbl);

void rt_aspace_delete(rt_aspace_t aspace);

void rt_aspace_detach(rt_aspace_t aspace);
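
/*
 * Creation sketch (illustrative): the caller provides the page-table base;
 * how it is allocated is architecture specific and only assumed here.
 * arch_alloc_pgtbl(), USER_VADDR_START and USER_VADDR_SIZE are hypothetical.
 *
 *     void *pgtbl = arch_alloc_pgtbl();               // assumed helper
 *     rt_aspace_t aspace = rt_aspace_create((void *)USER_VADDR_START,
 *                                           USER_VADDR_SIZE, pgtbl);
 *     if (aspace)
 *     {
 *         // ... map/unmap regions on the new space ...
 *         rt_aspace_delete(aspace);
 *     }
 */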

/**
 * @brief Memory Map on Virtual Address Space to Mappable Object
 * *INFO: There is no restriction on using the NULL address (physical or virtual).
 * The vaddr passed in *addr must be page aligned. If it is MM_MAP_FAILED,
 * a suitable address is chosen automatically.
 *
 * @param aspace target virtual address space
 * @param addr virtual address of the mapping
 * @param length length of the mapping region
 * @param attr MMU attributes of the mapping
 * @param flags desired memory protection and behaviour of the mapping
 * @param mem_obj backing store object of the mapping
 * @param offset offset into mem_obj, in 4 KiB pages
 * @return int E_OK on success, with *addr set to the vaddr of the mapping;
 *             E_INVAL on invalid arguments
 */
int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length, rt_size_t attr,
                  mm_flag_t flags, rt_mem_obj_t mem_obj, rt_size_t offset);
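
/*
 * Usage sketch (illustrative): map one page of anonymous memory into the
 * kernel space and let a suitable address be chosen. MMU_MAP_K_RWCB is an
 * architecture-level attribute macro and is an assumption here; flags of 0
 * requests no special behaviour from mm_flag.h.
 *
 *     void *vaddr = MM_MAP_FAILED;    // request automatic placement
 *     int err = rt_aspace_map(&rt_kernel_space, &vaddr, 0x1000,
 *                             MMU_MAP_K_RWCB, 0, &rt_mm_dummy_mapper, 0);
 *     if (err == 0)
 *     {
 *         // ... use vaddr ...
 *         rt_aspace_unmap(&rt_kernel_space, vaddr, 0x1000);
 *     }
 */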

/** variant of rt_aspace_map() that calls no malloc routines; the caller provides the varea */
int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
                         rt_size_t length, rt_size_t attr, mm_flag_t flags,
                         rt_mem_obj_t mem_obj, rt_size_t offset);

/**
 * @brief Memory Map on Virtual Address Space to Physical Memory
 *
 * @param aspace target virtual address space
 * @param hint hint for the vaddr of the mapping
 * @param attr MMU attributes of the mapping
 * @param pa_off physical page frame number (physical address >> MM_PAGE_SHIFT)
 * @param ret_va pointer to the location where the mapped vaddr is stored
 * @return int E_OK on success, with *ret_va set to the vaddr of the mapping;
 *             E_INVAL on invalid arguments
 */
int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, rt_size_t attr,
                      rt_size_t pa_off, void **ret_va);
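
/*
 * Usage sketch (illustrative): map a device register page by physical
 * address. The 0x50000000 base and the MMU_MAP_K_DEVICE attribute are
 * assumptions; pa_off is derived with MM_PA_TO_OFF().
 *
 *     struct rt_mm_va_hint hint = {
 *         .limit_start      = rt_kernel_space.start,
 *         .limit_range_size = rt_kernel_space.size,
 *         .prefer           = RT_NULL,      // no preferred vaddr
 *         .map_size         = 0x1000,       // one 4 KiB page
 *         .flags            = 0,
 *     };
 *     void *va = RT_NULL;
 *     int err = rt_aspace_map_phy(&rt_kernel_space, &hint, MMU_MAP_K_DEVICE,
 *                                 MM_PA_TO_OFF(0x50000000), &va);
 */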

/** variant of rt_aspace_map_phy() that calls no malloc routines; the caller provides the varea */
int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
                             rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
                             void **ret_va);

/**
 * @brief Remove any mappings overlapping the range [addr, addr + length)
 *
 * @param aspace target virtual address space
 * @param addr start virtual address of the range
 * @param length length of the range in bytes
 * @return int E_OK on success
 */
int rt_aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length);

int mm_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd);
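
/*
 * Usage sketch (illustrative): switch the cache policy of an existing
 * mapping; addr identifies the mapping to change.
 *
 *     mm_aspace_control(&rt_kernel_space, vaddr, MMU_CNTL_NONCACHE);
 */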

int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage);

int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage);

int rt_aspace_traversal(rt_aspace_t aspace,
                        int (*fn)(rt_varea_t varea, void *arg), void *arg);
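
/*
 * Usage sketch (illustrative; the names below are hypothetical): count the
 * vareas in an address space with a caller-defined callback.
 *
 *     static int count_varea(rt_varea_t varea, void *arg)
 *     {
 *         (*(rt_size_t *)arg)++;
 *         return 0;
 *     }
 *
 *     rt_size_t n = 0;
 *     rt_aspace_traversal(&rt_kernel_space, count_varea, &n);
 */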

void rt_aspace_print_all(rt_aspace_t aspace);

void rt_varea_insert_page(rt_varea_t varea, void *page_addr);

void rt_varea_free_pages(rt_varea_t varea);

void rt_varea_offload_page(rt_varea_t varea, void *vaddr, rt_size_t size);

rt_ubase_t rt_kmem_pvoff(void);

void rt_kmem_pvoff_set(rt_ubase_t pvoff);
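
/*
 * Conversion sketch (illustrative): by convention PV_OFFSET, defined above as
 * rt_kmem_pvoff(), is the value added to a kernel virtual address of the
 * linear mapping to obtain its physical address (assumed here).
 *
 *     rt_ubase_t pa = (rt_ubase_t)vaddr + PV_OFFSET;   // VA -> PA
 *     void *va      = (void *)(pa - PV_OFFSET);        // PA -> VA
 */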

#endif /* __MM_ASPACE_H__ */