/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-11-14     WangXiaoyao  the first version
 */
#ifndef __MM_ASPACE_H__
#define __MM_ASPACE_H__

#include <rthw.h>
#include <rtthread.h>
#include <stddef.h>

#include "avl_adpt.h"
#include "mm_fault.h"
#include "mm_flag.h"

#define MM_PAGE_SHIFT    12
#define MM_PA_TO_OFF(pa) ((uintptr_t)(pa) >> MM_PAGE_SHIFT)
#define PV_OFFSET        (rt_kmem_pvoff())
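/*
 * Worked example: with 4 KiB pages (MM_PAGE_SHIFT == 12),
 * MM_PA_TO_OFF(0x40201000) yields the page frame offset 0x40201,
 * which is the form expected by the pa_off parameter of
 * rt_aspace_map_phy() below.
 */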

#ifndef RT_USING_SMP
typedef rt_spinlock_t mm_spinlock;

#define MM_PGTBL_LOCK_INIT(aspace)
#define MM_PGTBL_LOCK(aspace)      (rt_hw_spin_lock(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_UNLOCK(aspace)    (rt_hw_spin_unlock(&((aspace)->pgtbl_lock)))

#else
typedef struct rt_spinlock mm_spinlock;

#define MM_PGTBL_LOCK_INIT(aspace) (rt_spin_lock_init(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_LOCK(aspace)      (rt_spin_lock(&((aspace)->pgtbl_lock)))
#define MM_PGTBL_UNLOCK(aspace)    (rt_spin_unlock(&((aspace)->pgtbl_lock)))

#endif /* RT_USING_SMP */
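/*
 * Illustrative sketch (not part of the original header): raw page-table
 * updates are expected to be bracketed by the pgtbl lock. Given an
 * rt_aspace_t aspace:
 *
 *     MM_PGTBL_LOCK(aspace);
 *     // ... modify entries under aspace->page_table ...
 *     MM_PGTBL_UNLOCK(aspace);
 */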

struct rt_aspace;
struct rt_varea;
struct rt_mem_obj;

extern struct rt_aspace rt_kernel_space;

typedef struct rt_aspace
{
    void *start;
    rt_size_t size;

    void *page_table;
    mm_spinlock pgtbl_lock;

    struct _aspace_tree tree;
    struct rt_mutex bst_lock;

    rt_uint64_t asid;
} *rt_aspace_t;

typedef struct rt_varea
{
    void *start;
    rt_size_t size;
    rt_size_t offset;

    rt_size_t attr;
    rt_size_t flag;

    struct rt_aspace *aspace;
    struct rt_mem_obj *mem_obj;

    struct _aspace_node node;

    struct rt_page *frames;
    void *data;
} *rt_varea_t;

typedef struct rt_mm_va_hint
{
    void *limit_start;
    rt_size_t limit_range_size;

    void *prefer;
    const rt_size_t map_size;

    mm_flag_t flags;
} *rt_mm_va_hint_t;

typedef struct rt_mem_obj
{
    void (*hint_free)(rt_mm_va_hint_t hint);
    void (*on_page_fault)(struct rt_varea *varea, struct rt_aspace_fault_msg *msg);

    /* do pre-open business, e.g. increase a reference count */
    void (*on_varea_open)(struct rt_varea *varea);
    /* do post-close business, e.g. decrease a reference count */
    void (*on_varea_close)(struct rt_varea *varea);

    void (*on_page_offload)(struct rt_varea *varea, void *vaddr, rt_size_t size);

    const char *(*get_name)(rt_varea_t varea);
} *rt_mem_obj_t;
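/*
 * Illustrative sketch of a minimal backing object, modeled on
 * rt_mm_dummy_mapper below. The fault-status name is assumed to come
 * from mm_fault.h; everything named null_* is hypothetical.
 *
 *     static const char *null_get_name(rt_varea_t varea)
 *     {
 *         return "null-map";
 *     }
 *
 *     static void null_page_fault(struct rt_varea *varea,
 *                                 struct rt_aspace_fault_msg *msg)
 *     {
 *         // refuse to service any fault on this mapping
 *         msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
 *     }
 *
 *     struct rt_mem_obj null_obj = {
 *         .get_name      = null_get_name,
 *         .on_page_fault = null_page_fault,
 *     };
 */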

extern struct rt_mem_obj rt_mm_dummy_mapper;

enum rt_mmu_cntl
{
    MMU_CNTL_NONCACHE,
    MMU_CNTL_CACHE,
    MMU_CNTL_READONLY,
    MMU_CNTL_READWRITE,
    MMU_CNTL_DUMMY_END,
};

/**
 * @brief Lock to access the varea tree of the address space
 */
#define WR_LOCK(aspace)                                                        \
    rt_thread_self() ? rt_mutex_take(&(aspace)->bst_lock, RT_WAITING_FOREVER)  \
                     : 0
#define WR_UNLOCK(aspace)                                                      \
    rt_thread_self() ? rt_mutex_release(&(aspace)->bst_lock) : 0

#define RD_LOCK(aspace)   WR_LOCK(aspace)
#define RD_UNLOCK(aspace) WR_UNLOCK(aspace)
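/*
 * Illustrative sketch: any walk of the varea tree is expected to be
 * bracketed by this lock pair; it degrades to a no-op when
 * rt_thread_self() is RT_NULL (e.g. before the scheduler starts).
 *
 *     RD_LOCK(aspace);
 *     // ... search aspace->tree ...
 *     RD_UNLOCK(aspace);
 */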

rt_aspace_t rt_aspace_create(void *start, rt_size_t length, void *pgtbl);
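/*
 * Illustrative sketch: create a user address space over a hypothetical
 * range. USER_VADDR_START, USER_SPACE_SIZE and arch_new_pgtbl() are all
 * assumptions of this example; the page table must come from the
 * architecture layer.
 *
 *     extern void *arch_new_pgtbl(void);               // hypothetical
 *     rt_aspace_t uspace = rt_aspace_create((void *)USER_VADDR_START,
 *                                           USER_SPACE_SIZE,
 *                                           arch_new_pgtbl());
 */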

rt_err_t rt_aspace_init(rt_aspace_t aspace, void *start, rt_size_t length, void *pgtbl);

void rt_aspace_delete(rt_aspace_t aspace);

void rt_aspace_detach(rt_aspace_t aspace);

/**
 * @brief Memory Map on Virtual Address Space to Mappable Object
 *
 * @note There is no restriction on using the NULL address (physical or
 * virtual). The vaddr passed in addr must be page aligned; if it is
 * RT_NULL, a suitable address will be chosen automatically.
 *
 * @param aspace target virtual address space
 * @param addr virtual address of the mapping
 * @param length length of mapping region
 * @param attr MMU attribution
 * @param flags desired memory protection and behaviour of the mapping
 * @param mem_obj memory map backing store object
 * @param offset offset of mapping in 4KB page for mem_obj
 * @return int E_OK on success, with addr set to vaddr of the mapping;
 *             E_INVAL on invalid argument
 */
int rt_aspace_map(rt_aspace_t aspace, void **addr, rt_size_t length, rt_size_t attr,
                  mm_flag_t flags, rt_mem_obj_t mem_obj, rt_size_t offset);
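/*
 * Illustrative sketch: map 4 pages of anonymous memory into the kernel
 * space and let the allocator choose the address. MMU_MAP_K_RWCB is an
 * arch-provided attribute and MMF_PREFETCH comes from mm_flag.h; both
 * are assumptions of this example.
 *
 *     void *vaddr = RT_NULL;
 *     int err = rt_aspace_map(&rt_kernel_space, &vaddr, 4 * 0x1000,
 *                             MMU_MAP_K_RWCB, MMF_PREFETCH,
 *                             &rt_mm_dummy_mapper, 0);
 *     if (err == RT_EOK)
 *     {
 *         // vaddr now holds the chosen, page-aligned address
 *     }
 */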

/** no malloc routines are called; the caller provides a static varea */
int rt_aspace_map_static(rt_aspace_t aspace, rt_varea_t varea, void **addr,
                         rt_size_t length, rt_size_t attr, mm_flag_t flags,
                         rt_mem_obj_t mem_obj, rt_size_t offset);

/**
 * @brief Memory Map on Virtual Address Space to Physical Memory
 *
 * @param aspace target virtual address space
 * @param hint hint of mapping va
 * @param attr MMU attribution
 * @param pa_off (physical address >> 12)
 * @param ret_va pointer to the location to store va
 * @return int E_OK on success, with ret_va set to vaddr of the mapping;
 *             E_INVAL on invalid argument
 */
int rt_aspace_map_phy(rt_aspace_t aspace, rt_mm_va_hint_t hint, rt_size_t attr,
                      rt_size_t pa_off, void **ret_va);
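/*
 * Illustrative sketch: map one page of device MMIO. UART0_PHY_BASE and
 * MMU_MAP_K_DEVICE are hypothetical; the physical page offset is
 * derived with MM_PA_TO_OFF().
 *
 *     struct rt_mm_va_hint hint = {
 *         .limit_start      = rt_kernel_space.start,
 *         .limit_range_size = rt_kernel_space.size,
 *         .prefer           = RT_NULL,
 *         .map_size         = 0x1000,
 *         .flags            = 0,
 *     };
 *     void *uart_va;
 *     int err = rt_aspace_map_phy(&rt_kernel_space, &hint, MMU_MAP_K_DEVICE,
 *                                 MM_PA_TO_OFF(UART0_PHY_BASE), &uart_va);
 */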

/** no malloc routines are called; the caller provides a static varea */
int rt_aspace_map_phy_static(rt_aspace_t aspace, rt_varea_t varea,
                             rt_mm_va_hint_t hint, rt_size_t attr, rt_size_t pa_off,
                             void **ret_va);

/**
 * @brief Remove any mappings that overlap the range [addr, addr + bytes)
 *
 * @param aspace target virtual address space
 * @param addr virtual address within the mapping(s) to remove
 * @return int
 */
int rt_aspace_unmap(rt_aspace_t aspace, void *addr);

int rt_aspace_control(rt_aspace_t aspace, void *addr, enum rt_mmu_cntl cmd);

int rt_aspace_load_page(rt_aspace_t aspace, void *addr, rt_size_t npage);

int rt_aspace_offload_page(rt_aspace_t aspace, void *addr, rt_size_t npage);

int rt_aspace_traversal(rt_aspace_t aspace,
                        int (*fn)(rt_varea_t varea, void *arg), void *arg);
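/*
 * Illustrative sketch: count the vareas in an address space with
 * rt_aspace_traversal(); the callback below is hypothetical.
 *
 *     static int count_varea(rt_varea_t varea, void *arg)
 *     {
 *         (*(int *)arg)++;
 *         return 0;
 *     }
 *
 *     int count = 0;
 *     rt_aspace_traversal(&rt_kernel_space, count_varea, &count);
 */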

void rt_aspace_print_all(rt_aspace_t aspace);

/**
 * @brief Map one page to varea
 *
 * @param varea target varea
 * @param vaddr user address
 * @param page the page frame to be mapped
 * @return int
 */
int rt_varea_map_page(rt_varea_t varea, void *vaddr, void *page);
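/*
 * Illustrative sketch: an on_page_fault handler could commit a freshly
 * allocated frame at the faulting address. rt_pages_alloc() is assumed
 * from mm_page.h, and the msg fields are assumed from mm_fault.h.
 *
 *     void *frame = rt_pages_alloc(0);   // a single 4 KiB frame
 *     if (frame && rt_varea_map_page(varea, msg->fault_vaddr, frame) == RT_EOK)
 *     {
 *         rt_varea_pgmgr_insert(varea, frame);  // freed on uninstall
 *         msg->response.status = MM_FAULT_STATUS_OK;
 *     }
 */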

/**
 * @brief Unmap one page in varea
 *
 * @param varea target varea
 * @param vaddr user address
 * @return int
 */
int rt_varea_unmap_page(rt_varea_t varea, void *vaddr);

/**
 * @brief Map a range of physical addresses to varea
 *
 * @warning Caller should take care of synchronization of its varea among all
 *          the map/unmap operation
 *
 * @param varea target varea
 * @param vaddr user address
 * @param paddr physical address
 * @param length map range
 * @return int
 */
int rt_varea_map_range(rt_varea_t varea, void *vaddr, void *paddr, rt_size_t length);
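/*
 * Illustrative sketch: back a whole varea with a contiguous physical
 * buffer; DMA_BUF_PHY is hypothetical.
 *
 *     rt_varea_map_range(varea, varea->start, (void *)DMA_BUF_PHY,
 *                        varea->size);
 */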

/**
 * @brief Unmap a range of addresses in varea
 *
 * @warning Caller should take care of synchronization of its varea among all
 *          the map/unmap operation
 *
 * @param varea target varea
 * @param vaddr user address
 * @param length map range
 * @return int
 */
int rt_varea_unmap_range(rt_varea_t varea, void *vaddr, rt_size_t length);

/**
 * @brief Insert a page into the page manager of varea
 * The page will be freed automatically when the varea is uninstalled
 *
 * @param varea target varea
 * @param page_addr the page frame to be added
 */
void rt_varea_pgmgr_insert(rt_varea_t varea, void *page_addr);

rt_ubase_t rt_kmem_pvoff(void);

void rt_kmem_pvoff_set(rt_ubase_t pvoff);

int rt_kmem_map_phy(void *va, void *pa, rt_size_t length, rt_size_t attr);

void *rt_kmem_v2p(void *vaddr);
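/*
 * Illustrative sketch: establish a kernel mapping for a 2-page device
 * region and translate back. PHY_BASE and MMU_MAP_K_DEVICE are
 * hypothetical, and the example assumes the usual PV_OFFSET convention
 * (pa == va + PV_OFFSET).
 *
 *     void *va = (void *)((rt_ubase_t)PHY_BASE - PV_OFFSET);
 *     rt_kmem_map_phy(va, (void *)PHY_BASE, 2 * 0x1000, MMU_MAP_K_DEVICE);
 *     void *pa = rt_kmem_v2p(va);   // expected to equal PHY_BASE
 */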

void rt_kmem_list(void);

#endif /* __MM_ASPACE_H__ */