Commit 855aef9c authored by wangxiaoyao, committed by guo

[mm] distinguish NULL and MAP_FAILED

Parent ed58f667
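In short: callers of rt_aspace_map() no longer convert a NULL address hint into the ARCH_MAP_FAILED sentinel. RT_NULL itself now means "no preferred address, choose one for me", while ARCH_MAP_FAILED stays reserved for MMU lookup failures such as the return value of rt_hw_mmu_v2p(). A minimal before/after sketch of the caller side (illustrative only; the identifiers come from the hunks below, with size and mem_obj standing in for the actual arguments):

    /* before: a NULL hint had to be turned into the sentinel first */
    void *va = shm_vaddr ? shm_vaddr : ARCH_MAP_FAILED;
    err = rt_aspace_map(lwp->aspace, &va, size, MMU_MAP_U_RWCB, MMF_PREFETCH, mem_obj, 0);

    /* after: RT_NULL itself requests an automatically chosen address */
    void *va = shm_vaddr;   /* may be RT_NULL */
    err = rt_aspace_map(lwp->aspace, &va, size, MMU_MAP_U_RWCB, MMF_PREFETCH, mem_obj, 0);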
@@ -262,7 +262,7 @@ static void *_lwp_shmat(int id, void *shm_vaddr)
struct rt_lwp *lwp = RT_NULL;
struct lwp_avl_struct *node_key = RT_NULL;
struct lwp_shm_struct *p = RT_NULL;
- void *va = RT_NULL;
+ void *va = shm_vaddr;
/* The id is used to locate the node_key in the binary tree, and then get the
* shared-memory structure linked to the node_key. We don't use the id to refer
@@ -282,16 +282,12 @@ static void *_lwp_shmat(int id, void *shm_vaddr)
{
return RT_NULL;
}
- if (shm_vaddr == 0)
- va = ARCH_MAP_FAILED;
- else
- va = shm_vaddr;
err = rt_aspace_map(lwp->aspace, &va, p->size, MMU_MAP_U_RWCB, MMF_PREFETCH,
&p->mem_obj, 0);
if (err != RT_EOK)
{
- va = 0;
+ va = RT_NULL;
}
return va;
}
......
@@ -114,13 +114,13 @@ static void _user_do_page_fault(struct rt_varea *varea,
{
struct rt_lwp_objs *lwp_objs;
lwp_objs = rt_container_of(varea->mem_obj, struct rt_lwp_objs, mem_obj);
- void *vaddr = ARCH_MAP_FAILED;
if (lwp_objs->source)
{
void *paddr = rt_hw_mmu_v2p(lwp_objs->source, msg->vaddr);
if (paddr != ARCH_MAP_FAILED)
{
+ void *vaddr;
vaddr = paddr - PV_OFFSET;
if (!(varea->flag & MMF_TEXT))
@@ -176,22 +176,21 @@ static void _init_lwp_objs(struct rt_lwp_objs *lwp_objs, rt_aspace_t aspace)
static void *_lwp_map_user(struct rt_lwp *lwp, void *map_va, size_t map_size,
int text)
{
- void *va = RT_NULL;
+ void *va = map_va;
int ret = 0;
size_t flags = MMF_PREFETCH;
if (text)
flags |= MMF_TEXT;
rt_mem_obj_t mem_obj = &lwp->lwp_obj->mem_obj;
- va = map_va ? map_va : ARCH_MAP_FAILED;
ret = rt_aspace_map(lwp->aspace, &va, map_size, MMU_MAP_U_RWCB, flags,
mem_obj, 0);
if (ret != RT_EOK)
{
va = RT_NULL;
LOG_I("lwp_map_user: failed to map %lx with size %lx", map_va,
map_size);
LOG_I("lwp_map_user: failed to map %lx with size %lx with errno %d", map_va,
map_size, ret);
}
return va;
@@ -350,9 +349,6 @@ void *lwp_map_user_phy(struct rt_lwp *lwp, void *map_va, void *map_pa,
map_size &= ~ARCH_PAGE_MASK;
map_pa = (void *)((size_t)map_pa & ~ARCH_PAGE_MASK);
- if (map_va == RT_NULL)
- map_va = ARCH_MAP_FAILED;
struct rt_mm_va_hint hint = {.flags = MMF_MAP_FIXED,
.limit_range_size = lwp->aspace->size,
.limit_start = lwp->aspace->start,
......
@@ -10,13 +10,16 @@
#include <rthw.h>
#include <rtthread.h>
+ #include <mmu.h>
+ #include <mm_aspace.h>
#include <ioremap.h>
+ void *rt_ioremap_start;
+ size_t rt_ioremap_size;
#ifdef RT_USING_SMART
- #include <mmu.h>
- #include <lwp_mm.h>
- #include <mm_aspace.h>
+ #include <lwp_mm.h>
#define DBG_TAG "mm.ioremap"
#define DBG_LVL DBG_LOG
#include <rtdbg.h>
@@ -27,9 +30,6 @@ enum ioremap_type
MM_AREA_TYPE_PHY_CACHED
};
- void *rt_ioremap_start;
- size_t rt_ioremap_size;
static void *_ioremap_type(void *paddr, size_t size, enum ioremap_type type)
{
void *v_addr = NULL;
@@ -40,7 +40,7 @@ static void *_ioremap_type(void *paddr, size_t size, enum ioremap_type type)
lo_off = (uintptr_t)paddr & ARCH_PAGE_MASK;
struct rt_mm_va_hint hint = {
- .prefer = ARCH_MAP_FAILED,
+ .prefer = RT_NULL,
.map_size = RT_ALIGN(size + lo_off, ARCH_PAGE_SIZE),
.flags = 0,
.limit_start = rt_ioremap_start,
......
@@ -204,10 +204,12 @@ static int _do_prefetch(rt_aspace_t aspace, rt_varea_t varea, void *start,
LOG_W("%s: MMU mapping failed for va %p to %p of %lx", __func__,
vaddr, store + PV_OFFSET, store_sz);
}
+ else
+ {
+ rt_hw_tlb_invalidate_range(aspace, vaddr, store_sz, ARCH_PAGE_SIZE);
+ }
vaddr += store_sz;
off += store_sz >> ARCH_PAGE_SHIFT;
- rt_hw_tlb_invalidate_range(aspace, vaddr, store_sz, ARCH_PAGE_SIZE);
}
else
{
@@ -235,7 +237,7 @@ int _varea_install(rt_aspace_t aspace, rt_varea_t varea, rt_mm_va_hint_t hint)
/* TODO try merge surrounding regions to optimize memory footprint */
- if (alloc_va != ARCH_MAP_FAILED)
+ if (alloc_va != RT_NULL)
{
varea->start = alloc_va;
_aspace_bst_insert(aspace, varea);
@@ -285,9 +287,10 @@ static int _mm_aspace_map(rt_aspace_t aspace, rt_varea_t varea, rt_size_t attr,
static inline int _not_in_range(void *start, rt_size_t length,
void *limit_start, rt_size_t limit_size)
{
- LOG_D("%s: [%p : %p] [%p : %p]", __func__, start, length, limit_start, limit_size);
+ if (start != RT_NULL)
+ LOG_D("%s: [%p : %p] [%p : %p]", __func__, start, length, limit_start, limit_size);
/* assuming (base + length) will not overflow except (0) */
- return start != ARCH_MAP_FAILED
+ return start != RT_NULL
? ((length > (0ul - (uintptr_t)start)) || start < limit_start ||
(length + (rt_size_t)(start - limit_start)) > limit_size)
: length > limit_size;
@@ -295,7 +298,7 @@ static inline int _not_in_range(void *start, rt_size_t length,
static inline int _not_align(void *start, rt_size_t length, rt_size_t mask)
{
- return (start != ARCH_MAP_FAILED) &&
+ return (start != RT_NULL) &&
(((uintptr_t)start & mask) || (length & mask));
}
@@ -438,7 +441,7 @@ int _mm_aspace_map_phy(rt_aspace_t aspace, rt_varea_t varea,
if (err == RT_EOK)
*ret_va = vaddr;
else
- *ret_va = ARCH_MAP_FAILED;
+ *ret_va = RT_NULL;
}
return err;
@@ -511,8 +514,7 @@ void _aspace_unmap(rt_aspace_t aspace, void *addr, rt_size_t length)
WR_UNLOCK(aspace);
rt_hw_mmu_unmap(aspace, varea->start, varea->size);
- rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size,
- ARCH_PAGE_SIZE);
+ rt_hw_tlb_invalidate_range(aspace, varea->start, varea->size, ARCH_PAGE_SIZE);
rt_free(varea);
varea = _aspace_bst_search_overlap(aspace, range);
@@ -552,7 +554,7 @@ static inline void *_align(void *va, rt_ubase_t align_mask)
static void *_ascending_search(rt_varea_t varea, rt_size_t req_size,
rt_ubase_t align_mask, struct _mm_range limit)
{
- void *ret = ARCH_MAP_FAILED;
+ void *ret = RT_NULL;
while (varea && varea->start < limit.end)
{
void *candidate = varea->start + varea->size;
@@ -586,7 +588,7 @@ static void *_find_head_and_asc_search(rt_aspace_t aspace, rt_size_t req_size,
rt_ubase_t align_mask,
struct _mm_range limit)
{
- void *va = ARCH_MAP_FAILED;
+ void *va = RT_NULL;
rt_varea_t varea = _aspace_bst_search_exceed(aspace, limit.start);
if (varea)
@@ -638,7 +640,7 @@ static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
mm_flag_t flags)
{
rt_varea_t varea = NULL;
- void *va = ARCH_MAP_FAILED;
+ void *va = RT_NULL;
struct _mm_range limit = {limit_start, limit_start + limit_size - 1};
rt_ubase_t align_mask = ~0ul;
@@ -647,7 +649,7 @@ static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
align_mask = ~((1 << MMF_GET_ALIGN(flags)) - 1);
}
- if (prefer != ARCH_MAP_FAILED)
+ if (prefer != RT_NULL)
{
prefer = _align(prefer, align_mask);
struct _mm_range range = {prefer, prefer + req_size - 1};
@@ -659,11 +661,12 @@ static void *_find_free(rt_aspace_t aspace, void *prefer, rt_size_t req_size,
}
else if (flags & MMF_MAP_FIXED)
{
+ /* OVERLAP */
}
else
{
va = _ascending_search(varea, req_size, align_mask, limit);
- if (va == ARCH_MAP_FAILED)
+ if (va == RT_NULL)
{
limit.end = varea->start - 1;
va = _find_head_and_asc_search(aspace, req_size, align_mask,
......
@@ -30,11 +30,11 @@ typedef rt_spinlock_t mm_spinlock;
#define MM_PGTBL_UNLOCK(aspace) (rt_hw_spin_unlock(&((aspace)->pgtbl_lock)))
#else
- typedef rt_hw_spinlock_t mm_spinlock;
+ typedef struct rt_spinlock mm_spinlock;
- #define MM_PGTBL_LOCK_INIT(aspace) (rt_hw_spin_lock_init(&((aspace)->pgtbl_lock)))
- #define MM_PGTBL_LOCK(aspace) (rt_hw_spin_lock(&((aspace)->pgtbl_lock)))
- #define MM_PGTBL_UNLOCK(aspace) (rt_hw_spin_unlock(&((aspace)->pgtbl_lock)))
+ #define MM_PGTBL_LOCK_INIT(aspace) (rt_spin_lock_init(&((aspace)->pgtbl_lock)))
+ #define MM_PGTBL_LOCK(aspace) (rt_spin_lock(&((aspace)->pgtbl_lock)))
+ #define MM_PGTBL_UNLOCK(aspace) (rt_spin_unlock(&((aspace)->pgtbl_lock)))
#endif /* RT_USING_SMP */
@@ -108,6 +108,8 @@ enum rt_mmu_cntl
{
MMU_CNTL_NONCACHE,
MMU_CNTL_CACHE,
+ MMU_CNTL_READONLY,
+ MMU_CNTL_READWRITE,
MMU_CNTL_DUMMY_END,
};
@@ -135,7 +137,7 @@ void rt_aspace_detach(rt_aspace_t aspace);
/**
* @brief Memory Map on Virtual Address Space to Mappable Object
* *INFO There is no restriction to use NULL address(physical/virtual).
- * Vaddr passing in addr must be page aligned. If vaddr is MM_MAP_FAILED,
+ * Vaddr passing in addr must be page aligned. If vaddr is RT_NULL,
* a suitable address will be chose automatically.
*
* @param aspace target virtual address space
@@ -206,4 +208,8 @@ rt_ubase_t rt_kmem_pvoff(void);
void rt_kmem_pvoff_set(rt_ubase_t pvoff);
+ int rt_kmem_map_phy(void *va, void *pa, rt_size_t length, rt_size_t attr);
+ void *rt_kmem_v2p(void *vaddr);
#endif /* __MM_ASPACE_H__ */
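A minimal usage sketch of the rt_aspace_map() behaviour documented above, under the new convention (illustrative only; aspace, map_size and mem_obj are placeholders, while the attribute and flag names are taken from the calls visible in this diff):

    void *vaddr = RT_NULL;   /* no preferred address: let the allocator choose one */
    int err = rt_aspace_map(aspace, &vaddr, map_size, MMU_MAP_U_RWCB, MMF_PREFETCH,
                            mem_obj, 0);
    if (err != RT_EOK)
    {
        vaddr = RT_NULL;     /* mapping failed */
    }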