Unverified commit 2394e752, authored by Shell, committed by GitHub

[libcpu/risc-v] support noncached normal memory (#7051)

* [libcpu/risc-v] support noncached normal memory

* [mm] check before dereference in _fetch_page

* [mm] add comments on ioremap

* [ioremap] report more info on failure
Parent 65301b9c
@@ -62,7 +62,7 @@ static void *_ioremap_type(void *paddr, size_t size, enum ioremap_type type)
     if (err)
     {
-        LOG_W("IOREMAP 0x%lx failed", paddr);
+        LOG_W("IOREMAP 0x%lx failed %d\n", paddr, err);
         v_addr = NULL;
     }
     else
......
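For orientation, _ioremap_type() above is the common path behind the rt_ioremap family; each variant asks for a different kernel mapping attribute. Below is a minimal sketch of that dispatch, not part of this commit: the enum members and function name are hypothetical placeholders, and the exact attribute chosen for each variant is an assumption based on the ioremap.h comment added in the next hunk.

/* Sketch only: how an _ioremap_type()-style helper could pick the kernel
 * mapping attribute per variant. Requires <rtthread.h> and the arch mmu.h
 * for the MMU_MAP_K_* macros. Names below are hypothetical. */
enum ioremap_type_sketch
{
    IOREMAP_SKETCH_DEVICE,  /* rt_ioremap / rt_ioremap_nocache               */
    IOREMAP_SKETCH_CACHED,  /* rt_ioremap_cached                             */
    IOREMAP_SKETCH_WT       /* rt_ioremap_wt, currently mapped non-cacheable */
};

static rt_size_t _attr_of_type_sketch(enum ioremap_type_sketch type)
{
    switch (type)
    {
    case IOREMAP_SKETCH_CACHED:
        return MMU_MAP_K_RWCB;   /* normal, cacheable memory                 */
    case IOREMAP_SKETCH_WT:
        return MMU_MAP_K_RW;     /* normal, non-cacheable (assumed mapping)  */
    case IOREMAP_SKETCH_DEVICE:
    default:
        return MMU_MAP_K_DEVICE; /* device/MMIO memory                       */
    }
}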
......@@ -16,6 +16,20 @@
 extern "C" {
 #endif
+/**
+ * IOREMAP family
+ * `rt_ioremap` defaults to mapping physical memory in the MMIO region as
+ * DEVICE memory into kernel space. There are currently 3 variants:
+ *
+ * name               | attribute
+ * ------------------ | -----------
+ * rt_ioremap_nocache | Device (MMU_MAP_K_DEVICE)
+ * rt_ioremap_cached  | Normal memory (MMU_MAP_K_RWCB)
+ * rt_ioremap_wt      | Normal memory, but each write access is guaranteed
+ *                    | to go directly to system memory;
+ *                    | currently mapped as non-cacheable
+ */
 void *rt_ioremap(void *paddr, size_t size);
 void *rt_ioremap_nocache(void *paddr, size_t size);
 void *rt_ioremap_cached (void *paddr, size_t size);
......
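As a usage illustration (not part of this commit): mapping a peripheral's registers with the default device-type mapping and releasing the mapping afterwards. The base address and register layout are made up, and rt_iounmap() is assumed to be the matching unmap routine declared in the same header.

#include <rtthread.h>
#include <ioremap.h>                     /* components/mm/ioremap.h          */

#define DEMO_UART_PHY_BASE  0x10000000UL /* hypothetical MMIO base address   */
#define DEMO_UART_REG_SIZE  0x1000UL     /* hypothetical window size         */

static void demo_uart_poke(void)
{
    /* Default rt_ioremap(): device memory, suitable for register access. */
    volatile rt_uint32_t *regs = rt_ioremap((void *)DEMO_UART_PHY_BASE,
                                            DEMO_UART_REG_SIZE);
    if (regs)
    {
        regs[0] = 'A';              /* hypothetical TX register at offset 0 */
        rt_iounmap((void *)regs);   /* assumed unmap counterpart            */
    }
}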
@@ -29,23 +29,26 @@
 static int _fetch_page(rt_varea_t varea, struct rt_mm_fault_msg *msg)
 {
     int err = UNRECOVERABLE;
-    varea->mem_obj->on_page_fault(varea, msg);
-    if (msg->response.status == MM_FAULT_STATUS_OK)
+    if (varea->mem_obj && varea->mem_obj->on_page_fault)
     {
-        void *store = msg->response.vaddr;
-        rt_size_t store_sz = msg->response.size;
-        if (msg->vaddr + store_sz > varea->start + varea->size)
-        {
-            LOG_W("%s more size of buffer is provided than varea", __func__);
-        }
-        else
+        varea->mem_obj->on_page_fault(varea, msg);
+        if (msg->response.status == MM_FAULT_STATUS_OK)
         {
-            rt_hw_mmu_map(varea->aspace, msg->vaddr, store + PV_OFFSET,
-                          store_sz, varea->attr);
-            rt_hw_tlb_invalidate_range(varea->aspace, msg->vaddr, store_sz,
-                                       ARCH_PAGE_SIZE);
-            err = RECOVERABLE;
+            void *store = msg->response.vaddr;
+            rt_size_t store_sz = msg->response.size;
+            if (msg->vaddr + store_sz > varea->start + varea->size)
+            {
+                LOG_W("%s more size of buffer is provided than varea", __func__);
+            }
+            else
+            {
+                rt_hw_mmu_map(varea->aspace, msg->vaddr, store + PV_OFFSET,
+                              store_sz, varea->attr);
+                rt_hw_tlb_invalidate_range(varea->aspace, msg->vaddr, store_sz,
+                                           ARCH_PAGE_SIZE);
+                err = RECOVERABLE;
+            }
         }
     }
     return err;
......
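For context on the new guard: _fetch_page() relies on the varea's memory object to provide backing storage through msg->response, and previously dereferenced mem_obj unconditionally. Below is a hedged sketch of a conforming on_page_fault handler; rt_pages_alloc() and the assumption that response.status starts out as a non-OK value are illustrative, not taken from this diff.

/* Sketch of a minimal on_page_fault handler for an anonymous-memory
 * object. Assumptions: rt_pages_alloc(0) returns one ARCH_PAGE_SIZE page
 * in kernel space, and response.status is pre-set to a non-OK value. */
static void anon_on_page_fault_sketch(struct rt_varea *varea,
                                      struct rt_mm_fault_msg *msg)
{
    void *page = rt_pages_alloc(0);      /* one page of backing storage */
    if (page)
    {
        /* Tell _fetch_page() where the storage is; it then maps it at
         * msg->vaddr with varea->attr and invalidates the TLB range. */
        msg->response.status = MM_FAULT_STATUS_OK;
        msg->response.vaddr  = page;
        msg->response.size   = ARCH_PAGE_SIZE;
    }
    /* On failure, leave status untouched so _fetch_page() returns
     * UNRECOVERABLE. Before this commit, a varea without a mem_obj (or
     * without this callback) would have crashed _fetch_page() itself. */
}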
......@@ -88,6 +88,7 @@
 #define MMU_MAP_K_DEVICE PTE_WRAP(PAGE_ATTR_DEV | PTE_G | PAGE_ATTR_XN | PTE_V)
 #define MMU_MAP_K_RWCB PTE_WRAP(PAGE_ATTR_CB | PTE_G | PAGE_ATTR_RWX | PTE_V)
+#define MMU_MAP_K_RW PTE_WRAP(PTE_G | PAGE_ATTR_RWX | PTE_V)
 #define MMU_MAP_U_RWCB PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_RWX | PTE_V)
 #define MMU_MAP_U_RWCB_XN PTE_WRAP(PAGE_ATTR_CB | PTE_U | PAGE_ATTR_XN | PTE_V)
 #define MMU_MAP_U_RW PTE_WRAP(PTE_U | PAGE_ATTR_RWX | PTE_V)
......
......@@ -73,6 +73,7 @@
 #define MMU_MAP_K_DEVICE (PTE_G | PTE_W | PTE_R | PTE_V)
 #define MMU_MAP_K_RWCB (PTE_G | PTE_X | PTE_W | PTE_R | PTE_V)
+#define MMU_MAP_K_RW (PTE_G | PTE_X | PTE_W | PTE_R | PTE_V)
 #define MMU_MAP_U_RWCB (PTE_U | PTE_X | PTE_W | PTE_R | PTE_V)
 #define MMU_MAP_U_RWCB_XN (PTE_U | PTE_W | PTE_R | PTE_V)
 #define MMU_MAP_U_RW (PTE_U | PTE_X | PTE_W | PTE_R | PTE_V)
......
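One observation on the RISC-V side: the noncached and cacheable kernel attributes above share the same PTE encoding because a plain Sv39 PTE (without the Svpbmt extension) has no cacheability bits, so the effective memory type comes from the platform's PMAs rather than the page table. A compile-time check can make that equivalence explicit; the include path below is an assumption.

#include <mmu.h>   /* the libcpu/risc-v mmu.h shown above; path may differ */

/* Without Svpbmt there are no per-page cacheability bits, so the noncached
 * and cacheable kernel mappings collapse to a single encoding. */
_Static_assert(MMU_MAP_K_RW == MMU_MAP_K_RWCB,
               "RV64 w/o Svpbmt: cacheability is decided by PMAs, not PTEs");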