/*
 * Copyright (c) 2006-2022, RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2022-12-06     WangXiaoyao  the first version
 */
#include <rtthread.h>

#ifdef RT_USING_SMART
#define DBG_TAG "mm.fault"
#define DBG_LVL DBG_INFO
#include <rtdbg.h>

#include <lwp.h>
#include <lwp_syscall.h>
#include "mm_aspace.h"
#include "mm_fault.h"
#include "mm_flag.h"
#include "mm_private.h"
#include <mmu.h>
#include <tlb.h>

#define UNRECOVERABLE 0
#define RECOVERABLE   1

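/* Ask the backing memory object of the varea to supply the missing page,
 * then map it into the address space; returns RECOVERABLE only if both
 * the fetch and the mapping succeed. */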
static int _fetch_page(rt_varea_t varea, struct rt_aspace_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    msg->response.status = MM_FAULT_STATUS_UNRECOVERABLE;
    msg->response.vaddr = 0;
    msg->response.size = 0;
    if (varea->mem_obj && varea->mem_obj->on_page_fault)
    {
        varea->mem_obj->on_page_fault(varea, msg);
        err = _varea_map_with_msg(varea, msg);
        err = (err == RT_EOK ? RECOVERABLE : UNRECOVERABLE);
    }
    return err;
}

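/* A read fault is fixable only when it is a page fault on a page that has
 * not been populated yet; anything else is left unrecoverable. */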
static int _read_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
        err = _fetch_page(varea, msg);
    }
    else
    {
        /* signal a fault to user? */
    }
    return err;
}

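/* A write fault is fixable when it is a page fault on an unpopulated page;
 * a copy-on-write access fault is recognized but not resolved here. */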
static int _write_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
        err = _fetch_page(varea, msg);
    }
    else if (msg->fault_type == MM_FAULT_TYPE_ACCESS_FAULT &&
             varea->flag & MMF_COW)
    {
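        /* copy-on-write access fault: recognized but not handled here,
         * so err stays UNRECOVERABLE */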
    }
    else
    {
        /* signal a fault to user? */
    }
    return err;
}

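/* An instruction-fetch fault is fixable only as a page fault on an
 * unpopulated page. */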
static int _exec_fault(rt_varea_t varea, void *pa, struct rt_aspace_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    if (msg->fault_type == MM_FAULT_TYPE_PAGE_FAULT)
    {
        RT_ASSERT(pa == ARCH_MAP_FAILED);
        RT_ASSERT(!(varea->flag & MMF_PREFETCH));
        err = _fetch_page(varea, msg);
    }
    return err;
}

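/* Try to fix a fault reported by the architecture layer: align the faulting
 * address to a page boundary, look up the owning varea under the address
 * space read lock, and dispatch on the fault operation (read/write/execute).
 * Returns RECOVERABLE if the fault was fixed, UNRECOVERABLE otherwise. */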
int rt_aspace_fault_try_fix(rt_aspace_t aspace, struct rt_aspace_fault_msg *msg)
{
    int err = UNRECOVERABLE;
    uintptr_t va = (uintptr_t)msg->fault_vaddr;
    va &= ~ARCH_PAGE_MASK;
    msg->fault_vaddr = (void *)va;

    if (aspace)
    {
        rt_varea_t varea;

        RD_LOCK(aspace);
        varea = _aspace_bst_search(aspace, msg->fault_vaddr);
        if (varea)
        {
            void *pa = rt_hw_mmu_v2p(aspace, msg->fault_vaddr);
            msg->off = ((char *)msg->fault_vaddr - (char *)varea->start) >> ARCH_PAGE_SHIFT;

            /* permission checked by fault op */
            switch (msg->fault_op)
            {
            case MM_FAULT_OP_READ:
                err = _read_fault(varea, pa, msg);
                break;
            case MM_FAULT_OP_WRITE:
                err = _write_fault(varea, pa, msg);
                break;
            case MM_FAULT_OP_EXECUTE:
                err = _exec_fault(varea, pa, msg);
                break;
            }
        }
        RD_UNLOCK(aspace);
    }

    return err;
}

#endif /* RT_USING_SMART */