diff --git a/arch/arm/arm/include/los_pte_ops.h b/arch/arm/arm/include/los_pte_ops.h index 5fd8adfcb2c12a40aa5ff10cae204d0e6b8bfc8f..ab9d28f4704d68e314c1a149ad7c848434b79575 100644 --- a/arch/arm/arm/include/los_pte_ops.h +++ b/arch/arm/arm/include/los_pte_ops.h @@ -46,7 +46,7 @@ extern "C" { #endif /* __cplusplus */ #endif /* __cplusplus */ -//保存L1 页表项至L1页表 +/// PTE(Page Table Entry),页表条目,保存L1页表项至L1页表 STATIC INLINE VOID OsSavePte1(PTE_T *pte1Ptr, PTE_T pte1) { DMB; diff --git a/arch/arm/arm/src/los_arch_mmu.c b/arch/arm/arm/src/los_arch_mmu.c index 6f486117164e538709b33cee49b693c4af35877e..b09b9d773435b39ba25fd13d105f4f431f5fb48d 100644 --- a/arch/arm/arm/src/los_arch_mmu.c +++ b/arch/arm/arm/src/los_arch_mmu.c @@ -1,3 +1,43 @@ +/*! + * @file los_arch_mmu.c + * @brief 虚实映射其实就是一个建立页表的过程 + * @link http://weharmonyos.com/openharmony/zh-cn/device-dev/kernel/kernel-small-basic-inner-reflect.html + * @verbatim + + 虚实映射是指系统通过内存管理单元(MMU,Memory Management Unit)将进程空间的虚拟地址与实际的物理地址做映射, + 并指定相应的访问权限、缓存属性等。程序执行时,CPU访问的是虚拟内存,通过MMU页表条目找到对应的物理内存, + 并做相应的代码执行或数据读写操作。MMU的映射由页表(Page Table)来描述,其中保存虚拟地址和物理地址的映射关系以及访问权限等。 + 每个进程在创建的时候都会创建一个页表,页表由一个个页表条目(Page Table Entry, PTE)构成, + 每个页表条目描述虚拟地址区间与物理地址区间的映射关系。MMU中有一块页表缓存,称为快表(TLB, Translation Lookaside Buffers), + 做地址转换时,MMU首先在TLB中查找,如果找到对应的页表条目可直接进行转换,提高了查询效率。 + + 虚实映射其实就是一个建立页表的过程。MMU有多级页表,LiteOS-A内核采用二级页表描述进程空间。每个一级页表条目描述符占用4个字节, + 可表示1MiB的内存空间的映射关系,即1GiB用户空间(LiteOS-A内核中用户空间占用1GiB)的虚拟内存空间需要1024个。系统创建用户进程时, + 在内存中申请一块4KiB大小的内存块作为一级页表的存储区域,二级页表根据当前进程的需要做动态的内存申请。 + + 用户程序加载启动时,会将代码段、数据段映射进虚拟内存空间(详细可参考动态加载与链接),此时并没有物理页做实际的映射; + 程序执行时,如下图粗箭头所示,CPU访问虚拟地址,通过MMU查找是否有对应的物理内存,若该虚拟地址无对应的物理地址则触发缺页异常, + 内核申请物理内存并将虚实映射关系及对应的属性配置信息写进页表,并把页表条目缓存至TLB,接着CPU可直接通过转换关系访问实际的物理内存; + 若CPU访问已缓存至TLB的页表条目,无需再访问保存在内存中的页表,可加快查找速度。 + + 开发流程 + 1. 虚实映射相关接口的使用: + 通过LOS_ArchMmuMap映射一块物理内存。 + + 2. 对映射的地址区间做相关操作: + 通过LOS_ArchMmuQuery可以查询相应虚拟地址区间映射的物理地址区间及映射属性; + 通过LOS_ArchMmuChangeProt修改映射属性; + 通过LOS_ArchMmuMove做虚拟地址区间的重映射。 + 3. 通过LOS_ArchMmuUnmap解除映射关系。 + + * @endverbatim + * @version + * @author weharmonyos.com + * @date 2021-11-17 + * + * @history + * + */ /* * Copyright (c) 2013-2019 Huawei Technologies Co., Ltd. All rights reserved. * Copyright (c) 2020-2021 Huawei Device Co., Ltd. All rights reserved. @@ -411,8 +451,8 @@ BOOL OsArchMmuInit(LosArchMmu *archMmu, VADDR_T *virtTtb) /*! - * @brief LOS_ArchMmuQuery 本函数是内核高频函数,通过MMU查询虚拟地址是否映射过,带走映射的物理地址和权限 - * + * @brief LOS_ArchMmuQuery 获取进程空间虚拟地址对应的物理地址以及映射属性。 + * 本函数是内核高频函数,通过MMU查询虚拟地址是否映射过,带走映射的物理地址和权限 * @param archMmu * @param flags * @param paddr @@ -462,7 +502,7 @@ STATUS_T LOS_ArchMmuQuery(const LosArchMmu *archMmu, VADDR_T vaddr, PADDR_T *pad } /*! - * @brief LOS_ArchMmuUnmap 解除映射关系 + * @brief LOS_ArchMmuUnmap 解除进程空间虚拟地址区间与物理地址区间的映射关系 * * @param archMmu * @param count @@ -684,7 +724,18 @@ STATIC UINT32 OsMapL2PageContinuous(PTE_T pte1, UINT32 flags, VADDR_T *vaddr, PA *count -= saveCounts; return saveCounts; } -/// mmu映射,所谓的map就是生成L1,L2页表项的过程 +/*! + * @brief LOS_ArchMmuMap 映射进程空间虚拟地址区间与物理地址区间 + * 所谓的map就是生成L1,L2页表项的过程 + * @param archMmu + * @param count + * @param flags + * @param paddr + * @param vaddr + * @return + * + * @see + */ status_t LOS_ArchMmuMap(LosArchMmu *archMmu, VADDR_T vaddr, PADDR_T paddr, size_t count, UINT32 flags) { PTE_T l1Entry; @@ -721,7 +772,18 @@ status_t LOS_ArchMmuMap(LosArchMmu *archMmu, VADDR_T vaddr, PADDR_T paddr, size_ return mapped; } -/// 改变内存段的访问权限,读/写/可执行/不可用 + +/*! 
+ * @brief LOS_ArchMmuChangeProt 修改进程空间虚拟地址区间的映射属性 + * 改变内存段的访问权限,例如: 读/写/可执行/不可用 == + * @param archMmu + * @param count + * @param flags + * @param vaddr + * @return + * + * @see + */ STATUS_T LOS_ArchMmuChangeProt(LosArchMmu *archMmu, VADDR_T vaddr, size_t count, UINT32 flags) { STATUS_T status; @@ -757,6 +819,18 @@ STATUS_T LOS_ArchMmuChangeProt(LosArchMmu *archMmu, VADDR_T vaddr, size_t count, return LOS_OK; } +/*! + * @brief LOS_ArchMmuMove 将进程空间一个虚拟地址区间的映射关系转移至另一块未使用的虚拟地址区间重新做映射。 + * + * @param archMmu + * @param count + * @param flags + * @param newVaddr + * @param oldVaddr + * @return + * + * @see + */ STATUS_T LOS_ArchMmuMove(LosArchMmu *archMmu, VADDR_T oldVaddr, VADDR_T newVaddr, size_t count, UINT32 flags) { STATUS_T status; @@ -1017,12 +1091,19 @@ VOID OsArchMmuInitPerCPU(VOID) OsArmWriteTtbr0(0); ISB; } -//启动映射初始化 + +/*! + * @brief OsInitMappingStartUp 开始初始化mmu + * + * @return + * + * @see + */ VOID OsInitMappingStartUp(VOID) { OsArmInvalidateTlbBarrier();//使TLB失效 - OsSwitchTmpTTB();//切换到临时TTB + OsSwitchTmpTTB();//切换到临时TTB ,请想想为何要切换到临时 @note_thinking OsSetKSectionAttr(KERNEL_VMM_BASE, FALSE); OsSetKSectionAttr(UNCACHED_VMM_BASE, TRUE); diff --git a/kernel/base/core/los_process.c b/kernel/base/core/los_process.c index d3b1ee15e20e35b0652b02d2702c3ec6e1dcdb06..a41cef7132a4f24a3fb9235ae2f48c677e73558d 100644 --- a/kernel/base/core/los_process.c +++ b/kernel/base/core/los_process.c @@ -1998,6 +1998,14 @@ LITE_OS_SEC_TEXT_INIT UINT32 OsUserInitProcess(VOID) } #endif +/*! + * @brief LOS_Exit + * 进程退出 + * @param status + * @return + * + * @see + */ LITE_OS_SEC_TEXT VOID LOS_Exit(INT32 status) { UINT32 intSave; @@ -2005,15 +2013,26 @@ LITE_OS_SEC_TEXT VOID LOS_Exit(INT32 status) /* The exit of a kernel - state process must be kernel - state and all threads must actively exit */ LosProcessCB *processCB = OsCurrProcessGet(); SCHEDULER_LOCK(intSave); - if (!OsProcessIsUserMode(processCB) && (processCB->threadNumber != 1)) { + if (!OsProcessIsUserMode(processCB) && (processCB->threadNumber != 1)) {//内核态下进程的退出方式,必须是所有的任务都退出了 SCHEDULER_UNLOCK(intSave); PRINT_ERR("Kernel-state processes with multiple threads are not allowed to exit directly\n"); return; } SCHEDULER_UNLOCK(intSave); - OsTaskExitGroup((UINT32)status); - OsProcessExit(OsCurrTaskGet(), (UINT32)status); + OsTaskExitGroup((UINT32)status);//退出进程组 + OsProcessExit(OsCurrTaskGet(), (UINT32)status);//进程退出 } + + +/*! 
+ * @brief LOS_GetUsedPIDList + * 获取使用中的进程列表 + * @param pidList + * @param pidMaxNum + * @return + * + * @see + */ LITE_OS_SEC_TEXT INT32 LOS_GetUsedPIDList(UINT32 *pidList, INT32 pidMaxNum) { LosProcessCB *pcb = NULL; @@ -2025,13 +2044,13 @@ LITE_OS_SEC_TEXT INT32 LOS_GetUsedPIDList(UINT32 *pidList, INT32 pidMaxNum) return 0; } SCHEDULER_LOCK(intSave); - while (OsProcessIDUserCheckInvalid(pid) == false) { + while (OsProcessIDUserCheckInvalid(pid) == false) {//遍历进程池 pcb = OS_PCB_FROM_PID(pid); pid++; - if (OsProcessIsUnused(pcb)) { + if (OsProcessIsUnused(pcb)) {//未使用的不算 continue; } - pidList[num] = pcb->processID; + pidList[num] = pcb->processID;//由参数带走 num++; if (num >= pidMaxNum) { break; @@ -2059,12 +2078,12 @@ LITE_OS_SEC_TEXT struct fd_table_s *LOS_GetFdTable(UINT32 pid) return files->fdt; } #endif -//获取当前进程的进程ID +/// 获取当前进程的进程ID LITE_OS_SEC_TEXT UINT32 LOS_GetCurrProcessID(VOID) { return OsCurrProcessGet()->processID; } -//按指定状态退出指定进程 +/// 按指定状态退出指定进程 LITE_OS_SEC_TEXT VOID OsProcessExit(LosTaskCB *runTask, INT32 status) { UINT32 intSave; @@ -2077,32 +2096,32 @@ LITE_OS_SEC_TEXT VOID OsProcessExit(LosTaskCB *runTask, INT32 status) OsProcessNaturalExit(runTask, status);//进程自然退出 SCHEDULER_UNLOCK(intSave); } -//获取系统支持的最大进程数目 +/// 获取系统支持的最大进程数目 LITE_OS_SEC_TEXT UINT32 LOS_GetSystemProcessMaximum(VOID) { return g_processMaxNum; } -//获取用户态进程的根进程,所有用户进程都是g_processCBArray[g_userInitProcess] fork来的 +/// 获取用户态进程的根进程,所有用户进程都是g_processCBArray[g_userInitProcess] fork来的 LITE_OS_SEC_TEXT UINT32 OsGetUserInitProcessID(VOID) { return g_userInitProcess;//用户态根进程 序号为 1 } - +/// 获取内核态根进程 LITE_OS_SEC_TEXT UINT32 OsGetKernelInitProcessID(VOID) { return g_kernelInitProcess; } - +/// 获取内核态空闲进程 LITE_OS_SEC_TEXT UINT32 OsGetIdleProcessID(VOID) { return g_kernelIdleProcess; } -//设置进程的信号处理函数 +/// 设置进程的信号处理函数 LITE_OS_SEC_TEXT VOID OsSetSigHandler(UINTPTR addr) { OsCurrProcessGet()->sigHandler = addr; } -//获取进程的信号处理函数 +/// 获取进程的信号处理函数 LITE_OS_SEC_TEXT UINTPTR OsGetSigHandler(VOID) { return OsCurrProcessGet()->sigHandler; diff --git a/kernel/base/core/los_task.c b/kernel/base/core/los_task.c index 9620842b62c9418b6b1df18f1237cbd4a37cad55..a4767ec1c701aaea18dcf722d2d59e21c6dbdb80 100644 --- a/kernel/base/core/los_task.c +++ b/kernel/base/core/los_task.c @@ -203,8 +203,17 @@ STATIC INLINE VOID OsInsertTCBToFreeList(LosTaskCB *taskCB) taskCB->taskStatus = OS_TASK_STATUS_UNUSED; taskCB->processID = OS_INVALID_VALUE; LOS_ListAdd(&g_losFreeTask, &taskCB->pendList);//内核挂在g_losFreeTask上的任务都是由pendList完成 -}//查找task 就通过 OS_TCB_FROM_PENDLIST 来完成,相当于由LOS_DL_LIST找到LosTaskCB -//把那些和参数任务绑在一起的task唤醒. +} + +/*! + * @brief OsTaskJoinPostUnsafe + * 查找task 通过 OS_TCB_FROM_PENDLIST 来完成,相当于由LOS_DL_LIST找到LosTaskCB, + * 将那些和参数任务绑在一起的task唤醒. 
+ * @param taskCB + * @return + * + * @see + */ LITE_OS_SEC_TEXT_INIT VOID OsTaskJoinPostUnsafe(LosTaskCB *taskCB) { LosTaskCB *resumedTask = NULL; @@ -212,13 +221,13 @@ LITE_OS_SEC_TEXT_INIT VOID OsTaskJoinPostUnsafe(LosTaskCB *taskCB) if (taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) {//join任务处理 if (!LOS_ListEmpty(&taskCB->joinList)) {//注意到了这里 joinList中的节点身上都有阻塞标签 resumedTask = OS_TCB_FROM_PENDLIST(LOS_DL_LIST_FIRST(&(taskCB->joinList)));//通过贴有JOIN标签链表的第一个节点找到Task - OsTaskWakeClearPendMask(resumedTask); - OsSchedTaskWake(resumedTask); + OsTaskWakeClearPendMask(resumedTask);//清除任务的挂起标记 + OsSchedTaskWake(resumedTask);//唤醒任务 } } taskCB->taskStatus |= OS_TASK_STATUS_EXIT;//贴上任务退出标签 } - +/// 挂起任务,任务进入等待链表,Join代表是支持通过一个任务去唤醒其他的任务 LITE_OS_SEC_TEXT UINT32 OsTaskJoinPendUnsafe(LosTaskCB *taskCB) { LosProcessCB *processCB = OS_PCB_FROM_PID(taskCB->processID); @@ -235,8 +244,8 @@ LITE_OS_SEC_TEXT UINT32 OsTaskJoinPendUnsafe(LosTaskCB *taskCB) } if ((taskCB->taskStatus & OS_TASK_FLAG_PTHREAD_JOIN) && LOS_ListEmpty(&taskCB->joinList)) { - OsTaskWaitSetPendMask(OS_TASK_WAIT_JOIN, taskCB->taskID, LOS_WAIT_FOREVER); - return OsSchedTaskWait(&taskCB->joinList, LOS_WAIT_FOREVER, TRUE); + OsTaskWaitSetPendMask(OS_TASK_WAIT_JOIN, taskCB->taskID, LOS_WAIT_FOREVER);//设置任务的等待标记 + return OsSchedTaskWait(&taskCB->joinList, LOS_WAIT_FOREVER, TRUE);//永久等待 } return LOS_EINVAL; @@ -366,6 +375,14 @@ STATIC INLINE VOID OsTaskSyncDestroy(UINT32 syncSignal) } #ifdef LOSCFG_KERNEL_SMP +/*! + * @brief OsTaskSyncWait + * 任务同步等待,通过信号量保持同步 + * @param taskCB + * @return + * + * @see + */ STATIC INLINE UINT32 OsTaskSyncWait(const LosTaskCB *taskCB) { #ifdef LOSCFG_KERNEL_SMP_TASK_SYNC @@ -1509,7 +1526,7 @@ EXIT: SCHEDULER_UNLOCK(intSave); return err; } - +/// //退群并发起kill信号 STATIC VOID OsExitGroupActiveTaskKilled(LosProcessCB *processCB, LosTaskCB *taskCB) { INT32 ret; @@ -1550,7 +1567,7 @@ LITE_OS_SEC_TEXT VOID OsTaskExitGroup(UINT32 status) LosProcessCB *processCB = OsCurrProcessGet(); LosTaskCB *currTask = OsCurrTaskGet(); - SCHEDULER_LOCK(intSave);//调度自旋锁,这块锁的代码有点多,这块容易出问题!出问题也不好复现,希望鸿蒙有充分测试这块的功能. @note_thinking + SCHEDULER_LOCK(intSave);//调度自旋锁,这块锁的代码有点多,容易出问题!出问题也不好复现,希望鸿蒙有充分测试这块的功能. 
@note_thinking if ((processCB->processStatus & OS_PROCESS_FLAG_EXIT) || !OsProcessIsUserMode(processCB)) { SCHEDULER_UNLOCK(intSave); return; @@ -1559,25 +1576,25 @@ LITE_OS_SEC_TEXT VOID OsTaskExitGroup(UINT32 status) processCB->processStatus |= OS_PROCESS_FLAG_EXIT;//贴上进程要退出的标签 processCB->threadGroupID = currTask->taskID; - LOS_DL_LIST *list = &processCB->threadSiblingList; + LOS_DL_LIST *list = &processCB->threadSiblingList;//获取进程的任务链表遍历 LOS_DL_LIST *head = list; do { LosTaskCB *taskCB = LOS_DL_LIST_ENTRY(list->pstNext, LosTaskCB, threadList); if ((taskCB->taskStatus & (OS_TASK_STATUS_INIT | OS_TASK_STATUS_EXIT) || ((taskCB->taskStatus & OS_TASK_STATUS_READY) && !taskCB->sig.sigIntLock)) && !(taskCB->taskStatus & OS_TASK_STATUS_RUNNING)) { - OsTaskDeleteInactive(processCB, taskCB); + OsTaskDeleteInactive(processCB, taskCB);//先删除不活动的任务 } else { - if (taskCB != currTask) { - OsExitGroupActiveTaskKilled(processCB, taskCB); + if (taskCB != currTask) {//非当前任务 + OsExitGroupActiveTaskKilled(processCB, taskCB);//退群并发起kill信号 } else { - /* Skip the current task */ + /* Skip the current task | 跳过当前任务 */ list = list->pstNext; } } - } while (head != list->pstNext); + } while (head != list->pstNext);//遍历链表 - SCHEDULER_UNLOCK(intSave); + SCHEDULER_UNLOCK(intSave);//释放锁 LOS_ASSERT(processCB->threadNumber == 1);//这一趟下来,进程只有一个正在活动的任务 return; diff --git a/kernel/base/include/los_percpu_pri.h b/kernel/base/include/los_percpu_pri.h index 2815292e561e3daf44d1dfb413243495bebdb7c5..c7d11fc470cb74693d11b9f54f4fdd913ad8b5b5 100644 --- a/kernel/base/include/los_percpu_pri.h +++ b/kernel/base/include/los_percpu_pri.h @@ -63,8 +63,8 @@ typedef struct { SortLinkAttribute swtmrSortLink; ///< swtmr sort link | 挂还没到时间的定时器 SPIN_LOCK_S swtmrSortLinkSpin; ///< swtmr sort link spin lock |* 操作swtmrSortLink链表的自旋锁 UINT64 responseTime; ///< Response time for current nuclear Tick interrupts | 当前CPU核 Tick 中断的响应时间 - UINT64 tickStartTime; ///< The time when the tick interrupt starts processing | - UINT32 responseID; ///< The response ID of the current nuclear TICK interrupt | 当前CPU核TICK中断的响应ID + UINT64 tickStartTime; ///< The time when the tick interrupt starts processing | 开始处理tick中断的时间 + UINT32 responseID; ///< The response ID of the current nuclear TICK interrupt | 当前CPU核TICK中断的响应任务ID UINTPTR runProcess; ///< The address of the process control block pointer to which the current kernel is running | 当前进程控制块地址 UINT32 idleTaskID; ///< idle task id | 每个CPU都有一个空闲任务 见于 OsIdleTaskCreate UINT32 taskLockCnt; ///< task lock flag | 任务锁的数量,当 > 0 的时候,需要重新调度了 diff --git a/kernel/base/include/los_sched_pri.h b/kernel/base/include/los_sched_pri.h index 5ca4b35559b72e91f84b1d04d81af3e079b2e764..31af6cbb2c6fdfbf4ac5ae49787f67d4c0b84172 100644 --- a/kernel/base/include/los_sched_pri.h +++ b/kernel/base/include/los_sched_pri.h @@ -92,7 +92,7 @@ STATIC INLINE VOID OsSchedIrqStartTime(VOID) typedef enum { INT_NO_RESCH = 0x0, /**< no needs to schedule | 不需要调度*/ INT_PEND_RESCH = 0x1, /**< pending schedule flag | 因不允许抢占或正在中断导致的不允许调度*/ - INT_PEND_TICK = 0x2, /**< pending tick | 更新过期时间遇到正在中断导致的不允许调度*/ + INT_PEND_TICK = 0x2, /**< pending tick | 更新到期时间遇到正在中断导致的不允许调度*/ } SchedFlag; /* Check if preemptable with counter flag */ diff --git a/kernel/base/include/los_signal.h b/kernel/base/include/los_signal.h index 0b6578d41c6279c6ffdea738cac7c34cd0484c6b..34ccf658c7cae8e002ec7f7f30b9bc479726dd4a 100644 --- a/kernel/base/include/los_signal.h +++ b/kernel/base/include/los_signal.h @@ -226,9 +226,9 @@ typedef struct { sigset_t sigwaitmask; /*! 
Waiting for pending signals | 任务在等待哪些信号的到来 */ siginfo_t sigunbinfo; /*! Signal info when task unblocked | 任务解锁时的信号信息 */ SigInfoListNode *tmpInfoListHead; /*! Signal info List */ - unsigned int sigIntLock; - void *sigContext; - unsigned int count; + unsigned int sigIntLock;///< 信号中断锁 + void *sigContext; ///< 信号上下文 + unsigned int count;///< 信号数量 } sig_cb; #define SIGEV_THREAD_ID 4 diff --git a/kernel/base/include/los_task_pri.h b/kernel/base/include/los_task_pri.h index 6d1c18532fdf1863e968ef39bc6fc15c7ecd1553..38ebd41723e70adb03f1e5a2752f99458c2be2e4 100644 --- a/kernel/base/include/los_task_pri.h +++ b/kernel/base/include/los_task_pri.h @@ -403,7 +403,7 @@ extern LosTaskCB *g_taskCBArray;///< 外部变量 任务池 默认128个 */ typedef struct {//时间片结构体,任务轮询 LosTaskCB *task; /**< Current running task | 当前运行着的任务*/ - UINT16 time; /**< Expiration time point | 过期时间点*/ + UINT16 time; /**< Expiration time point | 到期时间点*/ UINT16 timeout; /**< Expiration duration | 有效期*/ } OsTaskRobin; /// 获取当前CPU core运行的任务 diff --git a/kernel/base/sched/sched_sq/los_sched.c b/kernel/base/sched/sched_sq/los_sched.c index 9150ceaec48bb1a5c7202503a6723fcd5b2960ab..9ea2cb2f8c7d4fe0c89f2b876f4c06dfc9284736 100644 --- a/kernel/base/sched/sched_sq/los_sched.c +++ b/kernel/base/sched/sched_sq/los_sched.c @@ -52,11 +52,11 @@ #define OS_32BIT_MAX 0xFFFFFFFFUL #define OS_SCHED_FIFO_TIMEOUT 0x7FFFFFFF -#define OS_PRIORITY_QUEUE_NUM 32 +#define OS_PRIORITY_QUEUE_NUM 32 ///< 就绪队列数量 #define PRIQUEUE_PRIOR0_BIT 0x80000000U -#define OS_SCHED_TIME_SLICES_MIN ((5000 * OS_SYS_NS_PER_US) / OS_NS_PER_CYCLE) /* 5ms */ -#define OS_SCHED_TIME_SLICES_MAX ((LOSCFG_BASE_CORE_TIMESLICE_TIMEOUT * OS_SYS_NS_PER_US) / OS_NS_PER_CYCLE) -#define OS_SCHED_TIME_SLICES_DIFF (OS_SCHED_TIME_SLICES_MAX - OS_SCHED_TIME_SLICES_MIN) +#define OS_SCHED_TIME_SLICES_MIN ((5000 * OS_SYS_NS_PER_US) / OS_NS_PER_CYCLE) /* 5ms 调度最小时间片 */ +#define OS_SCHED_TIME_SLICES_MAX ((LOSCFG_BASE_CORE_TIMESLICE_TIMEOUT * OS_SYS_NS_PER_US) / OS_NS_PER_CYCLE) ///< 调度最大时间片 +#define OS_SCHED_TIME_SLICES_DIFF (OS_SCHED_TIME_SLICES_MAX - OS_SCHED_TIME_SLICES_MIN) ///< 最大,最小二者差 #define OS_SCHED_READY_MAX 30 #define OS_TIME_SLICE_MIN (INT32)((50 * OS_SYS_NS_PER_US) / OS_NS_PER_CYCLE) /* 50us */ @@ -237,7 +237,7 @@ UINT32 OsShellShowSchedParam(VOID) return LOS_NOK; } #endif - +///< 设置节拍器类型 UINT32 OsSchedSetTickTimerType(UINT32 timerType) { switch (timerType) { @@ -254,30 +254,30 @@ UINT32 OsSchedSetTickTimerType(UINT32 timerType) return LOS_OK; } -///设置调度开始时间 +/// 设置调度开始时间 STATIC VOID OsSchedSetStartTime(UINT64 currCycle) { if (g_sysSchedStartTime == OS_64BIT_MAX) { g_sysSchedStartTime = currCycle; } } -///升级时间片 +/// 更新时间片 STATIC INLINE VOID OsTimeSliceUpdate(LosTaskCB *taskCB, UINT64 currTime) { - LOS_ASSERT(currTime >= taskCB->startTime); + LOS_ASSERT(currTime >= taskCB->startTime); //断言参数时间必须大于开始时间 - INT32 incTime = (currTime - taskCB->startTime - taskCB->irqUsedTime); + INT32 incTime = (currTime - taskCB->startTime - taskCB->irqUsedTime);//计算增加的时间 LOS_ASSERT(incTime >= 0); - if (taskCB->policy == LOS_SCHED_RR) { - taskCB->timeSlice -= incTime; + if (taskCB->policy == LOS_SCHED_RR) {//抢占调度 + taskCB->timeSlice -= incTime; //任务的时间片减少 #ifdef LOSCFG_SCHED_DEBUG taskCB->schedStat.timeSliceRealTime += incTime; #endif } - taskCB->irqUsedTime = 0; - taskCB->startTime = currTime; + taskCB->irqUsedTime = 0;//中断时间置0 + taskCB->startTime = currTime;//重新设置开始时间 #ifdef LOSCFG_SCHED_DEBUG taskCB->schedStat.allRuntime += incTime; @@ -325,7 +325,7 @@ STATIC INLINE VOID OsSchedTickReload(Percpu *currCpu, UINT64 
nextResponseTime, U } #endif } - +/// 设置下一个到期时间 STATIC INLINE VOID OsSchedSetNextExpireTime(UINT64 startTime, UINT32 responseID, UINT64 taskEndTime, UINT32 oldResponseID) { @@ -385,13 +385,13 @@ VOID OsSchedUpdateExpireTime(UINT64 startTime) OsSchedSetNextExpireTime(startTime, runTask->taskID, endTime, runTask->taskID); } - +/// 计算时间片 STATIC INLINE UINT32 OsSchedCalculateTimeSlice(UINT16 proPriority, UINT16 priority) { UINT32 retTime; UINT32 readyTasks; - SchedQueue *queueList = &g_sched->queueList[proPriority]; + SchedQueue *queueList = &g_sched->queueList[proPriority];//拿到优先级调度队列 readyTasks = queueList->readyTasks[priority]; if (readyTasks > OS_SCHED_READY_MAX) { return OS_SCHED_TIME_SLICES_MIN; @@ -540,17 +540,26 @@ STATIC INLINE BOOL OsSchedScanTimerList(VOID) return needSchedule; } +/*! + * @brief OsSchedEnTaskQueue + * 添加任务到进程的就绪队列中 + * @param processCB + * @param taskCB + * @return + * + * @see + */ STATIC INLINE VOID OsSchedEnTaskQueue(LosTaskCB *taskCB, LosProcessCB *processCB) { - LOS_ASSERT(!(taskCB->taskStatus & OS_TASK_STATUS_READY)); + LOS_ASSERT(!(taskCB->taskStatus & OS_TASK_STATUS_READY));//必须是就绪状态,因为只有就绪状态才能入就绪队列 switch (taskCB->policy) { - case LOS_SCHED_RR: { - if (taskCB->timeSlice > OS_TIME_SLICE_MIN) { - OsSchedPriQueueEnHead(processCB->priority, &taskCB->pendList, taskCB->priority); - } else { - taskCB->initTimeSlice = OsSchedCalculateTimeSlice(processCB->priority, taskCB->priority); - taskCB->timeSlice = taskCB->initTimeSlice; + case LOS_SCHED_RR: {//抢占式跳读 + if (taskCB->timeSlice > OS_TIME_SLICE_MIN) {//时间片大于最小的时间片 50微妙 + OsSchedPriQueueEnHead(processCB->priority, &taskCB->pendList, taskCB->priority);//插入对应优先级的就绪队列中 + } else {//如果时间片不够了,咋办? + taskCB->initTimeSlice = OsSchedCalculateTimeSlice(processCB->priority, taskCB->priority);//重新计算时间片 + taskCB->timeSlice = taskCB->initTimeSlice;// OsSchedPriQueueEnTail(processCB->priority, &taskCB->pendList, taskCB->priority); #ifdef LOSCFG_SCHED_DEBUG taskCB->schedStat.timeSliceTime = taskCB->schedStat.timeSliceRealTime; @@ -651,14 +660,14 @@ VOID OsSchedTaskExit(LosTaskCB *taskCB) taskCB->taskStatus &= ~(OS_TASK_STATUS_DELAY | OS_TASK_STATUS_PEND_TIME); } } -///通过本函数可以看出 yield 的真正含义是主动让出CPU,当它自己还是在就绪队列中,跑末位去排队了.像个活雷锋. +///通过本函数可以看出 yield 的真正含义是主动让出CPU,那怎么安置自己呢? 跑到末尾重新排队. 真是个活雷锋,好同志啊!!! VOID OsSchedYield(VOID) { LosTaskCB *runTask = OsCurrTaskGet(); runTask->timeSlice = 0;//时间片变成0,代表主动让出运行时间. - runTask->startTime = OsGetCurrSchedTimeCycle(); + runTask->startTime = OsGetCurrSchedTimeCycle();//重新获取开始时间 OsSchedTaskEnQueue(runTask);//跑队列尾部排队 OsSchedResched();//发起调度 } @@ -985,30 +994,39 @@ STATIC INLINE VOID OsSchedSwitchProcess(LosProcessCB *runProcess, LosProcessCB * OsCurrProcessSet(newProcess); } +/*! + * @brief OsSchedTaskSwitch 实现新老两个任务切换 + * + * @param newTask + * @param runTask + * @return + * + * @see + */ STATIC VOID OsSchedTaskSwitch(LosTaskCB *runTask, LosTaskCB *newTask) { UINT64 endTime; - OsSchedSwitchCheck(runTask, newTask); + OsSchedSwitchCheck(runTask, newTask);//任务内容检查 - runTask->taskStatus &= ~OS_TASK_STATUS_RUNNING; - newTask->taskStatus |= OS_TASK_STATUS_RUNNING; + runTask->taskStatus &= ~OS_TASK_STATUS_RUNNING; //当前任务去掉正在运行的标签 + newTask->taskStatus |= OS_TASK_STATUS_RUNNING; //新任务贴上正在运行的标签,虽标签贴上了,但目前还是在老任务中跑. 
#ifdef LOSCFG_KERNEL_SMP /* mask new running task's owner processor */ - runTask->currCpu = OS_TASK_INVALID_CPUID; - newTask->currCpu = ArchCurrCpuid(); + runTask->currCpu = OS_TASK_INVALID_CPUID;//褫夺当前任务的CPU使用权 + newTask->currCpu = ArchCurrCpuid(); //标记新任务获取当前CPU使用权 #endif - OsCurrTaskSet((VOID *)newTask); - LosProcessCB *newProcess = OS_PCB_FROM_PID(newTask->processID); - LosProcessCB *runProcess = OS_PCB_FROM_PID(runTask->processID); - if (runProcess != newProcess) { - OsSchedSwitchProcess(runProcess, newProcess); + OsCurrTaskSet((VOID *)newTask);//设置新任务为当前任务 + LosProcessCB *newProcess = OS_PCB_FROM_PID(newTask->processID);//获取新任务所在进程实体 + LosProcessCB *runProcess = OS_PCB_FROM_PID(runTask->processID);//获取老任务所在进程实体 + if (runProcess != newProcess) {//如果不是同一个进程,就需要换行进程上下文,也就是切换MMU,切换进程空间 + OsSchedSwitchProcess(runProcess, newProcess);//切换进程上下文 } - if (OsProcessIsUserMode(newProcess)) { - OsCurrUserTaskSet(newTask->userArea); + if (OsProcessIsUserMode(newProcess)) {//如果是用户模式即应用进程 + OsCurrUserTaskSet(newTask->userArea);//设置用户态栈空间 } #ifdef LOSCFG_KERNEL_CPUP @@ -1018,15 +1036,16 @@ STATIC VOID OsSchedTaskSwitch(LosTaskCB *runTask, LosTaskCB *newTask) #ifdef LOSCFG_SCHED_DEBUG UINT64 waitStartTime = newTask->startTime; #endif - if (runTask->taskStatus & OS_TASK_STATUS_READY) { - /* When a thread enters the ready queue, its slice of time is updated */ + if (runTask->taskStatus & OS_TASK_STATUS_READY) {//注意老任务可不一定是就绪状态 + /* When a thread enters the ready queue, its slice of time is updated + 当一个线程(任务)进入就绪队列时,它的时间片被更新 */ newTask->startTime = runTask->startTime; } else { /* The currently running task is blocked */ - newTask->startTime = OsGetCurrSchedTimeCycle(); + newTask->startTime = OsGetCurrSchedTimeCycle();//重新获取时间 /* The task is in a blocking state and needs to update its time slice before pend */ - OsTimeSliceUpdate(runTask, newTask->startTime); - + OsTimeSliceUpdate(runTask, newTask->startTime);//更新时间片 + //两种状态下将老任务放入CPU的工作链表中 if (runTask->taskStatus & (OS_TASK_STATUS_PEND_TIME | OS_TASK_STATUS_DELAY)) { OsAdd2SortLink(&runTask->sortList, runTask->startTime, runTask->waitTimes, OS_SORT_LINK_TASK); } @@ -1046,7 +1065,7 @@ STATIC VOID OsSchedTaskSwitch(LosTaskCB *runTask, LosTaskCB *newTask) runTask->schedStat.switchCount++; #endif /* do the task context switch */ - OsTaskSchedule(newTask, runTask); + OsTaskSchedule(newTask, runTask); //执行汇编代码 } VOID OsSchedIrqEndCheckNeedSched(VOID) @@ -1080,37 +1099,44 @@ VOID OsSchedIrqEndCheckNeedSched(VOID) OsSchedUpdateExpireTime(runTask->startTime); } } - +/// 申请一次调度 VOID OsSchedResched(VOID) { LOS_ASSERT(LOS_SpinHeld(&g_taskSpin)); #ifdef LOSCFG_KERNEL_SMP - LOS_ASSERT(OsPercpuGet()->taskLockCnt == 1); + LOS_ASSERT(OsPercpuGet()->taskLockCnt == 1); // @note_thinking 为何此处一定得 == 1, 大于1不行吗? #else LOS_ASSERT(OsPercpuGet()->taskLockCnt == 0); #endif - OsPercpuGet()->schedFlag &= ~INT_PEND_RESCH; + OsPercpuGet()->schedFlag &= ~INT_PEND_RESCH;//去掉标签 LosTaskCB *runTask = OsCurrTaskGet(); - LosTaskCB *newTask = OsGetTopTask(); + LosTaskCB *newTask = OsGetTopTask();//获取最高优先级任务 if (runTask == newTask) { return; } - OsSchedTaskSwitch(runTask, newTask); + OsSchedTaskSwitch(runTask, newTask);//CPU将真正的换任务执行 } +/*! 
+ * @brief LOS_Schedule 任务调度主函数 + * + * @return + * + * @see + */ VOID LOS_Schedule(VOID) { UINT32 intSave; LosTaskCB *runTask = OsCurrTaskGet(); - if (OS_INT_ACTIVE) { - OsPercpuGet()->schedFlag |= INT_PEND_RESCH; + if (OS_INT_ACTIVE) { //中断发生中...,需停止调度 + OsPercpuGet()->schedFlag |= INT_PEND_RESCH;//贴上原因 return; } - if (!OsPreemptable()) { + if (!OsPreemptable()) {//当不可抢占时直接返回 return; } @@ -1121,12 +1147,12 @@ VOID LOS_Schedule(VOID) */ SCHEDULER_LOCK(intSave); - OsTimeSliceUpdate(runTask, OsGetCurrSchedTimeCycle()); + OsTimeSliceUpdate(runTask, OsGetCurrSchedTimeCycle());//更新时间片 - /* add run task back to ready queue */ - OsSchedTaskEnQueue(runTask); + /* add run task back to ready queue | 添加任务到就绪队列*/ + OsSchedTaskEnQueue(runTask); - /* reschedule to new thread */ + /* reschedule to new thread | 申请调度,CPU可能将换任务执行*/ OsSchedResched(); SCHEDULER_UNLOCK(intSave); diff --git a/kernel/base/sched/sched_sq/los_sortlink.c b/kernel/base/sched/sched_sq/los_sortlink.c index f4f876c475013623680bd08820d3e05c09b70fd7..3ac76d350670f3d37c6cbf320382114599816624 100644 --- a/kernel/base/sched/sched_sq/los_sortlink.c +++ b/kernel/base/sched/sched_sq/los_sortlink.c @@ -92,7 +92,7 @@ VOID OsDeleteNodeSortLink(SortLinkAttribute *sortLinkHeader, SortLinkList *sortL SET_SORTLIST_VALUE(sortList, OS_SORT_LINK_INVALID_TIME);//重置响应时间 sortLinkHeader->nodeNum--;//cpu的工作量减少一份 } -/// 获取下一个结点的过期时间 +/// 获取下一个结点的到期时间 STATIC INLINE UINT64 OsGetSortLinkNextExpireTime(SortLinkAttribute *sortHeader, UINT64 startTime) { LOS_DL_LIST *head = &sortHeader->sortLink; @@ -215,7 +215,7 @@ UINT64 OsGetNextExpireTime(UINT64 startTime) SortLinkAttribute *swtmrHeader = &cpu->swtmrSortLink; LOS_SpinLockSave(&cpu->taskSortLinkSpin, &intSave); - UINT64 taskExpirTime = OsGetSortLinkNextExpireTime(taskHeader, startTime);//拿到下一个过期时间,注意此处拿到的一定是最短的时间 + UINT64 taskExpirTime = OsGetSortLinkNextExpireTime(taskHeader, startTime);//拿到下一个到期时间,注意此处拿到的一定是最短的时间 LOS_SpinUnlockRestore(&cpu->taskSortLinkSpin, intSave); LOS_SpinLockSave(&cpu->swtmrSortLinkSpin, &intSave); diff --git a/zzz/git/push.sh b/zzz/git/push.sh index bd2ae274290b626b921b53caa930a2c75965d658..04e163a275013a5cc0da0dbd2f0f47c0a017bd16 100644 --- a/zzz/git/push.sh +++ b/zzz/git/push.sh @@ -1,5 +1,5 @@ git add -A -git commit -m ' CPU的工作量是如何分配的, 读懂对 SortLinkList 的注解即可 +git commit -m ' 对任务模块更详细的注解 百万汉字注解 + 百篇博客分析 => 挖透鸿蒙内核源码 博客输出站点(国内):http://weharmonyos.com 博客输出站点(国外):https://weharmony.github.io
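The file-header comment added to los_arch_mmu.c in this patch explains that LiteOS-A uses a two-level page table: each 4-byte L1 descriptor maps 1MiB, so the 1GiB user space needs 1024 L1 entries (a 4KiB table), and L2 tables are allocated on demand. The stand-alone sketch below only illustrates that arithmetic for the ARM short-descriptor format; the macro names are invented for the example and are not LiteOS-A identifiers.

/* Illustrative only: how a 32-bit virtual address splits into L1/L2 indexes. */
#include <stdio.h>

#define L1_ENTRY_SPAN  0x100000u              /* one L1 descriptor maps 1MiB        */
#define L1_INDEX(va)   ((va) >> 20)           /* bits [31:20] select the L1 entry   */
#define L2_INDEX(va)   (((va) >> 12) & 0xFFu) /* bits [19:12] select the L2 entry   */

int main(void)
{
    unsigned int va = 0x1234ABCDu;

    /* 1GiB of user space / 1MiB per L1 entry = 1024 entries = 4KiB of descriptors */
    printf("L1 entries needed for 1GiB: %u\n", 0x40000000u / L1_ENTRY_SPAN);
    printf("va 0x%08X -> L1 index %u, L2 index %u, page offset 0x%03X\n",
           va, L1_INDEX(va), L2_INDEX(va), va & 0xFFFu);
    return 0;
}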
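The development flow listed in that header (map with LOS_ArchMmuMap, inspect with LOS_ArchMmuQuery, adjust with LOS_ArchMmuChangeProt, remap with LOS_ArchMmuMove, tear down with LOS_ArchMmuUnmap) reads as the in-kernel sketch below. Signatures follow the hunks above; the VM_MAP_REGION_FLAG_PERM_* constants and the assumption that count is a page count are my reading of the surrounding kernel headers, so treat this as a sketch rather than a verified call sequence.

/* Hedged usage sketch of the LOS_ArchMmu* API annotated in this patch.
 * Assumes archMmu is an initialized LosArchMmu (e.g. a process's vmSpace->archMmu),
 * count is in 4KiB pages, and the permission flags come from los_vm_map.h. */
STATIC VOID MmuMapExample(LosArchMmu *archMmu, VADDR_T vaddr, PADDR_T paddr)
{
    UINT32 flags = VM_MAP_REGION_FLAG_PERM_READ | VM_MAP_REGION_FLAG_PERM_WRITE;
    PADDR_T outPaddr = 0;
    UINT32 outFlags = 0;

    /* 1. establish the mapping: one page of vaddr -> paddr */
    if (LOS_ArchMmuMap(archMmu, vaddr, paddr, 1, flags) != 1) {
        return; /* fewer pages mapped than requested */
    }

    /* 2. query it back: physical address and attributes return via out-parameters */
    (VOID)LOS_ArchMmuQuery(archMmu, vaddr, &outPaddr, &outFlags);

    /* 3. drop the write permission but keep the mapping itself */
    (VOID)LOS_ArchMmuChangeProt(archMmu, vaddr, 1, VM_MAP_REGION_FLAG_PERM_READ);

    /* 4. finally remove the mapping again */
    (VOID)LOS_ArchMmuUnmap(archMmu, vaddr, 1);
}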
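LOS_GetUsedPIDList, annotated in the los_process.c hunk, walks the process pool, copies the PID of every in-use process into the caller's buffer (at most pidMaxNum entries) and, as the loop suggests, returns how many it collected. A minimal caller might look like this; the buffer size of 64 is arbitrary for the example, and PRINTK is used only as a convenient kernel print.

/* Sketch of a caller; real code might size the buffer from
 * LOS_GetSystemProcessMaximum() instead of the hard-coded 64 used here. */
STATIC VOID ShowUsedPids(VOID)
{
    UINT32 pidList[64] = {0};  /* arbitrary example capacity */
    INT32 num = LOS_GetUsedPIDList(pidList, 64);

    for (INT32 i = 0; i < num; i++) {
        PRINTK("pid in use: %u\n", pidList[i]);
    }
}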
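The joinList / OS_TASK_FLAG_PTHREAD_JOIN machinery annotated in los_task.c (OsTaskJoinPendUnsafe blocks the waiter on the target's join list, OsTaskJoinPostUnsafe wakes it when the target exits) is what backs the familiar user-level join pattern. The snippet below is plain POSIX user code, shown only to make the kernel-side comments concrete.

#include <pthread.h>
#include <stdio.h>

static void *Worker(void *arg)
{
    (void)arg;
    /* ... do some work ... */
    return (void *)42;   /* value handed back to the joiner */
}

int main(void)
{
    pthread_t tid;
    void *ret = NULL;

    if (pthread_create(&tid, NULL, Worker, NULL) != 0) {
        return 1;
    }
    /* the calling thread blocks on the worker's join list until the worker exits */
    pthread_join(tid, &ret);
    printf("worker returned %ld\n", (long)ret);
    return 0;
}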
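The note on OsSchedYield explains that yielding means voluntarily giving up the CPU: the task zeroes its remaining time slice, re-enters its ready queue at the tail, and triggers a reschedule so that same-priority peers run first. From task code this is normally reached through the public LOS_TaskYield(), which as far as I can tell funnels into OsSchedYield; DoOneChunkOfWork below is a hypothetical helper, not a kernel function.

/* Cooperative worker sketch; assumes los_task.h is included. */
STATIC BOOL DoOneChunkOfWork(VOID);   /* placeholder: TRUE while work remains */

STATIC VOID WorkerTaskEntry(VOID)
{
    while (DoOneChunkOfWork()) {
        (VOID)LOS_TaskYield();   /* move to the tail of our priority's ready queue */
    }
}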
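The OS_SCHED_TIME_SLICES_MIN/MAX/DIFF macros and OsSchedCalculateTimeSlice in los_sched.c embody the idea that the busier a priority level is, the shorter each slice becomes: with more than OS_SCHED_READY_MAX (30) ready tasks the slice is pinned at the 5ms minimum. The hunk is cut off before the scaling expression, so the linear interpolation below is an assumption, written as plain host C purely to show the shape of the curve; the 20ms maximum is an assumed value of LOSCFG_BASE_CORE_TIMESLICE_TIMEOUT.

#include <stdio.h>

#define SLICE_MIN_MS  5u    /* OS_SCHED_TIME_SLICES_MIN corresponds to 5ms          */
#define SLICE_MAX_MS  20u   /* assumed LOSCFG_BASE_CORE_TIMESLICE_TIMEOUT, in ms    */
#define READY_MAX     30u   /* OS_SCHED_READY_MAX                                   */

static unsigned int CalcSliceMs(unsigned int readyTasks)
{
    if (readyTasks > READY_MAX) {
        return SLICE_MIN_MS;  /* heavily loaded priority level: minimum slice */
    }
    /* assumed linear interpolation: an empty queue gets MAX, a full queue gets MIN */
    return SLICE_MAX_MS - ((SLICE_MAX_MS - SLICE_MIN_MS) * readyTasks) / READY_MAX;
}

int main(void)
{
    for (unsigned int n = 0; n <= 40; n += 10) {
        printf("%2u ready tasks -> %u ms time slice\n", n, CalcSliceMs(n));
    }
    return 0;
}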