diff --git a/bsp/qemu-vexpress-a9/.config b/bsp/qemu-vexpress-a9/.config index 05071cff2786d7bded26a4799f3fdf0808409a80..7c6c2dc1df2c7a790d23e5c2a9414bd71dc3218d 100644 --- a/bsp/qemu-vexpress-a9/.config +++ b/bsp/qemu-vexpress-a9/.config @@ -7,6 +7,8 @@ # RT-Thread Kernel # CONFIG_RT_NAME_MAX=8 +CONFIG_RT_USING_SMP=y +CONFIG_RT_CPUS_NR=2 CONFIG_RT_ALIGN_SIZE=4 # CONFIG_RT_THREAD_PRIORITY_8 is not set CONFIG_RT_THREAD_PRIORITY_32=y @@ -15,6 +17,7 @@ CONFIG_RT_THREAD_PRIORITY_MAX=32 CONFIG_RT_TICK_PER_SECOND=100 CONFIG_RT_USING_OVERFLOW_CHECK=y CONFIG_RT_USING_HOOK=y +CONFIG_RT_USING_IDLE_HOOK=y CONFIG_RT_IDEL_HOOK_LIST_SIZE=4 CONFIG_IDLE_THREAD_STACK_SIZE=1024 CONFIG_RT_USING_TIMER_SOFT=y @@ -66,6 +69,7 @@ CONFIG_RT_CONSOLE_DEVICE_NAME="uart0" CONFIG_ARCH_ARM=y CONFIG_ARCH_ARM_CORTEX_A=y CONFIG_ARCH_ARM_CORTEX_A9=y +# CONFIG_ARCH_CPU_STACK_GROWS_UPWARD is not set # # RT-Thread Components @@ -144,14 +148,15 @@ CONFIG_RT_USING_SERIAL=y CONFIG_RT_USING_I2C=y CONFIG_RT_USING_I2C_BITOPS=y CONFIG_RT_USING_PIN=y +# CONFIG_RT_USING_ADC is not set # CONFIG_RT_USING_PWM is not set CONFIG_RT_USING_MTD_NOR=y CONFIG_RT_USING_MTD_NAND=y CONFIG_RT_MTD_NAND_DEBUG=y # CONFIG_RT_USING_MTD is not set +# CONFIG_RT_USING_PM is not set CONFIG_RT_USING_RTC=y CONFIG_RT_USING_SOFT_RTC=y -# CONFIG_RTC_SYNC_USING_NTP is not set CONFIG_RT_USING_SDIO=y CONFIG_RT_SDIO_STACK_SIZE=512 CONFIG_RT_SDIO_THREAD_PRIORITY=15 @@ -216,6 +221,7 @@ CONFIG_SAL_PROTO_FAMILIES_NUM=4 CONFIG_RT_USING_LWIP=y # CONFIG_RT_USING_LWIP141 is not set CONFIG_RT_USING_LWIP202=y +# CONFIG_RT_USING_LWIP210 is not set CONFIG_RT_USING_LWIP_IPV6=y # CONFIG_RT_LWIP_IGMP is not set CONFIG_RT_LWIP_ICMP=y @@ -291,6 +297,7 @@ CONFIG_LOG_TRACE_USING_LEVEL_INFO=y # CONFIG_LOG_TRACE_USING_LEVEL_DEBUG is not set # CONFIG_LOG_TRACE_USING_MEMLOG is not set # CONFIG_RT_USING_RYM is not set +# CONFIG_RT_USING_ULOG is not set # # RT-Thread online packages @@ -376,18 +383,15 @@ CONFIG_LOG_TRACE_USING_LEVEL_INFO=y # CONFIG_PKG_USING_SQLITE is not set # CONFIG_PKG_USING_RTI is not set # CONFIG_PKG_USING_LITTLEVGL2RTT is not set -# CONFIG_PKG_USING_CMSIS is not set -# CONFIG_PKG_USING_DFS_YAFFS is not set # # peripheral libraries and drivers # +# CONFIG_PKG_USING_STM32F4_HAL is not set +# CONFIG_PKG_USING_STM32F4_DRIVERS is not set # CONFIG_PKG_USING_REALTEK_AMEBA is not set # CONFIG_PKG_USING_SHT2X is not set # CONFIG_PKG_USING_AHT10 is not set -# CONFIG_PKG_USING_AP3216C is not set -# CONFIG_PKG_USING_STM32_SDIO is not set -# CONFIG_PKG_USING_ICM20608 is not set # # miscellaneous packages @@ -405,14 +409,7 @@ CONFIG_LOG_TRACE_USING_LEVEL_INFO=y # # sample package # - -# -# samples: kernel and components samples -# -# CONFIG_PKG_USING_KERNEL_SAMPLES is not set -# CONFIG_PKG_USING_FILESYSTEM_SAMPLES is not set -# CONFIG_PKG_USING_NETWORK_SAMPLES is not set -# CONFIG_PKG_USING_PERIPHERAL_SAMPLES is not set +# CONFIG_PKG_USING_SAMPLES is not set # # example package: hello diff --git a/bsp/qemu-vexpress-a9/cpu/context_gcc.S b/bsp/qemu-vexpress-a9/cpu/context_gcc.S index 86efcede9f431ccbdd3eaa0f85f6253425cbcd65..2b13690e674c478b96b99a213d696dccefeee7bd 100644 --- a/bsp/qemu-vexpress-a9/cpu/context_gcc.S +++ b/bsp/qemu-vexpress-a9/cpu/context_gcc.S @@ -20,9 +20,19 @@ * Change Logs: * Date Author Notes * 2013-07-05 Bernard the first version + * 2018-11-22 Jesven in the smp version, using macro to + * define rt_hw_interrupt_enable and rt_hw_interrupt_disable + * rt_hw_context_switch_interrupt switches to the new thread directly */ +#include "rtconfig.h" .section .text, "ax" 
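Note: the lines that follow alias the assembly-level rt_hw_interrupt_disable / rt_hw_interrupt_enable symbols to the per-CPU rt_hw_local_irq_disable / rt_hw_local_irq_enable when RT_USING_SMP is defined, while C callers of rt_hw_interrupt_disable() are redirected to rt_cpus_lock() by the rthw.h change later in this patch. A minimal sketch of how an ordinary critical section composes on top of that mapping (the shared counter and function name are illustrative, not part of the patch):

    #include <rthw.h>       /* under RT_USING_SMP: rt_hw_interrupt_disable -> rt_cpus_lock   */
    #include <rtthread.h>   /*                     rt_hw_interrupt_enable  -> rt_cpus_unlock */

    static rt_uint32_t shared_counter;      /* hypothetical state shared between CPUs */

    void counter_add(rt_uint32_t n)
    {
        /* On an SMP build this disables local IRQs and, in thread context, takes the
         * global _cpus_lock; on a single-core build it only masks interrupts. */
        rt_base_t level = rt_hw_interrupt_disable();

        shared_counter += n;

        rt_hw_interrupt_enable(level);      /* release _cpus_lock, restore IRQ state */
    }

Existing callers keep working unchanged; the SMP build simply widens the critical section so it is also serialized against the other CPU via _cpus_lock.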
+ +#ifdef RT_USING_SMP +#define rt_hw_interrupt_disable rt_hw_local_irq_disable +#define rt_hw_interrupt_enable rt_hw_local_irq_enable +#endif + /* * rt_base_t rt_hw_interrupt_disable(); */ @@ -48,6 +58,11 @@ rt_hw_interrupt_enable: rt_hw_context_switch_to: ldr sp, [r0] @ get new task stack pointer +#ifdef RT_USING_SMP + mov r0, r1 + bl rt_cpus_lock_status_restore +#endif /*RT_USING_SMP*/ + ldmfd sp!, {r4} @ pop new task spsr msr spsr_cxsf, r4 @@ -77,6 +92,11 @@ rt_hw_context_switch: str sp, [r0] @ store sp in preempted tasks TCB ldr sp, [r1] @ get new task stack pointer +#ifdef RT_USING_SMP + mov r0, r2 + bl rt_cpus_lock_status_restore +#endif /*RT_USING_SMP*/ + ldmfd sp!, {r4} @ pop new task cpsr to spsr msr spsr_cxsf, r4 ldmfd sp!, {r0-r12, lr, pc}^ @ pop new task r0-r12, lr & pc, copy spsr to cpsr @@ -84,11 +104,60 @@ rt_hw_context_switch: /* * void rt_hw_context_switch_interrupt(rt_uint32 from, rt_uint32 to); */ +.equ Mode_USR, 0x10 +.equ Mode_FIQ, 0x11 +.equ Mode_IRQ, 0x12 +.equ Mode_SVC, 0x13 +.equ Mode_ABT, 0x17 +.equ Mode_UND, 0x1B +.equ Mode_SYS, 0x1F + +.equ I_Bit, 0x80 @ when I bit is set, IRQ is disabled +.equ F_Bit, 0x40 @ when F bit is set, FIQ is disabled + .globl rt_thread_switch_interrupt_flag .globl rt_interrupt_from_thread .globl rt_interrupt_to_thread .globl rt_hw_context_switch_interrupt rt_hw_context_switch_interrupt: +#ifdef RT_USING_SMP + /* r0 :irq_mod context + * r1 :addr of from_thread's sp + * r2 :addr of to_thread's sp + * r3 :to_thread's tcb + */ + + @ r0 point to {r0-r3} in stack + push {r1 - r3} + mov r1, r0 + add r0, r0, #4*4 + ldmfd r0!, {r4-r12,lr}@ reload saved registers + mrs r3, spsr @ get cpsr of interrupt thread + sub r2, lr, #4 @ save old task's pc to r2 + msr cpsr_c, #I_Bit|F_Bit|Mode_SVC + + stmfd sp!, {r2} @ push old task's pc + stmfd sp!, {r4-r12,lr}@ push old task's lr,r12-r4 + ldmfd r0, {r4-r7} @ restore r0-r3 of the interrupt thread + stmfd sp!, {r4-r7} @ push old task's r0-r3 + stmfd sp!, {r3} @ push old task's cpsr + + msr cpsr_c, #I_Bit|F_Bit|Mode_IRQ + pop {r1 - r3} + mov sp, r0 + msr cpsr_c, #I_Bit|F_Bit|Mode_SVC + str sp, [r1] + + ldr sp, [r2] + mov r0, r3 + bl rt_cpus_lock_status_restore + + ldmfd sp!, {r4} @ pop new task's cpsr to spsr + msr spsr_cxsf, r4 + + ldmfd sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr + +#else /*RT_USING_SMP*/ ldr r2, =rt_thread_switch_interrupt_flag ldr r3, [r2] cmp r3, #1 @@ -101,3 +170,4 @@ _reswitch: ldr r2, =rt_interrupt_to_thread @ set rt_interrupt_to_thread str r1, [r2] bx lr +#endif /*RT_USING_SMP*/ diff --git a/bsp/qemu-vexpress-a9/cpu/cpu.c b/bsp/qemu-vexpress-a9/cpu/cpu.c index 426da965aded8c3de3a99c054d59767c36eca380..10099809a3069e4d48a1de3e148226f6ae3a7704 100644 --- a/bsp/qemu-vexpress-a9/cpu/cpu.c +++ b/bsp/qemu-vexpress-a9/cpu/cpu.c @@ -10,12 +10,26 @@ * Change Logs: * Date Author Notes * 2011-09-15 Bernard first version + * 2018-11-22 Jesven add rt_hw_cpu_id() */ #include #include #include +#ifdef RT_USING_SMP +int rt_hw_cpu_id(void) +{ + int cpu_id; + __asm__ volatile ( + "mrc p15, 0, %0, c0, c0, 5" + :"=r"(cpu_id) + ); + cpu_id &= 0xf; + return cpu_id; +}; +#endif + /** * @addtogroup ARM CPU */ diff --git a/bsp/qemu-vexpress-a9/cpu/gic.c b/bsp/qemu-vexpress-a9/cpu/gic.c index cbef00af22da0da7828a63224373823aec21bdec..66876518ce90fc1b2d7d7c23bc9edc66f1391e73 100644 --- a/bsp/qemu-vexpress-a9/cpu/gic.c +++ b/bsp/qemu-vexpress-a9/cpu/gic.c @@ -11,8 +11,11 @@ * Date Author Notes * 2013-07-20 Bernard first version * 2014-04-03 Grissiom many enhancements + * 2018-11-22 
Jesven add rt_hw_ipi_send() + * add rt_hw_ipi_handler_install() */ +#include #include #include @@ -278,3 +281,19 @@ void arm_gic_set_group(rt_uint32_t index, int vector, int group) vector) |= (1 << (vector % 32)); } } + +#ifdef RT_USING_SMP +void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask) + { + /* note: ipi_vector maybe different with irq_vector */ + GIC_DIST_SOFTINT(_gic_table[0].dist_hw_base) = (cpu_mask << 16) | ipi_vector; +} +#endif + +#ifdef RT_USING_SMP +void rt_hw_ipi_handler_install(int ipi_vector, rt_isr_handler_t ipi_isr_handler) +{ + /* note: ipi_vector maybe different with irq_vector */ + rt_hw_interrupt_install(ipi_vector, ipi_isr_handler, 0, "IPI_HANDLER"); +} +#endif diff --git a/bsp/qemu-vexpress-a9/cpu/gic.h b/bsp/qemu-vexpress-a9/cpu/gic.h index 42c061dc1b595ebab84b180c33bc6d8b8cb3e274..9d0368ec943536e075ce68632ae90bf8d4872a3b 100644 --- a/bsp/qemu-vexpress-a9/cpu/gic.h +++ b/bsp/qemu-vexpress-a9/cpu/gic.h @@ -27,6 +27,7 @@ int arm_gic_get_active_irq(rt_uint32_t index); void arm_gic_ack(rt_uint32_t index, int irq); void arm_gic_dump_type(rt_uint32_t index); +void rt_hw_vector_init(void); #endif diff --git a/bsp/qemu-vexpress-a9/cpu/interrupt.c b/bsp/qemu-vexpress-a9/cpu/interrupt.c index e66f74604258dfa2ad04d03140c3447c48fe08c6..febf86a66e20aba70b204b689a85810fe0fd25bd 100644 --- a/bsp/qemu-vexpress-a9/cpu/interrupt.c +++ b/bsp/qemu-vexpress-a9/cpu/interrupt.c @@ -10,6 +10,7 @@ * Change Logs: * Date Author Notes * 2013-07-06 Bernard first version + * 2018-11-22 Jesven add smp support */ #include @@ -19,21 +20,21 @@ #define MAX_HANDLERS NR_IRQS_PBA8 -extern volatile rt_uint8_t rt_interrupt_nest; - /* exception and interrupt handler table */ struct rt_irq_desc isr_table[MAX_HANDLERS]; +#ifndef RT_USING_SMP /* Those varibles will be accessed in ISR, so we need to share them. 
*/ rt_uint32_t rt_interrupt_from_thread; rt_uint32_t rt_interrupt_to_thread; rt_uint32_t rt_thread_switch_interrupt_flag; +#endif const unsigned int VECTOR_BASE = 0x00; extern void rt_cpu_vector_set_base(unsigned int addr); extern int system_vectors; -static void rt_hw_vector_init(void) +void rt_hw_vector_init(void) { rt_cpu_vector_set_base((unsigned int)&system_vectors); } @@ -60,10 +61,11 @@ void rt_hw_interrupt_init(void) arm_gic_cpu_init(0, gic_cpu_base); /* init interrupt nest, and context in thread sp */ - rt_interrupt_nest = 0; +#ifndef RT_USING_SMP rt_interrupt_from_thread = 0; rt_interrupt_to_thread = 0; rt_thread_switch_interrupt_flag = 0; +#endif } /** diff --git a/bsp/qemu-vexpress-a9/cpu/start_gcc.S b/bsp/qemu-vexpress-a9/cpu/start_gcc.S index 569b14fbc112063cddfa9c63b3a36c14494ef31c..d380d02d08ec1be1bd14183a7602cf13f8f628fb 100644 --- a/bsp/qemu-vexpress-a9/cpu/start_gcc.S +++ b/bsp/qemu-vexpress-a9/cpu/start_gcc.S @@ -20,8 +20,12 @@ * Change Logs: * Date Author Notes * 2013-07-05 Bernard the first version + * 2018-11-22 Jesven in the interrupt context, use rt_scheduler_do_irq_switch checks + * and switches to a new thread */ +#include "rtconfig.h" + .equ Mode_USR, 0x10 .equ Mode_FIQ, 0x11 .equ Mode_IRQ, 0x12 @@ -64,6 +68,17 @@ _reset: orr r0, r0, #0x13 msr cpsr_c, r0 + mrc p15, 0, r1, c1, c0, 1 + mov r0, #(1<<6) + orr r1, r0 + mcr p15, 0, r1, c1, c0, 1 //enable smp + + ldr lr, =after_enable_mmu + ldr r0, =mtbl + b enable_mmu + +after_enable_mmu: + /* setup stack */ bl stack_setup @@ -93,6 +108,7 @@ ctor_loop: ctor_end: /* start RT-Thread Kernel */ + bl flush_cache_all ldr pc, _rtthread_startup _rtthread_startup: .word rtthread_startup @@ -127,6 +143,85 @@ stack_setup: msr cpsr_c, #Mode_SVC|I_Bit|F_Bit bx lr + .global enable_mmu +enable_mmu: + orr r0, #0x18 + mcr p15, 0, r0, c2, c0, 0 @ttbr0 + + mov r0, #(1 << 5) @PD1=1 + mcr p15, 0, r0, c2, c0, 2 @ttbcr + + mov r0, #1 + mcr p15, 0, r0, c3, c0, 0 @dacr + + mov r0, #0 + mcr p15, 0, r0, c8, c7, 0 + mcr p15, 0, r0, c7, c5, 0 @iciallu + mcr p15, 0, r0, c7, c5, 6 @bpiall + + mrc p15, 0, r0, c1, c0, 0 + orr r0, #(1 | 4) + orr r0, #(1 << 12) + mcr p15, 0, r0, c1, c0, 0 + dsb + isb + mov pc, lr + +.global flush_cache_all +flush_cache_all: + stmfd sp!, {r0-r12, lr} + bl v7_flush_dcache_all + mov r0, #0 + mcr p15, 0, r0, c7, c5, 0 @ I+BTB cache invalidate + dsb + isb + ldmfd sp!, {r0-r12, lr} + mov pc, lr + + v7_flush_dcache_all: + dmb @ ensure ordering with previous memory accesses + mrc p15, 1, r0, c0, c0, 1 @ read clidr + ands r3, r0, #0x7000000 @ extract loc from clidr + mov r3, r3, lsr #23 @ left align loc bit field + beq finished @ if loc is 0, then no need to clean + mov r10, #0 @ start clean at cache level 0 +loop1: + add r2, r10, r10, lsr #1 @ work out 3x current cache level + mov r1, r0, lsr r2 @ extract cache type bits from clidr + and r1, r1, #7 @ mask of the bits for current cache only + cmp r1, #2 @ see what cache we have at this level + blt skip @ skip if no cache, or just i-cache + mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr + isb @ isb to sych the new cssr&csidr + mrc p15, 1, r1, c0, c0, 0 @ read the new csidr + and r2, r1, #7 @ extract the length of the cache lines + add r2, r2, #4 @ add 4 (line length offset) + ldr r4, =0x3ff + ands r4, r4, r1, lsr #3 @ find maximum number on the way size + clz r5, r4 @ find bit position of way size increment + ldr r7, =0x7fff + ands r7, r7, r1, lsr #13 @ extract max number of the index size +loop2: + mov r9, r4 @ create working copy of max way size +loop3: + orr 
r11, r10, r9, lsl r5 @ factor way and cache number into r11 + orr r11, r11, r7, lsl r2 @ factor index number into r11 + mcr p15, 0, r11, c7, c14, 2 @ clean & invalidate by set/way + subs r9, r9, #1 @ decrement the way + bge loop3 + subs r7, r7, #1 @ decrement the index + bge loop2 +skip: + add r10, r10, #2 @ increment cache number + cmp r3, r10 + bgt loop1 +finished: + mov r10, #0 @ swith back to cache level 0 + mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr + dsb + isb + mov pc, lr + /* exception handlers: undef, swi, padt, dabt, resv, irq, fiq */ .section .text.isr, "ax" .align 5 @@ -143,17 +238,25 @@ vector_fiq: .globl rt_interrupt_from_thread .globl rt_interrupt_to_thread -.globl rt_current_thread - .align 5 .globl vector_irq vector_irq: +#ifdef RT_USING_SMP + clrex +#endif stmfd sp!, {r0-r12,lr} bl rt_interrupt_enter bl rt_hw_trap_irq bl rt_interrupt_leave +#ifdef RT_USING_SMP + mov r0, sp + bl rt_scheduler_do_irq_switch + + ldmfd sp!, {r0-r12,lr} + subs pc, lr, #4 +#else @ if rt_thread_switch_interrupt_flag set, jump to @ rt_hw_context_switch_interrupt_do and don't return ldr r0, =rt_thread_switch_interrupt_flag @@ -198,6 +301,8 @@ rt_hw_context_switch_interrupt_do: ldmfd sp!, {r0-r12,lr,pc}^ @ pop new task's r0-r12,lr & pc, copy spsr to cpsr +#endif + .macro push_svc_reg sub sp, sp, #17 * 4 @/* Sizeof(struct rt_hw_exp_stack) */ stmia sp, {r0 - r12} @/* Calling r0-r12 */ @@ -244,3 +349,87 @@ vector_resv: push_svc_reg bl rt_hw_trap_resv b . + +#ifdef RT_USING_SMP +.global set_secondary_cpu_boot_address +set_secondary_cpu_boot_address: + ldr r0, =secondary_cpu_start + + mvn r1, #0 //0xffffffff + ldr r2, =0x10000034 + str r1, [r2] + str r0, [r2, #-4] + mov pc, lr + +.global secondary_cpu_start +secondary_cpu_start: + mrc p15, 0, r1, c1, c0, 1 + mov r0, #(1<<6) + orr r1, r0 + mcr p15, 0, r1, c1, c0, 1 //enable smp + + ldr r0, =mtbl + ldr lr, =1f + + b enable_mmu +1: + mrc p15, 0, r0, c1, c0, 0 + bic r0, #(1<<13) + mcr p15, 0, r0, c1, c0, 0 + + cps #Mode_IRQ + ldr sp, =irq_stack_2_limit + + cps #Mode_FIQ + ldr sp, =irq_stack_2_limit + + cps #Mode_SVC + ldr sp, =svc_stack_2_limit + + b secondary_cpu_c_start +#endif + +.bss +.align 2 //align to 2~2=4 +svc_stack_2: + .space (1 << 10) +svc_stack_2_limit: + +irq_stack_2: + .space (1 << 10) +irq_stack_2_limit: + +.data +#define DEVICE_MEM 0x10406 +#define NORMAL_MEM 0x1140e +.align 14 +mtbl: + + //vaddr: 0x00000000 + .rept 0x100 + .word 0x0 + .endr + + //vaddr: 0x10000000 + .equ mmu_tbl_map_paddr, 0x10000000 + .rept 0x400 + .word mmu_tbl_map_paddr | DEVICE_MEM + .equ mmu_tbl_map_paddr, mmu_tbl_map_paddr + 0x100000 + .endr + + //vaddr: 0x50000000 + .rept 0x100 + .word 0x0 + .endr + + //vaddr: 0x60000000 + .equ mmu_tbl_map_paddr, 0x60000000 + .rept 0x800 + .word mmu_tbl_map_paddr | NORMAL_MEM + .equ mmu_tbl_map_paddr, mmu_tbl_map_paddr + 0x100000 + .endr + + //vaddr: 0xe0000000 + .rept 0x200 + .word 0x0 + .endr diff --git a/bsp/qemu-vexpress-a9/cpu/trap.c b/bsp/qemu-vexpress-a9/cpu/trap.c index e3283d7e7e160ae535e7ce20869fbf28b83e11fe..a1f00d00755a2b9bf80772cb7f81b10a41f22ae2 100644 --- a/bsp/qemu-vexpress-a9/cpu/trap.c +++ b/bsp/qemu-vexpress-a9/cpu/trap.c @@ -20,7 +20,6 @@ #include "gic.h" -extern struct rt_thread *rt_current_thread; #ifdef RT_USING_FINSH extern long list_thread(void); #endif diff --git a/bsp/qemu-vexpress-a9/drivers/board.c b/bsp/qemu-vexpress-a9/drivers/board.c index 8be0a47103333e0025004215d2e1cbb45a34d454..26aad5bda158b5f32fe661c6942f4086c9ad760b 100644 --- a/bsp/qemu-vexpress-a9/drivers/board.c +++ 
b/bsp/qemu-vexpress-a9/drivers/board.c @@ -1,74 +1,24 @@ /* - * File : board.c - * This file is part of RT-Thread RTOS - * COPYRIGHT (C) 2012, RT-Thread Development Team + * Copyright (c) 2006-2018, RT-Thread Development Team * - * The license and distribution terms for this file may be - * found in the file LICENSE in this distribution or at - * http://www.rt-thread.org/license/LICENSE + * SPDX-License-Identifier: Apache-2.0 * * Change Logs: * Date Author Notes * 2012-11-20 Bernard the first version + * 2018-11-22 Jesven add rt_hw_spin_lock + * add rt_hw_spin_unlock + * add smp ipi init */ #include #include #include "board.h" - -#define TIMER_LOAD(hw_base) __REG32(hw_base + 0x00) -#define TIMER_VALUE(hw_base) __REG32(hw_base + 0x04) -#define TIMER_CTRL(hw_base) __REG32(hw_base + 0x08) -#define TIMER_CTRL_ONESHOT (1 << 0) -#define TIMER_CTRL_32BIT (1 << 1) -#define TIMER_CTRL_DIV1 (0 << 2) -#define TIMER_CTRL_DIV16 (1 << 2) -#define TIMER_CTRL_DIV256 (2 << 2) -#define TIMER_CTRL_IE (1 << 5) /* Interrupt Enable (versatile only) */ -#define TIMER_CTRL_PERIODIC (1 << 6) -#define TIMER_CTRL_ENABLE (1 << 7) - -#define TIMER_INTCLR(hw_base) __REG32(hw_base + 0x0c) -#define TIMER_RIS(hw_base) __REG32(hw_base + 0x10) -#define TIMER_MIS(hw_base) __REG32(hw_base + 0x14) -#define TIMER_BGLOAD(hw_base) __REG32(hw_base + 0x18) +#include "drv_timer.h" #define SYS_CTRL __REG32(REALVIEW_SCTL_BASE) -#define TIMER_HW_BASE REALVIEW_TIMER2_3_BASE - -static void rt_hw_timer_isr(int vector, void *param) -{ - rt_tick_increase(); - /* clear interrupt */ - TIMER_INTCLR(TIMER_HW_BASE) = 0x01; -} - -int rt_hw_timer_init(void) -{ - rt_uint32_t val; - - SYS_CTRL |= REALVIEW_REFCLK; - - /* Setup Timer0 for generating irq */ - val = TIMER_CTRL(TIMER_HW_BASE); - val &= ~TIMER_CTRL_ENABLE; - val |= (TIMER_CTRL_32BIT | TIMER_CTRL_PERIODIC | TIMER_CTRL_IE); - TIMER_CTRL(TIMER_HW_BASE) = val; - - TIMER_LOAD(TIMER_HW_BASE) = 1000; - - /* enable timer */ - TIMER_CTRL(TIMER_HW_BASE) |= TIMER_CTRL_ENABLE; - - rt_hw_interrupt_install(IRQ_PBA8_TIMER2_3, rt_hw_timer_isr, RT_NULL, "tick"); - rt_hw_interrupt_umask(IRQ_PBA8_TIMER2_3); - - return 0; -} -INIT_BOARD_EXPORT(rt_hw_timer_init); - void idle_wfi(void) { asm volatile ("wfi"); @@ -79,13 +29,57 @@ void idle_wfi(void) */ void rt_hw_board_init(void) { - /* initialzie hardware interrupt */ + /* initialize hardware interrupt */ rt_hw_interrupt_init(); + /* initialize system heap */ rt_system_heap_init(HEAP_BEGIN, HEAP_END); rt_components_board_init(); rt_console_set_device(RT_CONSOLE_DEVICE_NAME); rt_thread_idle_sethook(idle_wfi); + +#ifdef RT_USING_SMP + /* install IPI interrupt */ + rt_hw_interrupt_install(RT_SCHEDULE_IPI_IRQ, rt_scheduler_ipi_handler, RT_NULL, "ipi"); + rt_hw_interrupt_umask(RT_SCHEDULE_IPI_IRQ); +#endif } +#ifdef RT_USING_SMP +void rt_hw_spin_lock(rt_hw_spinlock_t *lock) +{ + unsigned long tmp; + unsigned long newval; + rt_hw_spinlock_t lockval; + + __asm__ __volatile__( + "pld [%0]" + ::"r"(&lock->slock) + ); + + __asm__ __volatile__( + "1: ldrex %0, [%3]\n" + " add %1, %0, %4\n" + " strex %2, %1, [%3]\n" + " teq %2, #0\n" + " bne 1b" + : "=&r" (lockval), "=&r" (newval), "=&r" (tmp) + : "r" (&lock->slock), "I" (1 << 16) + : "cc"); + + while (lockval.tickets.next != lockval.tickets.owner) { + __asm__ __volatile__("wfe":::"memory"); + lockval.tickets.owner = *(volatile unsigned short *)(&lock->tickets.owner); + } + + __asm__ volatile ("dmb":::"memory"); +} + +void rt_hw_spin_unlock(rt_hw_spinlock_t *lock) +{ + __asm__ volatile ("dmb":::"memory"); + 
lock->tickets.owner++; + __asm__ volatile ("dsb ishst\nsev":::"memory"); +} +#endif /*RT_USING_SMP*/ diff --git a/bsp/qemu-vexpress-a9/drivers/drv_timer.c b/bsp/qemu-vexpress-a9/drivers/drv_timer.c new file mode 100644 index 0000000000000000000000000000000000000000..a4701422026c750510df93d5901dceb454eb6085 --- /dev/null +++ b/bsp/qemu-vexpress-a9/drivers/drv_timer.c @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2006-2018, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2018-11-22 Jesven first version + */ + +#include +#include +#include + +#include "board.h" + +#define TIMER01_HW_BASE 0x10011000 +#define TIMER23_HW_BASE 0x10012000 + +#define TIMER_LOAD(hw_base) __REG32(hw_base + 0x00) +#define TIMER_VALUE(hw_base) __REG32(hw_base + 0x04) +#define TIMER_CTRL(hw_base) __REG32(hw_base + 0x08) +#define TIMER_CTRL_ONESHOT (1 << 0) +#define TIMER_CTRL_32BIT (1 << 1) +#define TIMER_CTRL_DIV1 (0 << 2) +#define TIMER_CTRL_DIV16 (1 << 2) +#define TIMER_CTRL_DIV256 (2 << 2) +#define TIMER_CTRL_IE (1 << 5) /* Interrupt Enable (versatile only) */ +#define TIMER_CTRL_PERIODIC (1 << 6) +#define TIMER_CTRL_ENABLE (1 << 7) + +#define TIMER_INTCLR(hw_base) __REG32(hw_base + 0x0c) +#define TIMER_RIS(hw_base) __REG32(hw_base + 0x10) +#define TIMER_MIS(hw_base) __REG32(hw_base + 0x14) +#define TIMER_BGLOAD(hw_base) __REG32(hw_base + 0x18) + +#define TIMER_LOAD(hw_base) __REG32(hw_base + 0x00) +#define TIMER_VALUE(hw_base) __REG32(hw_base + 0x04) +#define TIMER_CTRL(hw_base) __REG32(hw_base + 0x08) +#define TIMER_CTRL_ONESHOT (1 << 0) +#define TIMER_CTRL_32BIT (1 << 1) +#define TIMER_CTRL_DIV1 (0 << 2) +#define TIMER_CTRL_DIV16 (1 << 2) +#define TIMER_CTRL_DIV256 (2 << 2) +#define TIMER_CTRL_IE (1 << 5) /* Interrupt Enable (versatile only) */ +#define TIMER_CTRL_PERIODIC (1 << 6) +#define TIMER_CTRL_ENABLE (1 << 7) + +#define TIMER_INTCLR(hw_base) __REG32(hw_base + 0x0c) +#define TIMER_RIS(hw_base) __REG32(hw_base + 0x10) +#define TIMER_MIS(hw_base) __REG32(hw_base + 0x14) +#define TIMER_BGLOAD(hw_base) __REG32(hw_base + 0x18) + +#define SYS_CTRL __REG32(REALVIEW_SCTL_BASE) +#define TIMER_HW_BASE REALVIEW_TIMER2_3_BASE + +static void rt_hw_timer_isr(int vector, void *param) +{ + rt_tick_increase(); + /* clear interrupt */ + TIMER_INTCLR(TIMER_HW_BASE) = 0x01; +} + +int rt_hw_timer_init(void) +{ + rt_uint32_t val; + + SYS_CTRL |= REALVIEW_REFCLK; + + /* Setup Timer0 for generating irq */ + val = TIMER_CTRL(TIMER_HW_BASE); + val &= ~TIMER_CTRL_ENABLE; + val |= (TIMER_CTRL_32BIT | TIMER_CTRL_PERIODIC | TIMER_CTRL_IE); + TIMER_CTRL(TIMER_HW_BASE) = val; + + TIMER_LOAD(TIMER_HW_BASE) = 1000; + + /* enable timer */ + TIMER_CTRL(TIMER_HW_BASE) |= TIMER_CTRL_ENABLE; + + rt_hw_interrupt_install(IRQ_PBA8_TIMER2_3, rt_hw_timer_isr, RT_NULL, "tick"); + rt_hw_interrupt_umask(IRQ_PBA8_TIMER2_3); + + return 0; +} +INIT_BOARD_EXPORT(rt_hw_timer_init); + +void timer_init(int timer, unsigned int preload) +{ + uint32_t val; + + if (timer == 0) + { + /* Setup Timer0 for generating irq */ + val = TIMER_CTRL(TIMER01_HW_BASE); + val &= ~TIMER_CTRL_ENABLE; + val |= (TIMER_CTRL_32BIT | TIMER_CTRL_PERIODIC | TIMER_CTRL_IE); + TIMER_CTRL(TIMER01_HW_BASE) = val; + + TIMER_LOAD(TIMER01_HW_BASE) = preload; + + /* enable timer */ + TIMER_CTRL(TIMER01_HW_BASE) |= TIMER_CTRL_ENABLE; + } + else + { + /* Setup Timer1 for generating irq */ + val = TIMER_CTRL(TIMER23_HW_BASE); + val &= ~TIMER_CTRL_ENABLE; + val |= (TIMER_CTRL_32BIT | TIMER_CTRL_PERIODIC | TIMER_CTRL_IE); + 
TIMER_CTRL(TIMER23_HW_BASE) = val; + + TIMER_LOAD(TIMER23_HW_BASE) = preload; + + /* enable timer */ + TIMER_CTRL(TIMER23_HW_BASE) |= TIMER_CTRL_ENABLE; + } +} + +void timer_clear_pending(int timer) +{ + if (timer == 0) + { + TIMER_INTCLR(TIMER01_HW_BASE) = 0x01; + } + else + { + TIMER_INTCLR(TIMER23_HW_BASE) = 0x01; + } +} diff --git a/bsp/qemu-vexpress-a9/drivers/drv_timer.h b/bsp/qemu-vexpress-a9/drivers/drv_timer.h new file mode 100644 index 0000000000000000000000000000000000000000..718f5d43cf230b973f376b8d646267eed9ac9bcf --- /dev/null +++ b/bsp/qemu-vexpress-a9/drivers/drv_timer.h @@ -0,0 +1,17 @@ +/* + * Copyright (c) 2006-2018, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2018-11-22 Jesven first version + */ + +#ifndef DRV_TIMER_H__ +#define DRV_TIMER_H__ + +void timer_init(int timer, unsigned int preload); +void timer_clear_pending(int timer); + +#endif diff --git a/bsp/qemu-vexpress-a9/drivers/secondary_cpu.c b/bsp/qemu-vexpress-a9/drivers/secondary_cpu.c new file mode 100644 index 0000000000000000000000000000000000000000..b8b543692920797f0249db82ec80cdcafe8735df --- /dev/null +++ b/bsp/qemu-vexpress-a9/drivers/secondary_cpu.c @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2006-2018, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2018-11-22 Jesven first version + */ + +#include +#include +#include + +#include "board.h" +#include "gic.h" +#include "drv_timer.h" + +#ifdef RT_USING_SMP +static void rt_hw_timer2_isr(int vector, void *param) +{ + rt_tick_increase(); + /* clear interrupt */ + timer_clear_pending(0); +} + +void rt_hw_secondary_cpu_up(void) +{ + extern void set_secondary_cpu_boot_address(void); + + set_secondary_cpu_boot_address(); + __asm__ volatile ("dsb":::"memory"); + rt_hw_ipi_send(0, 1 << 1); +} + +void secondary_cpu_c_start(void) +{ + rt_hw_vector_init(); + + rt_hw_spin_lock(&_cpus_lock); + + arm_gic_cpu_init(0, REALVIEW_GIC_CPU_BASE); + arm_gic_set_cpu(0, IRQ_PBA8_TIMER0_1, 0x2); + + timer_init(0, 1000); + rt_hw_interrupt_install(IRQ_PBA8_TIMER0_1, rt_hw_timer2_isr, RT_NULL, "tick"); + rt_hw_interrupt_umask(IRQ_PBA8_TIMER0_1); + + /* install IPI interrupt */ + rt_hw_interrupt_install(RT_SCHEDULE_IPI_IRQ, rt_scheduler_ipi_handler, RT_NULL, "ipi"); + rt_hw_interrupt_umask(RT_SCHEDULE_IPI_IRQ); + + rt_system_scheduler_start(); +} + +void rt_hw_secondary_cpu_idle_exec(void) +{ + asm volatile ("wfe":::"memory", "cc"); +} + +#endif diff --git a/bsp/qemu-vexpress-a9/link.lds b/bsp/qemu-vexpress-a9/link.lds index 51b2ced922fb862ec7f29eff8ca463a2a1c8e8b9..78c38cde1143ecab73f53b5356de5e25766bece2 100644 --- a/bsp/qemu-vexpress-a9/link.lds +++ b/bsp/qemu-vexpress-a9/link.lds @@ -2,7 +2,7 @@ OUTPUT_FORMAT("elf32-littlearm", "elf32-littlearm", "elf32-littlearm") OUTPUT_ARCH(arm) SECTIONS { - . = 0x60000000; + . = 0x60010000; __text_start = .; .text : diff --git a/bsp/qemu-vexpress-a9/qemu-nographic.sh b/bsp/qemu-vexpress-a9/qemu-nographic.sh index 61abb6878da82665bc38ebfc47ff6a229dc77ec8..b4168b8d5c9f78c2ea23735cf04e558463721e83 100755 --- a/bsp/qemu-vexpress-a9/qemu-nographic.sh +++ b/bsp/qemu-vexpress-a9/qemu-nographic.sh @@ -2,5 +2,5 @@ if [ ! 
-f "sd.bin" ]; then dd if=/dev/zero of=sd.bin bs=1024 count=65536 fi -qemu-system-arm -M vexpress-a9 -kernel rtthread.elf -nographic -sd sd.bin -net nic -net tap +qemu-system-arm -M vexpress-a9 -smp cpus=2 -kernel rtthread.bin -nographic -sd sd.bin -net nic -net tap diff --git a/bsp/qemu-vexpress-a9/qemu.bat b/bsp/qemu-vexpress-a9/qemu.bat index 4a4acb33247da50e01652d241c5f63474be06dc3..460020c1518b20344468ee27e2cc9c58f93a7b98 100644 --- a/bsp/qemu-vexpress-a9/qemu.bat +++ b/bsp/qemu-vexpress-a9/qemu.bat @@ -3,4 +3,4 @@ if exist sd.bin goto run qemu-img create -f raw sd.bin 64M :run -qemu-system-arm -M vexpress-a9 -kernel rtthread.elf -serial stdio -sd sd.bin +qemu-system-arm -M vexpress-a9 -smp cpus=2 -kernel rtthread.bin -serial stdio -sd sd.bin diff --git a/bsp/qemu-vexpress-a9/qemu.sh b/bsp/qemu-vexpress-a9/qemu.sh index c8fef610c71ba98ffc22aefe60e25746b86391d7..ff2cec6f810b0d5a86776547af1a3a66addd82a0 100755 --- a/bsp/qemu-vexpress-a9/qemu.sh +++ b/bsp/qemu-vexpress-a9/qemu.sh @@ -2,4 +2,4 @@ if [ ! -f "sd.bin" ]; then dd if=/dev/zero of=sd.bin bs=1024 count=65536 fi -qemu-system-arm -M vexpress-a9 -kernel rtthread.elf -serial stdio -sd sd.bin +qemu-system-arm -M vexpress-a9 -smp cpus=2 -kernel rtthread.bin -serial stdio -sd sd.bin diff --git a/bsp/qemu-vexpress-a9/rtconfig.h b/bsp/qemu-vexpress-a9/rtconfig.h index 02d54c452eebef60d1b61a13a6edce2ed0f7a6e7..2ac692c6920359a409247dfced9bad2daf9f22ca 100644 --- a/bsp/qemu-vexpress-a9/rtconfig.h +++ b/bsp/qemu-vexpress-a9/rtconfig.h @@ -7,18 +7,33 @@ /* RT-Thread Kernel */ #define RT_NAME_MAX 8 +#define RT_USING_SMP +#define RT_CPUS_NR 2 #define RT_ALIGN_SIZE 4 +/* RT_THREAD_PRIORITY_8 is not set */ #define RT_THREAD_PRIORITY_32 +/* RT_THREAD_PRIORITY_256 is not set */ #define RT_THREAD_PRIORITY_MAX 32 #define RT_TICK_PER_SECOND 100 #define RT_USING_OVERFLOW_CHECK #define RT_USING_HOOK +#define RT_USING_IDLE_HOOK #define RT_IDEL_HOOK_LIST_SIZE 4 #define IDLE_THREAD_STACK_SIZE 1024 #define RT_USING_TIMER_SOFT #define RT_TIMER_THREAD_PRIO 4 #define RT_TIMER_THREAD_STACK_SIZE 1024 #define RT_DEBUG +/* RT_DEBUG_INIT_CONFIG is not set */ +/* RT_DEBUG_THREAD_CONFIG is not set */ +/* RT_DEBUG_SCHEDULER_CONFIG is not set */ +/* RT_DEBUG_IPC_CONFIG is not set */ +/* RT_DEBUG_TIMER_CONFIG is not set */ +/* RT_DEBUG_IRQ_CONFIG is not set */ +/* RT_DEBUG_MEM_CONFIG is not set */ +/* RT_DEBUG_SLAB_CONFIG is not set */ +/* RT_DEBUG_MEMHEAP_CONFIG is not set */ +/* RT_DEBUG_MODULE_CONFIG is not set */ /* Inter-Thread communication */ @@ -33,13 +48,17 @@ #define RT_USING_MEMPOOL #define RT_USING_MEMHEAP +/* RT_USING_NOHEAP is not set */ #define RT_USING_SMALL_MEM +/* RT_USING_SLAB is not set */ +/* RT_USING_MEMHEAP_AS_HEAP is not set */ #define RT_USING_MEMTRACE #define RT_USING_HEAP /* Kernel Device Object */ #define RT_USING_DEVICE +/* RT_USING_DEVICE_OPS is not set */ #define RT_USING_INTERRUPT_INFO #define RT_USING_CONSOLE #define RT_CONSOLEBUF_SIZE 128 @@ -47,6 +66,7 @@ #define ARCH_ARM #define ARCH_ARM_CORTEX_A #define ARCH_ARM_CORTEX_A9 +/* ARCH_CPU_STACK_GROWS_UPWARD is not set */ /* RT-Thread Components */ @@ -67,11 +87,14 @@ #define FINSH_HISTORY_LINES 5 #define FINSH_USING_SYMTAB #define FINSH_USING_DESCRIPTION +/* FINSH_ECHO_DISABLE_DEFAULT is not set */ #define FINSH_THREAD_PRIORITY 20 #define FINSH_THREAD_STACK_SIZE 4096 #define FINSH_CMD_SIZE 80 +/* FINSH_USING_AUTH is not set */ #define FINSH_USING_MSH #define FINSH_USING_MSH_DEFAULT +/* FINSH_USING_MSH_ONLY is not set */ #define FINSH_ARG_MAX 10 /* Device virtual file system */ @@ -81,33 
+104,48 @@ #define DFS_FILESYSTEMS_MAX 2 #define DFS_FILESYSTEM_TYPES_MAX 2 #define DFS_FD_MAX 16 +/* RT_USING_DFS_MNTTABLE is not set */ #define RT_USING_DFS_ELMFAT /* elm-chan's FatFs, Generic FAT Filesystem Module */ #define RT_DFS_ELM_CODE_PAGE 437 #define RT_DFS_ELM_WORD_ACCESS +/* RT_DFS_ELM_USE_LFN_0 is not set */ +/* RT_DFS_ELM_USE_LFN_1 is not set */ +/* RT_DFS_ELM_USE_LFN_2 is not set */ #define RT_DFS_ELM_USE_LFN_3 #define RT_DFS_ELM_USE_LFN 3 #define RT_DFS_ELM_MAX_LFN 255 #define RT_DFS_ELM_DRIVES 2 #define RT_DFS_ELM_MAX_SECTOR_SIZE 4096 +/* RT_DFS_ELM_USE_ERASE is not set */ #define RT_DFS_ELM_REENTRANT #define RT_USING_DFS_DEVFS #define RT_USING_DFS_ROMFS #define RT_USING_DFS_RAMFS +/* RT_USING_DFS_UFFS is not set */ +/* RT_USING_DFS_JFFS2 is not set */ +/* RT_USING_DFS_NFS is not set */ /* Device Drivers */ #define RT_USING_DEVICE_IPC #define RT_PIPE_BUFSZ 512 #define RT_USING_SERIAL +/* RT_USING_CAN is not set */ +/* RT_USING_HWTIMER is not set */ +/* RT_USING_CPUTIME is not set */ #define RT_USING_I2C #define RT_USING_I2C_BITOPS #define RT_USING_PIN +/* RT_USING_ADC is not set */ +/* RT_USING_PWM is not set */ #define RT_USING_MTD_NOR #define RT_USING_MTD_NAND #define RT_MTD_NAND_DEBUG +/* RT_USING_MTD is not set */ +/* RT_USING_PM is not set */ #define RT_USING_RTC #define RT_USING_SOFT_RTC #define RT_USING_SDIO @@ -116,18 +154,28 @@ #define RT_MMCSD_STACK_SIZE 1024 #define RT_MMCSD_THREAD_PREORITY 22 #define RT_MMCSD_MAX_PARTITION 16 +/* RT_SDIO_DEBUG is not set */ #define RT_USING_SPI #define RT_USING_SPI_MSD #define RT_USING_SFUD #define RT_SFUD_USING_SFDP #define RT_SFUD_USING_FLASH_INFO_TABLE +/* RT_DEBUG_SFUD is not set */ +/* RT_USING_W25QXX is not set */ +/* RT_USING_GD is not set */ +/* RT_USING_ENC28J60 is not set */ +/* RT_USING_SPI_WIFI is not set */ #define RT_USING_WDT +/* RT_USING_AUDIO is not set */ /* Using WiFi */ +/* RT_USING_WIFI is not set */ /* Using USB */ +/* RT_USING_USB_HOST is not set */ +/* RT_USING_USB_DEVICE is not set */ /* POSIX layer and C standard library */ @@ -137,6 +185,7 @@ #define RT_USING_POSIX_MMAP #define RT_USING_POSIX_TERMIOS #define RT_USING_POSIX_AIO +/* RT_USING_MODULE is not set */ /* Network */ @@ -153,9 +202,13 @@ /* light weight TCP/IP stack */ #define RT_USING_LWIP +/* RT_USING_LWIP141 is not set */ #define RT_USING_LWIP202 +/* RT_USING_LWIP210 is not set */ #define RT_USING_LWIP_IPV6 +/* RT_LWIP_IGMP is not set */ #define RT_LWIP_ICMP +/* RT_LWIP_SNMP is not set */ #define RT_LWIP_DNS #define RT_LWIP_DHCP #define IP_SOF_BROADCAST 1 @@ -168,6 +221,8 @@ #define RT_LWIP_MSKADDR "255.255.255.0" #define RT_LWIP_UDP #define RT_LWIP_TCP +/* RT_LWIP_RAW is not set */ +/* RT_LWIP_PPP is not set */ #define RT_MEMP_NUM_NETCONN 8 #define RT_LWIP_PBUF_NUM 16 #define RT_LWIP_RAW_PCB_NUM 4 @@ -179,6 +234,8 @@ #define RT_LWIP_TCPTHREAD_PRIORITY 10 #define RT_LWIP_TCPTHREAD_MBOX_SIZE 8 #define RT_LWIP_TCPTHREAD_STACKSIZE 1024 +/* LWIP_NO_RX_THREAD is not set */ +/* LWIP_NO_TX_THREAD is not set */ #define RT_LWIP_ETHTHREAD_PRIORITY 12 #define RT_LWIP_ETHTHREAD_STACKSIZE 1024 #define RT_LWIP_ETHTHREAD_MBOX_SIZE 8 @@ -188,70 +245,141 @@ #define LWIP_SO_RCVTIMEO 1 #define LWIP_SO_SNDTIMEO 1 #define LWIP_SO_RCVBUF 1 +/* RT_LWIP_NETIF_LOOPBACK is not set */ #define LWIP_NETIF_LOOPBACK 0 +/* RT_LWIP_STATS is not set */ +/* RT_LWIP_DEBUG is not set */ /* Modbus master and slave stack */ +/* RT_USING_MODBUS is not set */ /* AT commands */ +/* RT_USING_AT is not set */ +/* LWIP_USING_DHCPD is not set */ /* VBUS(Virtual Software BUS) */ +/* 
RT_USING_VBUS is not set */ /* Utilities */ #define RT_USING_LOGTRACE #define LOG_TRACE_MAX_SESSION 16 +/* LOG_TRACE_USING_LEVEL_NOTRACE is not set */ +/* LOG_TRACE_USING_LEVEL_ERROR is not set */ +/* LOG_TRACE_USING_LEVEL_WARNING is not set */ #define LOG_TRACE_USING_LEVEL_INFO +/* LOG_TRACE_USING_LEVEL_VERBOSE is not set */ +/* LOG_TRACE_USING_LEVEL_DEBUG is not set */ +/* LOG_TRACE_USING_MEMLOG is not set */ +/* RT_USING_RYM is not set */ +/* RT_USING_ULOG is not set */ /* RT-Thread online packages */ /* IoT - internet of things */ +/* PKG_USING_PAHOMQTT is not set */ +/* PKG_USING_WEBCLIENT is not set */ +/* PKG_USING_MONGOOSE is not set */ +/* PKG_USING_WEBTERMINAL is not set */ +/* PKG_USING_CJSON is not set */ +/* PKG_USING_JSMN is not set */ +/* PKG_USING_LJSON is not set */ +/* PKG_USING_EZXML is not set */ +/* PKG_USING_NANOPB is not set */ /* Wi-Fi */ /* Marvell WiFi */ +/* PKG_USING_WLANMARVELL is not set */ /* Wiced WiFi */ +/* PKG_USING_WLAN_WICED is not set */ +/* PKG_USING_COAP is not set */ +/* PKG_USING_NOPOLL is not set */ +/* PKG_USING_NETUTILS is not set */ +/* PKG_USING_AT_DEVICE is not set */ /* IoT Cloud */ +/* PKG_USING_ONENET is not set */ +/* PKG_USING_GAGENT_CLOUD is not set */ +/* PKG_USING_ALI_IOTKIT is not set */ +/* PKG_USING_AZURE is not set */ /* security packages */ +/* PKG_USING_MBEDTLS is not set */ +/* PKG_USING_libsodium is not set */ +/* PKG_USING_TINYCRYPT is not set */ /* language packages */ +/* PKG_USING_LUA is not set */ +/* PKG_USING_JERRYSCRIPT is not set */ +/* PKG_USING_MICROPYTHON is not set */ /* multimedia packages */ +/* PKG_USING_OPENMV is not set */ +/* PKG_USING_MUPDF is not set */ /* tools packages */ +/* PKG_USING_CMBACKTRACE is not set */ +/* PKG_USING_EASYFLASH is not set */ +/* PKG_USING_EASYLOGGER is not set */ +/* PKG_USING_SYSTEMVIEW is not set */ /* system packages */ +/* PKG_USING_GUIENGINE is not set */ +/* PKG_USING_PERSIMMON is not set */ +/* PKG_USING_CAIRO is not set */ +/* PKG_USING_PIXMAN is not set */ +/* PKG_USING_LWEXT4 is not set */ +/* PKG_USING_PARTITION is not set */ +/* PKG_USING_FAL is not set */ +/* PKG_USING_SQLITE is not set */ +/* PKG_USING_RTI is not set */ +/* PKG_USING_LITTLEVGL2RTT is not set */ /* peripheral libraries and drivers */ +/* PKG_USING_STM32F4_HAL is not set */ +/* PKG_USING_STM32F4_DRIVERS is not set */ +/* PKG_USING_REALTEK_AMEBA is not set */ +/* PKG_USING_SHT2X is not set */ +/* PKG_USING_AHT10 is not set */ /* miscellaneous packages */ +/* PKG_USING_LIBCSV is not set */ +/* PKG_USING_OPTPARSE is not set */ +/* PKG_USING_FASTLZ is not set */ +/* PKG_USING_MINILZO is not set */ +/* PKG_USING_QUICKLZ is not set */ +/* PKG_USING_MULTIBUTTON is not set */ +/* PKG_USING_CANFESTIVAL is not set */ +/* PKG_USING_ZLIB is not set */ +/* PKG_USING_DSTR is not set */ /* sample package */ -/* samples: kernel and components samples */ - +/* PKG_USING_SAMPLES is not set */ /* example package: hello */ +/* PKG_USING_HELLO is not set */ #define SOC_VEXPRESS_A9 #define RT_USING_UART0 #define RT_USING_UART1 #define BSP_DRV_EMAC +/* BSP_DRV_AUDIO is not set */ #endif diff --git a/bsp/qemu-vexpress-a9/rtconfig.py b/bsp/qemu-vexpress-a9/rtconfig.py index 545ba76af9b81d3d0dfef413d130fe5b018ee38f..3f833d682da31f6cc1fe6b359f67c7206867550a 100644 --- a/bsp/qemu-vexpress-a9/rtconfig.py +++ b/bsp/qemu-vexpress-a9/rtconfig.py @@ -32,7 +32,7 @@ if PLATFORM == 'gcc': DEVICE = ' -march=armv7-a -marm -msoft-float' CFLAGS = DEVICE + ' -Wall' - AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__' + AFLAGS = 
' -c' + DEVICE + ' -x assembler-with-cpp -D__ASSEMBLY__ -I.' LINK_SCRIPT = 'link.lds' LFLAGS = DEVICE + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,system_vectors'+\ ' -T %s' % LINK_SCRIPT diff --git a/components/dfs/src/dfs_posix.c b/components/dfs/src/dfs_posix.c index 97531ad59cf35f8d443e040cb316aedda179668a..f47a60ee5dc34da490b16155143b43f326205b90 100644 --- a/components/dfs/src/dfs_posix.c +++ b/components/dfs/src/dfs_posix.c @@ -890,9 +890,9 @@ int access(const char *path, int amode) char *getcwd(char *buf, size_t size) { #ifdef DFS_USING_WORKDIR - rt_enter_critical(); + dfs_lock(); strncpy(buf, working_directory, size); - rt_exit_critical(); + dfs_unlock(); #else rt_kprintf(NO_WORKING_DIR); #endif diff --git a/components/finsh/cmd.c b/components/finsh/cmd.c index 41e8a01f41d1cc66d86de4f67dd60ac9139bb9f5..0c06303b1475c92292a5f6f9a915658d40440535 100644 --- a/components/finsh/cmd.c +++ b/components/finsh/cmd.c @@ -25,8 +25,10 @@ * 2012-06-02 lgnq add list_memheap * 2012-10-22 Bernard add MS VC++ patch. * 2016-06-02 armink beautify the list_thread command + * 2018-11-22 Jesven list_thread add smp support */ +#include #include #ifdef RT_USING_FINSH @@ -87,14 +89,26 @@ static long _list_thread(struct rt_list_node *list) maxlen = object_name_maxlen(item_title, list); +#ifdef RT_USING_SMP + rt_kprintf("%-*.s cpu pri status sp stack size max used left tick error\n", maxlen, item_title); object_split(maxlen); + rt_kprintf( " --- --- ------- ---------- ---------- ------ ---------- ---\n"); +#else rt_kprintf("%-*.s pri status sp stack size max used left tick error\n", maxlen, item_title); object_split(maxlen); rt_kprintf( " --- ------- ---------- ---------- ------ ---------- ---\n"); +#endif /*RT_USING_SMP*/ for (node = list->next; node != list; node = node->next) { rt_uint8_t stat; thread = rt_list_entry(node, struct rt_thread, list); - rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->name, thread->current_priority); +#ifdef RT_USING_SMP + if (thread->oncpu != RT_CPU_DETACHED) + rt_kprintf("%-*.*s %3d %3d ", maxlen, RT_NAME_MAX, thread->name, thread->oncpu, thread->current_priority); + else + rt_kprintf("%-*.*s N/A %3d ", maxlen, RT_NAME_MAX, thread->name, thread->current_priority); +#else + rt_kprintf("%-*.*s %3d ", maxlen, RT_NAME_MAX, thread->name, thread->current_priority); +#endif /*RT_USING_SMP*/ stat = (thread->stat & RT_THREAD_STAT_MASK); if (stat == RT_THREAD_READY) rt_kprintf(" ready "); else if (stat == RT_THREAD_SUSPEND) rt_kprintf(" suspend"); @@ -130,10 +144,15 @@ static long _list_thread(struct rt_list_node *list) long list_thread(void) { + rt_ubase_t level; struct rt_object_information *info; + long ret; + level = rt_hw_interrupt_disable(); info = rt_object_get_information(RT_Object_Class_Thread); - return _list_thread(&info->object_list); + ret = _list_thread(&info->object_list); + rt_hw_interrupt_enable(level); + return ret; } FINSH_FUNCTION_EXPORT(list_thread, list thread); MSH_CMD_EXPORT(list_thread, list thread); diff --git a/include/rtdef.h b/include/rtdef.h index fef08599c79b0199bfe01d639d086396f3c834ea..253ec369689282b42e990e25ec2392f7f8b8ad37 100644 --- a/include/rtdef.h +++ b/include/rtdef.h @@ -26,6 +26,9 @@ * 2018-09-14 Bernard apply Apache License v2.0 to RT-Thread Kernel * 2018-10-13 Bernard change version number to v4.0.0 * 2018-10-02 Bernard add 64bit arch support + * 2018-11-22 Jesven add smp member to struct rt_thread + * add struct rt_cpu + * add smp relevant macros */ #ifndef __RT_DEF_H__ @@ -130,11 +133,11 @@ typedef rt_base_t 
rt_off_t; /**< Type for offset */ #include #else /* the version of GNU GCC must be greater than 4.x */ - typedef __builtin_va_list __gnuc_va_list; - typedef __gnuc_va_list va_list; - #define va_start(v,l) __builtin_va_start(v,l) - #define va_end(v) __builtin_va_end(v) - #define va_arg(v,l) __builtin_va_arg(v,l) + typedef __builtin_va_list __gnuc_va_list; + typedef __gnuc_va_list va_list; + #define va_start(v,l) __builtin_va_start(v,l) + #define va_end(v) __builtin_va_end(v) + #define va_arg(v,l) __builtin_va_arg(v,l) #endif #define SECTION(x) __attribute__((section(x))) @@ -497,6 +500,41 @@ typedef siginfo_t rt_siginfo_t; #define RT_THREAD_CTRL_CLOSE 0x01 /**< Close thread. */ #define RT_THREAD_CTRL_CHANGE_PRIORITY 0x02 /**< Change thread priority. */ #define RT_THREAD_CTRL_INFO 0x03 /**< Get thread information. */ +#define RT_THREAD_CTRL_BIND_CPU 0x03 /**< Set thread bind cpu. */ + +#ifdef RT_USING_SMP + +#define RT_CPU_DETACHED RT_CPUS_NR /**< The thread not running on cpu. */ +#define RT_CPU_MASK ((1 << RT_CPUS_NR) - 1) /**< All CPUs mask bit. */ + +#ifndef RT_SCHEDULE_IPI_IRQ +#define RT_SCHEDULE_IPI_IRQ 0 +#endif + +/** + * CPUs definitions + * + */ +struct rt_cpu +{ + struct rt_thread *current_thread; + + rt_uint16_t irq_nest; + rt_uint8_t irq_switch_flag; + + rt_uint8_t current_priority; + rt_list_t priority_table[RT_THREAD_PRIORITY_MAX]; +#if RT_THREAD_PRIORITY_MAX > 32 + rt_uint32_t priority_group; + rt_uint8_t ready_table[32]; +#else + rt_uint32_t priority_group; +#endif + + rt_tick_t tick; +}; + +#endif /** * Thread structure @@ -527,6 +565,14 @@ struct rt_thread rt_uint8_t stat; /**< thread status */ +#ifdef RT_USING_SMP + rt_uint8_t bind_cpu; /**< thread is bind to cpu */ + rt_uint8_t oncpu; /**< process on cpu` */ + + rt_uint16_t scheduler_lock_nest; /**< scheduler lock count */ + rt_uint16_t cpus_lock_nest; /**< cpus lock count */ +#endif /*RT_USING_SMP*/ + /* priority */ rt_uint8_t current_priority; /**< current priority */ rt_uint8_t init_priority; /**< initialized priority */ diff --git a/include/rthw.h b/include/rthw.h index 17151de99a8292c49a642d381c87828063d5b5ae..3fa0ba301c3d44133126d9168aec4671ea6062b1 100644 --- a/include/rthw.h +++ b/include/rthw.h @@ -10,6 +10,8 @@ * 2006-09-24 Bernard add rt_hw_context_switch_to declaration * 2012-12-29 Bernard add rt_hw_exception_install declaration * 2017-10-17 Hichard add some micros + * 2018-11-17 Jesven add rt_hw_spinlock_t + * add smp support */ #ifndef __RT_HW_H__ @@ -92,15 +94,30 @@ rt_isr_handler_t rt_hw_interrupt_install(int vector, void *param, char *name); +#ifdef RT_USING_SMP +rt_base_t rt_hw_local_irq_disable(); +void rt_hw_local_irq_enable(rt_base_t level); + +#define rt_hw_interrupt_disable rt_cpus_lock +#define rt_hw_interrupt_enable rt_cpus_unlock + +#else rt_base_t rt_hw_interrupt_disable(void); void rt_hw_interrupt_enable(rt_base_t level); +#endif /*RT_USING_SMP*/ /* * Context interfaces */ +#ifdef RT_USING_SMP +void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread); +void rt_hw_context_switch_to(rt_ubase_t to, struct rt_thread *to_thread); +void rt_hw_context_switch_interrupt(void *context, rt_ubase_t from, rt_ubase_t to, struct rt_thread *to_thread); +#else void rt_hw_context_switch(rt_ubase_t from, rt_ubase_t to); void rt_hw_context_switch_to(rt_ubase_t to); void rt_hw_context_switch_interrupt(rt_ubase_t from, rt_ubase_t to); +#endif /*RT_USING_SMP*/ void rt_hw_console_output(const char *str); @@ -117,6 +134,47 @@ void rt_hw_exception_install(rt_err_t (*exception_handle)(void 
*context)); */ void rt_hw_us_delay(rt_uint32_t us); +#ifdef RT_USING_SMP +typedef union { + unsigned long slock; + struct __arch_tickets { + unsigned short owner; + unsigned short next; + } tickets; +} rt_hw_spinlock_t; + +void rt_hw_spin_lock(rt_hw_spinlock_t *lock); +void rt_hw_spin_unlock(rt_hw_spinlock_t *lock); + +int rt_hw_cpu_id(void); + +extern rt_hw_spinlock_t _cpus_lock; +extern rt_hw_spinlock_t _rt_critical_lock; + +#define __RT_HW_SPIN_LOCK_INITIALIZER(lockname) {0} + +#define __RT_HW_SPIN_LOCK_UNLOCKED(lockname) \ + (struct rt_hw_spinlock ) __RT_HW_SPIN_LOCK_INITIALIZER(lockname) + +#define RT_DEFINE_SPINLOCK(x) struct rt_hw_spinlock x = __RT_HW_SPIN_LOCK_UNLOCKED(x) + +/** + * ipi function + */ +void rt_hw_ipi_send(int ipi_vector, unsigned int cpu_mask); + +/** + * boot secondary cpu + */ +void rt_hw_secondary_cpu_up(void); + +/** + * secondary cpu idle function + */ +void rt_hw_secondary_cpu_idle_exec(void); + +#endif + #ifdef __cplusplus } #endif diff --git a/include/rtthread.h b/include/rtthread.h index 3fc77d343c4ff93fceba7f7549d22bbc04b3475c..d2356b8eda778f647d04992f10031f5474866966 100644 --- a/include/rtthread.h +++ b/include/rtthread.h @@ -13,6 +13,7 @@ * 2010-04-11 yi.qiu add module feature * 2013-06-24 Bernard add rt_kprintf re-define when not use RT_USING_CONSOLE. * 2016-08-09 ArdaFu add new thread and interrupt hook. + * 2018-11-22 Jesven add all cpu's lock and ipi handler */ #ifndef __RT_THREAD_H__ @@ -184,6 +185,10 @@ rt_uint16_t rt_critical_level(void); void rt_scheduler_sethook(void (*hook)(rt_thread_t from, rt_thread_t to)); #endif +#ifdef RT_USING_SMP +void rt_scheduler_ipi_handler(int vector, void *param); +#endif + /**@}*/ /** @@ -439,6 +444,20 @@ rt_err_t rt_device_control(rt_device_t dev, int cmd, void *arg); void rt_interrupt_enter(void); void rt_interrupt_leave(void); +#ifdef RT_USING_SMP + +/* + * smp cpus lock service + */ + +rt_base_t rt_cpus_lock(void); +void rt_cpus_unlock(rt_base_t level); + +struct rt_cpu *rt_cpu_self(void); +struct rt_cpu *rt_cpu_index(int index); + +#endif + /* * the number of nested interrupts. */ diff --git a/src/Kconfig b/src/Kconfig index 78a3bf6d67094b38d588e251fe3e66bda1cd4c26..bf0bd5a3fdfdb7d40f1b7e211f8862d99ddc878a 100644 --- a/src/Kconfig +++ b/src/Kconfig @@ -8,6 +8,22 @@ config RT_NAME_MAX Each kernel object, such as thread, timer, semaphore etc, has a name, the RT_NAME_MAX is the maximal size of this object name. +config RT_USING_SMP + bool "Enable SMP(Symmetric multiprocessing)" + default n + help + This option should be selected by machines which have an SMP- + capable CPU. + The only effect of this option is to make the SMP-related + options available to the user for configuration. + +config RT_CPUS_NR + int "Number of CPUs" + default 2 + depends on RT_USING_SMP + help + Number of CPUs in the system + config RT_ALIGN_SIZE int "Alignment size for CPU architecture data access" default 4 diff --git a/src/clock.c b/src/clock.c index 651465e0a86214088739e32af9f21ee8c4fd7d1a..1bedb4e06ad8c82a9daa9005c6fed5e9f4e35c39 100644 --- a/src/clock.c +++ b/src/clock.c @@ -12,12 +12,17 @@ * 2010-05-20 Bernard fix the tick exceeds the maximum limits * 2010-07-13 Bernard fix rt_tick_from_millisecond issue found by kuronca * 2011-06-26 Bernard add rt_tick_set function. 
+ * 2018-11-22 Jesven add per cpu tick */ #include #include +#ifdef RT_USING_SMP +#define rt_tick rt_cpu_index(0)->tick +#else static rt_tick_t rt_tick = 0; +#endif extern void rt_timer_check(void); @@ -71,7 +76,11 @@ void rt_tick_increase(void) struct rt_thread *thread; /* increase the global tick */ +#ifdef RT_USING_SMP + rt_cpu_self()->tick ++; +#else ++ rt_tick; +#endif /* check time slice */ thread = rt_thread_self(); diff --git a/src/components.c b/src/components.c index 68e5799f598795c1a9ce56f0594f593fcf1cbd5f..d77ef8300f43d4fd42f8b7019447c0a67aa34b37 100644 --- a/src/components.c +++ b/src/components.c @@ -14,6 +14,7 @@ * 2015-05-04 Bernard Rename it to components.c because compiling issue * in some IDEs. * 2015-07-29 Arda.Fu Add support to use RT_USING_USER_MAIN with IAR + * 2018-11-22 Jesven Add secondary cpu boot up */ #include @@ -180,6 +181,9 @@ void main_thread_entry(void *parameter) /* RT-Thread components initialization */ rt_components_init(); +#ifdef RT_USING_SMP + rt_hw_secondary_cpu_up(); +#endif /* invoke system main function */ #if defined(__CC_ARM) || defined(__CLANG_ARM) $Super$$main(); /* for ARMCC. */ @@ -243,6 +247,10 @@ int rtthread_startup(void) /* idle thread initialization */ rt_thread_idle_init(); +#ifdef RT_USING_SMP + rt_hw_spin_lock(&_cpus_lock); +#endif /*RT_USING_SMP*/ + /* start scheduler */ rt_system_scheduler_start(); diff --git a/src/cpu.c b/src/cpu.c new file mode 100644 index 0000000000000000000000000000000000000000..ef6dce1787ca359006bd939ab371564f7f0b37a2 --- /dev/null +++ b/src/cpu.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2006-2018, RT-Thread Development Team + * + * SPDX-License-Identifier: Apache-2.0 + * + * Change Logs: + * Date Author Notes + * 2018-10-30 Bernard The first version + */ + +#include +#include + +#ifdef RT_USING_SMP + +static struct rt_cpu rt_cpus[RT_CPUS_NR]; +rt_hw_spinlock_t _cpus_lock; + +/** + * This fucntion will return current cpu. + */ +struct rt_cpu *rt_cpu_self(void) +{ + return &rt_cpus[rt_hw_cpu_id()]; +} + +struct rt_cpu *rt_cpu_index(int index) +{ + return &rt_cpus[index]; +} + +/** + * This function will lock all cpus's scheduler and disable local irq. + */ +rt_base_t rt_cpus_lock(void) +{ + rt_base_t level; + struct rt_cpu* pcpu; + + level = rt_hw_local_irq_disable(); + + pcpu = rt_cpu_self(); + if (pcpu->current_thread != RT_NULL) + { + if (pcpu->current_thread->cpus_lock_nest++ == 0) + { + pcpu->current_thread->scheduler_lock_nest++; + rt_hw_spin_lock(&_cpus_lock); + } + } + return level; +} +RTM_EXPORT(rt_cpus_lock); + +/** + * This function will restore all cpus's scheduler and restore local irq. + */ +void rt_cpus_unlock(rt_base_t level) +{ + struct rt_cpu* pcpu = rt_cpu_self(); + + if (pcpu->current_thread != RT_NULL) + { + if (--pcpu->current_thread->cpus_lock_nest == 0) + { + pcpu->current_thread->scheduler_lock_nest--; + rt_hw_spin_unlock(&_cpus_lock); + } + } + rt_hw_local_irq_enable(level); +} +RTM_EXPORT(rt_cpus_unlock); + +/** + * This function is invoked by scheduler. + * It will restore the lock state to whatever the thread's counter expects. + * If target thread not locked the cpus then unlock the cpus lock. 
+ */ +void rt_cpus_lock_status_restore(struct rt_thread *thread) +{ + struct rt_cpu* pcpu = rt_cpu_self(); + + pcpu->current_thread = thread; + if (!thread->cpus_lock_nest) + { + rt_hw_spin_unlock(&_cpus_lock); + } +} +RTM_EXPORT(rt_post_switch); + +#endif diff --git a/src/idle.c b/src/idle.c index ec7d6bea6ead7f18d427d9132e4e8dec8aa9cfeb..d02b199f32fa3a8cbeb04daa99b8a6283917bad7 100644 --- a/src/idle.c +++ b/src/idle.c @@ -13,6 +13,8 @@ * 2016-08-09 ArdaFu add method to get the handler of the idle thread. * 2018-02-07 Bernard lock scheduler to protect tid->cleanup. * 2018-07-14 armink add idle hook list + * 2018-11-22 Jesven add per cpu idle task + * combine the code of primary and secondary cpu */ #include @@ -36,16 +38,21 @@ #endif #endif -static struct rt_thread idle; -ALIGN(RT_ALIGN_SIZE) -static rt_uint8_t rt_thread_stack[IDLE_THREAD_STACK_SIZE]; +#ifdef RT_USING_SMP +#define _CPUS_NR RT_CPUS_NR +#else +#define _CPUS_NR 1 +#endif extern rt_list_t rt_thread_defunct; -#ifdef RT_USING_IDLE_HOOK +static struct rt_thread idle[_CPUS_NR]; +ALIGN(RT_ALIGN_SIZE) +static rt_uint8_t rt_thread_stack[_CPUS_NR][IDLE_THREAD_STACK_SIZE]; +#ifdef RT_USING_IDLE_HOOK #ifndef RT_IDEL_HOOK_LIST_SIZE -#define RT_IDEL_HOOK_LIST_SIZE 4 +#define RT_IDEL_HOOK_LIST_SIZE 4 #endif static void (*idle_hook_list[RT_IDEL_HOOK_LIST_SIZE])(); @@ -221,14 +228,21 @@ void rt_thread_idle_excute(void) static void rt_thread_idle_entry(void *parameter) { -#ifdef RT_USING_IDLE_HOOK - rt_size_t i; +#ifdef RT_USING_SMP + if (rt_hw_cpu_id() != 0) + { + while (1) + { + rt_hw_secondary_cpu_idle_exec(); + } + } #endif while (1) { - #ifdef RT_USING_IDLE_HOOK + rt_size_t i; + for (i = 0; i < RT_IDEL_HOOK_LIST_SIZE; i++) { if (idle_hook_list[i] != RT_NULL) @@ -251,18 +265,26 @@ static void rt_thread_idle_entry(void *parameter) */ void rt_thread_idle_init(void) { - /* initialize thread */ - rt_thread_init(&idle, - "tidle", - rt_thread_idle_entry, - RT_NULL, - &rt_thread_stack[0], - sizeof(rt_thread_stack), - RT_THREAD_PRIORITY_MAX - 1, - 32); - - /* startup */ - rt_thread_startup(&idle); + int i; + char tidle_name[RT_NAME_MAX]; + + for (i = 0; i < _CPUS_NR; i++) + { + rt_sprintf(tidle_name, "tidle%d", i); + rt_thread_init(&idle[i], + tidle_name, + rt_thread_idle_entry, + RT_NULL, + &rt_thread_stack[i][0], + sizeof(rt_thread_stack[i]), + RT_THREAD_PRIORITY_MAX - 1, + 32); +#ifdef RT_USING_SMP + rt_thread_control(&idle[i], RT_THREAD_CTRL_BIND_CPU, (void*)i); +#endif + /* startup */ + rt_thread_startup(&idle[i]); + } } /** @@ -273,5 +295,11 @@ void rt_thread_idle_init(void) */ rt_thread_t rt_thread_idle_gethandler(void) { - return (rt_thread_t)(&idle); +#ifdef RT_USING_SMP + register int id = rt_hw_cpu_id(); +#else + register int id = 0; +#endif + + return (rt_thread_t)(&idle[id]); } diff --git a/src/irq.c b/src/irq.c index 72d220bd92d2687b61edd26adcfc1b3d76f4ca86..ee032048198d7a814cc6695db03d1717fab9dba3 100644 --- a/src/irq.c +++ b/src/irq.c @@ -8,6 +8,7 @@ * 2006-02-24 Bernard first version * 2006-05-03 Bernard add IRQ_DEBUG * 2016-08-09 ArdaFu add interrupt enter and leave hook. 
+ * 2018-11-22 Jesven rt_interrupt_get_nest function add disable irq */ #include @@ -48,7 +49,11 @@ void rt_interrupt_leave_sethook(void (*hook)(void)) /**@{*/ +#ifdef RT_USING_SMP +#define rt_interrupt_nest rt_cpu_self()->irq_nest +#else volatile rt_uint8_t rt_interrupt_nest; +#endif /** * This function will be invoked by BSP, when enter interrupt service routine @@ -102,7 +107,13 @@ RTM_EXPORT(rt_interrupt_leave); */ rt_uint8_t rt_interrupt_get_nest(void) { - return rt_interrupt_nest; + rt_uint8_t ret; + rt_base_t level; + + level = rt_hw_interrupt_disable(); + ret = rt_interrupt_nest; + rt_hw_interrupt_enable(level); + return ret; } RTM_EXPORT(rt_interrupt_get_nest); diff --git a/src/scheduler.c b/src/scheduler.c index fd068a6671a98fae0208e1be5f4d4b794b4c0940..e177bde6b7ae5ac77c92a8bca85068778cf15265 100644 --- a/src/scheduler.c +++ b/src/scheduler.c @@ -21,28 +21,36 @@ * 2010-12-13 Bernard add defunct list initialization even if not use heap. * 2011-05-10 Bernard clean scheduler debug log. * 2013-12-21 Grissiom add rt_critical_level + * 2018-11-22 Jesven remove the current task from ready queue + * add per cpu ready queue + * add _get_highest_priority_thread to find highest priority task + * rt_schedule_insert_thread won't insert current task to ready queue + * in smp version, rt_hw_context_switch_interrupt maybe switch to + * new task directly + * */ #include #include -static rt_int16_t rt_scheduler_lock_nest; -extern volatile rt_uint8_t rt_interrupt_nest; +#ifdef RT_USING_SMP +rt_hw_spinlock_t _rt_critical_lock; +#endif /*RT_USING_SMP*/ rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX]; -struct rt_thread *rt_current_thread; - -rt_uint8_t rt_current_priority; - +rt_uint32_t rt_thread_ready_priority_group; #if RT_THREAD_PRIORITY_MAX > 32 /* Maximum priority level, 256 */ -rt_uint32_t rt_thread_ready_priority_group; rt_uint8_t rt_thread_ready_table[32]; -#else -/* Maximum priority level, 32 */ -rt_uint32_t rt_thread_ready_priority_group; #endif +#ifndef RT_USING_SMP +extern volatile rt_uint8_t rt_interrupt_nest; +static rt_int16_t rt_scheduler_lock_nest; +struct rt_thread *rt_current_thread; +rt_uint8_t rt_current_priority; +#endif /*RT_USING_SMP*/ + rt_list_t rt_thread_defunct; #ifdef RT_USING_HOOK @@ -91,19 +99,84 @@ static void _rt_scheduler_stack_check(struct rt_thread *thread) level = rt_hw_interrupt_disable(); while (level); } -#if defined(ARCH_CPU_STACK_GROWS_UPWARD) - else if ((rt_ubase_t)thread->sp > ((rt_ubase_t)thread->stack_addr + thread->stack_size)) + else if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32)) { - rt_kprintf("warning: %s stack is close to the top of stack address.\n", + rt_kprintf("warning: %s stack is close to end of stack address.\n", thread->name); } +} +#endif + +/* + * get the highest priority thread in ready queue + */ +#ifdef RT_USING_SMP +static struct rt_thread* _get_highest_priority_thread(rt_ubase_t *highest_prio) +{ + register struct rt_thread *highest_priority_thread; + register rt_ubase_t highest_ready_priority, local_highest_ready_priority; + struct rt_cpu* pcpu = rt_cpu_self(); + +#if RT_THREAD_PRIORITY_MAX > 32 + register rt_ubase_t number; + + if (rt_thread_ready_priority_group == 0 && pcpu->priority_group == 0) + { + *highest_prio = pcpu->current_thread->current_priority; + /* only local IDLE is readly */ + return pcpu->current_thread; + } + + number = __rt_ffs(rt_thread_ready_priority_group) - 1; + highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1; + number = 
__rt_ffs(pcpu->priority_group) - 1; + local_highest_ready_priority = (number << 3) + __rt_ffs(pcpu->ready_table[number]) - 1; #else - else if ((rt_ubase_t)thread->sp <= ((rt_ubase_t)thread->stack_addr + 32)) + highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1; + local_highest_ready_priority = __rt_ffs(pcpu->priority_group) - 1; +#endif + + /* get highest ready priority thread */ + if (highest_ready_priority < local_highest_ready_priority) { - rt_kprintf("warning: %s stack is close to the bottom of stack address.\n", - thread->name); + *highest_prio = highest_ready_priority; + highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next, + struct rt_thread, + tlist); + } + else + { + *highest_prio = local_highest_ready_priority; + highest_priority_thread = rt_list_entry(pcpu->priority_table[local_highest_ready_priority].next, + struct rt_thread, + tlist); } + + return highest_priority_thread; +} +#else +static struct rt_thread* _get_highest_priority_thread(rt_ubase_t *highest_prio) +{ + register struct rt_thread *highest_priority_thread; + register rt_ubase_t highest_ready_priority; + +#if RT_THREAD_PRIORITY_MAX > 32 + register rt_ubase_t number; + + number = __rt_ffs(rt_thread_ready_priority_group) - 1; + highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1; +#else + highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1; #endif + + /* get highest ready priority thread */ + highest_priority_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next, + struct rt_thread, + tlist); + + *highest_prio = highest_ready_priority; + + return highest_priority_thread; } #endif @@ -113,9 +186,14 @@ static void _rt_scheduler_stack_check(struct rt_thread *thread) */ void rt_system_scheduler_init(void) { +#ifdef RT_USING_SMP + int cpu; +#endif /*RT_USING_SMP*/ register rt_base_t offset; +#ifndef RT_USING_SMP rt_scheduler_lock_nest = 0; +#endif /*RT_USING_SMP*/ RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("start scheduler: max priority 0x%02x\n", RT_THREAD_PRIORITY_MAX)); @@ -124,9 +202,25 @@ void rt_system_scheduler_init(void) { rt_list_init(&rt_thread_priority_table[offset]); } +#ifdef RT_USING_SMP + for (cpu = 0; cpu < RT_CPUS_NR; cpu++) + { + struct rt_cpu *pcpu = rt_cpu_index(cpu); + for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++) + { + rt_list_init(&pcpu->priority_table[offset]); + } - rt_current_priority = RT_THREAD_PRIORITY_MAX - 1; - rt_current_thread = RT_NULL; + pcpu->irq_switch_flag = 0; + pcpu->current_priority = RT_THREAD_PRIORITY_MAX - 1; + pcpu->current_thread = RT_NULL; + pcpu->priority_group = 0; + +#if RT_THREAD_PRIORITY_MAX > 32 + rt_memset(pcpu->ready_table, 0, sizeof(pcpu->ready_table)); +#endif + } +#endif /*RT_USING_SMP*/ /* initialize ready priority group */ rt_thread_ready_priority_group = 0; @@ -148,26 +242,24 @@ void rt_system_scheduler_init(void) void rt_system_scheduler_start(void) { register struct rt_thread *to_thread; - register rt_ubase_t highest_ready_priority; + rt_ubase_t highest_ready_priority; -#if RT_THREAD_PRIORITY_MAX > 32 - register rt_ubase_t number; + to_thread = _get_highest_priority_thread(&highest_ready_priority); - number = __rt_ffs(rt_thread_ready_priority_group) - 1; - highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1; +#ifdef RT_USING_SMP + to_thread->oncpu = rt_hw_cpu_id(); #else - highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1; -#endif - - /* get switch to thread */ - to_thread 
= rt_list_entry(rt_thread_priority_table[highest_ready_priority].next, - struct rt_thread, - tlist); - rt_current_thread = to_thread; +#endif /*RT_USING_SMP*/ + + rt_schedule_remove_thread(to_thread); /* switch to new thread */ - rt_hw_context_switch_to((rt_ubase_t)&to_thread->sp); +#ifdef RT_USING_SMP + rt_hw_context_switch_to((rt_uint32_t)&to_thread->sp, to_thread); +#else + rt_hw_context_switch_to((rt_uint32_t)&to_thread->sp); +#endif /*RT_USING_SMP*/ /* never come back */ } @@ -178,6 +270,114 @@ void rt_system_scheduler_start(void) /**@{*/ + +#ifdef RT_USING_SMP +/** + * This function will handle the IPI interrupt and do a round of scheduling in the system. + * + * @param vector the number of the IPI interrupt for system scheduling + * @param param unused, pass RT_NULL + * + * NOTE: this function should be invoked or registered as an ISR in the BSP. + */ +void rt_scheduler_ipi_handler(int vector, void *param) +{ + rt_schedule(); +} + +/** + * This function will perform one round of scheduling. It will select one thread + * with the highest priority level in the global ready queue or the local ready queue, + * then switch to it. + */ +void rt_schedule(void) +{ + rt_base_t level; + struct rt_thread *to_thread; + struct rt_thread *current_thread; + struct rt_cpu *pcpu; + int cpu_id; + + /* disable interrupt */ + level = rt_hw_interrupt_disable(); + + cpu_id = rt_hw_cpu_id(); + pcpu = rt_cpu_index(cpu_id); + current_thread = pcpu->current_thread; + + /* whether do switch in interrupt */ + if (pcpu->irq_nest) + { + pcpu->irq_switch_flag = 1; + } + else if (current_thread->scheduler_lock_nest == 1) /* whether lock scheduler */ + { + rt_ubase_t highest_ready_priority; + + if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0) + { + to_thread = _get_highest_priority_thread(&highest_ready_priority); + current_thread->oncpu = RT_CPU_DETACHED; + if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY) + { + if (current_thread->current_priority < highest_ready_priority) + { + to_thread = current_thread; + } + else + { + rt_schedule_insert_thread(current_thread); + } + } + to_thread->oncpu = cpu_id; + if (to_thread != current_thread) + { + /* if the destination thread is not the same as current thread */ + pcpu->current_priority = (rt_uint8_t)highest_ready_priority; + + RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread)); + + rt_schedule_remove_thread(to_thread); + + /* switch to new thread */ + RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, + ("[%d]switch to priority#%d " + "thread:%.*s(sp:0x%08x), " + "from thread:%.*s(sp: 0x%08x)\n", + pcpu->irq_nest, highest_ready_priority, + RT_NAME_MAX, to_thread->name, to_thread->sp, + RT_NAME_MAX, current_thread->name, current_thread->sp)); + +#ifdef RT_USING_OVERFLOW_CHECK + _rt_scheduler_stack_check(to_thread); +#endif + + { + extern void rt_thread_handle_sig(rt_bool_t clean_state); + + rt_hw_context_switch((rt_ubase_t)&current_thread->sp, + (rt_ubase_t)&to_thread->sp, to_thread); + + /* enable interrupt */ + rt_hw_interrupt_enable(level); + +#ifdef RT_USING_SIGNALS + /* check signal status */ + rt_thread_handle_sig(RT_TRUE); +#endif + goto __exit; + } + } + } + } + + /* enable interrupt */ + rt_hw_interrupt_enable(level); + +__exit: + return ; +} +#else /** * This function will perform one schedule. It will select one thread * with the highest priority level, then switch to it.
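
For readers unfamiliar with the ready-queue lookup used by _get_highest_priority_thread() above: with more than 32 priority levels, the scheduler keeps a 32-bit group bitmap plus a per-group byte table, and the lowest set bit marks the highest-priority (numerically smallest) ready level, found as (group << 3) + ffs(table[group]) - 1. The self-contained program below models that computation on the host with a local 1-based ffs helper matching the __rt_ffs() convention; it is an illustration, not RT-Thread code.

    #include <stdio.h>
    #include <stdint.h>

    /* lowest-set-bit finder, 1-based like __rt_ffs(); returns 0 if no bit is set */
    static int ffs32(uint32_t value)
    {
        int position = 1;

        if (value == 0)
            return 0;
        while ((value & 1) == 0)
        {
            value >>= 1;
            position++;
        }
        return position;
    }

    int main(void)
    {
        uint32_t ready_priority_group = 0;  /* one bit per group of 8 priorities */
        uint8_t  ready_table[32] = { 0 };   /* one bit per priority inside a group */
        int prios[] = { 200, 21 };          /* mark two priorities as ready */
        int i;

        for (i = 0; i < 2; i++)
        {
            int p = prios[i];

            ready_table[p >> 3] |= (uint8_t)(1 << (p & 7));
            ready_priority_group |= 1u << (p >> 3);
        }

        /* same two-step lookup as _get_highest_priority_thread() */
        int group   = ffs32(ready_priority_group) - 1;
        int highest = (group << 3) + ffs32(ready_table[group]) - 1;

        printf("highest ready priority: %d\n", highest);  /* prints 21 */
        return 0;
    }
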
@@ -194,81 +394,169 @@ void rt_schedule(void) /* check the scheduler is enabled or not */ if (rt_scheduler_lock_nest == 0) { - register rt_ubase_t highest_ready_priority; + rt_ubase_t highest_ready_priority; -#if RT_THREAD_PRIORITY_MAX <= 32 - highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1; -#else - register rt_ubase_t number; + if (rt_thread_ready_priority_group != 0) + { + int need_insert_from_thread = 0; - number = __rt_ffs(rt_thread_ready_priority_group) - 1; - highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1; -#endif + to_thread = _get_highest_priority_thread(&highest_ready_priority); - /* get switch to thread */ - to_thread = rt_list_entry(rt_thread_priority_table[highest_ready_priority].next, - struct rt_thread, - tlist); + if ((rt_current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY) + { + if (rt_current_thread->current_priority < highest_ready_priority) + { + to_thread = rt_current_thread; + } + else + { + need_insert_from_thread = 1; + } + } - /* if the destination thread is not the same as current thread */ - if (to_thread != rt_current_thread) - { - rt_current_priority = (rt_uint8_t)highest_ready_priority; - from_thread = rt_current_thread; - rt_current_thread = to_thread; + if (to_thread != rt_current_thread) + { + /* if the destination thread is not the same as current thread */ + rt_current_priority = (rt_uint8_t)highest_ready_priority; + from_thread = rt_current_thread; + rt_current_thread = to_thread; + + RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread)); - RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread)); + if (need_insert_from_thread) + { + rt_schedule_insert_thread(from_thread); + } - /* switch to new thread */ - RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, - ("[%d]switch to priority#%d " - "thread:%.*s(sp:0x%08x), " - "from thread:%.*s(sp: 0x%08x)\n", - rt_interrupt_nest, highest_ready_priority, - RT_NAME_MAX, to_thread->name, to_thread->sp, - RT_NAME_MAX, from_thread->name, from_thread->sp)); + rt_schedule_remove_thread(to_thread); + + /* switch to new thread */ + RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, + ("[%d]switch to priority#%d " + "thread:%.*s(sp:0x%p), " + "from thread:%.*s(sp: 0x%p)\n", + rt_interrupt_nest, highest_ready_priority, + RT_NAME_MAX, to_thread->name, to_thread->sp, + RT_NAME_MAX, from_thread->name, from_thread->sp)); #ifdef RT_USING_OVERFLOW_CHECK - _rt_scheduler_stack_check(to_thread); + _rt_scheduler_stack_check(to_thread); #endif - if (rt_interrupt_nest == 0) - { - extern void rt_thread_handle_sig(rt_bool_t clean_state); + if (rt_interrupt_nest == 0) + { + extern void rt_thread_handle_sig(rt_bool_t clean_state); - rt_hw_context_switch((rt_ubase_t)&from_thread->sp, - (rt_ubase_t)&to_thread->sp); + rt_hw_context_switch((rt_ubase_t)&from_thread->sp, + (rt_ubase_t)&to_thread->sp); - /* enable interrupt */ - rt_hw_interrupt_enable(level); + /* enable interrupt */ + rt_hw_interrupt_enable(level); #ifdef RT_USING_SIGNALS - /* check signal status */ - rt_thread_handle_sig(RT_TRUE); + /* check signal status */ + rt_thread_handle_sig(RT_TRUE); #endif + goto __exit; + } + else + { + RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n")); + + rt_hw_context_switch_interrupt((rt_ubase_t)&from_thread->sp, + (rt_ubase_t)&to_thread->sp); + } } else { - RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n")); - - rt_hw_context_switch_interrupt((rt_ubase_t)&from_thread->sp, - (rt_ubase_t)&to_thread->sp); - /* enable interrupt */ - rt_hw_interrupt_enable(level); + 
rt_schedule_remove_thread(rt_current_thread); } } - else - { - /* enable interrupt */ - rt_hw_interrupt_enable(level); - } } - else + + /* enable interrupt */ + rt_hw_interrupt_enable(level); + +__exit: + return; +} +#endif /*RT_USING_SMP*/ + +/** + * This function checks if a scheduling is needed after IRQ context. If yes, + * it will select one thread with the highest priority level, and then switch + * to it. + */ +#ifdef RT_USING_SMP +void rt_scheduler_do_irq_switch(void *context) +{ + int cpu_id; + rt_base_t level; + struct rt_cpu* pcpu; + struct rt_thread *to_thread; + struct rt_thread *current_thread; + + level = rt_hw_interrupt_disable(); + + cpu_id = rt_hw_cpu_id(); + pcpu = rt_cpu_index(cpu_id); + current_thread = pcpu->current_thread; + + if (pcpu->irq_switch_flag == 0) { - /* enable interrupt */ rt_hw_interrupt_enable(level); + return; + } + + if (current_thread->scheduler_lock_nest == 1 && pcpu->irq_nest == 0) + { + rt_ubase_t highest_ready_priority; + + /* clear irq switch flag */ + pcpu->irq_switch_flag = 0; + + if (rt_thread_ready_priority_group != 0 || pcpu->priority_group != 0) + { + to_thread = _get_highest_priority_thread(&highest_ready_priority); + current_thread->oncpu = RT_CPU_DETACHED; + if ((current_thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY) + { + if (current_thread->current_priority < highest_ready_priority) + { + to_thread = current_thread; + } + else + { + rt_schedule_insert_thread(current_thread); + } + } + to_thread->oncpu = cpu_id; + if (to_thread != current_thread) + { + /* if the destination thread is not the same as current thread */ + + pcpu->current_priority = (rt_uint8_t)highest_ready_priority; + + RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (current_thread, to_thread)); + + rt_schedule_remove_thread(to_thread); + +#ifdef RT_USING_OVERFLOW_CHECK + _rt_scheduler_stack_check(to_thread); +#endif + RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("switch in interrupt\n")); + + current_thread->cpus_lock_nest--; + current_thread->scheduler_lock_nest--; + + rt_hw_context_switch_interrupt(context, (rt_ubase_t)&current_thread->sp, + (rt_ubase_t)&to_thread->sp, to_thread); + } + } } + rt_hw_interrupt_enable(level); } +#endif /*RT_USING_SMP*/ /* * This function will insert a thread to system ready queue. The state of @@ -277,6 +565,70 @@ void rt_schedule(void) * @param thread the thread to be inserted * @note Please do not invoke this function in user application.
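
One practical note on the two functions above: when rt_schedule() is entered inside an interrupt (for example from the scheduling IPI), it only sets pcpu->irq_switch_flag, and rt_scheduler_do_irq_switch() performs the real switch on the way out of the IRQ. The sketch below shows how a BSP might wire the IPI to the kernel handler; rt_hw_interrupt_install()/rt_hw_interrupt_umask() are the usual BSP helpers and RT_SCHEDULE_IPI_IRQ is the IPI number this patch already uses in rt_schedule_insert_thread(), but the exact names are BSP-specific, so treat this as an assumption rather than part of the patch.

    #include <rthw.h>
    #include <rtthread.h>

    /* assumed BSP init hook: route the scheduling IPI to the kernel handler */
    void bsp_install_schedule_ipi(void)
    {
        rt_hw_interrupt_install(RT_SCHEDULE_IPI_IRQ, rt_scheduler_ipi_handler,
                                RT_NULL, "sched_ipi");
        rt_hw_interrupt_umask(RT_SCHEDULE_IPI_IRQ);
    }
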
*/ +#ifdef RT_USING_SMP +void rt_schedule_insert_thread(struct rt_thread *thread) +{ + int cpu_id; + int bind_cpu; + rt_uint32_t cpu_mask; + register rt_base_t level; + + RT_ASSERT(thread != RT_NULL); + + /* disable interrupt */ + level = rt_hw_interrupt_disable(); + + /* change stat */ + thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK); + + if (thread->oncpu != RT_CPU_DETACHED) + { + goto __exit; + } + + cpu_id = rt_hw_cpu_id(); + bind_cpu = thread->bind_cpu ; + + /* insert thread to ready list */ + if (bind_cpu == RT_CPUS_NR) + { +#if RT_THREAD_PRIORITY_MAX > 32 + rt_thread_ready_table[thread->number] |= thread->high_mask; +#endif + rt_thread_ready_priority_group |= thread->number_mask; + + rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]), + &(thread->tlist)); + cpu_mask = RT_CPU_MASK ^ (1 << cpu_id); + rt_hw_ipi_send(RT_SCHEDULE_IPI_IRQ, cpu_mask); + } + else + { + struct rt_cpu *pcpu = rt_cpu_index(bind_cpu); + +#if RT_THREAD_PRIORITY_MAX > 32 + pcpu->ready_table[thread->number] |= thread->high_mask; +#endif + pcpu->priority_group |= thread->number_mask; + + rt_list_insert_before(&(rt_cpu_index(bind_cpu)->priority_table[thread->current_priority]), + &(thread->tlist)); + + if (cpu_id != bind_cpu) + { + cpu_mask = 1 << bind_cpu; + rt_hw_ipi_send(RT_SCHEDULE_IPI_IRQ, cpu_mask); + } + } + + RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n", + RT_NAME_MAX, thread->name, thread->current_priority)); + +__exit: + /* enable interrupt */ + rt_hw_interrupt_enable(level); +} +#else void rt_schedule_insert_thread(struct rt_thread *thread) { register rt_base_t temp; @@ -289,32 +641,29 @@ void rt_schedule_insert_thread(struct rt_thread *thread) /* change stat */ thread->stat = RT_THREAD_READY | (thread->stat & ~RT_THREAD_STAT_MASK); + if (thread == rt_current_thread) + { + goto __exit; + } + /* insert thread to ready list */ rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]), &(thread->tlist)); - /* set priority mask */ -#if RT_THREAD_PRIORITY_MAX <= 32 RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("insert thread[%.*s], the priority: %d\n", RT_NAME_MAX, thread->name, thread->current_priority)); -#else - RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, - ("insert thread[%.*s], the priority: %d 0x%x %d\n", - RT_NAME_MAX, - thread->name, - thread->number, - thread->number_mask, - thread->high_mask)); -#endif + /* set priority mask */ #if RT_THREAD_PRIORITY_MAX > 32 rt_thread_ready_table[thread->number] |= thread->high_mask; #endif rt_thread_ready_priority_group |= thread->number_mask; +__exit: /* enable interrupt */ rt_hw_interrupt_enable(temp); } +#endif /*RT_USING_SMP*/ /* * This function will remove a thread from system ready queue. @@ -323,28 +672,71 @@ void rt_schedule_insert_thread(struct rt_thread *thread) * * @note Please do not invoke this function in user application. 
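
The IPI targeting in the SMP rt_schedule_insert_thread() above is easy to misread: an unbound thread (bind_cpu == RT_CPUS_NR) lands on the global queue and every other CPU is poked with RT_CPU_MASK ^ (1 << cpu_id), while a bound thread lands on that CPU's local queue and only gets an IPI when it was readied from a different CPU. A tiny host-side model of the mask arithmetic, assuming four CPUs for illustration (the qemu-vexpress-a9 configuration in this patch uses two):

    #include <stdio.h>

    #define CPUS_NR  4
    #define CPU_MASK ((1u << CPUS_NR) - 1)       /* 0xf: one bit per CPU */

    int main(void)
    {
        unsigned current_cpu = 0;                /* CPU doing the insert */
        unsigned bind_cpu    = 2;                /* target of a bound thread */

        /* global-queue insert: kick everyone except ourselves */
        printf("broadcast mask: 0x%x\n", CPU_MASK ^ (1u << current_cpu)); /* 0xe */

        /* bound insert issued from another CPU: kick only the bound CPU */
        printf("targeted mask:  0x%x\n", 1u << bind_cpu);                 /* 0x4 */
        return 0;
    }
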
*/ +#ifdef RT_USING_SMP void rt_schedule_remove_thread(struct rt_thread *thread) { - register rt_base_t temp; + register rt_base_t level; RT_ASSERT(thread != RT_NULL); /* disable interrupt */ - temp = rt_hw_interrupt_disable(); + level = rt_hw_interrupt_disable(); -#if RT_THREAD_PRIORITY_MAX <= 32 RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n", RT_NAME_MAX, thread->name, thread->current_priority)); + + /* remove thread from ready list */ + rt_list_remove(&(thread->tlist)); + if (thread->bind_cpu == RT_CPUS_NR) + { + if (rt_list_isempty(&(rt_thread_priority_table[thread->current_priority]))) + { +#if RT_THREAD_PRIORITY_MAX > 32 + rt_thread_ready_table[thread->number] &= ~thread->high_mask; + if (rt_thread_ready_table[thread->number] == 0) + { + rt_thread_ready_priority_group &= ~thread->number_mask; + } +#else + rt_thread_ready_priority_group &= ~thread->number_mask; +#endif + } + } + else + { + struct rt_cpu *pcpu = rt_cpu_index(thread->bind_cpu); + + if (rt_list_isempty(&(pcpu->priority_table[thread->current_priority]))) + { +#if RT_THREAD_PRIORITY_MAX > 32 + pcpu->ready_table[thread->number] &= ~thread->high_mask; + if (rt_thread_ready_table[thread->number] == 0) + { + pcpu->priority_group &= ~thread->number_mask; + } #else - RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, - ("remove thread[%.*s], the priority: %d 0x%x %d\n", - RT_NAME_MAX, - thread->name, - thread->number, - thread->number_mask, - thread->high_mask)); + pcpu->priority_group &= ~thread->number_mask; #endif + } + } + + /* enable interrupt */ + rt_hw_interrupt_enable(level); +} +#else +void rt_schedule_remove_thread(struct rt_thread *thread) +{ + register rt_base_t level; + + RT_ASSERT(thread != RT_NULL); + + /* disable interrupt */ + level = rt_hw_interrupt_disable(); + + RT_DEBUG_LOG(RT_DEBUG_SCHEDULER, ("remove thread[%.*s], the priority: %d\n", + RT_NAME_MAX, thread->name, + thread->current_priority)); /* remove thread from ready list */ rt_list_remove(&(thread->tlist)); @@ -362,12 +754,41 @@ void rt_schedule_remove_thread(struct rt_thread *thread) } /* enable interrupt */ - rt_hw_interrupt_enable(temp); + rt_hw_interrupt_enable(level); } +#endif /*RT_USING_SMP*/ /** * This function will lock the thread scheduler. */ +#ifdef RT_USING_SMP +void rt_enter_critical(void) +{ + register rt_base_t level; + struct rt_thread *current_thread; + + /* disable interrupt */ + level = rt_hw_local_irq_disable(); + + current_thread = rt_cpu_self()->current_thread; + /* + * the maximal number of nest is RT_UINT16_MAX, which is big + * enough and does not check here + */ + + /* lock scheduler for all cpus */ + if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest) + { + rt_hw_spin_lock(&_rt_critical_lock); + } + + /* lock scheduler for local cpu */ + current_thread->scheduler_lock_nest ++; + + /* enable interrupt */ + rt_hw_local_irq_enable(level); +} +#else void rt_enter_critical(void) { register rt_base_t level; @@ -384,11 +805,45 @@ void rt_enter_critical(void) /* enable interrupt */ rt_hw_interrupt_enable(level); } +#endif /*RT_USING_SMP*/ RTM_EXPORT(rt_enter_critical); /** * This function will unlock the thread scheduler. 
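
Because the SMP rt_enter_critical() above takes the global _rt_critical_lock on the first nesting level and holds it until the matching rt_exit_critical(), two CPUs can no longer sit inside scheduler-locked sections at the same time; interrupt handlers still need their own protection. The calling convention is unchanged, so keep such sections short. A minimal sketch, with hypothetical list and function names:

    #include <rtthread.h>

    static rt_list_t pending_jobs;

    void jobs_init(void)
    {
        rt_list_init(&pending_jobs);
    }

    void queue_job(rt_list_t *job_node)
    {
        /* scheduler locked on all CPUs: no other thread can execute this
         * section concurrently (ISRs would still need their own guard) */
        rt_enter_critical();
        rt_list_insert_after(&pending_jobs, job_node);
        rt_exit_critical();
    }
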
*/ +#ifdef RT_USING_SMP +void rt_exit_critical(void) +{ + register rt_base_t level; + struct rt_thread *current_thread; + + /* disable interrupt */ + level = rt_hw_local_irq_disable(); + + current_thread = rt_cpu_self()->current_thread; + + current_thread->scheduler_lock_nest --; + + if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest) + { + rt_hw_spin_unlock(&_rt_critical_lock); + } + + if (current_thread->scheduler_lock_nest <= 0) + { + current_thread->scheduler_lock_nest = 0; + /* enable interrupt */ + rt_hw_local_irq_enable(level); + + rt_schedule(); + } + else + { + /* enable interrupt */ + rt_hw_local_irq_enable(level); + } +} +#else void rt_exit_critical(void) { register rt_base_t level; @@ -412,6 +867,7 @@ void rt_exit_critical(void) rt_hw_interrupt_enable(level); } } +#endif /*RT_USING_SMP*/ RTM_EXPORT(rt_exit_critical); /** @@ -421,8 +877,14 @@ RTM_EXPORT(rt_exit_critical); */ rt_uint16_t rt_critical_level(void) { - return rt_scheduler_lock_nest; +#ifdef RT_USING_SMP + struct rt_thread *current_thread = rt_cpu_self()->current_thread; + + return current_thread->scheduler_lock_nest; +#else + return rt_scheduler_lock_nest; +#endif /*RT_USING_SMP*/ } RTM_EXPORT(rt_critical_level); -/**@}*/ +/**@}*/ diff --git a/src/signal.c b/src/signal.c index 890f16f497d0c0152d79be8ca462f45dcc97ca5d..05632b2c4a387eb3441b1decc8957634ad846d24 100644 --- a/src/signal.c +++ b/src/signal.c @@ -6,6 +6,8 @@ * Change Logs: * Date Author Notes * 2017/10/5 Bernard the first version + * 2018/09/17 Jesven fix: in _signal_deliver RT_THREAD_STAT_MASK to RT_THREAD_STAT_SIGNAL_MASK + * 2018/11/22 Jesven in smp version rt_hw_context_switch_to add a param */ #include @@ -60,10 +62,14 @@ static void _signal_entry(void *parameter) tid->sp = tid->sig_ret; tid->sig_ret = RT_NULL; - LOG_D("switch back to: 0x%08x", tid->sp); + LOG_D("switch back to: 0x%08x\n", tid->sp); tid->stat &= ~RT_THREAD_STAT_SIGNAL; - rt_hw_context_switch_to((rt_uint32_t) & (tid->sp)); +#ifdef RT_USING_SMP + rt_hw_context_switch_to((rt_ubase_t)&(tid->sp), tid); +#else + rt_hw_context_switch_to((rt_ubase_t)&(tid->sp)); +#endif /*RT_USING_SMP*/ } /* @@ -108,7 +114,7 @@ static void _signal_deliver(rt_thread_t tid) /* do signal action in self thread context */ rt_thread_handle_sig(RT_TRUE); } - else if (!((tid->stat & RT_THREAD_STAT_MASK) & RT_THREAD_STAT_SIGNAL)) + else if (!((tid->stat & RT_THREAD_STAT_SIGNAL_MASK) & RT_THREAD_STAT_SIGNAL)) { /* add signal state */ tid->stat |= RT_THREAD_STAT_SIGNAL; diff --git a/src/thread.c b/src/thread.c index 07c7619c6fc8f837828605f62aff1f77c1fdcc68..df9f151834e2f8ccb807155f9f9810573dc56364 100644 --- a/src/thread.c +++ b/src/thread.c @@ -23,18 +23,17 @@ * 2012-12-29 Bernard fixed compiling warning. * 2016-08-09 ArdaFu add thread suspend and resume hook. * 2017-04-10 armink fixed the rt_thread_delete and rt_thread_detach - bug when thread has not startup. + * bug when thread has not startup. 
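
rt_critical_level() now reports the calling thread's scheduler_lock_nest rather than the old global counter, which makes it a convenient sanity check before doing anything that may block. A small hedged example; the semaphore and function names are illustrative:

    #include <rtthread.h>

    void wait_for_data(rt_sem_t data_sem)
    {
        /* blocking while the scheduler is locked would be a bug, so
         * verify the nesting level is zero before suspending */
        RT_ASSERT(rt_critical_level() == 0);

        rt_sem_take(data_sem, RT_WAITING_FOREVER);
    }
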
+ * 2018-11-22 Jesven yield is same to rt_schedule + * add support for tasks bound to cpu */ -#include #include +#include -extern rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX]; -extern struct rt_thread *rt_current_thread; extern rt_list_t rt_thread_defunct; #ifdef RT_USING_HOOK - static void (*rt_thread_suspend_hook)(rt_thread_t thread); static void (*rt_thread_resume_hook) (rt_thread_t thread); static void (*rt_thread_inited_hook) (rt_thread_t thread); @@ -84,7 +83,7 @@ void rt_thread_exit(void) register rt_base_t level; /* get current thread */ - thread = rt_current_thread; + thread = rt_thread_self(); /* disable interrupt */ level = rt_hw_interrupt_disable(); @@ -165,6 +164,16 @@ static rt_err_t _rt_thread_init(struct rt_thread *thread, thread->error = RT_EOK; thread->stat = RT_THREAD_INIT; +#ifdef RT_USING_SMP + /* not bind on any cpu */ + thread->bind_cpu = RT_CPUS_NR; + thread->oncpu = RT_CPU_DETACHED; + + /* lock init */ + thread->scheduler_lock_nest = 0; + thread->cpus_lock_nest = 0; +#endif /*RT_USING_SMP*/ + /* initialize cleanup function and user data */ thread->cleanup = 0; thread->user_data = 0; @@ -251,7 +260,19 @@ RTM_EXPORT(rt_thread_init); */ rt_thread_t rt_thread_self(void) { +#ifdef RT_USING_SMP + rt_base_t lock; + rt_thread_t self; + + lock = rt_hw_local_irq_disable(); + self = rt_cpu_self()->current_thread; + rt_hw_local_irq_enable(lock); + return self; +#else + extern rt_thread_t rt_current_thread; + return rt_current_thread; +#endif } RTM_EXPORT(rt_thread_self); @@ -449,36 +470,7 @@ RTM_EXPORT(rt_thread_delete); */ rt_err_t rt_thread_yield(void) { - register rt_base_t level; - struct rt_thread *thread; - - /* disable interrupt */ - level = rt_hw_interrupt_disable(); - - /* set to current thread */ - thread = rt_current_thread; - - /* if the thread stat is READY and on ready queue list */ - if ((thread->stat & RT_THREAD_STAT_MASK) == RT_THREAD_READY && - thread->tlist.next != thread->tlist.prev) - { - /* remove thread from thread list */ - rt_list_remove(&(thread->tlist)); - - /* put thread to end of ready queue */ - rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]), - &(thread->tlist)); - - /* enable interrupt */ - rt_hw_interrupt_enable(level); - - rt_schedule(); - - return RT_EOK; - } - - /* enable interrupt */ - rt_hw_interrupt_enable(level); + rt_schedule(); return RT_EOK; } @@ -496,13 +488,14 @@ rt_err_t rt_thread_sleep(rt_tick_t tick) register rt_base_t temp; struct rt_thread *thread; - /* disable interrupt */ - temp = rt_hw_interrupt_disable(); /* set to current thread */ - thread = rt_current_thread; + thread = rt_thread_self(); RT_ASSERT(thread != RT_NULL); RT_ASSERT(rt_object_get_type((rt_object_t)thread) == RT_Object_Class_Thread); + /* disable interrupt */ + temp = rt_hw_interrupt_disable(); + /* suspend thread */ rt_thread_suspend(thread); @@ -559,7 +552,8 @@ RTM_EXPORT(rt_thread_mdelay); * @param cmd the control command, which includes * RT_THREAD_CTRL_CHANGE_PRIORITY for changing priority level of thread; * RT_THREAD_CTRL_STARTUP for starting a thread; - * RT_THREAD_CTRL_CLOSE for delete a thread. + * RT_THREAD_CTRL_CLOSE for delete a thread; + * RT_THREAD_CTRL_BIND_CPU for bind the thread to a CPU. 
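
Two user-visible effects of the thread.c changes above: rt_thread_self() now samples the per-CPU current thread with local interrupts masked, so it is SMP-safe, and rt_thread_yield() simply invokes the scheduler, which re-queues the caller behind other ready threads of the same priority. A minimal worker sketch; the entry function and its output are illustrative only:

    #include <rthw.h>
    #include <rtthread.h>

    static void worker_entry(void *parameter)
    {
        (void)parameter;

        while (1)
        {
            rt_thread_t self = rt_thread_self();

            rt_kprintf("%s running on cpu %d\n", self->name, rt_hw_cpu_id());

            /* let same-priority peers run, then continue */
            rt_thread_yield();
        }
    }
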
* @param arg the argument of control command * * @return RT_EOK @@ -625,6 +619,22 @@ rt_err_t rt_thread_control(rt_thread_t thread, int cmd, void *arg) return rt_thread_delete(thread); #endif +#ifdef RT_USING_SMP + case RT_THREAD_CTRL_BIND_CPU: + { + rt_uint8_t cpu; + + if ((thread->stat & RT_THREAD_STAT_MASK) != RT_THREAD_INIT) + { + /* we only support bind cpu before started phase. */ + return RT_ERROR; + } + + cpu = (rt_uint8_t)(size_t)arg; + thread->bind_cpu = cpu > RT_CPUS_NR? RT_CPUS_NR : cpu; + break; + } +#endif /*RT_USING_SMP*/ default: break; } @@ -661,6 +671,8 @@ rt_err_t rt_thread_suspend(rt_thread_t thread) return -RT_ERROR; } + RT_ASSERT(thread == rt_thread_self()); + /* disable interrupt */ temp = rt_hw_interrupt_disable();
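
Finally, a usage sketch for the new RT_THREAD_CTRL_BIND_CPU command handled above. Binding is only accepted while the thread is still in RT_THREAD_INIT state, so it has to happen between creation and rt_thread_startup(); passing a CPU index of RT_CPUS_NR or more means "not bound". The name, stack size, priority and tick below are arbitrary example values.

    #include <rtthread.h>

    int start_bound_worker(void (*entry)(void *parameter))
    {
        rt_thread_t tid = rt_thread_create("worker", entry, RT_NULL,
                                           2048, 20, 10);
        if (tid == RT_NULL)
            return -RT_ERROR;

        /* must precede rt_thread_startup(): only RT_THREAD_INIT threads
         * may be bound, mirroring the per-CPU idle threads in idle.c */
        rt_thread_control(tid, RT_THREAD_CTRL_BIND_CPU, (void *)1);

        return rt_thread_startup(tid);
    }
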