Unverified · Commit c07ebbc9 · authored by Bernard Xiong, committed by GitHub

Merge pull request #3111 from BernardXiong/spin_lock

Add spin lock API in Kernel
@@ -577,6 +577,7 @@ struct rt_thread
     rt_uint16_t scheduler_lock_nest;                /**< scheduler lock count */
     rt_uint16_t cpus_lock_nest;                     /**< cpus lock count */
+    rt_uint16_t critical_lock_nest;                 /**< critical lock count */
 #endif /*RT_USING_SMP*/

     /* priority */
......
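For orientation, one way to read the three per-thread counters as they are used elsewhere in this diff (my summary, not text from the commit):

/* scheduler_lock_nest - per-cpu scheduler lock depth; bumped by
 *                       rt_preempt_disable() and rt_enter_critical()
 * cpus_lock_nest      - nesting depth of rt_cpus_lock(), the inter-cpu lock
 * critical_lock_nest  - nesting depth of rt_enter_critical(); new in this
 *                       commit, so the "outermost entry" test becomes == 0
 */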
@@ -143,6 +143,12 @@ typedef union {
     } tickets;
 } rt_hw_spinlock_t;

+struct rt_spinlock
+{
+    rt_hw_spinlock_t lock;
+};
+
+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock);
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock);
 void rt_hw_spin_unlock(rt_hw_spinlock_t *lock);
......
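The `tickets` member suggests rt_hw_spinlock_t is a ticket lock, which hands out FIFO tickets so waiters acquire in arrival order instead of racing. As background only, a minimal ticket-lock sketch using GCC atomic builtins (the kernel's real rt_hw_spin_lock is architecture-specific assembly; all `demo_` names are illustrative):

typedef union
{
    unsigned int slock;
    struct
    {
        unsigned short owner;   /* ticket currently being served */
        unsigned short next;    /* next ticket to hand out */
    } tickets;
} demo_spinlock_t;

static void demo_spin_lock(demo_spinlock_t *l)
{
    /* take a ticket, then wait until it is called */
    unsigned short ticket = __atomic_fetch_add(&l->tickets.next, 1, __ATOMIC_ACQUIRE);
    while (__atomic_load_n(&l->tickets.owner, __ATOMIC_ACQUIRE) != ticket)
        ;   /* spin */
}

static void demo_spin_unlock(demo_spinlock_t *l)
{
    /* serve the next ticket; release ordering publishes protected writes */
    __atomic_fetch_add(&l->tickets.owner, 1, __ATOMIC_RELEASE);
}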
@@ -391,6 +391,27 @@ rt_err_t rt_mq_recv(rt_mq_t mq,
 rt_err_t rt_mq_control(rt_mq_t mq, int cmd, void *arg);
 #endif

+/*
+ * spinlock
+ */
+#ifdef RT_USING_SMP
+struct rt_spinlock;
+
+void rt_spin_lock_init(struct rt_spinlock *lock);
+void rt_spin_lock(struct rt_spinlock *lock);
+void rt_spin_unlock(struct rt_spinlock *lock);
+rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock);
+void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level);
+#else
+#define rt_spin_lock_init(lock)                 /* nothing */
+#define rt_spin_lock(lock)                      rt_enter_critical()
+#define rt_spin_unlock(lock)                    rt_exit_critical()
+#define rt_spin_lock_irqsave(lock)              rt_hw_interrupt_disable()
+#define rt_spin_unlock_irqrestore(lock, level)  rt_hw_interrupt_enable(level)
+#endif

 /**@}*/

 #ifdef RT_USING_DEVICE
......
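A minimal usage sketch for the new API (the `demo_` names are mine; on SMP builds the calls also disable preemption, and on single-core builds they fall back to the macros above):

#include <rtthread.h>

static struct rt_spinlock demo_lock;    /* hypothetical */
static rt_uint32_t demo_counter;

void demo_init(void)
{
    rt_spin_lock_init(&demo_lock);
}

void demo_increase(void)
{
    /* short critical section shared between threads on different cpus */
    rt_spin_lock(&demo_lock);
    demo_counter++;
    rt_spin_unlock(&demo_lock);
}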
@@ -14,6 +14,7 @@
 #include <board.h>

 #ifdef RT_USING_SMP
 int rt_hw_cpu_id(void)
 {
     int cpu_id;
@@ -25,6 +26,11 @@ int rt_hw_cpu_id(void)
     return cpu_id;
 };

+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
+{
+    lock->slock = 0;
+}
+
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
 {
     unsigned long tmp;
......
@@ -25,6 +25,11 @@ int rt_hw_cpu_id(void)
     return read_csr(mhartid);
 }

+void rt_hw_spin_lock_init(rt_hw_spinlock_t *lock)
+{
+    ((spinlock_t *)lock)->lock = 0;
+}
+
 void rt_hw_spin_lock(rt_hw_spinlock_t *lock)
 {
     spinlock_lock((spinlock_t *)lock);
......
@@ -20,7 +20,7 @@ config RT_USING_ARCH_DATA_TYPE
 config RT_USING_SMP
     bool "Enable SMP(Symmetric multiprocessing)"
     default n
     help
         This option should be selected by machines which have an SMP-
         capable CPU.
         The only effect of this option is to make the SMP-related
@@ -28,10 +28,10 @@ config RT_USING_SMP
 config RT_CPUS_NR
     int "Number of CPUs"
     default 2
     depends on RT_USING_SMP
     help
         Number of CPUs in the system

 config RT_ALIGN_SIZE
     int "Alignment size for CPU architecture data access"
......
@@ -26,6 +26,9 @@ if GetDepend('RT_USING_MEMHEAP') == False:
 if GetDepend('RT_USING_DEVICE') == False:
     SrcRemove(src, ['device.c'])

+if GetDepend('RT_USING_SMP') == False:
+    SrcRemove(src, ['cpu.c'])
+
 group = DefineGroup('Kernel', src, depend = [''], CPPPATH = CPPPATH)

 Return('group')
@@ -7,15 +7,106 @@
  * Date           Author       Notes
  * 2018-10-30     Bernard      The first version
  */
+#include <rtthread.h>
 #include <rthw.h>
-#include <rtthread.h>

 #ifdef RT_USING_SMP
 static struct rt_cpu rt_cpus[RT_CPUS_NR];
 rt_hw_spinlock_t _cpus_lock;
+/*
+ * disable scheduler
+ */
+static void rt_preempt_disable(void)
+{
+    register rt_base_t level;
+    struct rt_thread *current_thread;
+
+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
+
+    current_thread = rt_thread_self();
+    if (!current_thread)
+    {
+        rt_hw_local_irq_enable(level);
+        return;
+    }
+
+    /* lock scheduler for local cpu */
+    current_thread->scheduler_lock_nest ++;
+
+    /* enable interrupt */
+    rt_hw_local_irq_enable(level);
+}
+
+/*
+ * enable scheduler
+ */
+static void rt_preempt_enable(void)
+{
+    register rt_base_t level;
+    struct rt_thread *current_thread;
+
+    /* disable interrupt */
+    level = rt_hw_local_irq_disable();
+
+    current_thread = rt_thread_self();
+    if (!current_thread)
+    {
+        rt_hw_local_irq_enable(level);
+        return;
+    }
+
+    /* unlock scheduler for local cpu */
+    current_thread->scheduler_lock_nest --;
+    rt_schedule();
+
+    /* enable interrupt */
+    rt_hw_local_irq_enable(level);
+}
+void rt_spin_lock_init(struct rt_spinlock *lock)
+{
+    rt_hw_spin_lock_init(&lock->lock);
+}
+RTM_EXPORT(rt_spin_lock_init)
+
+void rt_spin_lock(struct rt_spinlock *lock)
+{
+    rt_preempt_disable();
+    rt_hw_spin_lock(&lock->lock);
+}
+RTM_EXPORT(rt_spin_lock)
+
+void rt_spin_unlock(struct rt_spinlock *lock)
+{
+    rt_hw_spin_unlock(&lock->lock);
+    rt_preempt_enable();
+}
+RTM_EXPORT(rt_spin_unlock)
+
+rt_base_t rt_spin_lock_irqsave(struct rt_spinlock *lock)
+{
+    unsigned long level;
+
+    rt_preempt_disable();
+    level = rt_hw_local_irq_disable();
+    rt_hw_spin_lock(&lock->lock);
+
+    return level;
+}
+RTM_EXPORT(rt_spin_lock_irqsave)
+
+void rt_spin_unlock_irqrestore(struct rt_spinlock *lock, rt_base_t level)
+{
+    rt_hw_spin_unlock(&lock->lock);
+    rt_hw_local_irq_enable(level);
+    rt_preempt_enable();
+}
+RTM_EXPORT(rt_spin_unlock_irqrestore)
 /**
  * This function will return the current cpu.
  */
@@ -42,7 +133,7 @@ rt_base_t rt_cpus_lock(void)
     pcpu = rt_cpu_self();
     if (pcpu->current_thread != RT_NULL)
     {
-        register rt_uint16_t lock_nest = pcpu->current_thread->cpus_lock_nest;
+        register rt_ubase_t lock_nest = pcpu->current_thread->cpus_lock_nest;

         pcpu->current_thread->cpus_lock_nest++;
         if (lock_nest == 0)
......
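Note the ordering in rt_spin_lock_irqsave(): preemption off, then local interrupts off, then the hardware lock, released in the reverse order by rt_spin_unlock_irqrestore(). The irqsave variant matters when the same lock is also taken from interrupt context; a sketch with hypothetical `demo_`/`q_` names:

static struct rt_spinlock q_lock;   /* hypothetical */
static int q_pending;

void demo_isr(void)     /* interrupt context: interrupts already masked */
{
    rt_spin_lock(&q_lock);
    q_pending++;
    rt_spin_unlock(&q_lock);
}

int demo_drain(void)    /* thread context */
{
    rt_base_t level;
    int n;

    /* a plain rt_spin_lock() here could deadlock: an ISR arriving on the
     * same core would spin on a lock its own interrupted thread holds */
    level = rt_spin_lock_irqsave(&q_lock);
    n = q_pending;
    q_pending = 0;
    rt_spin_unlock_irqrestore(&q_lock, level);

    return n;
}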
@@ -83,7 +83,7 @@ static void _rt_scheduler_stack_check(struct rt_thread *thread)
     RT_ASSERT(thread != RT_NULL);

 #if defined(ARCH_CPU_STACK_GROWS_UPWARD)
     if (*((rt_uint8_t *)((rt_ubase_t)thread->stack_addr + thread->stack_size - 1)) != '#' ||
 #else
     if (*((rt_uint8_t *)thread->stack_addr) != '#' ||
 #endif
@@ -840,11 +840,14 @@ void rt_enter_critical(void)
      */

     /* lock scheduler for all cpus */
-    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
+    if (current_thread->critical_lock_nest == 0)
     {
         rt_hw_spin_lock(&_rt_critical_lock);
     }

+    /* critical for local cpu */
+    current_thread->critical_lock_nest ++;
+
     /* lock scheduler for local cpu */
     current_thread->scheduler_lock_nest ++;
@@ -892,7 +895,9 @@ void rt_exit_critical(void)
     current_thread->scheduler_lock_nest --;

-    if (current_thread->scheduler_lock_nest == !!current_thread->cpus_lock_nest)
+    current_thread->critical_lock_nest --;
+
+    if (current_thread->critical_lock_nest == 0)
     {
         rt_hw_spin_unlock(&_rt_critical_lock);
     }
@@ -951,9 +956,9 @@ rt_uint16_t rt_critical_level(void)
 #ifdef RT_USING_SMP
     struct rt_thread *current_thread = rt_cpu_self()->current_thread;

-    return current_thread->scheduler_lock_nest;
+    return current_thread->critical_lock_nest;
 #else
     return rt_scheduler_lock_nest;
 #endif /*RT_USING_SMP*/
 }
 RTM_EXPORT(rt_critical_level);
......
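Why the dedicated counter: the old outermost-entry test compared scheduler_lock_nest against !!cpus_lock_nest because rt_cpus_lock() also affects scheduler lock depth; critical_lock_nest reduces the test to a plain zero check, and rt_critical_level() now reports that counter directly. The nesting then behaves as follows (a reading of the diff above; the demo function name is mine):

void demo_nested(void)
{
    rt_enter_critical();    /* critical_lock_nest 0 -> 1: takes _rt_critical_lock */
    rt_enter_critical();    /* 1 -> 2: counter only, no spin-lock operation */

    /* ... scheduling is locked across all cpus here ... */

    rt_exit_critical();     /* 2 -> 1 */
    rt_exit_critical();     /* 1 -> 0: releases _rt_critical_lock */
}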
@@ -172,6 +172,7 @@ static rt_err_t _rt_thread_init(struct rt_thread *thread,
     /* lock init */
     thread->scheduler_lock_nest = 0;
     thread->cpus_lock_nest = 0;
+    thread->critical_lock_nest = 0;
 #endif /*RT_USING_SMP*/

     /* initialize cleanup function and user data */
......