Commit 0ab7aefc, authored by Ralf Baechle

[MIPS] MT: Scheduler support for SMT

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Parent 92b1e6a6
@@ -1442,6 +1442,7 @@ config MIPS_MT_SMP
 	select MIPS_MT
 	select NR_CPUS_DEFAULT_2
 	select SMP
+	select SYS_SUPPORTS_SCHED_SMT if SMP
 	select SYS_SUPPORTS_SMP
 	help
 	  This is a kernel model which is also known a VSMP or lately
@@ -1468,6 +1469,19 @@ endchoice
 config MIPS_MT
 	bool
 
+config SCHED_SMT
+	bool "SMT (multithreading) scheduler support"
+	depends on SYS_SUPPORTS_SCHED_SMT
+	default n
+	help
+	  SMT scheduler support improves the CPU scheduler's decision making
+	  when dealing with MIPS MT enabled cores at a cost of slightly
+	  increased overhead in some places. If unsure say N here.
+
+config SYS_SUPPORTS_SCHED_SMT
+	bool
+
 config SYS_SUPPORTS_MULTITHREADING
 	bool
......
@@ -62,6 +62,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	);
 	seq_printf(m, "shadow register sets\t: %d\n",
 		       cpu_data[n].srsets);
+	seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
 	sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
 	        cpu_has_vce ? "%u" : "not available");
......
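For illustration only (not part of the commit): with the hunk above applied, every processor entry in /proc/cpuinfo gains a "core" line. A minimal user-space sketch that pairs each reported processor with its core number could look like the following; the program and its parsing format are assumptions for demonstration, matching the field names printed by the show_cpuinfo() change above.

#include <stdio.h>
#include <string.h>

/*
 * Demo: scan /proc/cpuinfo and print the "core" value that this
 * patch adds for every "processor" entry.  Assumes a MIPS MT
 * kernel with the patch applied.
 */
int main(void)
{
	FILE *f = fopen("/proc/cpuinfo", "r");
	char line[256];
	int cpu = -1;

	if (!f) {
		perror("/proc/cpuinfo");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (!strncmp(line, "processor", 9)) {
			sscanf(line, "processor : %d", &cpu);
		} else if (!strncmp(line, "core", 4)) {
			int core;

			if (sscanf(line, "core : %d", &core) == 1)
				printf("cpu %d -> core %d\n", cpu, core);
		}
	}
	fclose(f);
	return 0;
}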
@@ -22,6 +22,7 @@
 #include <linux/cpumask.h>
 #include <linux/interrupt.h>
 #include <linux/compiler.h>
+#include <linux/smp.h>
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
@@ -30,7 +31,6 @@
 #include <asm/system.h>
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
-#include <asm/smp.h>
 #include <asm/time.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
@@ -223,6 +223,7 @@ static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
 void __init plat_smp_setup(void)
 {
 	unsigned int mvpconf0, ntc, tc, ncpu = 0;
+	unsigned int nvpe;
 
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
@@ -242,6 +243,9 @@ void __init plat_smp_setup(void)
 	mvpconf0 = read_c0_mvpconf0();
 	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
 
+	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+	smp_num_siblings = nvpe;
+
 	/* we'll always have more TC's than VPE's, so loop setting everything
 	   to a sensible state */
 	for (tc = 0; tc <= ntc; tc++) {
......
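As a side note, the sibling count is derived directly from the MVPCONF0 value read in plat_smp_setup(): PVPE holds the highest VPE index (hence the "+ 1") and PTC the highest TC index. The stand-alone sketch below replays that bit-field arithmetic on a sample value; the mask and shift constants are assumptions mirroring the MIPS MT ASE layout and are restated only so the demo compiles on its own.

#include <stdio.h>

/*
 * Assumed MVPConf0 layout (MIPS MT ASE): PTC in bits 7:0, PVPE in
 * bits 13:10.  In the kernel these come from the MT register
 * headers; they are repeated here for the demo only.
 */
#define MVPCONF0_PTC_SHIFT	0
#define MVPCONF0_PTC		(0xffu << MVPCONF0_PTC_SHIFT)
#define MVPCONF0_PVPE_SHIFT	10
#define MVPCONF0_PVPE		(0xfu << MVPCONF0_PVPE_SHIFT)

int main(void)
{
	/* Sample value: 2 VPEs (PVPE = 1) and 4 TCs (PTC = 3) per core. */
	unsigned int mvpconf0 = (1u << MVPCONF0_PVPE_SHIFT) | 3u;
	unsigned int ntc  = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
	unsigned int nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;

	/* nvpe is what plat_smp_setup() stores in smp_num_siblings. */
	printf("highest TC index = %u, VPEs (siblings) = %u\n", ntc, nvpe);
	return 0;
}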
@@ -56,6 +56,34 @@ EXPORT_SYMBOL(cpu_online_map);
 extern void __init calibrate_delay(void);
 extern void cpu_idle(void);
 
+/* Number of TCs (or siblings in Intel speak) per CPU core */
+int smp_num_siblings = 1;
+EXPORT_SYMBOL(smp_num_siblings);
+
+/* representing the TCs (or siblings in Intel speak) of each logical CPU */
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+static inline void set_cpu_sibling_map(int cpu)
+{
+	int i;
+
+	cpu_set(cpu, cpu_sibling_setup_map);
+
+	if (smp_num_siblings > 1) {
+		for_each_cpu_mask(i, cpu_sibling_setup_map) {
+			if (cpu_data[cpu].core == cpu_data[i].core) {
+				cpu_set(i, cpu_sibling_map[cpu]);
+				cpu_set(cpu, cpu_sibling_map[i]);
+			}
+		}
+	} else
+		cpu_set(cpu, cpu_sibling_map[cpu]);
+}
+
 /*
  * First C code run on the secondary CPUs after being started up by
  * the master.
@@ -85,6 +113,7 @@ asmlinkage __cpuinit void start_secondary(void)
 	cpu_data[cpu].udelay_val = loops_per_jiffy;
 
 	prom_smp_finish();
+	set_cpu_sibling_map(cpu);
 
 	cpu_set(cpu, cpu_callin_map);
@@ -258,6 +287,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	init_new_context(current, &init_mm);
 	current_thread_info()->cpu = 0;
 	plat_prepare_cpus(max_cpus);
+	set_cpu_sibling_map(0);
 #ifndef CONFIG_HOTPLUG_CPU
 	cpu_present_map = cpu_possible_map;
 #endif
......
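The sibling map above is filled in incrementally: each CPU that reaches set_cpu_sibling_map() first registers itself in cpu_sibling_setup_map and then pairs up with every already-registered CPU that reports the same physical core number. The following stand-alone sketch replays that pairing logic with plain bitmasks and made-up core assignments, purely to show what ends up in cpu_sibling_map; apart from the algorithm itself, the names and data below are hypothetical, not kernel code.

#include <stdio.h>

#define NR_CPUS 8

/* Hypothetical stand-in for cpu_data[].core: CPUs 0/1 share core 0, 2/3 share core 1. */
static const int cpu_core[NR_CPUS] = { 0, 0, 1, 1 };
static const int nr_cpus = 4;

static unsigned int sibling_map[NR_CPUS];	/* one bit per sibling CPU */
static unsigned int setup_map;			/* CPUs already registered */

/* Mirrors the pairing logic of set_cpu_sibling_map() in the patch. */
static void set_sibling_map(int cpu, int smp_num_siblings)
{
	int i;

	setup_map |= 1u << cpu;

	if (smp_num_siblings > 1) {
		for (i = 0; i < nr_cpus; i++) {
			if (!(setup_map & (1u << i)))
				continue;
			if (cpu_core[cpu] == cpu_core[i]) {
				sibling_map[cpu] |= 1u << i;
				sibling_map[i] |= 1u << cpu;
			}
		}
	} else {
		sibling_map[cpu] |= 1u << cpu;
	}
}

int main(void)
{
	int cpu;

	for (cpu = 0; cpu < nr_cpus; cpu++)
		set_sibling_map(cpu, 2);	/* pretend two VPEs per core */

	for (cpu = 0; cpu < nr_cpus; cpu++)
		printf("cpu %d siblings: 0x%x\n", cpu, sibling_map[cpu]);
	return 0;
}

Running it prints 0x3 for CPUs 0 and 1 and 0xc for CPUs 2 and 3, i.e. each CPU's mask contains exactly the CPUs on its own core, which is what the scheduler's SMT domains consume.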
@@ -55,6 +55,7 @@ struct cpuinfo_mips {
 	struct cache_desc	scache;	/* Secondary cache */
 	struct cache_desc	tcache;	/* Tertiary/split secondary cache */
 	int			srsets;	/* Shadow register sets */
+	int			core;	/* physical core number */
 #if defined(CONFIG_MIPS_MT_SMTC)
 	/*
 	 * In the MIPS MT "SMTC" model, each TC is considered
@@ -63,8 +64,10 @@ struct cpuinfo_mips {
 	 * to all TCs within the same VPE.
 	 */
 	int		vpe_id;  /* Virtual Processor number */
-	int		tc_id;  /* Thread Context number */
 #endif /* CONFIG_MIPS_MT */
+#ifdef CONFIG_MIPS_MT_SMTC
+	int		tc_id;  /* Thread Context number */
+#endif
 	void		*data;	/* Additional data */
 } __attribute__((aligned(SMP_CACHE_BYTES)));
......
@@ -20,6 +20,9 @@
 #include <linux/cpumask.h>
 
 #include <asm/atomic.h>
 
+extern int smp_num_siblings;
+extern cpumask_t cpu_sibling_map[];
+
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 /* Map from cpu id to sequential logical cpu number.  This will only
......
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#ifndef __ASM_TOPOLOGY_H
+#define __ASM_TOPOLOGY_H
+
 #include <topology.h>
+
+#ifdef CONFIG_SMP
+#define smt_capable()	(smp_num_siblings > 1)
+#endif
+
+#endif /* __ASM_TOPOLOGY_H */