openeuler / Kernel
Commit fe3f2053
Authored on Dec 11, 2005 by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc-merge

Parents: 7fc7e2ee, ef969434
Showing 14 changed files with 139 additions and 60 deletions (+139 −60)
arch/powerpc/Kconfig  +1 −1
arch/powerpc/kernel/setup_64.c  +9 −1
arch/powerpc/mm/hash_utils_64.c  +1 −1
arch/powerpc/mm/hugetlbpage.c  +77 −18
arch/powerpc/mm/numa.c  +1 −1
arch/powerpc/mm/stab.c  +1 −6
arch/powerpc/platforms/powermac/feature.c  +16 −5
arch/powerpc/platforms/pseries/iommu.c  +7 −4
arch/powerpc/platforms/pseries/lpar.c  +0 −12
arch/ppc/Kconfig  +3 −3
arch/ppc/kernel/smp.c  +4 −0
arch/ppc/platforms/pmac_feature.c  +15 −5
drivers/macintosh/windfarm_pm81.c  +2 −2
include/asm-powerpc/mmu.h  +2 −1
arch/powerpc/Kconfig
@@ -227,7 +227,7 @@ config SMP
 	  If you don't know what to do here, say N.
 
 config NR_CPUS
-	int "Maximum number of CPUs (2-32)"
+	int "Maximum number of CPUs (2-128)"
 	range 2 128
 	depends on SMP
 	default "32" if PPC64
arch/powerpc/kernel/setup_64.c
@@ -102,7 +102,15 @@ int boot_cpuid_phys = 0;
 dev_t boot_dev;
 u64 ppc64_pft_size;
 
-struct ppc64_caches ppc64_caches;
+/* Pick defaults since we might want to patch instructions
+ * before we've read this from the device tree.
+ */
+struct ppc64_caches ppc64_caches = {
+	.dline_size = 0x80,
+	.log_dline_size = 7,
+	.iline_size = 0x80,
+	.log_iline_size = 7
+};
 EXPORT_SYMBOL_GPL(ppc64_caches);
 
 /*
arch/powerpc/mm/hash_utils_64.c
@@ -601,7 +601,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	/* Handle hugepage regions */
 	if (unlikely(in_hugepage_area(mm->context, ea))) {
 		DBG_LOW(" -> huge page !\n");
-		return hash_huge_page(mm, access, ea, vsid, local);
+		return hash_huge_page(mm, access, ea, vsid, local, trap);
 	}
 
 	/* Get PTE and page size from page tables */
arch/powerpc/mm/hugetlbpage.c
@@ -148,43 +148,63 @@ int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
 	return 0;
 }
 
+struct slb_flush_info {
+	struct mm_struct *mm;
+	u16 newareas;
+};
+
 static void flush_low_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i;
 
-	asm volatile("isync" : : : "memory");
-
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_LOW_AREAS);
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_LOW_AREAS);
+
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
 
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
+
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_LOW_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		asm volatile("slbie %0"
 			     : : "r" ((i << SID_SHIFT) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
 
 static void flush_high_segments(void *parm)
 {
-	u16 areas = (unsigned long) parm;
+	struct slb_flush_info *fi = parm;
 	unsigned long i, j;
 
-	asm volatile("isync" : : : "memory");
-
-	BUILD_BUG_ON((sizeof(areas)*8) != NUM_HIGH_AREAS);
+	BUILD_BUG_ON((sizeof(fi->newareas)*8) != NUM_HIGH_AREAS);
+
+	if (current->active_mm != fi->mm)
+		return;
+
+	/* Only need to do anything if this CPU is working in the same
+	 * mm as the one which has changed */
 
+	/* update the paca copy of the context struct */
+	get_paca()->context = current->active_mm->context;
+
+	asm volatile("isync" : : : "memory");
 	for (i = 0; i < NUM_HIGH_AREAS; i++) {
-		if (! (areas & (1U << i)))
+		if (! (fi->newareas & (1U << i)))
 			continue;
 		for (j = 0; j < (1UL << (HTLB_AREA_SHIFT-SID_SHIFT)); j++)
 			asm volatile("slbie %0"
-				     :: "r" (((i << HTLB_AREA_SHIFT)
-					     + (j << SID_SHIFT)) | SLBIE_C));
+				     :: "r" (((i << HTLB_AREA_SHIFT)
+					      + (j << SID_SHIFT)) | SLBIE_C));
 	}
-
 	asm volatile("isync" : : : "memory");
 }
@@ -229,6 +249,7 @@ static int prepare_high_area_for_htlb(struct mm_struct *mm, unsigned long area)
 static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
 	unsigned long i;
+	struct slb_flush_info fi;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_LOW_AREAS);
 	BUILD_BUG_ON((sizeof(mm->context.low_htlb_areas)*8) != NUM_LOW_AREAS);
@@ -244,19 +265,20 @@ static int open_low_hpage_areas(struct mm_struct *mm, u16 newareas)
 	mm->context.low_htlb_areas |= newareas;
 
-	/* update the paca copy of the context struct */
-	get_paca()->context = mm->context;
-
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_low_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_low_segments, &fi, 0, 1);
 
 	return 0;
 }
 
 static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 {
+	struct slb_flush_info fi;
 	unsigned long i;
 
 	BUILD_BUG_ON((sizeof(newareas)*8) != NUM_HIGH_AREAS);
@@ -280,7 +302,10 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 	/* the context change must make it to memory before the flush,
 	 * so that further SLB misses do the right thing. */
 	mb();
-	on_each_cpu(flush_high_segments, (void *)(unsigned long)newareas, 0, 1);
+
+	fi.mm = mm;
+	fi.newareas = newareas;
+	on_each_cpu(flush_high_segments, &fi, 0, 1);
 
 	return 0;
 }
@@ -639,8 +664,36 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
+/*
+ * Called by asm hashtable.S for doing lazy icache flush
+ */
+static unsigned int hash_huge_page_do_lazy_icache(unsigned long rflags,
+						  pte_t pte, int trap)
+{
+	struct page *page;
+	int i;
+
+	if (!pfn_valid(pte_pfn(pte)))
+		return rflags;
+
+	page = pte_page(pte);
+
+	/* page is dirty */
+	if (!test_bit(PG_arch_1, &page->flags) && !PageReserved(page)) {
+		if (trap == 0x400) {
+			for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++)
+				__flush_dcache_icache(page_address(page+i));
+			set_bit(PG_arch_1, &page->flags);
+		} else {
+			rflags |= HPTE_R_N;
+		}
+	}
+	return rflags;
+}
+
 int hash_huge_page(struct mm_struct *mm, unsigned long access,
-		   unsigned long ea, unsigned long vsid, int local)
+		   unsigned long ea, unsigned long vsid, int local,
+		   unsigned long trap)
 {
 	pte_t *ptep;
 	unsigned long old_pte, new_pte;
@@ -691,6 +744,11 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 	rflags = 0x2 | (! (new_pte & _PAGE_RW));
 	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
 	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		/* No CPU has hugepages but lacks no execute, so we
+		 * don't need to worry about that case */
+		rflags = hash_huge_page_do_lazy_icache(rflags, __pte(old_pte),
+						       trap);
 
 	/* Check if pte already has an hpte (case 2) */
 	if (unlikely(old_pte & _PAGE_HASHPTE)) {
@@ -703,7 +761,8 @@ int hash_huge_page(struct mm_struct *mm, unsigned long access,
 		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 		slot += (old_pte & _PAGE_F_GIX) >> 12;
 
-		if (ppc_md.hpte_updatepp(slot, rflags, va, 1, local) == -1)
+		if (ppc_md.hpte_updatepp(slot, rflags, va, mmu_huge_psize,
+					 local) == -1)
 			old_pte &= ~_PAGE_HPTEFLAGS;
 	}
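For context on the hugetlbpage.c hunks above: the old code smuggled the area bitmask through the void * argument of on_each_cpu(), while the new code passes a struct slb_flush_info carrying both the mm and the bitmask, so each CPU first checks whether it is even running the affected mm before flushing. A minimal user-space sketch of that callback pattern (hypothetical names, not the kernel code itself) might look like this:

/* Sketch only: a context struct passed through a void * callback argument,
 * as the slb_flush_info change does; names are hypothetical. */
#include <stdio.h>
#include <stdint.h>

struct flush_info {
	int owner_id;        /* stands in for the mm identity check */
	uint16_t newareas;   /* bitmask of segments to flush */
};

static int current_owner = 42;

/* The callback receives an opaque pointer, like an on_each_cpu() handler. */
static void flush_segments(void *parm)
{
	struct flush_info *fi = parm;
	int i;

	if (current_owner != fi->owner_id)
		return;   /* nothing to do if we are not in the affected mm */

	for (i = 0; i < 16; i++)
		if (fi->newareas & (1U << i))
			printf("flush segment %d\n", i);
}

int main(void)
{
	struct flush_info fi = { .owner_id = 42, .newareas = 0x0005 };

	/* In the kernel this is on_each_cpu(flush_low_segments, &fi, 0, 1). */
	flush_segments(&fi);
	return 0;
}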
arch/powerpc/mm/numa.c
@@ -125,7 +125,7 @@ void __init get_region(unsigned int nid, unsigned long *start_pfn,
 	/* We didnt find a matching region, return start/end as 0 */
 	if (*start_pfn == -1UL)
-		start_pfn = 0;
+		*start_pfn = 0;
 }
 
 static inline void map_cpu_to_node(int cpu, int node)
arch/powerpc/mm/stab.c
@@ -288,11 +288,6 @@ void stab_initialize(unsigned long stab)
 		return;
 	}
 #endif /* CONFIG_PPC_ISERIES */
-#ifdef CONFIG_PPC_PSERIES
-	if (platform_is_lpar()) {
-		plpar_hcall_norets(H_SET_ASR, stabreal);
-		return;
-	}
-#endif
+
 	mtspr(SPRN_ASR, stabreal);
 }
arch/powerpc/platforms/powermac/feature.c
@@ -1650,11 +1650,19 @@ void pmac_tweak_clock_spreading(int enable)
 	 */
 	if (macio->type == macio_intrepid) {
-		if (enable)
-			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
-		else
-			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
-		mdelay(40);
+		struct device_node *clock =
+			of_find_node_by_path("/uni-n@f8000000/hw-clock");
+		if (clock && get_property(clock, "platform-do-clockspreading",
+					  NULL)) {
+			printk(KERN_INFO "%sabling clock spreading on Intrepid"
+			       " ASIC\n", enable ? "En" : "Dis");
+			if (enable)
+				UN_OUT(UNI_N_CLOCK_SPREADING, 2);
+			else
+				UN_OUT(UNI_N_CLOCK_SPREADING, 0);
+			mdelay(40);
+		}
+		of_node_put(clock);
 	}
 
 	while (machine_is_compatible("PowerBook5,2") ||
@@ -1724,6 +1732,9 @@ void pmac_tweak_clock_spreading(int enable)
 		pmac_low_i2c_close(ui2c);
 		break;
 	}
+
+	printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
+	       enable ? "En" : "Dis");
 	pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
 	rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
 	DBG("write result: %d,", rc);
arch/powerpc/platforms/pseries/iommu.c
@@ -109,6 +109,9 @@ static void tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	u64 rc;
 	union tce_entry tce;
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
@@ -143,10 +146,7 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 	union tce_entry tce, *tcep;
 	long l, limit;
 
-	tcenum <<= TCE_PAGE_FACTOR;
-	npages <<= TCE_PAGE_FACTOR;
-
-	if (npages == 1)
+	if (TCE_PAGE_FACTOR == 0 && npages == 1)
 		return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
 					   direction);
@@ -164,6 +164,9 @@ static void tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
 		__get_cpu_var(tce_page) = tcep;
 	}
 
+	tcenum <<= TCE_PAGE_FACTOR;
+	npages <<= TCE_PAGE_FACTOR;
+
 	tce.te_word = 0;
 	tce.te_rpn = (virt_to_abs(uaddr)) >> TCE_SHIFT;
 	tce.te_rdwr = 1;
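The iommu.c hunks above move the TCE_PAGE_FACTOR shifts into tce_build_pSeriesLP itself and let tce_buildmulti_pSeriesLP take the single-entry fast path only when TCE_PAGE_FACTOR is zero: with a non-zero factor one IOMMU page expands into several TCE entries, and delegating after shifting would apply the factor twice. A rough standalone sketch of that ordering, with hypothetical names and a fixed factor rather than the pSeries code:

/* Sketch only: why the shift belongs in the callee and the fast path is
 * gated on a zero factor. Hypothetical names, not the pSeries IOMMU code. */
#include <stdio.h>

#define PAGE_FACTOR 2	/* each logical page becomes 1 << PAGE_FACTOR entries */

static void build_entries(long index, long count)
{
	long i;

	/* The callee applies the factor once, as tce_build_pSeriesLP now does. */
	index <<= PAGE_FACTOR;
	count <<= PAGE_FACTOR;
	for (i = 0; i < count; i++)
		printf("entry %ld\n", index + i);
}

static void build_multi(long index, long count)
{
	/* Delegating to the single-entry builder is only equivalent
	 * when no expansion happens. */
	if (PAGE_FACTOR == 0 && count == 1) {
		build_entries(index, count);
		return;
	}

	/* Batched path: shift here, never before deciding to delegate. */
	index <<= PAGE_FACTOR;
	count <<= PAGE_FACTOR;
	printf("batched: %ld entries starting at %ld\n", count, index);
}

int main(void)
{
	build_multi(3, 1);	/* takes the batched path because PAGE_FACTOR != 0 */
	return 0;
}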
arch/powerpc/platforms/pseries/lpar.c
@@ -298,18 +298,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 	if (!(vflags & HPTE_V_BOLTED))
 		DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
 
-#if 1
-	{
-		int i;
-		for (i=0;i<8;i++) {
-			unsigned long w0, w1;
-			plpar_pte_read(0, hpte_group, &w0, &w1);
-			BUG_ON (HPTE_V_COMPARE(hpte_v, w0)
-				&& (w0 & HPTE_V_VALID));
-		}
-	}
-#endif
-
 	/* Now fill in the actual HPTE */
 	/* Set CEC cookie to 0 */
 	/* Zero page = 0 */
arch/ppc/Kconfig
@@ -767,14 +767,14 @@ config CPM2
 	  on it (826x, 827x, 8560).
 
 config PPC_CHRP
-	bool " Common Hardware Reference Platform (CHRP) based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_I8259
 	select PPC_INDIRECT_PCI
 	default y
 
 config PPC_PMAC
-	bool " Apple PowerMac based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_INDIRECT_PCI
 	default y
@@ -785,7 +785,7 @@ config PPC_PMAC64
 	default y
 
 config PPC_PREP
-	bool " PowerPC Reference Platform (PReP) based machines"
+	bool
 	depends on PPC_MULTIPLATFORM
 	select PPC_I8259
 	select PPC_INDIRECT_PCI
arch/ppc/kernel/smp.c
@@ -301,6 +301,10 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	/* Probe platform for CPUs: always linear. */
 	num_cpus = smp_ops->probe();
+
+	if (num_cpus < 2)
+		smp_tb_synchronized = 1;
+
 	for (i = 0; i < num_cpus; ++i)
 		cpu_set(i, cpu_possible_map);
arch/ppc/platforms/pmac_feature.c
@@ -1606,11 +1606,19 @@ void pmac_tweak_clock_spreading(int enable)
 	 */
 	if (macio->type == macio_intrepid) {
-		if (enable)
-			UN_OUT(UNI_N_CLOCK_SPREADING, 2);
-		else
-			UN_OUT(UNI_N_CLOCK_SPREADING, 0);
-		mdelay(40);
+		struct device_node *clock =
+			of_find_node_by_path("/uni-n@f8000000/hw-clock");
+		if (clock && get_property(clock, "platform-do-clockspreading",
+					  NULL)) {
+			printk(KERN_INFO "%sabling clock spreading on Intrepid"
+			       " ASIC\n", enable ? "En" : "Dis");
+			if (enable)
+				UN_OUT(UNI_N_CLOCK_SPREADING, 2);
+			else
+				UN_OUT(UNI_N_CLOCK_SPREADING, 0);
+			mdelay(40);
+		}
+		of_node_put(clock);
 	}
 
 	while (machine_is_compatible("PowerBook5,2") ||
@@ -1680,6 +1688,8 @@ void pmac_tweak_clock_spreading(int enable)
 		pmac_low_i2c_close(ui2c);
 		break;
 	}
+	printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n",
+	       enable ? "En" : "Dis");
 	pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub);
 	rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9);
 	DBG("write result: %d,", rc);
drivers/macintosh/windfarm_pm81.c
@@ -207,7 +207,7 @@ static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
 	},
 	/* Model ID 3 */
 	{
-		.model_id	= 2,
+		.model_id	= 3,
 		.itarget	= 0x350000,
 		.gd		= 0x08e00000,
 		.gp		= 0x00566666,
@@ -219,7 +219,7 @@ static struct wf_smu_sys_fans_param wf_smu_sys_all_params[] = {
 	},
 	/* Model ID 5 */
 	{
-		.model_id	= 2,
+		.model_id	= 5,
 		.itarget	= 0x3a0000,
 		.gd		= 0x15400000,
 		.gp		= 0x00233333,
include/asm-powerpc/mmu.h
@@ -220,7 +220,8 @@ extern int __hash_page_64K(unsigned long ea, unsigned long access,
 			   unsigned int local);
 struct mm_struct;
 extern int hash_huge_page(struct mm_struct *mm, unsigned long access,
-			  unsigned long ea, unsigned long vsid, int local);
+			  unsigned long ea, unsigned long vsid, int local,
+			  unsigned long trap);
 
 extern void htab_finish_init(void);
 extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,