Commit c2b078e7
Authored on Apr 02, 2015 by Ingo Molnar

Merge branch 'perf/urgent' into perf/core, before applying dependent patches

Signed-off-by: Ingo Molnar <mingo@kernel.org>

Parents: 8062382c c420f19b
Showing 37 changed files with 269 additions and 162 deletions (+269 −162)
MAINTAINERS                                        +1   -0
Makefile                                           +1   -1
arch/arc/kernel/signal.c                           +18  -6
arch/arm/Kconfig                                   +1   -0
arch/arm/boot/dts/dm8168-evm.dts                   +19  -0
arch/arm/boot/dts/dm816x.dtsi                      +14  -4
arch/arm/boot/dts/dra7.dtsi                        +0   -2
arch/arm/boot/dts/omap3.dtsi                       +4   -0
arch/arm/boot/dts/rk3288.dtsi                      +1   -0
arch/arm/boot/dts/socfpga.dtsi                     +1   -1
arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts     +16  -0
arch/arm/boot/dts/sun4i-a10.dtsi                   +1   -2
arch/arm/boot/dts/sun5i-a13.dtsi                   +1   -2
arch/arm/boot/dts/sun7i-a20.dtsi                   +1   -2
arch/arm/mach-omap2/id.c                           +2   -0
arch/arm/mach-pxa/irq.c                            +48  -63
arch/arm/mach-pxa/zeus.c                           +1   -1
arch/arm/mach-sunxi/Kconfig                        +2   -6
arch/arm/plat-omap/dmtimer.c                       +14  -1
arch/arm64/boot/dts/arm/juno-clocks.dtsi           +1   -1
arch/parisc/include/asm/pgalloc.h                  +10  -7
arch/parisc/kernel/syscall_table.S                 +6   -3
arch/powerpc/kvm/book3s_hv.c                       +4   -4
arch/powerpc/kvm/book3s_hv_rmhandlers.S            +1   -0
arch/x86/kernel/cpu/perf_event_intel.c             +5   -5
arch/x86/kernel/entry_64.S                         +14  -4
drivers/clocksource/Kconfig                        +3   -0
drivers/clocksource/timer-sun5i.c                  +0   -7
drivers/watchdog/imgpdc_wdt.c                      +4   -4
drivers/watchdog/mtk_wdt.c                         +1   -1
kernel/locking/lockdep.c                           +55  -26
kernel/module.c                                    +4   -4
kernel/sched/core.c                                +2   -0
kernel/time/tick-broadcast-hrtimer.c               +9   -2
security/selinux/selinuxfs.c                       +1   -1
sound/pci/hda/hda_intel.c                          +1   -1
sound/pci/hda/patch_realtek.c                      +2   -1
MAINTAINERS
@@ -1362,6 +1362,7 @@ F:	drivers/i2c/busses/i2c-rk3x.c
 F:	drivers/*/*rockchip*
 F:	drivers/*/*/*rockchip*
 F:	sound/soc/rockchip/
+N:	rockchip

 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:	Kukjin Kim <kgene@kernel.org>
Makefile
 VERSION = 4
 PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Hurr durr I'ma sheep

 # *DOCUMENTATION*
arch/arc/kernel/signal.c
@@ -67,7 +67,7 @@ stash_usr_regs(struct rt_sigframe __user *sf, struct pt_regs *regs,
 	       sigset_t *set)
 {
 	int err;
-	err = __copy_to_user(&(sf->uc.uc_mcontext.regs), regs,
+	err = __copy_to_user(&(sf->uc.uc_mcontext.regs.scratch), regs,
 			     sizeof(sf->uc.uc_mcontext.regs.scratch));
 	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(sigset_t));

@@ -83,7 +83,7 @@ static int restore_usr_regs(struct pt_regs *regs, struct rt_sigframe __user *sf)
 	if (!err)
 		set_current_blocked(&set);

-	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs),
+	err |= __copy_from_user(regs, &(sf->uc.uc_mcontext.regs.scratch),
 				sizeof(sf->uc.uc_mcontext.regs.scratch));

 	return err;

@@ -131,6 +131,15 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	/* Don't restart from sigreturn */
 	syscall_wont_restart(regs);

+	/*
+	 * Ensure that sigreturn always returns to user mode (in case the
+	 * regs saved on user stack got fudged between save and sigreturn)
+	 * Otherwise it is easy to panic the kernel with a custom
+	 * signal handler and/or restorer which clobberes the status32/ret
+	 * to return to a bogus location in kernel mode.
+	 */
+	regs->status32 |= STATUS_U_MASK;
+
 	return regs->r0;

 badframe:

@@ -229,8 +238,11 @@ setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
 	/*
 	 * handler returns using sigreturn stub provided already by userpsace
+	 * If not, nuke the process right away
 	 */
-	BUG_ON(!(ksig->ka.sa.sa_flags & SA_RESTORER));
+	if (!(ksig->ka.sa.sa_flags & SA_RESTORER))
+		return 1;
+
 	regs->blink = (unsigned long)ksig->ka.sa.sa_restorer;

 	/* User Stack for signal handler will be above the frame just carved */

@@ -296,12 +308,12 @@ static void
 handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
 	sigset_t *oldset = sigmask_to_save();
-	int ret;
+	int failed;

 	/* Set up the stack frame */
-	ret = setup_rt_frame(ksig, oldset, regs);
+	failed = setup_rt_frame(ksig, oldset, regs);

-	signal_setup_done(ret, ksig, 0);
+	signal_setup_done(failed, ksig, 0);
 }

 void do_signal(struct pt_regs *regs)
arch/arm/Kconfig
@@ -619,6 +619,7 @@ config ARCH_PXA
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
 	select HAVE_IDE
+	select IRQ_DOMAIN
 	select MULTI_IRQ_HANDLER
 	select PLAT_PXA
 	select SPARSE_IRQ
arch/arm/boot/dts/dm8168-evm.dts
@@ -36,6 +36,20 @@
 		>;
 	};

+	mmc_pins: pinmux_mmc_pins {
+		pinctrl-single,pins = <
+			DM816X_IOPAD(0x0a70, MUX_MODE0)		/* SD_POW */
+			DM816X_IOPAD(0x0a74, MUX_MODE0)		/* SD_CLK */
+			DM816X_IOPAD(0x0a78, MUX_MODE0)		/* SD_CMD */
+			DM816X_IOPAD(0x0a7C, MUX_MODE0)		/* SD_DAT0 */
+			DM816X_IOPAD(0x0a80, MUX_MODE0)		/* SD_DAT1 */
+			DM816X_IOPAD(0x0a84, MUX_MODE0)		/* SD_DAT2 */
+			DM816X_IOPAD(0x0a88, MUX_MODE0)		/* SD_DAT2 */
+			DM816X_IOPAD(0x0a8c, MUX_MODE2)		/* GP1[7] */
+			DM816X_IOPAD(0x0a90, MUX_MODE2)		/* GP1[8] */
+		>;
+	};
+
 	usb0_pins: pinmux_usb0_pins {
 		pinctrl-single,pins = <
 			DM816X_IOPAD(0x0d00, MUX_MODE0)		/* USB0_DRVVBUS */

@@ -137,7 +151,12 @@
 };

 &mmc1 {
+	pinctrl-names = "default";
+	pinctrl-0 = <&mmc_pins>;
 	vmmc-supply = <&vmmcsd_fixed>;
+	bus-width = <4>;
+	cd-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>;
+	wp-gpios = <&gpio2 8 GPIO_ACTIVE_LOW>;
 };

 /* At least dm8168-evm rev c won't support multipoint, later may */
arch/arm/boot/dts/dm816x.dtsi
@@ -150,17 +150,27 @@
 		};

 		gpio1: gpio@48032000 {
-			compatible = "ti,omap3-gpio";
+			compatible = "ti,omap4-gpio";
 			ti,hwmods = "gpio1";
+			ti,gpio-always-on;
 			reg = <0x48032000 0x1000>;
-			interrupts = <97>;
+			interrupts = <96>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 		};

 		gpio2: gpio@4804c000 {
-			compatible = "ti,omap3-gpio";
+			compatible = "ti,omap4-gpio";
 			ti,hwmods = "gpio2";
+			ti,gpio-always-on;
 			reg = <0x4804c000 0x1000>;
-			interrupts = <99>;
+			interrupts = <98>;
+			gpio-controller;
+			#gpio-cells = <2>;
+			interrupt-controller;
+			#interrupt-cells = <2>;
 		};

 		gpmc: gpmc@50000000 {
arch/arm/boot/dts/dra7.dtsi
@@ -1111,7 +1111,6 @@
 					      "wkupclk", "refclk",
 					      "div-clk", "phy-div";
 				#phy-cells = <0>;
-				ti,hwmods = "pcie1-phy";
 			};

 			pcie2_phy: pciephy@4a095000 {
@@ -1130,7 +1129,6 @@
 					      "wkupclk", "refclk",
 					      "div-clk", "phy-div";
 				#phy-cells = <0>;
-				ti,hwmods = "pcie2-phy";
 				status = "disabled";
 			};
 		};
arch/arm/boot/dts/omap3.dtsi
@@ -92,6 +92,8 @@
 			ti,hwmods = "aes";
 			reg = <0x480c5000 0x50>;
 			interrupts = <0>;
+			dmas = <&sdma 65 &sdma 66>;
+			dma-names = "tx", "rx";
 		};

 		prm: prm@48306000 {
@@ -550,6 +552,8 @@
 			ti,hwmods = "sham";
 			reg = <0x480c3000 0x64>;
 			interrupts = <49>;
+			dmas = <&sdma 69>;
+			dma-names = "rx";
 		};

 		smartreflex_core: smartreflex@480cb000 {
arch/arm/boot/dts/rk3288.dtsi
@@ -411,6 +411,7 @@
 			      "mac_clk_rx", "mac_clk_tx",
 			      "clk_mac_ref", "clk_mac_refout",
 			      "aclk_mac", "pclk_mac";
+		status = "disabled";
 	};

 	usb_host0_ehci: usb@ff500000 {
arch/arm/boot/dts/socfpga.dtsi
@@ -660,7 +660,7 @@
 			#address-cells = <1>;
 			#size-cells = <0>;
 			reg = <0xfff01000 0x1000>;
-			interrupts = <0 156 4>;
+			interrupts = <0 155 4>;
 			num-cs = <4>;
 			clocks = <&spi_m_clk>;
 			status = "disabled";
arch/arm/boot/dts/sun4i-a10-olinuxino-lime.dts
@@ -56,6 +56,22 @@
 	model = "Olimex A10-OLinuXino-LIME";
 	compatible = "olimex,a10-olinuxino-lime", "allwinner,sun4i-a10";

+	cpus {
+		cpu0: cpu@0 {
+			/*
+			 * The A10-Lime is known to be unstable
+			 * when running at 1008 MHz
+			 */
+			operating-points = <
+				/* kHz	  uV */
+				912000	1350000
+				864000	1300000
+				624000	1250000
+				>;
+			cooling-max-level = <2>;
+		};
+	};
+
 	soc@01c00000 {
 		emac: ethernet@01c0b000 {
 			pinctrl-names = "default";
arch/arm/boot/dts/sun4i-a10.dtsi
@@ -75,7 +75,6 @@
 			clock-latency = <244144>; /* 8 32k periods */
 			operating-points = <
 				/* kHz	  uV */
-				1056000	1500000
 				1008000	1400000
 				912000	1350000
 				864000	1300000
@@ -83,7 +82,7 @@
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
-			cooling-max-level = <4>;
+			cooling-max-level = <3>;
 		};
 	};
arch/arm/boot/dts/sun5i-a13.dtsi
@@ -47,7 +47,6 @@
 			clock-latency = <244144>; /* 8 32k periods */
 			operating-points = <
 				/* kHz	  uV */
-				1104000	1500000
 				1008000	1400000
 				912000	1350000
 				864000	1300000
@@ -57,7 +56,7 @@
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
-			cooling-max-level = <6>;
+			cooling-max-level = <5>;
 		};
 	};
arch/arm/boot/dts/sun7i-a20.dtsi
@@ -105,7 +105,6 @@
 			clock-latency = <244144>; /* 8 32k periods */
 			operating-points = <
 				/* kHz	  uV */
-				1008000	1450000
 				960000	1400000
 				912000	1400000
 				864000	1300000
@@ -116,7 +115,7 @@
 				>;
 			#cooling-cells = <2>;
 			cooling-min-level = <0>;
-			cooling-max-level = <7>;
+			cooling-max-level = <6>;
 		};

 		cpu@1 {
arch/arm/mach-omap2/id.c
@@ -720,6 +720,8 @@ static const char * __init omap_get_family(void)
 		return kasprintf(GFP_KERNEL, "OMAP4");
 	else if (soc_is_omap54xx())
 		return kasprintf(GFP_KERNEL, "OMAP5");
+	else if (soc_is_am33xx() || soc_is_am335x())
+		return kasprintf(GFP_KERNEL, "AM33xx");
 	else if (soc_is_am43xx())
 		return kasprintf(GFP_KERNEL, "AM43xx");
 	else if (soc_is_dra7xx())
arch/arm/mach-pxa/irq.c
@@ -11,6 +11,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+#include <linux/bitops.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>

@@ -40,7 +41,6 @@
 #define ICHP_VAL_IRQ		(1 << 31)
 #define ICHP_IRQ(i)		(((i) >> 16) & 0x7fff)
 #define IPR_VALID		(1 << 31)
-#define IRQ_BIT(n)		(((n) - PXA_IRQ(0)) & 0x1f)

 #define MAX_INTERNAL_IRQS	128

@@ -51,6 +51,7 @@
 static void __iomem *pxa_irq_base;
 static int pxa_internal_irq_nr;
 static bool cpu_has_ipr;
+static struct irq_domain *pxa_irq_domain;

 static inline void __iomem *irq_base(int i)
 {

@@ -66,18 +67,20 @@ static inline void __iomem *irq_base(int i)
 void pxa_mask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
+	irq_hw_number_t irq = irqd_to_hwirq(d);
 	uint32_t icmr = __raw_readl(base + ICMR);

-	icmr &= ~(1 << IRQ_BIT(d->irq));
+	icmr &= ~BIT(irq & 0x1f);
 	__raw_writel(icmr, base + ICMR);
 }

 void pxa_unmask_irq(struct irq_data *d)
 {
 	void __iomem *base = irq_data_get_irq_chip_data(d);
+	irq_hw_number_t irq = irqd_to_hwirq(d);
 	uint32_t icmr = __raw_readl(base + ICMR);

-	icmr |= 1 << IRQ_BIT(d->irq);
+	icmr |= BIT(irq & 0x1f);
 	__raw_writel(icmr, base + ICMR);
 }

@@ -118,40 +121,63 @@ asmlinkage void __exception_irq_entry ichp_handle_irq(struct pt_regs *regs)
 	} while (1);
 }

-void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
+		       irq_hw_number_t hw)
 {
-	int irq, i, n;
+	void __iomem *base = irq_base(hw / 32);

-	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+	/* initialize interrupt priority */
+	if (cpu_has_ipr)
+		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
+
+	irq_set_chip_and_handler(virq, &pxa_internal_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(virq, base);
+	set_irq_flags(virq, IRQF_VALID);
+
+	return 0;
+}
+
+static struct irq_domain_ops pxa_irq_ops = {
+	.map    = pxa_irq_map,
+	.xlate  = irq_domain_xlate_onecell,
+};
+
+static __init void
+pxa_init_irq_common(struct device_node *node, int irq_nr,
+		    int (*fn)(struct irq_data *, unsigned int))
+{
+	int n;

 	pxa_internal_irq_nr = irq_nr;
-	cpu_has_ipr = !cpu_is_pxa25x();
-	pxa_irq_base = io_p2v(0x40d00000);
+	pxa_irq_domain = irq_domain_add_legacy(node, irq_nr,
+					       PXA_IRQ(0), 0,
+					       &pxa_irq_ops, NULL);
+	if (!pxa_irq_domain)
+		panic("Unable to add PXA IRQ domain\n");
+	irq_set_default_host(pxa_irq_domain);

 	for (n = 0; n < irq_nr; n += 32) {
 		void __iomem *base = irq_base(n >> 5);

 		__raw_writel(0, base + ICMR);	/* disable all IRQs */
 		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
-		for (i = n; (i < (n + 32)) && (i < irq_nr); i++) {
-			/* initialize interrupt priority */
-			if (cpu_has_ipr)
-				__raw_writel(i | IPR_VALID, pxa_irq_base + IPR(i));
-
-			irq = PXA_IRQ(i);
-			irq_set_chip_and_handler(irq, &pxa_internal_irq_chip,
-						 handle_level_irq);
-			irq_set_chip_data(irq, base);
-			set_irq_flags(irq, IRQF_VALID);
-		}
 	}
-
 	/* only unmasked interrupts kick us out of idle */
 	__raw_writel(1, irq_base(0) + ICCR);

 	pxa_internal_irq_chip.irq_set_wake = fn;
 }
+
+void __init pxa_init_irq(int irq_nr, int (*fn)(struct irq_data *, unsigned int))
+{
+	BUG_ON(irq_nr > MAX_INTERNAL_IRQS);
+
+	pxa_irq_base = io_p2v(0x40d00000);
+	cpu_has_ipr = !cpu_is_pxa25x();
+	pxa_init_irq_common(NULL, irq_nr, fn);
+}

 #ifdef CONFIG_PM
 static unsigned long saved_icmr[MAX_INTERNAL_IRQS/32];
 static unsigned long saved_ipr[MAX_INTERNAL_IRQS];

@@ -203,30 +229,6 @@ struct syscore_ops pxa_irq_syscore_ops = {
 };

 #ifdef CONFIG_OF
-static struct irq_domain *pxa_irq_domain;
-
-static int pxa_irq_map(struct irq_domain *h, unsigned int virq,
-		       irq_hw_number_t hw)
-{
-	void __iomem *base = irq_base(hw / 32);
-
-	/* initialize interrupt priority */
-	if (cpu_has_ipr)
-		__raw_writel(hw | IPR_VALID, pxa_irq_base + IPR(hw));
-
-	irq_set_chip_and_handler(hw, &pxa_internal_irq_chip,
-				 handle_level_irq);
-	irq_set_chip_data(hw, base);
-	set_irq_flags(hw, IRQF_VALID);
-
-	return 0;
-}
-
-static struct irq_domain_ops pxa_irq_ops = {
-	.map    = pxa_irq_map,
-	.xlate  = irq_domain_xlate_onecell,
-};
-
 static const struct of_device_id intc_ids[] __initconst = {
 	{ .compatible = "marvell,pxa-intc", },
 	{}

@@ -236,7 +238,7 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
 {
 	struct device_node *node;
 	struct resource res;
-	int n, ret;
+	int ret;

 	node = of_find_matching_node(NULL, intc_ids);
 	if (!node) {

@@ -267,23 +269,6 @@ void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int))
 		return;
 	}

-	pxa_irq_domain = irq_domain_add_legacy(node, pxa_internal_irq_nr, 0, 0,
-					       &pxa_irq_ops, NULL);
-	if (!pxa_irq_domain)
-		panic("Unable to add PXA IRQ domain\n");
-
-	irq_set_default_host(pxa_irq_domain);
-
-	for (n = 0; n < pxa_internal_irq_nr; n += 32) {
-		void __iomem *base = irq_base(n >> 5);
-
-		__raw_writel(0, base + ICMR);	/* disable all IRQs */
-		__raw_writel(0, base + ICLR);	/* all IRQs are IRQ, not FIQ */
-	}
-
-	/* only unmasked interrupts kick us out of idle */
-	__raw_writel(1, irq_base(0) + ICCR);
-
-	pxa_internal_irq_chip.irq_set_wake = fn;
+	pxa_init_irq_common(node, pxa_internal_irq_nr, fn);
 }
 #endif	/* CONFIG_OF */
arch/arm/mach-pxa/zeus.c
@@ -412,7 +412,7 @@ static struct fixed_voltage_config can_regulator_pdata = {
 };

 static struct platform_device can_regulator_device = {
-	.name	= "reg-fixed-volage",
+	.name	= "reg-fixed-voltage",
 	.id	= 0,
 	.dev	= {
 		.platform_data	= &can_regulator_pdata,
arch/arm/mach-sunxi/Kconfig
浏览文件 @
c2b078e7
menuconfig ARCH_SUNXI
bool "Allwinner SoCs" if ARCH_MULTI_V7
select ARCH_REQUIRE_GPIOLIB
select ARCH_HAS_RESET_CONTROLLER
select CLKSRC_MMIO
select GENERIC_IRQ_CHIP
select PINCTRL
select SUN4I_TIMER
select RESET_CONTROLLER
if ARCH_SUNXI
...
...
@@ -20,10 +22,8 @@ config MACH_SUN5I
config MACH_SUN6I
bool "Allwinner A31 (sun6i) SoCs support"
default ARCH_SUNXI
select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select MFD_SUN6I_PRCM
select RESET_CONTROLLER
select SUN5I_HSTIMER
config MACH_SUN7I
...
...
@@ -37,16 +37,12 @@ config MACH_SUN7I
config MACH_SUN8I
bool "Allwinner A23 (sun8i) SoCs support"
default ARCH_SUNXI
select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select MFD_SUN6I_PRCM
select RESET_CONTROLLER
config MACH_SUN9I
bool "Allwinner (sun9i) SoCs support"
default ARCH_SUNXI
select ARCH_HAS_RESET_CONTROLLER
select ARM_GIC
select RESET_CONTROLLER
endif
arch/arm/plat-omap/dmtimer.c
@@ -799,6 +799,7 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	const struct of_device_id *match;
 	const struct dmtimer_platform_data *pdata;
+	int ret;

 	match = of_match_device(of_match_ptr(omap_timer_match), dev);
 	pdata = match ? match->data : dev->platform_data;

@@ -860,7 +861,12 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	}

 	if (!timer->reserved) {
-		pm_runtime_get_sync(dev);
+		ret = pm_runtime_get_sync(dev);
+		if (ret < 0) {
+			dev_err(dev, "%s: pm_runtime_get_sync failed!\n",
+				__func__);
+			goto err_get_sync;
+		}
 		__omap_dm_timer_init_regs(timer);
 		pm_runtime_put(dev);
 	}

@@ -873,6 +879,11 @@ static int omap_dm_timer_probe(struct platform_device *pdev)
 	dev_dbg(dev, "Device Probed.\n");

 	return 0;
+
+err_get_sync:
+	pm_runtime_put_noidle(dev);
+	pm_runtime_disable(dev);
+	return ret;
 }

 /**

@@ -899,6 +910,8 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
 	}
 	spin_unlock_irqrestore(&dm_timer_lock, flags);

+	pm_runtime_disable(&pdev->dev);
+
 	return ret;
 }
arch/arm64/boot/dts/arm/juno-clocks.dtsi
@@ -8,7 +8,7 @@
  */

 	/* SoC fixed clocks */
-	soc_uartclk: refclk72738khz {
+	soc_uartclk: refclk7273800hz {
 		compatible = "fixed-clock";
 		#clock-cells = <0>;
 		clock-frequency = <7273800>;
arch/parisc/include/asm/pgalloc.h
@@ -26,7 +26,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 	if (likely(pgd != NULL)) {
 		memset(pgd, 0, PAGE_SIZE<<PGD_ALLOC_ORDER);
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 		actual_pgd += PTRS_PER_PGD;
 		/* Populate first pmd with allocated memory.  We mark it
 		 * with PxD_FLAG_ATTACHED as a signal to the system that this

@@ -45,7 +45,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	pgd -= PTRS_PER_PGD;
 #endif
 	free_pages((unsigned long)pgd, PGD_ALLOC_ORDER);

@@ -72,12 +72,15 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 {
 #ifdef CONFIG_64BIT
 	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-		/* This is the permanent pmd attached to the pgd;
-		 * cannot free it */
+		/*
+		 * This is the permanent pmd attached to the pgd;
+		 * cannot free it.
+		 * Increment the counter to compensate for the decrement
+		 * done by generic mm code.
+		 */
+		mm_inc_nr_pmds(mm);
 		return;
 #endif
 	free_pages((unsigned long)pmd, PMD_ORDER);
 }

@@ -99,7 +102,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 static inline void
 pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 {
-#ifdef CONFIG_64BIT
+#if PT_NLEVELS == 3
 	/* preserve the gateway marker if this is the beginning of
 	 * the permanent pmd */
 	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
arch/parisc/kernel/syscall_table.S
@@ -55,8 +55,8 @@
 #define ENTRY_COMP(_name_) .word sys_##_name_
 #endif

-	ENTRY_SAME(restart_syscall)	/* 0 */
-	ENTRY_SAME(exit)
+90:	ENTRY_SAME(restart_syscall)	/* 0 */
+91:	ENTRY_SAME(exit)
 	ENTRY_SAME(fork_wrapper)
 	ENTRY_SAME(read)
 	ENTRY_SAME(write)

@@ -439,7 +439,10 @@
 	ENTRY_SAME(bpf)
 	ENTRY_COMP(execveat)

 	/* Nothing yet */
+	.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
+	.error "size of syscall table does not fit value of __NR_Linux_syscalls"
+	.endif

 #undef ENTRY_SAME
 #undef ENTRY_DIFF
...
arch/powerpc/kvm/book3s_hv.c
浏览文件 @
c2b078e7
...
...
@@ -636,7 +636,7 @@ static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
spin_lock
(
&
vcpu
->
arch
.
vpa_update_lock
);
lppaca
=
(
struct
lppaca
*
)
vcpu
->
arch
.
vpa
.
pinned_addr
;
if
(
lppaca
)
yield_count
=
lppaca
->
yield_count
;
yield_count
=
be32_to_cpu
(
lppaca
->
yield_count
)
;
spin_unlock
(
&
vcpu
->
arch
.
vpa_update_lock
);
return
yield_count
;
}
...
...
@@ -942,20 +942,20 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
static
void
kvmppc_set_lpcr
(
struct
kvm_vcpu
*
vcpu
,
u64
new_lpcr
,
bool
preserve_top32
)
{
struct
kvm
*
kvm
=
vcpu
->
kvm
;
struct
kvmppc_vcore
*
vc
=
vcpu
->
arch
.
vcore
;
u64
mask
;
mutex_lock
(
&
kvm
->
lock
);
spin_lock
(
&
vc
->
lock
);
/*
* If ILE (interrupt little-endian) has changed, update the
* MSR_LE bit in the intr_msr for each vcpu in this vcore.
*/
if
((
new_lpcr
&
LPCR_ILE
)
!=
(
vc
->
lpcr
&
LPCR_ILE
))
{
struct
kvm
*
kvm
=
vcpu
->
kvm
;
struct
kvm_vcpu
*
vcpu
;
int
i
;
mutex_lock
(
&
kvm
->
lock
);
kvm_for_each_vcpu
(
i
,
vcpu
,
kvm
)
{
if
(
vcpu
->
arch
.
vcore
!=
vc
)
continue
;
...
...
@@ -964,7 +964,6 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
else
vcpu
->
arch
.
intr_msr
&=
~
MSR_LE
;
}
mutex_unlock
(
&
kvm
->
lock
);
}
/*
...
...
@@ -981,6 +980,7 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
mask
&=
0xFFFFFFFF
;
vc
->
lpcr
=
(
vc
->
lpcr
&
~
mask
)
|
(
new_lpcr
&
mask
);
spin_unlock
(
&
vc
->
lock
);
mutex_unlock
(
&
kvm
->
lock
);
}
static
int
kvmppc_get_one_reg_hv
(
struct
kvm_vcpu
*
vcpu
,
u64
id
,
...
...
arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1005,6 +1005,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	/* Save HEIR (HV emulation assist reg) in emul_inst
 	   if this is an HEI (HV emulation interrupt, e40) */
 	li	r3,KVM_INST_FETCH_FAILED
+	stw	r3,VCPU_LAST_INST(r9)
 	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
 	bne	11f
 	mfspr	r3,SPRN_HEIR
arch/x86/kernel/cpu/perf_event_intel.c
@@ -212,11 +212,11 @@ static struct event_constraint intel_hsw_event_constraints[] = {
 	INTEL_UEVENT_CONSTRAINT(0x01c0, 0x2), /* INST_RETIRED.PREC_DIST */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.LOAD_LATENCY */
 	/* CYCLE_ACTIVITY.CYCLES_L1D_PENDING */
-	INTEL_EVENT_CONSTRAINT(0x08a3, 0x4),
+	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
 	/* CYCLE_ACTIVITY.STALLS_L1D_PENDING */
-	INTEL_EVENT_CONSTRAINT(0x0ca3, 0x4),
+	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
 	/* CYCLE_ACTIVITY.CYCLES_NO_EXECUTE */
-	INTEL_EVENT_CONSTRAINT(0x04a3, 0xf),
+	INTEL_UEVENT_CONSTRAINT(0x04a3, 0xf),
 	EVENT_CONSTRAINT_END
 };

@@ -1852,11 +1852,11 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event
 	if (c)
 		return c;

-	c = intel_pebs_constraints(event);
+	c = intel_shared_regs_constraints(cpuc, event);
 	if (c)
 		return c;

-	c = intel_shared_regs_constraints(cpuc, event);
+	c = intel_pebs_constraints(event);
 	if (c)
 		return c;
arch/x86/kernel/entry_64.S
@@ -364,12 +364,21 @@ system_call_fastpath:
 * Has incomplete stack frame and undefined top of stack.
 */
ret_from_sys_call:
-	testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
-	jnz int_ret_from_sys_call_fixup	/* Go the the slow path */
-
	LOCKDEP_SYS_EXIT
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
+
+	/*
+	 * We must check ti flags with interrupts (or at least preemption)
+	 * off because we must *never* return to userspace without
+	 * processing exit work that is enqueued if we're preempted here.
+	 * In particular, returning to userspace with any of the one-shot
+	 * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+	 * very bad.
+	 */
+	testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
+	jnz int_ret_from_sys_call_fixup	/* Go the the slow path */
+
	CFI_REMEMBER_STATE
	/*
	 * sysretq will re-enable interrupts:

@@ -386,7 +395,7 @@ ret_from_sys_call:
int_ret_from_sys_call_fixup:
	FIXUP_TOP_OF_STACK %r11, -ARGOFFSET
-	jmp int_ret_from_sys_call
+	jmp int_ret_from_sys_call_irqs_off

/* Do syscall tracing */
tracesys:

@@ -432,6 +441,7 @@ tracesys_phase2:
GLOBAL(int_ret_from_sys_call)
	DISABLE_INTERRUPTS(CLBR_NONE)
	TRACE_IRQS_OFF
+int_ret_from_sys_call_irqs_off:
	movl $_TIF_ALLWORK_MASK,%edi
	/* edi:	mask to check */
GLOBAL(int_with_check)
drivers/clocksource/Kconfig
@@ -192,6 +192,7 @@ config SYS_SUPPORTS_EM_STI
 config SH_TIMER_CMT
 	bool "Renesas CMT timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_CMT
 	help
 	  This enables build of a clocksource and clockevent driver for

@@ -201,6 +202,7 @@ config SH_TIMER_CMT
 config SH_TIMER_MTU2
 	bool "Renesas MTU2 timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_MTU2
 	help
 	  This enables build of a clockevent driver for the Multi-Function

@@ -210,6 +212,7 @@ config SH_TIMER_MTU2
 config SH_TIMER_TMU
 	bool "Renesas TMU timer driver" if COMPILE_TEST
 	depends on GENERIC_CLOCKEVENTS
+	depends on HAS_IOMEM
 	default SYS_SUPPORTS_SH_TMU
 	help
 	  This enables build of a clocksource and clockevent driver for
drivers/clocksource/timer-sun5i.c
@@ -17,7 +17,6 @@
 #include <linux/irq.h>
 #include <linux/irqreturn.h>
 #include <linux/reset.h>
-#include <linux/sched_clock.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>

@@ -137,11 +136,6 @@ static struct irqaction sun5i_timer_irq = {
 	.dev_id = &sun5i_clockevent,
 };

-static u64 sun5i_timer_sched_read(void)
-{
-	return ~readl(timer_base + TIMER_CNTVAL_LO_REG(1));
-}
-
 static void __init sun5i_timer_init(struct device_node *node)
 {
 	struct reset_control *rstc;

@@ -172,7 +166,6 @@ static void __init sun5i_timer_init(struct device_node *node)
 	writel(TIMER_CTL_ENABLE | TIMER_CTL_RELOAD,
 	       timer_base + TIMER_CTL_REG(1));

-	sched_clock_register(sun5i_timer_sched_read, 32, rate);
 	clocksource_mmio_init(timer_base + TIMER_CNTVAL_LO_REG(1), node->name,
 			      rate, 340, 32, clocksource_mmio_readl_down);
drivers/watchdog/imgpdc_wdt.c
@@ -42,10 +42,10 @@
 #define PDC_WDT_MIN_TIMEOUT		1
 #define PDC_WDT_DEF_TIMEOUT		64

-static int heartbeat;
+static int heartbeat = PDC_WDT_DEF_TIMEOUT;
 module_param(heartbeat, int, 0);
-MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
-	"(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");
+MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds "
+	"(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")");

 static bool nowayout = WATCHDOG_NOWAYOUT;
 module_param(nowayout, bool, 0);

@@ -191,6 +191,7 @@ static int pdc_wdt_probe(struct platform_device *pdev)
 	pdc_wdt->wdt_dev.ops = &pdc_wdt_ops;
 	pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK;
 	pdc_wdt->wdt_dev.parent = &pdev->dev;
+	watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);

 	ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev);
 	if (ret < 0) {

@@ -232,7 +233,6 @@ static int pdc_wdt_probe(struct platform_device *pdev)
 	watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout);

 	platform_set_drvdata(pdev, pdc_wdt);
-	watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt);

 	ret = watchdog_register_device(&pdc_wdt->wdt_dev);
 	if (ret)
drivers/watchdog/mtk_wdt.c
@@ -133,7 +133,7 @@ static int mtk_wdt_start(struct watchdog_device *wdt_dev)
 	u32 reg;
 	struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev);
 	void __iomem *wdt_base = mtk_wdt->wdt_base;
-	u32 ret;
+	int ret;

 	ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
 	if (ret < 0)
kernel/locking/lockdep.c
@@ -633,7 +633,7 @@ static int count_matching_names(struct lock_class *new_class)
 	if (!new_class->name)
 		return 0;

-	list_for_each_entry(class, &all_lock_classes, lock_entry) {
+	list_for_each_entry_rcu(class, &all_lock_classes, lock_entry) {
 		if (new_class->key - new_class->subclass == class->key)
 			return class->name_version;
 		if (class->name && !strcmp(class->name, new_class->name))

@@ -700,10 +700,12 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	hash_head = classhashentry(key);

 	/*
-	 * We can walk the hash lockfree, because the hash only
-	 * grows, and we are careful when adding entries to the end:
+	 * We do an RCU walk of the hash, see lockdep_free_key_range().
 	 */
-	list_for_each_entry(class, hash_head, hash_entry) {
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return NULL;
+
+	list_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key) {
 			/*
 			 * Huh! same key, different name? Did someone trample

@@ -728,7 +730,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
 	struct lock_class *class;
-	unsigned long flags;
+
+	DEBUG_LOCKS_WARN_ON(!irqs_disabled());

 	class = look_up_lock_class(lock, subclass);
 	if (likely(class))

@@ -750,28 +753,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	key = lock->key->subkeys + subclass;
 	hash_head = classhashentry(key);

-	raw_local_irq_save(flags);
 	if (!graph_lock()) {
-		raw_local_irq_restore(flags);
 		return NULL;
 	}
 	/*
 	 * We have to do the hash-walk again, to avoid races
 	 * with another CPU:
 	 */
-	list_for_each_entry(class, hash_head, hash_entry)
+	list_for_each_entry_rcu(class, hash_head, hash_entry) {
 		if (class->key == key)
 			goto out_unlock_set;
+	}

 	/*
 	 * Allocate a new key from the static array, and add it to
 	 * the hash:
 	 */
 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
 		if (!debug_locks_off_graph_unlock()) {
-			raw_local_irq_restore(flags);
 			return NULL;
 		}
-		raw_local_irq_restore(flags);

 		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
 		dump_stack();

@@ -798,7 +799,6 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	if (verbose(class)) {
 		graph_unlock();
-		raw_local_irq_restore(flags);

 		printk("\nnew class %p: %s", class->key, class->name);
 		if (class->name_version > 1)

@@ -806,15 +806,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 		printk("\n");
 		dump_stack();

-		raw_local_irq_save(flags);
 		if (!graph_lock()) {
-			raw_local_irq_restore(flags);
 			return NULL;
 		}
 	}
 out_unlock_set:
 	graph_unlock();
-	raw_local_irq_restore(flags);

 out_set_class_cache:
 	if (!subclass || force)

@@ -870,11 +867,9 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	entry->distance = distance;
 	entry->trace = *trace;
 	/*
-	 * Since we never remove from the dependency list, the list can
-	 * be walked lockless by other CPUs, it's only allocation
-	 * that must be protected by the spinlock. But this also means
-	 * we must make new entries visible only once writes to the
-	 * entry become visible - hence the RCU op:
+	 * Both allocation and removal are done under the graph lock; but
+	 * iteration is under RCU-sched; see look_up_lock_class() and
+	 * lockdep_free_key_range().
 	 */
 	list_add_tail_rcu(&entry->entry, head);

@@ -1025,7 +1020,9 @@ static int __bfs(struct lock_list *source_entry,
 		else
 			head = &lock->class->locks_before;

-		list_for_each_entry(entry, head, entry) {
+		DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+
+		list_for_each_entry_rcu(entry, head, entry) {
 			if (!lock_accessed(entry)) {
 				unsigned int cq_depth;
 				mark_lock_accessed(entry, lock);

@@ -2022,7 +2019,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	 * We can walk it lock-free, because entries only get added
 	 * to the hash:
 	 */
-	list_for_each_entry(chain, hash_head, entry) {
+	list_for_each_entry_rcu(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 cache_hit:
 			debug_atomic_inc(chain_lookup_hits);

@@ -2996,8 +2993,18 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	if (unlikely(!debug_locks))
 		return;

-	if (subclass)
+	if (subclass) {
+		unsigned long flags;
+
+		if (DEBUG_LOCKS_WARN_ON(current->lockdep_recursion))
+			return;
+
+		raw_local_irq_save(flags);
+		current->lockdep_recursion = 1;
 		register_lock_class(lock, subclass, 1);
+		current->lockdep_recursion = 0;
+		raw_local_irq_restore(flags);
+	}
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map);

@@ -3887,9 +3894,17 @@ static inline int within(const void *addr, void *start, unsigned long size)
 	return addr >= start && addr < start + size;
 }

+/*
+ * Used in module.c to remove lock classes from memory that is going to be
+ * freed; and possibly re-used by other modules.
+ *
+ * We will have had one sync_sched() before getting here, so we're guaranteed
+ * nobody will look up these exact classes -- they're properly dead but still
+ * allocated.
+ */
 void lockdep_free_key_range(void *start, unsigned long size)
 {
-	struct lock_class *class, *next;
+	struct lock_class *class;
 	struct list_head *head;
 	unsigned long flags;
 	int i;

@@ -3905,7 +3920,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 		head = classhash_table + i;
 		if (list_empty(head))
 			continue;
-		list_for_each_entry_safe(class, next, head, hash_entry) {
+		list_for_each_entry_rcu(class, head, hash_entry) {
 			if (within(class->key, start, size))
 				zap_class(class);
 			else if (within(class->name, start, size))

@@ -3916,11 +3931,25 @@ void lockdep_free_key_range(void *start, unsigned long size)
 	if (locked)
 		graph_unlock();
 	raw_local_irq_restore(flags);
+
+	/*
+	 * Wait for any possible iterators from look_up_lock_class() to pass
+	 * before continuing to free the memory they refer to.
+	 *
+	 * sync_sched() is sufficient because the read-side is IRQ disable.
+	 */
+	synchronize_sched();
+
+	/*
+	 * XXX at this point we could return the resources to the pool;
+	 * instead we leak them. We would need to change to bitmap allocators
+	 * instead of the linear allocators we have now.
+	 */
 }

 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-	struct lock_class *class, *next;
+	struct lock_class *class;
 	struct list_head *head;
 	unsigned long flags;
 	int i, j;

@@ -3948,7 +3977,7 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 		head = classhash_table + i;
 		if (list_empty(head))
 			continue;
-		list_for_each_entry_safe(class, next, head, hash_entry) {
+		list_for_each_entry_rcu(class, head, hash_entry) {
 			int match = 0;

 			for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
kernel/module.c
@@ -1865,7 +1865,7 @@ static void free_module(struct module *mod)
 	kfree(mod->args);
 	percpu_modfree(mod);

-	/* Free lock-classes: */
+	/* Free lock-classes; relies on the preceding sync_rcu(). */
 	lockdep_free_key_range(mod->module_core, mod->core_size);

 	/* Finally, free the core (containing the module structure) */

@@ -3349,9 +3349,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	module_bug_cleanup(mod);
 	mutex_unlock(&module_mutex);

-	/* Free lock-classes: */
-	lockdep_free_key_range(mod->module_core, mod->core_size);
-
 	/* we can't deallocate the module until we clear memory protection */
 	unset_module_init_ro_nx(mod);
 	unset_module_core_ro_nx(mod);

@@ -3375,6 +3372,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 	synchronize_rcu();
 	mutex_unlock(&module_mutex);
 free_module:
+	/* Free lock-classes; relies on the preceding sync_rcu() */
+	lockdep_free_key_range(mod->module_core, mod->core_size);
+
 	module_deallocate(mod, info);
 free_copy:
 	free_copy(info);
kernel/sched/core.c
@@ -3034,6 +3034,8 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
 	} else {
 		if (dl_prio(oldprio))
 			p->dl.dl_boosted = 0;
+		if (rt_prio(oldprio))
+			p->rt.timeout = 0;
 		p->sched_class = &fair_sched_class;
 	}
kernel/time/tick-broadcast-hrtimer.c
@@ -49,6 +49,7 @@ static void bc_set_mode(enum clock_event_mode mode,
 */
 static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 {
+	int bc_moved;
 	/*
 	 * We try to cancel the timer first. If the callback is on
 	 * flight on some other cpu then we let it handle it. If we

@@ -60,9 +61,15 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 	 * restart the timer because we are in the callback, but we
 	 * can set the expiry time and let the callback return
 	 * HRTIMER_RESTART.
+	 *
+	 * Since we are in the idle loop at this point and because
+	 * hrtimer_{start/cancel} functions call into tracing,
+	 * calls to these functions must be bound within RCU_NONIDLE.
 	 */
-	if (hrtimer_try_to_cancel(&bctimer) >= 0) {
-		hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED);
+	RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
+		!hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
+			0);
+	if (bc_moved) {
 		/* Bind the "device" to the cpu */
 		bc->bound_on = smp_processor_id();
 	} else if (bc->bound_on == smp_processor_id()) {
security/selinux/selinuxfs.c
@@ -152,7 +152,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
 		goto out;

 	/* No partial writes. */
-	length = EINVAL;
+	length = -EINVAL;
 	if (*ppos != 0)
 		goto out;
sound/pci/hda/hda_intel.c
@@ -1989,7 +1989,7 @@ static const struct pci_device_id azx_ids[] = {
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
 	/* Sunrise Point */
 	{ PCI_DEVICE(0x8086, 0xa170),
-	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
 	/* Sunrise Point-LP */
 	{ PCI_DEVICE(0x8086, 0x9d70),
 	  .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
sound/pci/hda/patch_realtek.c
@@ -396,7 +396,7 @@ static void alc_auto_setup_eapd(struct hda_codec *codec, bool on)
 {
 	/* We currently only handle front, HP */
 	static hda_nid_t pins[] = {
-		0x0f, 0x10, 0x14, 0x15, 0
+		0x0f, 0x10, 0x14, 0x15, 0x17, 0
 	};
 	hda_nid_t *p;
 	for (p = pins; *p; p++)

@@ -5036,6 +5036,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x501e, "Thinkpad L440", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+	SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
 	SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
 	SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),