commit b5ed7639
Author: Jeff Garzik
Date:   Jun 13, 2006

    Merge branch 'master' into upstream

Parents: 0638dec0, eb35cf60

Showing 52 changed files with 784 additions and 342 deletions (+784 -342)
Documentation/memory-barriers.txt           +270  -78
arch/alpha/Kconfig                            +1   -1
arch/arm/mach-ep93xx/ts72xx.c                 +4   -4
arch/arm/mach-imx/irq.c                       +1   -1
arch/arm/mach-integrator/integrator_cp.c      +1   -4
arch/arm/mach-pxa/spitz.c                     +1   -0
arch/arm/mach-sa1100/neponset.c               +8   -0
arch/arm/mach-versatile/core.c                +2   -3
arch/i386/kernel/acpi/earlyquirk.c           +20   -3
arch/i386/kernel/setup.c                      +7   -4
arch/powerpc/kernel/prom_init.c              +10   -0
arch/powerpc/kernel/signal_32.c              +10   -1
arch/powerpc/kernel/signal_64.c               +2   -0
arch/powerpc/platforms/cell/setup.c           +5   -6
arch/powerpc/platforms/pseries/setup.c        +8   -0
arch/sparc/kernel/smp.c                      +11   -0
arch/sparc64/kernel/pci_sun4v.c             +118   -6
arch/sparc64/kernel/smp.c                    +35   -0
arch/sparc64/kernel/sparc64_ksyms.c           +0   -1
arch/sparc64/kernel/traps.c                   +7   -4
arch/x86_64/kernel/io_apic.c                 +25   -5
block/as-iosched.c                            +6   -7
block/cfq-iosched.c                           +4   -6
block/deadline-iosched.c                      +6   -7
block/elevator.c                             +34  -21
block/noop-iosched.c                          +3   -4
drivers/acpi/processor_perflib.c              +4   -1
drivers/char/Makefile                         +1   -1
drivers/char/n_tty.c                          +3   -1
drivers/message/fusion/mptspi.c               +2   -0
drivers/message/i2o/exec-osm.c               +37  -35
drivers/message/i2o/iop.c                     +1   -3
drivers/net/sky2.c                           +31  -15
drivers/net/tg3.c                            +48  -96
drivers/net/tg3.h                             +2   -1
drivers/pci/pci-driver.c                      +8   -5
drivers/pci/pci.c                            +16   -2
drivers/scsi/sata_mv.c                        +3   -0
drivers/usb/host/ohci-pxa27x.c                +3   -0
drivers/video/console/fbcon.c                 +1   -1
fs/debugfs/inode.c                            +2   -1
include/asm-arm/arch-pxa/ohci.h               +2   -0
include/asm-s390/futex.h                      +8   -7
include/linux/elevator.h                      +1   -1
include/linux/i2o.h                           +4   -1
include/linux/mempolicy.h                     +1   -0
include/linux/pci-acpi.h                      +1   -1
mm/shmem.c                                    +2   -0
mm/vmscan.c                                   +1   -1
net/dccp/ackvec.c                             +1   -0
net/ipv4/ip_forward.c                         +1   -0
net/ipv4/tcp_input.c                          +1   -3
Documentation/memory-barriers.txt

@@ -19,6 +19,7 @@ Contents:
      - Control dependencies.
      - SMP barrier pairing.
      - Examples of memory barrier sequences.
+     - Read memory barriers vs load speculation.

 (*) Explicit kernel barriers.
@@ -248,7 +249,7 @@ And there are a number of things that _must_ or _must_not_ be assumed:
     we may get either of:

 	STORE *A = X; Y = LOAD *A;
-	STORE *A = Y;
+	STORE *A = Y = X;

 =========================
@@ -344,9 +345,12 @@ Memory barriers come in four basic varieties:
 (4) General memory barriers.

-     A general memory barrier is a combination of both a read memory barrier
-     and a write memory barrier.  It is a partial ordering over both loads and
-     stores.
+     A general memory barrier gives a guarantee that all the LOAD and STORE
+     operations specified before the barrier will appear to happen before all
+     the LOAD and STORE operations specified after the barrier with respect to
+     the other components of the system.
+
+     A general memory barrier is a partial ordering over both loads and stores.

      General memory barriers imply both read and write memory barriers, and so
      can substitute for either.
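As an aside (not part of this commit): the substitution rule above is easy to
see in code. A minimal sketch, assuming hypothetical shared variables buf and
flag and the kernel's smp_mb()/smp_rmb() primitives:

static int buf, flag;

void producer(void)
{
	buf = 42;	/* data store that must become visible first */
	smp_mb();	/* general barrier, substituting for smp_wmb() */
	flag = 1;	/* publish */
}

int consumer(void)
{
	if (flag) {		/* load of the flag... */
		smp_rmb();	/* ...pairs with the producer's barrier */
		return buf;	/* sees 42 whenever flag reads as 1 */
	}
	return -1;		/* not published yet */
}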
@@ -546,9 +550,9 @@ write barrier, though, again, a general barrier is viable:
	CPU 1		CPU 2
	===============	===============
	a = 1;
	<write barrier>
-	b = 2;		x = a;
-			<read barrier>
-			y = b;
+	b = 2;		x = b;
+			<read barrier>
+			y = a;

 Or:
@@ -563,6 +567,18 @@ Or:
 Basically, the read barrier always has to be there, even though it can be of
 the "weaker" type.

+[!] Note that the stores before the write barrier would normally be expected to
+match the loads after the read barrier or data dependency barrier, and vice
+versa:
+
+	CPU 1                           CPU 2
+	===============                 ===============
+	a = 1;           }----   --->{  v = c
+	b = 2;           }    \ /    {  w = d
+	<write barrier>        \        <read barrier>
+	c = 3;           }    / \    {  x = a;
+	d = 4;           }----   --->{  y = b;
+

 EXAMPLES OF MEMORY BARRIER SEQUENCES
 ------------------------------------
@@ -600,8 +616,8 @@ STORE B, STORE C } all occuring before the unordered set of { STORE D, STORE E
	|       |                +------+
	+-------+                :      :
	                           |
-	                           | Sequence in which stores
-	                           | committed to memory system
-	                           | by CPU 1
+	                           | Sequence in which stores are committed to the
+	                           | memory system by CPU 1
	                           V
@@ -683,14 +699,12 @@ then the following will occur:
	|       :       :       |        |
	|       :       :       | CPU 2  |
	|       +-------+       |        |
-	 \      | X->9  |------>|        |
-	  \     +-------+       |        |
-	   ----->| B->2 |       |        |
-	         +-------+      |        |
-	Makes sure all effects --->  ddddddddddddddddd  |        |
-	prior to the store of C      +-------+          |        |
-	are perceptible to           | B->2  |--------->|        |
-	successive loads             +-------+          |        |
+	|       | X->9  |------>|        |
+	|       +-------+       |        |
+	Makes sure all effects --->  \  ddddddddddddddddd  |     |
+	prior to the store of C       \      +-------+     |     |
+	are perceptible to        ----->     | B->2  |---->|     |
+	subsequent loads                     +-------+     |     |
	        :       :       +-------+
@@ -699,73 +713,239 @@ following sequence of events:
	CPU 1			CPU 2
	=======================	=======================
		{ A = 0, B = 9 }
	STORE A=1
-	STORE B=2
-	STORE C=3
	<write barrier>
-	STORE D=4
-	STORE E=5
-				LOAD A
+	STORE B=2
				LOAD B
-				LOAD C
-				LOAD D
-				LOAD E
+				LOAD A

Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
some effectively random order, despite the write barrier issued by CPU 1:
+-------+ : :
| | +------+
| |------>| C=3 | }
| | : +------+ }
| | : | A=1 | }
| | : +------+ }
| CPU 1 | : | B=2 | }---
| | +------+ } \
| | wwwwwwwwwwwww} \
| | +------+ } \ : : +-------+
| | : | E=5 | } \ +-------+ | |
| | : +------+ } \ { | C->3 |------>| |
| |------>| D=4 | } \ { +-------+ : | |
| | +------+ \ { | E->5 | : | |
+-------+ : : \ { +-------+ : | |
Transfer -->{ | A->1 | : | CPU 2 |
from CPU 1 { +-------+ : | |
to CPU 2 { | D->4 | : | |
{ +-------+ : | |
{ | B->2 |------>| |
+-------+ | |
: : +-------+
If, however, a read barrier were to be placed between the load of C and the
load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
perceived correctly by CPU 2.
+-------+ : : : :
| | +------+ +-------+
| |------>| A=1 |------ --->| A->0 |
| | +------+ \ +-------+
| CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
| | +------+ | +-------+
| |------>| B=2 |--- | : :
| | +------+ \ | : : +-------+
+-------+ : : \ | +-------+ | |
---------->| B->2 |------>| |
| +-------+ | CPU 2 |
| | A->0 |------>| |
| +-------+ | |
| : : +-------+
\ : :
\ +-------+
---->| A->1 |
+-------+
: :
+-------+ : :
| | +------+
| |------>| C=3 | }
| | : +------+ }
| | : | A=1 | }---
| | : +------+ } \
| CPU 1 | : | B=2 | } \
| | +------+ \
| | wwwwwwwwwwwwwwww \
| | +------+ \ : : +-------+
| | : | E=5 | } \ +-------+ | |
| | : +------+ }--- \ { | C->3 |------>| |
| |------>| D=4 | } \ \ { +-------+ : | |
| | +------+ \ -->{ | B->2 | : | |
+-------+ : : \ { +-------+ : | |
\ { | A->1 | : | CPU 2 |
\ +-------+ | |
At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
barrier causes all effects \ +-------+ | |
prior to the storage of C \ { | E->5 | : | |
to be perceptible to CPU 2 -->{ +-------+ : | |
{ | D->4 |------>| |
+-------+ | |
: : +-------+
If, however, a read barrier were to be placed between the load of E and the
load of A on CPU 2:
CPU 1 CPU 2
======================= =======================
{ A = 0, B = 9 }
STORE A=1
<write barrier>
STORE B=2
LOAD B
<read barrier>
LOAD A
then the partial ordering imposed by CPU 1 will be perceived correctly by CPU
2:
+-------+ : : : :
| | +------+ +-------+
| |------>| A=1 |------ --->| A->0 |
| | +------+ \ +-------+
| CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
| | +------+ | +-------+
| |------>| B=2 |--- | : :
| | +------+ \ | : : +-------+
+-------+ : : \ | +-------+ | |
---------->| B->2 |------>| |
| +-------+ | CPU 2 |
| : : | |
| : : | |
At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
barrier causes all effects \ +-------+ | |
prior to the storage of B ---->| A->1 |------>| |
to be perceptible to CPU 2 +-------+ | |
: : +-------+
To illustrate this more completely, consider what could happen if the code
contained a load of A either side of the read barrier:
CPU 1 CPU 2
======================= =======================
{ A = 0, B = 9 }
STORE A=1
<write barrier>
STORE B=2
LOAD B
LOAD A [first load of A]
<read barrier>
LOAD A [second load of A]
Even though the two loads of A both occur after the load of B, they may both
come up with different values:
+-------+ : : : :
| | +------+ +-------+
| |------>| A=1 |------ --->| A->0 |
| | +------+ \ +-------+
| CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
| | +------+ | +-------+
| |------>| B=2 |--- | : :
| | +------+ \ | : : +-------+
+-------+ : : \ | +-------+ | |
---------->| B->2 |------>| |
| +-------+ | CPU 2 |
| : : | |
| : : | |
| +-------+ | |
| | A->0 |------>| 1st |
| +-------+ | |
At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
barrier causes all effects \ +-------+ | |
prior to the storage of B ---->| A->1 |------>| 2nd |
to be perceptible to CPU 2 +-------+ | |
: : +-------+
But it may be that the update to A from CPU 1 becomes perceptible to CPU 2
before the read barrier completes anyway:
+-------+ : : : :
| | +------+ +-------+
| |------>| A=1 |------ --->| A->0 |
| | +------+ \ +-------+
| CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
| | +------+ | +-------+
| |------>| B=2 |--- | : :
| | +------+ \ | : : +-------+
+-------+ : : \ | +-------+ | |
---------->| B->2 |------>| |
| +-------+ | CPU 2 |
| : : | |
\ : : | |
\ +-------+ | |
---->| A->1 |------>| 1st |
+-------+ | |
rrrrrrrrrrrrrrrrr | |
+-------+ | |
| A->1 |------>| 2nd |
+-------+ | |
: : +-------+
The guarantee is that the second load will always come up with A == 1 if the
load of B came up with B == 2. No such guarantee exists for the first load of
A; that may come up with either A == 0 or A == 1.
READ MEMORY BARRIERS VS LOAD SPECULATION
----------------------------------------
Many CPUs speculate with loads: that is they see that they will need to load an
item from memory, and they find a time where they're not using the bus for any
other loads, and so do the load in advance - even though they haven't actually
got to that point in the instruction execution flow yet. This permits the
actual load instruction to potentially complete immediately because the CPU
already has the value to hand.
It may turn out that the CPU didn't actually need the value - perhaps because a
branch circumvented the load - in which case it can discard the value or just
cache it for later use.
Consider:
CPU 1 CPU 2
======================= =======================
LOAD B
DIVIDE } Divide instructions generally
DIVIDE } take a long time to perform
LOAD A
Which might appear as this:
: : +-------+
+-------+ | |
--->| B->2 |------>| |
+-------+ | CPU 2 |
: :DIVIDE | |
+-------+ | |
The CPU being busy doing a ---> --->| A->0 |~~~~ | |
division speculates on the +-------+ ~ | |
LOAD of A : : ~ | |
: :DIVIDE | |
: : ~ | |
Once the divisions are complete --> : : ~-->| |
the CPU can then perform the : : | |
LOAD with immediate effect : : +-------+
Placing a read barrier or a data dependency barrier just before the second
load:
CPU 1 CPU 2
======================= =======================
LOAD B
DIVIDE
DIVIDE
<read barrier>
LOAD A
will force any value speculatively obtained to be reconsidered to an extent
dependent on the type of barrier used. If there was no change made to the
speculated memory location, then the speculated value will just be used:
: : +-------+
+-------+ | |
--->| B->2 |------>| |
+-------+ | CPU 2 |
: :DIVIDE | |
+-------+ | |
The CPU being busy doing a ---> --->| A->0 |~~~~ | |
division speculates on the +-------+ ~ | |
LOAD of A : : ~ | |
: :DIVIDE | |
: : ~ | |
: : ~ | |
rrrrrrrrrrrrrrrr~ | |
: : ~ | |
: : ~-->| |
: : | |
: : +-------+
but if there was an update or an invalidation from another CPU pending, then
the speculation will be cancelled and the value reloaded:
: : +-------+
+-------+ | |
--->| B->2 |------>| |
+-------+ | CPU 2 |
: :DIVIDE | |
+-------+ | |
The CPU being busy doing a ---> --->| A->0 |~~~~ | |
division speculates on the +-------+ ~ | |
LOAD of A : : ~ | |
: :DIVIDE | |
: : ~ | |
: : ~ | |
rrrrrrrrrrrrrrrrr | |
+-------+ | |
The speculation is discarded ---> --->| A->1 |------>| |
and an updated value is +-------+ | |
retrieved : : +-------+
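
A hedged illustration of the placement described above (not from the commit;
A and B are the shared variables of this example):

int reader(void)
{
	int b = B;	/* LOAD B */
	smp_rmb();	/* any speculated LOAD of A is validated or redone */
	int a = A;	/* LOAD A: may not appear older than the load of B */
	return a + b;
}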
@@ -901,7 +1081,7 @@ IMPLICIT KERNEL MEMORY BARRIERS
 ===============================

 Some of the other functions in the linux kernel imply memory barriers, amongst
-which are locking, scheduling and memory allocation functions.
+which are locking and scheduling functions.

 This specification is a _minimum_ guarantee; any particular architecture may
 provide more substantial guarantees, but these may not be relied upon outside
@@ -966,6 +1146,20 @@ equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
 barriers is that the effects of instructions outside of a critical section may
 seep into the inside of the critical section.

+A LOCK followed by an UNLOCK may not be assumed to be a full memory barrier
+because it is possible for an access preceding the LOCK to happen after the
+LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the
+two accesses can themselves then cross:
+
+	*A = a;
+	LOCK
+	UNLOCK
+	*B = b;
+
+may occur as:
+
+	LOCK, STORE *B, STORE *A, UNLOCK
+
 Locks and semaphores may not provide any guarantee of ordering on UP compiled
 systems, and so cannot be counted on in such a situation to actually achieve
 anything at all - especially with respect to I/O accesses - unless combined
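A sketch of the consequence (illustrative only; mylock is a hypothetical
spinlock): if the ordering of *A against *B matters to other CPUs, an explicit
barrier is still needed despite the critical section:

	*A = a;
	spin_lock(&mylock);
	spin_unlock(&mylock);
	smp_mb();	/* without this, STORE *B may be seen before STORE *A */
	*B = b;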
@@ -1016,8 +1210,6 @@ Other functions that imply barriers:
 (*) schedule() and similar imply full memory barriers.

-(*) Memory allocation and release functions imply full memory barriers.
-

 =================================
 INTER-CPU LOCKING BARRIER EFFECTS
arch/alpha/Kconfig

@@ -453,7 +453,7 @@ config ALPHA_IRONGATE
 config GENERIC_HWEIGHT
 	bool
-	default y if !ALPHA_EV6 && !ALPHA_EV67
+	default y if !ALPHA_EV67

 config ALPHA_AVANTI
 	bool
arch/arm/mach-ep93xx/ts72xx.c

@@ -111,21 +111,21 @@ static void __init ts72xx_map_io(void)
 	}
 }

-static unsigned char ts72xx_rtc_readb(unsigned long addr)
+static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
 {
 	__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
 	return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);
 }

-static void ts72xx_rtc_writeb(unsigned char value, unsigned long addr)
+static void ts72xx_rtc_writebyte(unsigned char value, unsigned long addr)
 {
 	__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
 	__raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);
 }

 static struct m48t86_ops ts72xx_rtc_ops = {
-	.readb		= ts72xx_rtc_readb,
-	.writeb		= ts72xx_rtc_writeb,
+	.readbyte	= ts72xx_rtc_readbyte,
+	.writebyte	= ts72xx_rtc_writebyte,
 };

 static struct platform_device ts72xx_rtc_device = {
arch/arm/mach-imx/irq.c

@@ -127,7 +127,7 @@ static void
 imx_gpio_ack_irq(unsigned int irq)
 {
 	DEBUG_IRQ("%s: irq %d\n", __FUNCTION__, irq);
-	ISR(IRQ_TO_REG(irq)) |= 1 << ((irq - IRQ_GPIOA(0)) % 32);
+	ISR(IRQ_TO_REG(irq)) = 1 << ((irq - IRQ_GPIOA(0)) % 32);
 }

 static void
arch/arm/mach-integrator/integrator_cp.c

@@ -232,8 +232,6 @@ static void __init intcp_init_irq(void)
 	for (i = IRQ_PIC_START; i <= IRQ_PIC_END; i++) {
 		if (i == 11)
 			i = 22;
-		if (i == IRQ_CP_CPPLDINT)
-			i++;
 		if (i == 29)
 			break;
 		set_irq_chip(i, &pic_chip);

@@ -259,8 +257,7 @@ static void __init intcp_init_irq(void)
 		set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
 	}

-	set_irq_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
-	pic_unmask_irq(IRQ_CP_CPPLDINT);
+	set_irq_chained_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
 }

 /*
arch/arm/mach-pxa/spitz.c

@@ -371,6 +371,7 @@ static int spitz_ohci_init(struct device *dev)
 static struct pxaohci_platform_data spitz_ohci_platform_data = {
 	.port_mode	= PMM_NPS_MODE,
 	.init		= spitz_ohci_init,
+	.power_budget	= 150,
 };
arch/arm/mach-sa1100/neponset.c

@@ -59,6 +59,14 @@ neponset_irq_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *reg
 		if (irr & (IRR_ETHERNET | IRR_USAR)) {
 			desc->chip->mask(irq);

+			/*
+			 * Ack the interrupt now to prevent re-entering
+			 * this neponset handler.  Again, this is safe
+			 * since we'll check the IRR register prior to
+			 * leaving.
+			 */
+			desc->chip->ack(irq);
+
 			if (irr & IRR_ETHERNET) {
 				d = irq_desc + IRQ_NEPONSET_SMC9196;
 				desc_handle_irq(IRQ_NEPONSET_SMC9196, d, regs);
arch/arm/mach-versatile/core.c

@@ -112,10 +112,9 @@ void __init versatile_init_irq(void)
 {
 	unsigned int i;

-	vic_init(VA_VIC_BASE, IRQ_VIC_START, ~(1 << 31));
-	set_irq_handler(IRQ_VICSOURCE31, sic_handle_irq);
-	enable_irq(IRQ_VICSOURCE31);
+	vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0);
+	set_irq_chained_handler(IRQ_VICSOURCE31, sic_handle_irq);

 	/* Do second interrupt controller */
 	writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
arch/i386/kernel/acpi/earlyquirk.c

@@ -5,17 +5,34 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/pci.h>
+#include <linux/acpi.h>
+
 #include <asm/pci-direct.h>
 #include <asm/acpi.h>
 #include <asm/apic.h>

+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+	nvidia_hpet_detected = 1;
+	return 0;
+}
+#endif
+
 static int __init check_bridge(int vendor, int device)
 {
 #ifdef CONFIG_ACPI
-	/* According to Nvidia all timer overrides are bogus. Just ignore
-	   them all. */
+	/* According to Nvidia all timer overrides are bogus unless HPET
+	   is enabled. */
 	if (vendor == PCI_VENDOR_ID_NVIDIA) {
-		acpi_skip_timer_override = 1;
+		nvidia_hpet_detected = 0;
+		acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+		if (nvidia_hpet_detected == 0) {
+			acpi_skip_timer_override = 1;
+		}
 	}
 #endif
 	if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
arch/i386/kernel/setup.c

@@ -1547,15 +1547,18 @@ void __init setup_arch(char **cmdline_p)
 	if (efi_enabled)
 		efi_map_memmap();

-#ifdef CONFIG_X86_IO_APIC
-	check_acpi_pci();	/* Checks more than just ACPI actually */
-#endif
-
 #ifdef CONFIG_ACPI
 	/*
 	 * Parse the ACPI tables for possible boot-time SMP configuration.
 	 */
 	acpi_boot_table_init();
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+	check_acpi_pci();	/* Checks more than just ACPI actually */
+#endif
+
+#ifdef CONFIG_ACPI
 	acpi_boot_init();

 #if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
arch/powerpc/kernel/prom_init.c

@@ -822,6 +822,7 @@ static void __init prom_send_capabilities(void)
 	/* try calling the ibm,client-architecture-support method */
 	if (call_prom_ret("call-method", 3, 2, &ret,
 			  ADDR("ibm,client-architecture-support"),
 			  root,
 			  ADDR(ibm_architecture_vec)) == 0) {
 		/* the call exists... */
 		if (ret)

@@ -1622,6 +1623,15 @@ static int __init prom_find_machine_type(void)
 			if (strstr(p, RELOC("Power Macintosh")) ||
 			    strstr(p, RELOC("MacRISC")))
 				return PLATFORM_POWERMAC;
+#ifdef CONFIG_PPC64
+			/* We must make sure we don't detect the IBM Cell
+			 * blades as pSeries due to some firmware issues,
+			 * so we do it here.
+			 */
+			if (strstr(p, RELOC("IBM,CBEA")) ||
+			    strstr(p, RELOC("IBM,CPBW-1.0")))
+				return PLATFORM_GENERIC;
+#endif /* CONFIG_PPC64 */
 			i += sl + 1;
 		}
 	}
arch/powerpc/kernel/signal_32.c

@@ -803,10 +803,13 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
 		if (__get_user(cmcp, &ucp->uc_regs))
 			return -EFAULT;
 		mcp = (struct mcontext __user *)(u64)cmcp;
+		/* no need to check access_ok(mcp), since mcp < 4GB */
 	}
 #else
 	if (__get_user(mcp, &ucp->uc_regs))
 		return -EFAULT;
+	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
+		return -EFAULT;
 #endif
 	restore_sigmask(&set);
 	if (restore_user_regs(regs, mcp, sig))

@@ -908,13 +911,14 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 {
 	struct sig_dbg_op op;
 	int i;
+	unsigned char tmp;
 	unsigned long new_msr = regs->msr;
 #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
 	unsigned long new_dbcr0 = current->thread.dbcr0;
 #endif

 	for (i = 0; i < ndbg; i++) {
-		if (__copy_from_user(&op, dbg, sizeof(op)))
+		if (copy_from_user(&op, dbg + i, sizeof(op)))
 			return -EFAULT;
 		switch (op.dbg_type) {
 		case SIG_DBG_SINGLE_STEPPING:

@@ -959,6 +963,11 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
 	current->thread.dbcr0 = new_dbcr0;
 #endif

+	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) ||
+	    __get_user(tmp, (u8 __user *) ctx) ||
+	    __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
+		return -EFAULT;
+
 	/*
 	 * If we get a fault copying the context into the kernel's
 	 * image of the user's registers, we can't just return -EFAULT
arch/powerpc/kernel/signal_64.c

@@ -182,6 +182,8 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
 	err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
 	if (err)
 		return err;
+	if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
+		return -EFAULT;
 	/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
 	if (v_regs != 0 && (msr & MSR_VEC) != 0)
 		err |= __copy_from_user(current->thread.vr, v_regs,
arch/powerpc/platforms/cell/setup.c

@@ -125,14 +125,13 @@ static void __init cell_init_early(void)
 static int __init cell_probe(void)
 {
-	/* XXX This is temporary, the Cell maintainer will come up with
-	 * more appropriate detection logic
-	 */
 	unsigned long root = of_get_flat_dt_root();
-	if (!of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
-		return 0;

-	return 1;
+	if (of_flat_dt_is_compatible(root, "IBM,CBEA") ||
+	    of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+		return 1;
+
+	return 0;
 }

 /*
arch/powerpc/platforms/pseries/setup.c

@@ -389,6 +389,7 @@ static int __init pSeries_probe_hypertas(unsigned long node,
 static int __init pSeries_probe(void)
 {
+	unsigned long root = of_get_flat_dt_root();
 	char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
 					  "device_type", NULL);
 	if (dtype == NULL)

@@ -396,6 +397,13 @@ static int __init pSeries_probe(void)
 	if (strcmp(dtype, "chrp"))
 		return 0;

+	/* Cell blades firmware claims to be chrp while it's not. Until this
+	 * is fixed, we need to avoid those here.
+	 */
+	if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
+	    of_flat_dt_is_compatible(root, "IBM,CBEA"))
+		return 0;
+
 	DBG("pSeries detected, looking for LPAR capability...\n");

 	/* Now try to figure out if we are running on LPAR */
arch/sparc/kernel/smp.c

@@ -69,6 +69,17 @@ void __init smp_store_cpu_info(int id)
 					     "clock-frequency", 0);
 	cpu_data(id).prom_node = cpu_node;
 	cpu_data(id).mid = cpu_get_hwmid(cpu_node);

+	/* this is required to tune the scheduler correctly */
+	/* is it possible to have CPUs with different cache sizes? */
+	if (id == boot_cpu_id) {
+		int cache_line, cache_nlines;
+		cache_line = 0x20;
+		cache_line = prom_getintdefault(cpu_node, "ecache-line-size", cache_line);
+		cache_nlines = 0x8000;
+		cache_nlines = prom_getintdefault(cpu_node, "ecache-nlines", cache_nlines);
+		max_cache_size = cache_line * cache_nlines;
+	}
+
 	if (cpu_data(id).mid < 0)
 		panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
 }
arch/sparc64/kernel/pci_sun4v.c

@@ -599,18 +599,128 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {
 /* SUN4V PCI configuration space accessors. */

+struct pdev_entry {
+	struct pdev_entry	*next;
+	u32			devhandle;
+	unsigned int		bus;
+	unsigned int		device;
+	unsigned int		func;
+};
+
+#define PDEV_HTAB_SIZE	16
+#define PDEV_HTAB_MASK	(PDEV_HTAB_SIZE - 1)
+static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
+
+static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+	unsigned int val;
+
+	val = (devhandle ^ (devhandle >> 4));
+	val ^= bus;
+	val ^= device;
+	val ^= func;
+
+	return val & PDEV_HTAB_MASK;
+}
+
+static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+	struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
+	struct pdev_entry **slot;
+
+	if (!p)
+		return -ENOMEM;
+
+	slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
+	p->next = *slot;
+	*slot = p;
+
+	p->devhandle = devhandle;
+	p->bus = bus;
+	p->device = device;
+	p->func = func;
+
+	return 0;
+}
+
+/* Recursively descend into the OBP device tree, rooted at toplevel_node,
+ * looking for a PCI device matching bus and devfn.
+ */
+static int obp_find(struct linux_prom_pci_registers *pregs, int toplevel_node, unsigned int bus, unsigned int devfn)
+{
+	toplevel_node = prom_getchild(toplevel_node);
+
+	while (toplevel_node != 0) {
+		int ret = obp_find(pregs, toplevel_node, bus, devfn);
+
+		if (ret != 0)
+			return ret;
+
+		ret = prom_getproperty(toplevel_node, "reg", (char *) pregs,
+				       sizeof(*pregs) * PROMREG_MAX);
+		if (ret == 0 || ret == -1)
+			goto next_sibling;
+
+		if (((pregs[0].phys_hi >> 16) & 0xff) == bus &&
+		    ((pregs[0].phys_hi >> 8) & 0xff) == devfn)
+			break;
+
+	next_sibling:
+		toplevel_node = prom_getsibling(toplevel_node);
+	}
+
+	return toplevel_node;
+}
+
+static int pdev_htab_populate(struct pci_pbm_info *pbm)
+{
+	struct linux_prom_pci_registers pr[PROMREG_MAX];
+	u32 devhandle = pbm->devhandle;
+	unsigned int bus;
+
+	for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
+		unsigned int devfn;
+
+		for (devfn = 0; devfn < 256; devfn++) {
+			unsigned int device = PCI_SLOT(devfn);
+			unsigned int func = PCI_FUNC(devfn);
+
+			if (obp_find(pr, pbm->prom_node, bus, devfn)) {
+				int err = pdev_htab_add(devhandle, bus,
+							device, func);
+				if (err)
+					return err;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+	struct pdev_entry *p;
+
+	p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
+	while (p) {
+		if (p->devhandle == devhandle &&
+		    p->bus == bus &&
+		    p->device == device &&
+		    p->func == func)
+			break;
+
+		p = p->next;
+	}
+
+	return p;
+}
+
 static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
 {
-	if (bus == pbm->pci_first_busno) {
-		if (device == 0 && func == 0)
-			return 0;
-		return 1;
-	}
-
 	if (bus < pbm->pci_first_busno ||
 	    bus > pbm->pci_last_busno)
 		return 1;
-	return 0;
+	return pdev_find(pbm->devhandle, bus, device, func) == NULL;
 }

 static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,

@@ -1063,6 +1173,8 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32
 	pci_sun4v_get_bus_range(pbm);
 	pci_sun4v_iommu_init(pbm);
+
+	pdev_htab_populate(pbm);
 }

 void sun4v_pci_init(int node, char *model_name)
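The pdev_htab added above is a classic fixed-size chained hash table with an
XOR-fold hash. A self-contained user-space sketch of the same pattern (purely
illustrative; not the kernel code):

#include <stdlib.h>

#define HTAB_SIZE 16
#define HTAB_MASK (HTAB_SIZE - 1)

struct entry {
	struct entry *next;
	unsigned int key;
};

static struct entry *htab[HTAB_SIZE];

static unsigned int hashfn(unsigned int key)
{
	return (key ^ (key >> 4)) & HTAB_MASK;	/* XOR-fold into a bucket */
}

static int htab_add(unsigned int key)
{
	struct entry *p = malloc(sizeof(*p));

	if (!p)
		return -1;
	p->key = key;
	p->next = htab[hashfn(key)];	/* push onto the bucket's chain */
	htab[hashfn(key)] = p;
	return 0;
}

static struct entry *htab_find(unsigned int key)
{
	struct entry *p;

	for (p = htab[hashfn(key)]; p; p = p->next)
		if (p->key == key)
			break;
	return p;	/* NULL if absent, just as pdev_find() returns */
}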
arch/sparc64/kernel/smp.c

@@ -1287,6 +1287,40 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }

+static void __init smp_tune_scheduling(void)
+{
+	int instance, node;
+	unsigned int def, smallest = ~0U;
+
+	def = ((tlb_type == hypervisor) ?
+	       (3 * 1024 * 1024) :
+	       (4 * 1024 * 1024));
+
+	instance = 0;
+	while (!cpu_find_by_instance(instance, &node, NULL)) {
+		unsigned int val;
+
+		val = prom_getintdefault(node, "ecache-size", def);
+		if (val < smallest)
+			smallest = val;
+
+		instance++;
+	}
+
+	/* Any value less than 256K is nonsense.  */
+	if (smallest < (256U * 1024U))
+		smallest = 256 * 1024;
+
+	max_cache_size = smallest;
+
+	if (smallest < 1U * 1024U * 1024U)
+		printk(KERN_INFO "Using max_cache_size of %uKB\n",
+		       smallest / 1024U);
+	else
+		printk(KERN_INFO "Using max_cache_size of %uMB\n",
+		       smallest / 1024U / 1024U);
+}
+
 /* Constrain the number of cpus to max_cpus.  */
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {

@@ -1322,6 +1356,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	}

 	smp_store_cpu_info(boot_cpu_id);
+	smp_tune_scheduling();
 }

 /* Set this up early so that things like the scheduler can init
arch/sparc64/kernel/sparc64_ksyms.c

@@ -297,7 +297,6 @@ EXPORT_SYMBOL(svr4_getcontext);
 EXPORT_SYMBOL(svr4_setcontext);
 EXPORT_SYMBOL(compat_sys_ioctl);
 EXPORT_SYMBOL(sparc32_open);
-EXPORT_SYMBOL(sys_close);
 #endif

 /* Special internal versions of library functions. */
arch/sparc64/kernel/traps.c

@@ -1797,7 +1797,9 @@ static const char *sun4v_err_type_to_str(u32 type)
 	};
 }

-static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
+extern void __show_regs(struct pt_regs * regs);
+
+static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
 {
 	int cnt;

@@ -1830,6 +1832,8 @@ static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *
 	       pfx,
 	       ent->err_raddr, ent->err_size, ent->err_cpu);

+	__show_regs(regs);
+
 	if ((cnt = atomic_read(ocnt)) != 0) {
 		atomic_set(ocnt, 0);
 		wmb();

@@ -1862,7 +1866,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
 	put_cpu();

-	sun4v_log_error(&local_copy, cpu,
+	sun4v_log_error(regs, &local_copy, cpu,
 			KERN_ERR "RESUMABLE ERROR",
 			&sun4v_resum_oflow_cnt);
 }

@@ -1910,7 +1914,7 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
 	}
 #endif

-	sun4v_log_error(&local_copy, cpu,
+	sun4v_log_error(regs, &local_copy, cpu,
 			KERN_EMERG "NON-RESUMABLE ERROR",
 			&sun4v_nonresum_oflow_cnt);

@@ -2200,7 +2204,6 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
 void die_if_kernel(char *str, struct pt_regs *regs)
 {
 	static int die_counter;
-	extern void __show_regs(struct pt_regs * regs);
 	extern void smp_report_regs(void);
 	int count = 0;
arch/x86_64/kernel/io_apic.c

@@ -271,6 +271,18 @@ __setup("enable_8254_timer", setup_enable_8254_timer);
 #include <linux/pci_ids.h>
 #include <linux/pci.h>

+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+	nvidia_hpet_detected = 1;
+	return 0;
+}
+#endif
+
 /* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
    off. Check for an Nvidia or VIA PCI bridge and turn it off.
    Use pci direct infrastructure because this runs before the PCI subsystem.

@@ -317,11 +329,19 @@ void __init check_ioapic(void)
 					return;
 				case PCI_VENDOR_ID_NVIDIA:
 #ifdef CONFIG_ACPI
-					/* All timer overrides on Nvidia
-					   seem to be wrong. Skip them. */
-					acpi_skip_timer_override = 1;
-					printk(KERN_INFO
-	     "Nvidia board detected. Ignoring ACPI timer override.\n");
+					/*
+					 * All timer overrides on Nvidia are
+					 * wrong unless HPET is enabled.
+					 */
+					nvidia_hpet_detected = 0;
+					acpi_table_parse(ACPI_HPET,
+							nvidia_hpet_check);
+					if (nvidia_hpet_detected == 0) {
+						acpi_skip_timer_override = 1;
+						printk(KERN_INFO "Nvidia board "
+						    "detected. Ignoring ACPI "
+						    "timer override.\n");
+					}
 #endif
 					/* RED-PEN skip them on mptables too? */
 					return;
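Both the i386 and x86_64 hunks use the same idiom: acpi_table_parse() walks
the firmware tables and calls the handler once per matching table, and the
handler latches a flag the caller then inspects. A minimal sketch of that
callback pattern (illustrative only; walk_tables() and
apply_timer_override_skip() are hypothetical stand-ins):

static int table_found;

/* handler invoked by the table walker for each matching table */
static int check_table(unsigned long phys, unsigned long size)
{
	table_found = 1;
	return 0;
}

static void apply_quirk(void)
{
	table_found = 0;
	walk_tables("HPET", check_table);	/* hypothetical walker */
	if (!table_found)
		apply_timer_override_skip();	/* hypothetical quirk hook */
}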
block/as-iosched.c

@@ -1648,17 +1648,17 @@ static void as_exit_queue(elevator_t *e)
 * initialize elevator private data (as_data), and alloc a arq for
 * each request on the free lists
 */
-static int as_init_queue(request_queue_t *q, elevator_t *e)
+static void *as_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct as_data *ad;
 	int i;

 	if (!arq_pool)
-		return -ENOMEM;
+		return NULL;

 	ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
 	if (!ad)
-		return -ENOMEM;
+		return NULL;
 	memset(ad, 0, sizeof(*ad));

 	ad->q = q; /* Identify what queue the data belongs to */

@@ -1667,7 +1667,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 					GFP_KERNEL, q->node);
 	if (!ad->hash) {
 		kfree(ad);
-		return -ENOMEM;
+		return NULL;
 	}

 	ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,

@@ -1675,7 +1675,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 	if (!ad->arq_pool) {
 		kfree(ad->hash);
 		kfree(ad);
-		return -ENOMEM;
+		return NULL;
 	}

 	/* anticipatory scheduling helpers */

@@ -1696,14 +1696,13 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 	ad->antic_expire = default_antic_expire;
 	ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
 	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
-	e->elevator_data = ad;

 	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
 	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
 	if (ad->write_batch_count < 2)
 		ad->write_batch_count = 2;

-	return 0;
+	return ad;
 }

 /*
block/cfq-iosched.c

@@ -2251,14 +2251,14 @@ static void cfq_exit_queue(elevator_t *e)
 	kfree(cfqd);
 }

-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct cfq_data *cfqd;
 	int i;

 	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
 	if (!cfqd)
-		return -ENOMEM;
+		return NULL;

 	memset(cfqd, 0, sizeof(*cfqd));

@@ -2288,8 +2288,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
 		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);

-	e->elevator_data = cfqd;
-
 	cfqd->queue = q;

 	cfqd->max_queued = q->nr_requests / 4;

@@ -2316,14 +2314,14 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
 	cfqd->cfq_slice_idle = cfq_slice_idle;

-	return 0;
+	return cfqd;
 out_crqpool:
 	kfree(cfqd->cfq_hash);
 out_cfqhash:
 	kfree(cfqd->crq_hash);
 out_crqhash:
 	kfree(cfqd);
-	return -ENOMEM;
+	return NULL;
 }

 static void cfq_slab_kill(void)
block/deadline-iosched.c

@@ -613,24 +613,24 @@ static void deadline_exit_queue(elevator_t *e)
 * initialize elevator private data (deadline_data), and alloc a drq for
 * each request on the free lists
 */
-static int deadline_init_queue(request_queue_t *q, elevator_t *e)
+static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct deadline_data *dd;
 	int i;

 	if (!drq_pool)
-		return -ENOMEM;
+		return NULL;

 	dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
 	if (!dd)
-		return -ENOMEM;
+		return NULL;
 	memset(dd, 0, sizeof(*dd));

 	dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
				GFP_KERNEL, q->node);
 	if (!dd->hash) {
 		kfree(dd);
-		return -ENOMEM;
+		return NULL;
 	}

 	dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,

@@ -638,7 +638,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
 	if (!dd->drq_pool) {
 		kfree(dd->hash);
 		kfree(dd);
-		return -ENOMEM;
+		return NULL;
 	}

 	for (i = 0; i < DL_HASH_ENTRIES; i++)

@@ -653,8 +653,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
 	dd->writes_starved = writes_starved;
 	dd->front_merges = 1;
 	dd->fifo_batch = fifo_batch;
-	e->elevator_data = dd;
-	return 0;
+	return dd;
 }

 static void deadline_put_request(request_queue_t *q, struct request *rq)
block/elevator.c

@@ -121,16 +121,16 @@ static struct elevator_type *elevator_get(const char *name)
 	return e;
 }

-static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
+static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
 {
-	int ret = 0;
+	return eq->ops->elevator_init_fn(q, eq);
+}

+static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+			    void *data)
+{
 	q->elevator = eq;
-
-	if (eq->ops->elevator_init_fn)
-		ret = eq->ops->elevator_init_fn(q, eq);
-
-	return ret;
+	eq->elevator_data = data;
 }

 static char chosen_elevator[16];

@@ -181,6 +181,7 @@ int elevator_init(request_queue_t *q, char *name)
 	struct elevator_type *e = NULL;
 	struct elevator_queue *eq;
 	int ret = 0;
+	void *data;

 	INIT_LIST_HEAD(&q->queue_head);
 	q->last_merge = NULL;

@@ -202,10 +203,13 @@ int elevator_init(request_queue_t *q, char *name)
 	if (!eq)
 		return -ENOMEM;

-	ret = elevator_attach(q, eq);
-	if (ret)
+	data = elevator_init_queue(q, eq);
+	if (!data) {
 		kobject_put(&eq->kobj);
+		return -ENOMEM;
+	}

+	elevator_attach(q, eq, data);
 	return ret;
 }

@@ -722,13 +726,16 @@ int elv_register_queue(struct request_queue *q)
 	return error;
 }

+static void __elv_unregister_queue(elevator_t *e)
+{
+	kobject_uevent(&e->kobj, KOBJ_REMOVE);
+	kobject_del(&e->kobj);
+}
+
 void elv_unregister_queue(struct request_queue *q)
 {
-	if (q) {
-		elevator_t *e = q->elevator;
-		kobject_uevent(&e->kobj, KOBJ_REMOVE);
-		kobject_del(&e->kobj);
-	}
+	if (q)
+		__elv_unregister_queue(q->elevator);
 }

 int elv_register(struct elevator_type *e)

@@ -780,6 +787,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
 static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 {
 	elevator_t *old_elevator, *e;
+	void *data;

 	/*
 	 * Allocate new elevator

@@ -788,6 +796,12 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 	if (!e)
 		return 0;

+	data = elevator_init_queue(q, e);
+	if (!data) {
+		kobject_put(&e->kobj);
+		return 0;
+	}
+
 	/*
 	 * Turn on BYPASS and drain all requests w/ elevator private data
 	 */

@@ -806,19 +820,19 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 		elv_drain_elevator(q);
 	}

-	spin_unlock_irq(q->queue_lock);
-
 	/*
-	 * unregister old elevator data
+	 * Remember old elevator.
 	 */
-	elv_unregister_queue(q);
-
 	old_elevator = q->elevator;

 	/*
 	 * attach and start new elevator
 	 */
-	if (elevator_attach(q, e))
-		goto fail;
+	elevator_attach(q, e, data);
+
+	spin_unlock_irq(q->queue_lock);
+
+	__elv_unregister_queue(old_elevator);

 	if (elv_register_queue(q))
 		goto fail_register;

@@ -837,7 +851,6 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
 	 */
 	elevator_exit(e);
-	e = NULL;
-fail:
+
 	q->elevator = old_elevator;
 	elv_register_queue(q);
 	clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
block/noop-iosched.c

@@ -65,16 +65,15 @@ noop_latter_request(request_queue_t *q, struct request *rq)
 	return list_entry(rq->queuelist.next, struct request, queuelist);
 }

-static int noop_init_queue(request_queue_t *q, elevator_t *e)
+static void *noop_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct noop_data *nd;

 	nd = kmalloc(sizeof(*nd), GFP_KERNEL);
 	if (!nd)
-		return -ENOMEM;
+		return NULL;
+
 	INIT_LIST_HEAD(&nd->queue);
-	e->elevator_data = nd;
-	return 0;
+	return nd;
 }

 static void noop_exit_queue(elevator_t *e)
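Across as, cfq, deadline, and noop the init hook's contract changes the same
way: instead of returning an int status and assigning e->elevator_data as a
side effect, the hook now returns its private-data pointer (NULL on failure)
and the caller stores it via elevator_attach(). A condensed sketch of the new
contract (illustrative; sched_data is a hypothetical scheduler type):

/* new-style init: return private data, or NULL on allocation failure */
static void *sched_init_queue(request_queue_t *q, elevator_t *e)
{
	struct sched_data *d = kmalloc(sizeof(*d), GFP_KERNEL);

	if (!d)
		return NULL;	/* the caller maps NULL to -ENOMEM */
	/* ... initialize d ... */
	return d;		/* caller does eq->elevator_data = d */
}

Deferring the assignment to the caller is what lets elevator_switch() fully
initialize the new scheduler before publishing it on the queue.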
drivers/acpi/processor_perflib.c

@@ -577,6 +577,8 @@ acpi_processor_register_performance(struct acpi_processor_performance
 		return_VALUE(-EBUSY);
 	}

+	WARN_ON(!performance);
+
 	pr->performance = performance;

 	if (acpi_processor_get_performance_info(pr)) {

@@ -609,7 +611,8 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
 		return_VOID;
 	}

-	kfree(pr->performance->states);
+	if (pr->performance)
+		kfree(pr->performance->states);
 	pr->performance = NULL;

 	acpi_cpufreq_remove_file(pr);
drivers/char/Makefile

@@ -41,9 +41,9 @@ obj-$(CONFIG_N_HDLC)		+= n_hdlc.o
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)		+= sx.o generic_serial.o
 obj-$(CONFIG_RIO)		+= rio/ generic_serial.o
+obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
 obj-$(CONFIG_HVC_CONSOLE)	+= hvc_vio.o hvsi.o
 obj-$(CONFIG_HVC_RTAS)		+= hvc_rtas.o
-obj-$(CONFIG_HVC_DRIVER)	+= hvc_console.o
 obj-$(CONFIG_RAW_DRIVER)	+= raw.o
 obj-$(CONFIG_SGI_SNSC)		+= snsc.o snsc_event.o
 obj-$(CONFIG_MMTIMER)		+= mmtimer.o
drivers/char/n_tty.c

@@ -1384,8 +1384,10 @@ static ssize_t read_chan(struct tty_struct *tty, struct file *file,
 		 * longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
 		 * we won't get any more characters.
 		 */
-		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE)
+		if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
+			n_tty_set_room(tty);
 			check_unthrottle(tty);
+		}

 		if (b - buf >= minimum)
 			break;
drivers/message/fusion/mptspi.c

@@ -831,6 +831,7 @@ mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 	return rc;
 }

+#ifdef CONFIG_PM
 /*
  * spi module resume handler
  */

@@ -846,6 +847,7 @@ mptspi_resume(struct pci_dev *pdev)
 	return rc;
 }
+#endif

 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
drivers/message/i2o/exec-osm.c

@@ -55,6 +55,7 @@ struct i2o_exec_wait {
 	u32 m;				/* message id */
 	struct i2o_message *msg;	/* pointer to the reply message */
 	struct list_head list;		/* node in global wait list */
+	spinlock_t lock;		/* lock before modifying */
 };

 /* Work struct needed to handle LCT NOTIFY replies */

@@ -87,6 +88,7 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
 		return NULL;

 	INIT_LIST_HEAD(&wait->list);
+	spin_lock_init(&wait->lock);

 	return wait;
 };

@@ -125,6 +127,7 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
 	DECLARE_WAIT_QUEUE_HEAD(wq);
 	struct i2o_exec_wait *wait;
 	static u32 tcntxt = 0x80000000;
+	long flags;
 	int rc = 0;

 	wait = i2o_exec_wait_alloc();

@@ -146,33 +149,28 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
 	wait->tcntxt = tcntxt++;
 	msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);

+	wait->wq = &wq;
+	/*
+	 * we add elements to the head, because if a entry in the list will
+	 * never be removed, we have to iterate over it every time
+	 */
+	list_add(&wait->list, &i2o_exec_wait_list);
+
 	/*
 	 * Post the message to the controller. At some point later it will
 	 * return. If we time out before it returns then complete will be zero.
 	 */
 	i2o_msg_post(c, msg);

-	if (!wait->complete) {
-		wait->wq = &wq;
-		/*
-		 * we add elements add the head, because if a entry in the list
-		 * will never be removed, we have to iterate over it every time
-		 */
-		list_add(&wait->list, &i2o_exec_wait_list);
-
-		wait_event_interruptible_timeout(wq, wait->complete,
-						 timeout * HZ);
+	wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ);

-		wait->wq = NULL;
-	}
+	spin_lock_irqsave(&wait->lock, flags);

-	barrier();
+	wait->wq = NULL;

-	if (wait->complete) {
+	if (wait->complete)
 		rc = le32_to_cpu(wait->msg->body[0]) >> 24;
-		i2o_flush_reply(c, wait->m);
-		i2o_exec_wait_free(wait);
-	} else {
+	else {
 		/*
 		 * We cannot remove it now. This is important. When it does
 		 * terminate (which it must do if the controller has not

@@ -186,6 +184,13 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
 		rc = -ETIMEDOUT;
 	}

+	spin_unlock_irqrestore(&wait->lock, flags);
+
+	if (rc != -ETIMEDOUT) {
+		i2o_flush_reply(c, wait->m);
+		i2o_exec_wait_free(wait);
+	}
+
 	return rc;
 };

@@ -213,7 +218,6 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
 {
 	struct i2o_exec_wait *wait, *tmp;
 	unsigned long flags;
 	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
 	int rc = 1;

 	/*

@@ -223,23 +227,24 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
 	 * already expired. Not much we can do about that except log it for
 	 * debug purposes, increase timeout, and recompile.
 	 */
 	spin_lock_irqsave(&lock, flags);
 	list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
 		if (wait->tcntxt == context) {
-			list_del(&wait->list);
+			spin_lock_irqsave(&wait->lock, flags);
 			spin_unlock_irqrestore(&lock, flags);

+			list_del(&wait->list);
+
 			wait->m = m;
 			wait->msg = msg;
 			wait->complete = 1;

-			barrier();
-
-			if (wait->wq) {
-				wake_up_interruptible(wait->wq);
+			if (wait->wq)
 				rc = 0;
-			} else {
+			else
+				rc = -1;
+
+			spin_unlock_irqrestore(&wait->lock, flags);
+
+			if (rc) {
 				struct device *dev;

 				dev = &c->pdev->dev;

@@ -248,15 +253,13 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
 					 c->name);

 				i2o_dma_free(dev, &wait->dma);
 				i2o_exec_wait_free(wait);
-				rc = -1;
-			}
+			} else
+				wake_up_interruptible(wait->wq);

 			return rc;
 		}
 	}

 	spin_unlock_irqrestore(&lock, flags);

 	osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
 		 context);

@@ -322,14 +325,9 @@ static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
 static int i2o_exec_probe(struct device *dev)
 {
 	struct i2o_device *i2o_dev = to_i2o_device(dev);
-	struct i2o_controller *c = i2o_dev->iop;

 	i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);

-	c->exec = i2o_dev;
-
-	i2o_exec_lct_notify(c, c->lct->change_ind + 1);
-
 	device_create_file(dev, &dev_attr_vendor_id);
 	device_create_file(dev, &dev_attr_product_id);

@@ -523,6 +521,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
 	struct device *dev;
 	struct i2o_message *msg;

+	down(&c->lct_lock);
+
 	dev = &c->pdev->dev;

 	if (i2o_dma_realloc

@@ -545,6 +545,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
 	i2o_msg_post(c, msg);

+	up(&c->lct_lock);
+
 	return 0;
 };
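The shape of the fix above: completion state is published under the per-wait
spinlock, and whichever side loses the race (timeout vs. reply) is the one
that frees the wait block. A pseudocode sketch of that handoff idiom
(illustrative only; w and free_wait() are hypothetical):

/* waiter side, after the timed wait */
spin_lock_irqsave(&w->lock, flags);
w->wq = NULL;			/* no further wakeups wanted */
done = w->complete;
spin_unlock_irqrestore(&w->lock, flags);
if (done)
	free_wait(w);		/* reply arrived in time: waiter cleans up */

/* reply side */
spin_lock_irqsave(&w->lock, flags);
w->complete = 1;
waiter_present = (w->wq != NULL);
spin_unlock_irqrestore(&w->lock, flags);
if (!waiter_present)
	free_wait(w);		/* waiter already timed out: reply cleans up */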
drivers/message/i2o/iop.c

@@ -804,8 +804,6 @@ void i2o_iop_remove(struct i2o_controller *c)
 	/* Ask the IOP to switch to RESET state */
 	i2o_iop_reset(c);
-
-	put_device(&c->device);
 }

 /**

@@ -1059,7 +1057,7 @@ struct i2o_controller *i2o_iop_alloc(void)
 	snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
 	if (i2o_pool_alloc
-	    (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4,
+	    (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4 + sizeof(u32),
 	     I2O_MSG_INPOOL_MIN)) {
 		kfree(c);
 		return ERR_PTR(-ENOMEM);
drivers/net/sky2.c

@@ -187,12 +187,11 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
 	return v;
 }

-static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
+static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 {
 	u16 power_control;
 	u32 reg1;
 	int vaux;
-	int ret = 0;

 	pr_debug("sky2_set_power_state %d\n", state);
 	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);

@@ -275,12 +274,10 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
 		break;
 	default:
 		printk(KERN_ERR PFX "Unknown power state %d\n", state);
-		ret = -1;
 	}

 	sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
 	sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
-	return ret;
 }

 static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)

@@ -2164,6 +2161,13 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
 /* If idle then force a fake soft NAPI poll once a second
 * to work around cases where sharing an edge triggered interrupt.
 */
+static inline void sky2_idle_start(struct sky2_hw *hw)
+{
+	if (idle_timeout > 0)
+		mod_timer(&hw->idle_timer,
+			  jiffies + msecs_to_jiffies(idle_timeout));
+}
+
 static void sky2_idle(unsigned long arg)
 {
 	struct sky2_hw *hw = (struct sky2_hw *) arg;

@@ -2183,6 +2187,9 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	int work_done = 0;
 	u32 status = sky2_read32(hw, B0_Y2_SP_EISR);

+	if (!~status)
+		goto out;
+
 	if (status & Y2_IS_HW_ERR)
 		sky2_hw_intr(hw);

@@ -2219,7 +2226,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
 	if (sky2_more_work(hw))
 		return 1;
-
+out:
 	netif_rx_complete(dev0);
 	sky2_read32(hw, B0_Y2_SP_LISR);

@@ -3350,9 +3357,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
 	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);

 	setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
-	if (idle_timeout > 0)
-		mod_timer(&hw->idle_timer,
-			  jiffies + msecs_to_jiffies(idle_timeout));
+	sky2_idle_start(hw);

 	pci_set_drvdata(pdev, hw);

@@ -3425,8 +3430,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct sky2_hw *hw = pci_get_drvdata(pdev);
 	int i;
+	pci_power_t pstate = pci_choose_state(pdev, state);
+
+	if (!(pstate == PCI_D3hot || pstate == PCI_D3cold))
+		return -EINVAL;
+
+	del_timer_sync(&hw->idle_timer);

-	for (i = 0; i < 2; i++) {
+	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];

 		if (dev) {

@@ -3438,7 +3449,10 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
 		}
 	}

-	return sky2_set_power_state(hw, pci_choose_state(pdev, state));
+	sky2_write32(hw, B0_IMSK, 0);
+	pci_save_state(pdev);
+	sky2_set_power_state(hw, pstate);
+	return 0;
 }

 static int sky2_resume(struct pci_dev *pdev)

@@ -3448,15 +3462,15 @@ static int sky2_resume(struct pci_dev *pdev)
 	pci_restore_state(pdev);
 	pci_enable_wake(pdev, PCI_D0, 0);
-	err = sky2_set_power_state(hw, PCI_D0);
-	if (err)
-		goto out;
+	sky2_set_power_state(hw, PCI_D0);

 	err = sky2_reset(hw);
 	if (err)
 		goto out;

-	for (i = 0; i < 2; i++) {
+	sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+
+	for (i = 0; i < hw->ports; i++) {
 		struct net_device *dev = hw->dev[i];
 		if (dev && netif_running(dev)) {
 			netif_device_attach(dev);

@@ -3465,10 +3479,12 @@ static int sky2_resume(struct pci_dev *pdev)
 				printk(KERN_ERR PFX "%s: could not up: %d\n",
 				       dev->name, err);
 				dev_close(dev);
-				break;
+				goto out;
 			}
 		}
 	}

+	sky2_idle_start(hw);
+out:
 	return err;
 }
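The reordered suspend path above follows the usual PCI driver sequence:
quiesce the hardware, save config space, then drop the power state. A sketch
of that ordering (illustrative only; the helper names are hypothetical
stand-ins, not sky2 functions):

static int drv_suspend(struct pci_dev *pdev, pm_message_t state)
{
	quiesce_hardware();		/* hypothetical: mask IRQs, stop DMA */
	pci_save_state(pdev);		/* save config space while in D0 */
	enter_low_power(pdev, state);	/* hypothetical: then drop to D3 */
	return 0;
}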
drivers/net/tg3.c
浏览文件 @
b5ed7639
...
...
@@ -69,8 +69,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.5
8
"
#define DRV_MODULE_RELDATE "
May 22
, 2006"
#define DRV_MODULE_VERSION "3.5
9
"
#define DRV_MODULE_RELDATE "
June 8
, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
...
...
@@ -4485,9 +4485,8 @@ static void tg3_disable_nvram_access(struct tg3 *tp)
/* tp->lock is held. */
static
void
tg3_write_sig_pre_reset
(
struct
tg3
*
tp
,
int
kind
)
{
if
(
!
(
tp
->
tg3_flags2
&
TG3_FLG2_SUN_570X
))
tg3_write_mem
(
tp
,
NIC_SRAM_FIRMWARE_MBOX
,
NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
tg3_write_mem
(
tp
,
NIC_SRAM_FIRMWARE_MBOX
,
NIC_SRAM_FIRMWARE_MBOX_MAGIC1
);
if
(
tp
->
tg3_flags2
&
TG3_FLG2_ASF_NEW_HANDSHAKE
)
{
switch
(
kind
)
{
...
...
@@ -4568,13 +4567,12 @@ static int tg3_chip_reset(struct tg3 *tp)
void
(
*
write_op
)(
struct
tg3
*
,
u32
,
u32
);
int
i
;
if
(
!
(
tp
->
tg3_flags2
&
TG3_FLG2_SUN_570X
))
{
tg3_nvram_lock
(
tp
);
/* No matching tg3_nvram_unlock() after this because
* chip reset below will undo the nvram lock.
*/
tp
->
nvram_lock_cnt
=
0
;
}
tg3_nvram_lock
(
tp
);
/* No matching tg3_nvram_unlock() after this because
* chip reset below will undo the nvram lock.
*/
tp
->
nvram_lock_cnt
=
0
;
if
(
GET_ASIC_REV
(
tp
->
pci_chip_rev_id
)
==
ASIC_REV_5752
||
GET_ASIC_REV
(
tp
->
pci_chip_rev_id
)
==
ASIC_REV_5755
||
...
...
@@ -4727,20 +4725,25 @@ static int tg3_chip_reset(struct tg3 *tp)
tw32_f
(
MAC_MODE
,
0
);
udelay
(
40
);
if
(
!
(
tp
->
tg3_flags2
&
TG3_FLG2_SUN_570X
))
{
/* Wait for firmware initialization to complete. */
for
(
i
=
0
;
i
<
100000
;
i
++
)
{
tg3_read_mem
(
tp
,
NIC_SRAM_FIRMWARE_MBOX
,
&
val
);
if
(
val
==
~
NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
break
;
udelay
(
10
);
}
if
(
i
>=
100000
)
{
printk
(
KERN_ERR
PFX
"tg3_reset_hw timed out for %s, "
"firmware will not restart magic=%08x
\n
"
,
tp
->
dev
->
name
,
val
);
return
-
ENODEV
;
}
/* Wait for firmware initialization to complete. */
for
(
i
=
0
;
i
<
100000
;
i
++
)
{
tg3_read_mem
(
tp
,
NIC_SRAM_FIRMWARE_MBOX
,
&
val
);
if
(
val
==
~
NIC_SRAM_FIRMWARE_MBOX_MAGIC1
)
break
;
udelay
(
10
);
}
/* Chip might not be fitted with firmare. Some Sun onboard
* parts are configured like that. So don't signal the timeout
* of the above loop as an error, but do report the lack of
* running firmware once.
*/
if
(
i
>=
100000
&&
!
(
tp
->
tg3_flags2
&
TG3_FLG2_NO_FWARE_REPORTED
))
{
tp
->
tg3_flags2
|=
TG3_FLG2_NO_FWARE_REPORTED
;
printk
(
KERN_INFO
PFX
"%s: No firmware running.
\n
"
,
tp
->
dev
->
name
);
}
if
((
tp
->
tg3_flags2
&
TG3_FLG2_PCI_EXPRESS
)
&&
...
...
@@ -9075,9 +9078,6 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
{
int
j
;
if
(
tp
->
tg3_flags2
&
TG3_FLG2_SUN_570X
)
return
;
tw32_f
(
GRC_EEPROM_ADDR
,
(
EEPROM_ADDR_FSM_RESET
|
(
EEPROM_DEFAULT_CLOCK_PERIOD
<<
...
...
@@ -9210,11 +9210,6 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
int
ret
;
if
(
tp
->
tg3_flags2
&
TG3_FLG2_SUN_570X
)
{
printk
(
KERN_ERR
PFX
"Attempt to do nvram_read on Sun 570X
\n
"
);
return
-
EINVAL
;
}
if
(
!
(
tp
->
tg3_flags
&
TG3_FLAG_NVRAM
))
return
tg3_nvram_read_using_eeprom
(
tp
,
offset
,
val
);
...
...
@@ -9447,11 +9442,6 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
int
ret
;
if
(
tp
->
tg3_flags2
&
TG3_FLG2_SUN_570X
)
{
printk
(
KERN_ERR
PFX
"Attempt to do nvram_write on Sun 570X
\n
"
);
return
-
EINVAL
;
}
if
(
tp
->
tg3_flags
&
TG3_FLAG_EEPROM_WRITE_PROT
)
{
tw32_f
(
GRC_LOCAL_CTRL
,
tp
->
grc_local_ctrl
&
~
GRC_LCLCTRL_GPIO_OUTPUT1
);
...
...
@@ -9578,15 +9568,19 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
pci_write_config_dword
(
tp
->
pdev
,
TG3PCI_MISC_HOST_CTRL
,
tp
->
misc_host_ctrl
);
/* The memory arbiter has to be enabled in order for SRAM accesses
* to succeed. Normally on powerup the tg3 chip firmware will make
* sure it is enabled, but other entities such as system netboot
* code might disable it.
*/
val
=
tr32
(
MEMARB_MODE
);
tw32
(
MEMARB_MODE
,
val
|
MEMARB_MODE_ENABLE
);
tp
->
phy_id
=
PHY_ID_INVALID
;
tp
->
led_ctrl
=
LED_CTRL_MODE_PHY_1
;
/* Do not even try poking around in here on Sun parts. */
if
(
tp
->
tg3_flags2
&
TG3_FLG2_SUN_570X
)
{
/* All SUN chips are built-in LOMs. */
tp
->
tg3_flags
|=
TG3_FLAG_EEPROM_WRITE_PROT
;
return
;
}
/* Assume an onboard device by default. */
tp
->
tg3_flags
|=
TG3_FLAG_EEPROM_WRITE_PROT
;
tg3_read_mem
(
tp
,
NIC_SRAM_DATA_SIG
,
&
val
);
if
(
val
==
NIC_SRAM_DATA_SIG_MAGIC
)
{
...
...
@@ -9686,6 +9680,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
if
(
nic_cfg
&
NIC_SRAM_DATA_CFG_EEPROM_WP
)
tp
->
tg3_flags
|=
TG3_FLAG_EEPROM_WRITE_PROT
;
else
tp
->
tg3_flags
&=
~
TG3_FLAG_EEPROM_WRITE_PROT
;
if
(
nic_cfg
&
NIC_SRAM_DATA_CFG_ASF_ENABLE
)
{
tp
->
tg3_flags
|=
TG3_FLAG_ENABLE_ASF
;
...
...
@@ -9834,16 +9830,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
	int i;
	u32 magic;

-	if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
-		/* Sun decided not to put the necessary bits in the
-		 * NVRAM of their onboard tg3 parts :(
-		 */
-		strcpy(tp->board_part_number, "Sun 570X");
-		return;
-	}
-
	if (tg3_nvram_read_swab(tp, 0x0, &magic))
-		return;
+		goto out_not_found;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < 256; i += 4) {
...
...
@@ -9874,6 +9862,9 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
				break;
			msleep(1);
		}
+		if (!(tmp16 & 0x8000))
+			goto out_not_found;
+
		pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
				      &tmp);
		tmp = cpu_to_le32(tmp);
...
...
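With the Sun special case gone, every failure path in tg3_read_partno now converges on the out_not_found label instead of returning early, so one fallback covers each way the part number can be missing. A small sketch of that goto-to-common-fallback idiom (nvram_read and the "none" default are stand-ins, not the driver's code):

    #include <stdio.h>
    #include <string.h>

    /* Stub read that always fails, standing in for an NVRAM access. */
    static int nvram_read(unsigned offset, unsigned *val) { return -1; }

    static void read_partno(char *out, size_t len)
    {
            unsigned magic;

            if (nvram_read(0x0, &magic))
                    goto out_not_found;     /* jump to the shared fallback */

            snprintf(out, len, "%08x", magic);
            return;

    out_not_found:
            /* Single fallback shared by every failure path. */
            strncpy(out, "none", len);
    }

    int main(void)
    {
            char part[16];

            read_partno(part, sizeof(part));
            printf("part number: %s\n", part);
            return 0;
    }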
@@ -9965,37 +9956,6 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
	}
}

-#ifdef CONFIG_SPARC64
-static int __devinit tg3_is_sun_570X(struct tg3 *tp)
-{
-	struct pci_dev *pdev = tp->pdev;
-	struct pcidev_cookie *pcp = pdev->sysdata;
-
-	if (pcp != NULL) {
-		int node = pcp->prom_node;
-		u32 venid;
-		int err;
-
-		err = prom_getproperty(node, "subsystem-vendor-id",
-				       (char *) &venid, sizeof(venid));
-		if (err == 0 || err == -1)
-			return 0;
-		if (venid == PCI_VENDOR_ID_SUN)
-			return 1;
-
-		/* TG3 chips onboard the SunBlade-2500 don't have the
-		 * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
-		 * are distinguishable from non-Sun variants by being
-		 * named "network" by the firmware.  Non-Sun cards will
-		 * show up as being named "ethernet".
-		 */
-		if (!strcmp(pcp->prom_name, "network"))
-			return 1;
-	}
-	return 0;
-}
-#endif
-
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
	static struct pci_device_id write_reorder_chipsets[] = {
...
...
@@ -10012,11 +9972,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
	u16 pci_cmd;
	int err;

-#ifdef CONFIG_SPARC64
-	if (tg3_is_sun_570X(tp))
-		tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
-#endif
-
	/* Force memory write invalidate off.  If we leave it on,
	 * then on 5700_BX chips we have to enable a workaround.
	 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
...
...
@@ -10312,8 +10267,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
	if (tp->write32 == tg3_write_indirect_reg32 ||
	    ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
-	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) ||
-	    (tp->tg3_flags2 & TG3_FLG2_SUN_570X))
+	      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
		tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
/* Get eeprom hw config before calling tg3_set_power_state().
...
...
@@ -10594,8 +10548,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
#endif

	mac_offset = 0x7c;
-	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
-	     !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
+	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
	    (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
...
...
@@ -10622,8 +10575,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
	}

	if (!addr_ok) {
		/* Next, try NVRAM. */
-		if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
-		    !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
+		if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
			dev->dev_addr[0] = ((hi >> 16) & 0xff);
			dev->dev_addr[1] = ((hi >> 24) & 0xff);
...
...
drivers/net/tg3.h
...
...
@@ -2184,7 +2184,7 @@ struct tg3 {
#define TG3_FLAG_INIT_COMPLETE		0x80000000
	u32				tg3_flags2;
#define TG3_FLG2_RESTART_TIMER		0x00000001
-#define TG3_FLG2_SUN_570X		0x00000002
+/* 0x00000002 available */
#define TG3_FLG2_NO_ETH_WIRE_SPEED	0x00000004
#define TG3_FLG2_IS_5788		0x00000008
#define TG3_FLG2_MAX_RXPEND_64		0x00000010
...
...
@@ -2216,6 +2216,7 @@ struct tg3 {
#define TG3_FLG2_HW_TSO		(TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
#define TG3_FLG2_1SHOT_MSI		0x10000000
#define TG3_FLG2_PHY_JITTER_BUG	0x20000000
+#define TG3_FLG2_NO_FWARE_REPORTED	0x40000000

	u32				split_mode_max_reqs;
#define SPLIT_MODE_5704_MAX_REQ	3
...
...
drivers/pci/pci-driver.c
...
...
@@ -285,9 +285,9 @@ static int pci_device_suspend(struct device * dev, pm_message_t state)
 * Default resume method for devices that have no driver provided resume,
 * or not even a driver at all.
 */
-static void pci_default_resume(struct pci_dev *pci_dev)
+static int pci_default_resume(struct pci_dev *pci_dev)
{
-	int retval;
+	int retval = 0;

	/* restore the PCI config space */
	pci_restore_state(pci_dev);
...
...
@@ -297,18 +297,21 @@ static void pci_default_resume(struct pci_dev *pci_dev)
	/* if the device was busmaster before the suspend, make it busmaster again */
	if (pci_dev->is_busmaster)
		pci_set_master(pci_dev);
+
+	return retval;
}

static int pci_device_resume(struct device *dev)
{
+	int error;
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct pci_driver *drv = pci_dev->driver;

	if (drv && drv->resume)
-		drv->resume(pci_dev);
+		error = drv->resume(pci_dev);
	else
-		pci_default_resume(pci_dev);
-	return 0;
+		error = pci_default_resume(pci_dev);
+	return error;
}

static void pci_device_shutdown(struct device *dev)
...
...
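The pci_device_resume rework above stops discarding resume status: whichever path runs, the driver hook or the default, its return value becomes the function's result. The same propagate-instead-of-swallow shape, sketched with stand-in hooks (failing_resume and the -5 value are illustrative):

    #include <stdio.h>

    struct drv { int (*resume)(void); };

    static int default_resume(void) { return 0; }
    static int failing_resume(void) { return -5; }  /* plays an errno */

    /* Propagate whichever path ran instead of returning 0 unconditionally. */
    static int device_resume(struct drv *d)
    {
            int error;

            if (d && d->resume)
                    error = d->resume();
            else
                    error = default_resume();
            return error;
    }

    int main(void)
    {
            struct drv bad = { failing_resume };

            printf("no driver:  %d\n", device_resume(NULL));
            printf("bad driver: %d\n", device_resume(&bad));
            return 0;
    }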
drivers/pci/pci.c
...
...
@@ -460,9 +460,23 @@ int
pci_restore_state(struct pci_dev *dev)
{
	int i;
+	int val;

-	for (i = 0; i < 16; i++)
-		pci_write_config_dword(dev, i * 4, dev->saved_config_space[i]);
+	/*
+	 * The Base Address register should be programmed before the command
+	 * register(s)
+	 */
+	for (i = 15; i >= 0; i--) {
+		pci_read_config_dword(dev, i * 4, &val);
+		if (val != dev->saved_config_space[i]) {
+			printk(KERN_DEBUG "PM: Writing back config space on "
+				"device %s at offset %x (was %x, writing %x)\n",
+				pci_name(dev), i,
+				val, (int)dev->saved_config_space[i]);
+			pci_write_config_dword(dev, i * 4,
+				dev->saved_config_space[i]);
+		}
+	}
	pci_restore_msi_state(dev);
	pci_restore_msix_state(dev);
	return 0;
...
...
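Two ideas sit in the pci_restore_state hunk above: walk the 16 config dwords from high offset to low, so the base address registers are reprogrammed before the command register re-enables the device, and write only the dwords whose live value differs from the saved copy. A userspace model over plain arrays (cfg_read/cfg_write stand in for the config-space accessors):

    #include <stdio.h>

    static unsigned hw[16];                 /* live "config space" */
    static unsigned saved[16];              /* snapshot from suspend time */

    static void cfg_read(int i, unsigned *v) { *v = hw[i]; }
    static void cfg_write(int i, unsigned v) { hw[i] = v; }

    static void restore_state(void)
    {
            unsigned val;
            int i;

            /* Walk backwards so address registers (higher offsets) are
             * restored before the command dword at offset 0; skip writes
             * that would not change anything. */
            for (i = 15; i >= 0; i--) {
                    cfg_read(i, &val);
                    if (val != saved[i]) {
                            printf("writing dword %d (was %x, writing %x)\n",
                                   i, val, saved[i]);
                            cfg_write(i, saved[i]);
                    }
            }
    }

    int main(void)
    {
            saved[0] = 0x0107;              /* command/status-style dword */
            saved[4] = 0xe000;              /* a BAR-style dword */
            restore_state();
            return 0;
    }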
drivers/scsi/sata_mv.c
...
...
@@ -2035,6 +2035,7 @@ static void mv_phy_reset(struct ata_port *ap)
static void mv_eng_timeout(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
+	unsigned long flags;

	printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n", ap->id);
	DPRINTK("All regs @ start of eng_timeout\n");
...
...
@@ -2046,8 +2047,10 @@ static void mv_eng_timeout(struct ata_port *ap)
	       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
	       &qc->scsicmd->cmnd);

+	spin_lock_irqsave(&ap->host_set->lock, flags);
	mv_err_intr(ap, 0);
	mv_stop_and_reset(ap);
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
	if (qc->flags & ATA_QCFLAG_ACTIVE) {
...
...
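The sata_mv change above takes the host-set lock across the whole error-handling sequence, so the timeout path cannot interleave with the interrupt handler over shared port state. As a loose userspace analogy only, the pthread mutex below plays the role of spin_lock_irqsave, which in the kernel additionally disables local interrupts, something this sketch does not model:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t host_lock = PTHREAD_MUTEX_INITIALIZER;
    static int port_state;          /* shared with the "IRQ" path */

    static void handle_error(void)   { port_state = 1; }
    static void stop_and_reset(void) { port_state = 0; }

    static void eng_timeout(void)
    {
            /* Hold the lock for the whole sequence, not per call, so no
             * other path sees the half-reset intermediate state. */
            pthread_mutex_lock(&host_lock);
            handle_error();
            stop_and_reset();
            pthread_mutex_unlock(&host_lock);
    }

    int main(void)
    {
            eng_timeout();
            printf("port state: %d\n", port_state);
            return 0;
    }

(Build with -lpthread.)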
drivers/usb/host/ohci-pxa27x.c
...
...
@@ -185,6 +185,9 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
	/* Select Power Management Mode */
	pxa27x_ohci_select_pmm(inf->port_mode);

+	if (inf->power_budget)
+		hcd->power_budget = inf->power_budget;
+
	ohci_hcd_init(hcd_to_ohci(hcd));

	retval = usb_add_hcd(hcd, pdev->resource[1].start, SA_INTERRUPT);
...
...
drivers/video/console/fbcon.c
...
...
@@ -1745,7 +1745,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
			fbcon_redraw_move(vc, p, 0, t, count);
			ypan_up_redraw(vc, t, count);
			if (vc->vc_rows - b > 0)
-				fbcon_redraw_move(vc, p, b - count,
+				fbcon_redraw_move(vc, p, b,
						  vc->vc_rows - b, b);
		} else
			fbcon_redraw_move(vc, p, t + count, b - t - count, t);
...
...
fs/debugfs/inode.c
...
...
@@ -67,12 +67,13 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
			 int mode, dev_t dev)
{
-	struct inode *inode = debugfs_get_inode(dir->i_sb, mode, dev);
+	struct inode *inode;
	int error = -EPERM;

	if (dentry->d_inode)
		return -EEXIST;

+	inode = debugfs_get_inode(dir->i_sb, mode, dev);
	if (inode) {
		d_instantiate(dentry, inode);
		dget(dentry);
...
...
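debugfs_mknod now checks dentry->d_inode before allocating the inode, so the -EEXIST early return can no longer leak a fresh allocation. The shape of the fix, with malloc standing in for debugfs_get_inode and plain negative numbers for the errno values:

    #include <stdio.h>
    #include <stdlib.h>

    struct dentry { void *d_inode; };

    /* Check preconditions first, allocate second: the early return can
     * then never leak the allocation. */
    static int mknod_fixed(struct dentry *dentry)
    {
            void *inode;

            if (dentry->d_inode)
                    return -17;             /* stands in for -EEXIST */

            inode = malloc(64);             /* stands in for the inode */
            if (!inode)
                    return -1;              /* -EPERM in the original */

            dentry->d_inode = inode;
            return 0;
    }

    int main(void)
    {
            struct dentry d = { NULL };

            printf("first:  %d\n", mknod_fixed(&d));   /* 0 */
            printf("second: %d\n", mknod_fixed(&d));   /* -17, no leak */
            free(d.d_inode);
            return 0;
    }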
include/asm-arm/arch-pxa/ohci.h
...
...
@@ -11,6 +11,8 @@ struct pxaohci_platform_data {
#define PMM_NPS_MODE           1
#define PMM_GLOBAL_MODE        2
#define PMM_PERPORT_MODE       3
+
+	int power_budget;
};

extern void pxa_set_ohci_info(struct pxaohci_platform_data *info);
...
...
include/asm-s390/futex.h
...
...
@@ -11,23 +11,24 @@
#define __futex_atomic_fixup \
		     ".section __ex_table,\"a\"\n" \
		     "   .align 4\n" \
-		     "   .long  0b,2b,1b,2b\n" \
+		     "   .long  0b,4b,2b,4b,3b,4b\n" \
		     ".previous"
#else /* __s390x__ */
#define __futex_atomic_fixup \
		     ".section __ex_table,\"a\"\n" \
		     "   .align 8\n" \
-		     "   .quad  0b,2b,1b,2b\n" \
+		     "   .quad  0b,4b,2b,4b,3b,4b\n" \
		     ".previous"
#endif /* __s390x__ */

#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
-	asm volatile("   l   %1,0(%6)\n" \
-		     "0: " insn \
-		     "   cs  %1,%2,0(%6)\n" \
-		     "1: jl  0b\n" \
+	asm volatile("   sacf 256\n" \
+		     "0: l   %1,0(%6)\n" \
+		     "1: " insn \
+		     "2: cs  %1,%2,0(%6)\n" \
+		     "3: jl  1b\n" \
		     "   lhi %0,0\n" \
-		     "2:\n" \
+		     "4: sacf 0\n" \
		     __futex_atomic_fixup \
		     : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
		       "=m" (*uaddr) \
...
...
include/linux/elevator.h
...
...
@@ -21,7 +21,7 @@ typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);

-typedef int (elevator_init_fn) (request_queue_t *, elevator_t *);
+typedef void *(elevator_init_fn) (request_queue_t *, elevator_t *);
typedef void (elevator_exit_fn) (elevator_t *);

struct elevator_ops
...
...
include/linux/i2o.h
...
...
@@ -1114,8 +1114,11 @@ static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
	mmsg->mfa = readl(c->in_port);
	if (unlikely(mmsg->mfa >= c->in_queue.len)) {
+		u32 mfa = mmsg->mfa;
+
		mempool_free(mmsg, c->in_msg.mempool);
-		if (mmsg->mfa == I2O_QUEUE_EMPTY)
+
+		if (mfa == I2O_QUEUE_EMPTY)
			return ERR_PTR(-EBUSY);
		return ERR_PTR(-EFAULT);
	}
...
...
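The i2o_msg_get hunk above fixes a use-after-free: the old code returned mmsg to the mempool and then read mmsg->mfa to pick the error code. The fix copies the field into a local before freeing. Distilled with malloc/free (QUEUE_EMPTY and the numeric return values are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    #define QUEUE_EMPTY 0xffffffffu

    struct msg { unsigned mfa; };

    static int classify(struct msg *m)
    {
            unsigned mfa = m->mfa;  /* copy the field BEFORE freeing */

            free(m);

            /* Deciding on the saved copy is safe; reading m->mfa here
             * would be a use-after-free. */
            if (mfa == QUEUE_EMPTY)
                    return -16;     /* stands in for -EBUSY */
            return -14;             /* stands in for -EFAULT */
    }

    int main(void)
    {
            struct msg *m = malloc(sizeof(*m));

            if (!m)
                    return 1;
            m->mfa = QUEUE_EMPTY;
            printf("status: %d\n", classify(m));
            return 0;
    }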
include/linux/mempolicy.h
...
...
@@ -36,6 +36,7 @@
#include <linux/nodemask.h>

struct vm_area_struct;
+struct mm_struct;

#ifdef CONFIG_NUMA
...
...
include/linux/pci-acpi.h
...
...
@@ -50,7 +50,7 @@
extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
extern acpi_status pci_osc_support_set(u32 flags);
#else
-#if !defined(acpi_status)
+#if !defined(AE_ERROR)
typedef u32 		acpi_status;
#define AE_ERROR	(acpi_status) (0x0001)
#endif
...
...
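The pci-acpi.h change above works because defined() only sees preprocessor macros: acpi_status is a typedef, invisible to #if, so the old guard was always true and could collide with a real definition; AE_ERROR is a macro, so testing it genuinely detects a prior declaration. A tiny demonstration (the names here are invented, not ACPI's):

    #include <stdio.h>

    typedef unsigned my_status;     /* a typedef: invisible to #if */
    #define MY_ERROR 0x0001         /* a macro: visible to #if */

    #if !defined(my_status)         /* always true, typedef or not */
    #define GUARD_ON_TYPEDEF_FIRED 1
    #endif

    #if !defined(MY_ERROR)          /* false: the macro exists */
    #define GUARD_ON_MACRO_FIRED 1
    #endif

    int main(void)
    {
    #ifdef GUARD_ON_TYPEDEF_FIRED
            puts("typedef guard fired (typedefs are invisible to #if)");
    #endif
    #ifdef GUARD_ON_MACRO_FIRED
            puts("macro guard fired");
    #endif
            return 0;
    }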
mm/shmem.c
...
...
@@ -1780,6 +1780,7 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

+	dentry->d_inode->i_nlink--;
	dir->i_nlink--;
	return shmem_unlink(dir, dentry);
}
...
...
@@ -2102,6 +2103,7 @@ static int shmem_fill_super(struct super_block *sb,
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
+	sb->s_time_gran = 1;

	inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
	if (!inode)
...
...
mm/vmscan.c
...
...
@@ -1061,7 +1061,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
loop_again:
	total_scanned = 0;
	nr_reclaimed = 0;
-	sc.may_writepage = !laptop_mode,
+	sc.may_writepage = !laptop_mode;
	sc.nr_mapped = read_page_state(nr_mapped);

	inc_page_state(pageoutrun);
...
...
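The vmscan fix above is a single character: the statement ended in a comma rather than a semicolon, fusing two assignments into one comma expression. As back-to-back statements that happens to behave identically, which is why it went unnoticed, but the same slip inside a larger expression silently changes the value:

    #include <stdio.h>

    int main(void)
    {
            int a = 0, b = 0, r;

            /* Trailing comma fuses the two assignments into one comma
             * expression; as plain statements the effect is the same... */
            a = 1,
            b = 2;

            /* ...but inside an expression the comma operator yields its
             * right operand, so r becomes 2, not 1. */
            r = (a = 1, b = 2);

            printf("a=%d b=%d r=%d\n", a, b, r);    /* a=1 b=2 r=2 */
            return 0;
    }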
net/dccp/ackvec.c
...
...
@@ -452,6 +452,7 @@ static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av,
				   (unsigned long long)avr->dccpavr_ack_ackno);
			dccp_ackvec_throw_record(av, avr);
			break;
		}
		/*
		 * If it wasn't received, continue scanning... we might
...
...
net/ipv4/ip_forward.c
...
...
@@ -116,6 +116,7 @@ int ip_forward(struct sk_buff *skb)
too_many_hops:
	/* Tell the sender its packet died... */
+	IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
	icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
drop:
	kfree_skb(skb);
...
...
net/ipv4/tcp_input.c
...
...
@@ -1649,7 +1649,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
	 * Hence, we can detect timed out packets during fast
	 * retransmit without falling to slow start.
	 */
-	if (tcp_head_timedout(sk, tp)) {
+	if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
		struct sk_buff *skb;

		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
...
...
@@ -1662,8 +1662,6 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
			if (!(TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)) {
				TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
				tp->lost_out += tcp_skb_pcount(skb);
-				if (IsReno(tp))
-					tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);

				/* clear xmit_retrans hint */
				if (tp->retransmit_skb_hint &&
...
...