openeuler / raspberrypi-kernel
Commit 966ea8c4
Author: Steve French
Date:   Mar 04, 2008

    Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6

Parents: 41c5ae68, 976dde01
Showing 47 changed files with 593 additions and 594 deletions (+593, -594)
arch/arm/mm/pgd.c                               +2   -4
arch/powerpc/boot/cuboot-bamboo.c               +1   -0
arch/powerpc/boot/cuboot-ebony.c                +1   -0
arch/powerpc/boot/cuboot-katmai.c               +1   -0
arch/powerpc/boot/cuboot-taishan.c              +2   -0
arch/powerpc/boot/cuboot-warp.c                 +1   -0
arch/powerpc/boot/dts/haleakala.dts             +1   -1
arch/powerpc/boot/dts/katmai.dts                +29  -29
arch/powerpc/oprofile/op_model_cell.c           +1   -1
arch/powerpc/platforms/52xx/mpc52xx_common.c    +1   -0
arch/powerpc/platforms/cell/iommu.c             +88  -63
arch/powerpc/platforms/cell/setup.c             +7   -0
arch/powerpc/platforms/cell/spu_base.c          +13  -3
arch/powerpc/platforms/cell/spufs/context.c     +3   -4
arch/powerpc/platforms/cell/spufs/file.c        +11  -1
arch/powerpc/platforms/cell/spufs/sched.c       +1   -1
arch/powerpc/platforms/cell/spufs/sputrace.c    +4   -3
arch/powerpc/platforms/cell/spufs/switch.c      +6   -0
arch/powerpc/platforms/celleb/beat.h            +0   -3
arch/x86/mm/pageattr.c                          +10  -11
drivers/acorn/char/defkeymap-l7200.c            +34  -34
drivers/base/transport_class.c                  +1   -3
drivers/char/defkeymap.c_shipped                +34  -34
drivers/char/xilinx_hwicap/buffer_icap.c        +40  -40
drivers/char/xilinx_hwicap/fifo_icap.c          +30  -30
drivers/char/xilinx_hwicap/xilinx_hwicap.c      +62  -76
drivers/char/xilinx_hwicap/xilinx_hwicap.h      +12  -12
drivers/message/fusion/mptbase.c                +13  -12
drivers/message/fusion/mptscsih.c               +5   -9
drivers/pci/rom.c                               +1   -2
drivers/rapidio/rio-driver.c                    +3   -5
drivers/s390/char/defkeymap.c                   +2   -2
drivers/scsi/scsi_scan.c                        +1   -2
drivers/usb/core/usb.c                          +2   -4
fs/buffer.c                                     +1   -2
fs/exec.c                                       +9   -1
fs/jbd/transaction.c                            +9   -8
fs/mpage.c                                      +3   -8
include/asm-powerpc/reg.h                       +3   -0
include/asm-x86/pgtable_32.h                    +1   -3
include/asm-x86/pgtable_64.h                    +2   -4
include/linux/mm_types.h                        +1   -4
include/linux/slub_def.h                        +2   -2
include/linux/usb.h                             +3   -6
kernel/exit.c                                   +46  -50
mm/slub.c                                       +89  -115
mm/truncate.c                                   +1   -2
arch/arm/mm/pgd.c
@@ -75,7 +75,7 @@ pgd_t *get_pgd_slow(struct mm_struct *mm)
 void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
 {
 	pmd_t *pmd;
-	struct page *pte;
+	pgtable_t pte;
 	if (!pgd)
 		return;
@@ -90,10 +90,8 @@ void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
 		goto free;
 	}
-	pte = pmd_page(*pmd);
+	pte = pmd_pgtable(*pmd);
 	pmd_clear(pmd);
-	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
-	pte_lock_deinit(pte);
 	pte_free(mm, pte);
 	pmd_free(mm, pmd);
 free:
arch/powerpc/boot/cuboot-bamboo.c
@@ -17,6 +17,7 @@
 #include "44x.h"
 #include "cuboot.h"
 #define TARGET_4xx
+#define TARGET_44x
 #include "ppcboot.h"
arch/powerpc/boot/cuboot-ebony.c
@@ -17,6 +17,7 @@
 #include "44x.h"
 #include "cuboot.h"
 #define TARGET_4xx
+#define TARGET_44x
 #include "ppcboot.h"
arch/powerpc/boot/cuboot-katmai.c
@@ -22,6 +22,7 @@
 #include "44x.h"
 #include "cuboot.h"
 #define TARGET_4xx
+#define TARGET_44x
 #include "ppcboot.h"
arch/powerpc/boot/cuboot-taishan.c
@@ -21,7 +21,9 @@
 #include "dcr.h"
 #include "4xx.h"
 #define TARGET_4xx
+#define TARGET_44x
+#define TARGET_440GX
 #include "ppcboot.h"
 static bd_t bd;
arch/powerpc/boot/cuboot-warp.c
@@ -11,6 +11,7 @@
 #include "4xx.h"
 #include "cuboot.h"
 #define TARGET_4xx
+#define TARGET_44x
 #include "ppcboot.h"
arch/powerpc/boot/dts/haleakala.dts
@@ -235,7 +235,7 @@
 		#interrupt-cells = <1>;
 		#size-cells = <2>;
 		#address-cells = <3>;
-		compatible = "ibm,plb-pciex-405exr", "ibm,plb-pciex";
+		compatible = "ibm,plb-pciex-405ex", "ibm,plb-pciex";
 		primary;
 		port = <0>;	/* port number */
 		reg = <a0000000 20000000	/* Config space access */
arch/powerpc/boot/dts/katmai.dts
@@ -38,8 +38,8 @@
 		timebase-frequency = <0>;	/* Filled in by zImage */
 		i-cache-line-size = <20>;
 		d-cache-line-size = <20>;
-		i-cache-size = <20000>;
-		d-cache-size = <20000>;
+		i-cache-size = <8000>;
+		d-cache-size = <8000>;
 		dcr-controller;
 		dcr-access-method = "native";
 	};
@@ -136,11 +136,11 @@
 	};
 	POB0: opb {
-		compatible = "ibm,opb-440spe", "ibm,opb-440gp", "ibm,opb";
+		compatible = "ibm,opb-440spe", "ibm,opb-440gp", "ibm,opb";
 		#address-cells = <1>;
 		#size-cells = <1>;
-		ranges = <00000000 4 e0000000 20000000>;
-		clock-frequency = <0>;	/* Filled in by zImage */
+		ranges = <00000000 4 e0000000 20000000>;
+		clock-frequency = <0>;	/* Filled in by zImage */
 		EBC0: ebc {
 			compatible = "ibm,ebc-440spe", "ibm,ebc-440gp", "ibm,ebc";
@@ -153,38 +153,38 @@
 		};
 		UART0: serial@10000200 {
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <10000200 8>;
+			device_type = "serial";
+			compatible = "ns16550";
+			reg = <10000200 8>;
 			virtual-reg = <a0000200>;
-			clock-frequency = <0>;	/* Filled in by zImage */
-			current-speed = <1c200>;
-			interrupt-parent = <&UIC0>;
-			interrupts = <0 4>;
-		};
+			clock-frequency = <0>;	/* Filled in by zImage */
+			current-speed = <1c200>;
+			interrupt-parent = <&UIC0>;
+			interrupts = <0 4>;
+		};
 		UART1: serial@10000300 {
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <10000300 8>;
+			device_type = "serial";
+			compatible = "ns16550";
+			reg = <10000300 8>;
 			virtual-reg = <a0000300>;
-			clock-frequency = <0>;
-			current-speed = <0>;
-			interrupt-parent = <&UIC0>;
-			interrupts = <1 4>;
-		};
+			clock-frequency = <0>;
+			current-speed = <0>;
+			interrupt-parent = <&UIC0>;
+			interrupts = <1 4>;
+		};
 		UART2: serial@10000600 {
-			device_type = "serial";
-			compatible = "ns16550";
-			reg = <10000600 8>;
+			device_type = "serial";
+			compatible = "ns16550";
+			reg = <10000600 8>;
 			virtual-reg = <a0000600>;
-			clock-frequency = <0>;
-			current-speed = <0>;
-			interrupt-parent = <&UIC1>;
-			interrupts = <5 4>;
-		};
+			clock-frequency = <0>;
+			current-speed = <0>;
+			interrupt-parent = <&UIC1>;
+			interrupts = <5 4>;
+		};
 		IIC0: i2c@10000400 {
 			compatible = "ibm,iic-440spe", "ibm,iic-440gp", "ibm,iic";
arch/powerpc/oprofile/op_model_cell.c
@@ -1151,7 +1151,7 @@ static void cell_handle_interrupt(struct pt_regs *regs,
 		for (i = 0; i < num_counters; ++i) {
 			if ((interrupt_mask & CBE_PM_CTR_OVERFLOW_INTR(i))
 			    && ctr[i].enabled) {
-				oprofile_add_pc(pc, is_kernel, i);
+				oprofile_add_ext_sample(pc, regs, i, is_kernel);
 				cbe_write_ctr(cpu, i, reset_value[i]);
 			}
 		}
arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -199,6 +199,7 @@ int mpc52xx_set_psc_clkdiv(int psc_id, int clkdiv)
 	return 0;
 }
+EXPORT_SYMBOL(mpc52xx_set_psc_clkdiv);
 
 /**
  * mpc52xx_restart: ppc_md->restart hook for mpc5200 using the watchdog timer
arch/powerpc/platforms/cell/iommu.c
@@ -113,7 +113,7 @@
 /* IOMMU sizing */
 #define IO_SEGMENT_SHIFT	28
-#define IO_PAGENO_BITS		(IO_SEGMENT_SHIFT - IOMMU_PAGE_SHIFT)
+#define IO_PAGENO_BITS(shift)	(IO_SEGMENT_SHIFT - (shift))
 
 /* The high bit needs to be set on every DMA address */
 #define SPIDER_DMA_OFFSET	0x80000000ul
@@ -123,7 +123,6 @@ struct iommu_window {
 	struct cbe_iommu *iommu;
 	unsigned long offset;
 	unsigned long size;
-	unsigned long pte_offset;
 	unsigned int ioid;
 	struct iommu_table table;
 };
@@ -200,7 +199,7 @@ static void tce_build_cell(struct iommu_table *tbl, long index, long npages,
 		(window->ioid & IOPTE_IOID_Mask);
 #endif
-	io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
 
 	for (i = 0; i < npages; i++, uaddr += IOMMU_PAGE_SIZE)
 		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
@@ -232,7 +231,7 @@ static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
 		| (window->ioid & IOPTE_IOID_Mask);
 #endif
-	io_pte = (unsigned long *)tbl->it_base + (index - window->pte_offset);
+	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
 
 	for (i = 0; i < npages; i++)
 		io_pte[i] = pte;
@@ -307,76 +306,84 @@ static int cell_iommu_find_ioc(int nid, unsigned long *base)
 	return -ENODEV;
 }
 
-static void cell_iommu_setup_page_tables(struct cbe_iommu *iommu,
+static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
 		unsigned long dbase, unsigned long dsize,
 		unsigned long fbase, unsigned long fsize)
 {
 	struct page *page;
-	int i;
-	unsigned long reg, segments, pages_per_segment, ptab_size, stab_size,
-		      n_pte_pages, base;
-
-	base = dbase;
-	if (fsize != 0)
-		base = min(fbase, dbase);
+	unsigned long segments, stab_size;
 
 	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;
-	pages_per_segment = 1ull << IO_PAGENO_BITS;
 
-	pr_debug("%s: iommu[%d]: segments: %lu, pages per segment: %lu\n",
-			__FUNCTION__, iommu->nid, segments, pages_per_segment);
+	pr_debug("%s: iommu[%d]: segments: %lu\n",
+			__FUNCTION__, iommu->nid, segments);
 
 	/* set up the segment table */
 	stab_size = segments * sizeof(unsigned long);
 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
 	BUG_ON(!page);
 	iommu->stab = page_address(page);
-	clear_page(iommu->stab);
+	memset(iommu->stab, 0, stab_size);
+}
+
+static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
+		unsigned long base, unsigned long size, unsigned long gap_base,
+		unsigned long gap_size, unsigned long page_shift)
+{
+	struct page *page;
+	int i;
+	unsigned long reg, segments, pages_per_segment, ptab_size,
+		      n_pte_pages, start_seg, *ptab;
+
+	start_seg = base >> IO_SEGMENT_SHIFT;
+	segments  = size >> IO_SEGMENT_SHIFT;
+	pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
+	/* PTEs for each segment must start on a 4K bounday */
+	pages_per_segment = max(pages_per_segment,
+				(1 << 12) / sizeof(unsigned long));
 
 	/* ... and the page tables. Since these are contiguous, we can treat
 	 * the page tables as one array of ptes, like pSeries does.
 	 */
 	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
 	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __FUNCTION__,
 			iommu->nid, ptab_size, get_order(ptab_size));
 	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
 	BUG_ON(!page);
-	iommu->ptab = page_address(page);
-	memset(iommu->ptab, 0, ptab_size);
-
-	/* allocate a bogus page for the end of each mapping */
-	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
-	BUG_ON(!page);
-	iommu->pad_page = page_address(page);
-	clear_page(iommu->pad_page);
-
-	/* number of pages needed for a page table */
-	n_pte_pages = (pages_per_segment *
-		       sizeof(unsigned long)) >> IOMMU_PAGE_SHIFT;
+	ptab = page_address(page);
+	memset(ptab, 0, ptab_size);
+
+	/* number of 4K pages needed for a page table */
+	n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;
 
 	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
-			__FUNCTION__, iommu->nid, iommu->stab, iommu->ptab,
+			__FUNCTION__, iommu->nid, iommu->stab, ptab,
 			n_pte_pages);
 
 	/* initialise the STEs */
 	reg = IOSTE_V | ((n_pte_pages - 1) << 5);
 
-	if (IOMMU_PAGE_SIZE == 0x1000)
-		reg |= IOSTE_PS_4K;
-	else if (IOMMU_PAGE_SIZE == 0x10000)
-		reg |= IOSTE_PS_64K;
-	else {
-		extern void __unknown_page_size_error(void);
-		__unknown_page_size_error();
+	switch (page_shift) {
+	case 12: reg |= IOSTE_PS_4K;  break;
+	case 16: reg |= IOSTE_PS_64K; break;
+	case 20: reg |= IOSTE_PS_1M;  break;
+	case 24: reg |= IOSTE_PS_16M; break;
+	default: BUG();
 	}
 
+	gap_base = gap_base >> IO_SEGMENT_SHIFT;
+	gap_size = gap_size >> IO_SEGMENT_SHIFT;
+
 	pr_debug("Setting up IOMMU stab:\n");
-	for (i = base >> IO_SEGMENT_SHIFT; i < segments; i++) {
-		iommu->stab[i] = reg |
-			(__pa(iommu->ptab) + n_pte_pages * IOMMU_PAGE_SIZE * i);
+	for (i = start_seg; i < (start_seg + segments); i++) {
+		if (i >= gap_base && i < (gap_base + gap_size)) {
+			pr_debug("\toverlap at %d, skipping\n", i);
+			continue;
+		}
+		iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
+					(i - start_seg));
 		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
 	}
+
+	return ptab;
 }
 
 static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
@@ -423,7 +430,9 @@ static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
 static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
 			unsigned long base, unsigned long size)
 {
-	cell_iommu_setup_page_tables(iommu, base, size, 0, 0);
+	cell_iommu_setup_stab(iommu, base, size, 0, 0);
+	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
+					    IOMMU_PAGE_SHIFT);
 
 	cell_iommu_enable_hardware(iommu);
 }
@@ -464,6 +473,7 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 			unsigned long pte_offset)
 {
 	struct iommu_window *window;
+	struct page *page;
 	u32 ioid;
 
 	ioid = cell_iommu_get_ioid(np);
@@ -475,13 +485,11 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 	window->size = size;
 	window->ioid = ioid;
 	window->iommu = iommu;
-	window->pte_offset = pte_offset;
 
 	window->table.it_blocksize = 16;
 	window->table.it_base = (unsigned long)iommu->ptab;
 	window->table.it_index = iommu->nid;
-	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) +
-		window->pte_offset;
+	window->table.it_offset = (offset >> IOMMU_PAGE_SHIFT) + pte_offset;
 	window->table.it_size = size >> IOMMU_PAGE_SHIFT;
 
 	iommu_init_table(&window->table, iommu->nid);
@@ -504,6 +512,11 @@ cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
 	 * This code also assumes that we have a window that starts at 0,
 	 * which is the case on all spider based blades.
 	 */
+	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
+	BUG_ON(!page);
+	iommu->pad_page = page_address(page);
+	clear_page(iommu->pad_page);
+
 	__set_bit(0, window->table.it_map);
 	tce_build_cell(&window->table, window->table.it_offset, 1,
 		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE);
@@ -549,7 +562,7 @@ static void cell_dma_dev_setup_iommu(struct device *dev)
 	archdata->dma_data = &window->table;
 }
 
-static void cell_dma_dev_setup_static(struct device *dev);
+static void cell_dma_dev_setup_fixed(struct device *dev);
 
 static void cell_dma_dev_setup(struct device *dev)
 {
@@ -557,7 +570,7 @@ static void cell_dma_dev_setup(struct device *dev)
 	/* Order is important here, these are not mutually exclusive */
 	if (get_dma_ops(dev) == &dma_iommu_fixed_ops)
-		cell_dma_dev_setup_static(dev);
+		cell_dma_dev_setup_fixed(dev);
 	else if (get_pci_dma_ops() == &dma_iommu_ops)
 		cell_dma_dev_setup_iommu(dev);
 	else if (get_pci_dma_ops() == &dma_direct_ops)
@@ -858,7 +871,7 @@ static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask)
 	return 0;
 }
 
-static void cell_dma_dev_setup_static(struct device *dev)
+static void cell_dma_dev_setup_fixed(struct device *dev)
 {
 	struct dev_archdata *archdata = &dev->archdata;
 	u64 addr;
@@ -869,35 +882,45 @@ static void cell_dma_dev_setup_static(struct device *dev)
 	dev_dbg(dev, "iommu: fixed addr = %lx\n", addr);
 }
 
+static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
+			   unsigned long base_pte)
+{
+	unsigned long segment, offset;
+
+	segment = addr >> IO_SEGMENT_SHIFT;
+	offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
+	ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));
+
+	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
+		  addr, ptab, segment, offset);
+
+	ptab[offset] = base_pte | (__pa(addr) & IOPTE_RPN_Mask);
+}
+
 static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
 	struct device_node *np, unsigned long dbase, unsigned long dsize,
 	unsigned long fbase, unsigned long fsize)
 {
-	unsigned long base_pte, uaddr, *io_pte;
-	int i;
+	unsigned long base_pte, uaddr, ioaddr, *ptab;
 
-	dma_iommu_fixed_base = fbase;
+	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);
 
-	/* convert from bytes into page table indices */
-	dbase = dbase >> IOMMU_PAGE_SHIFT;
-	dsize = dsize >> IOMMU_PAGE_SHIFT;
-	fbase = fbase >> IOMMU_PAGE_SHIFT;
-	fsize = fsize >> IOMMU_PAGE_SHIFT;
+	dma_iommu_fixed_base = fbase;
 
 	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);
 
-	io_pte = iommu->ptab;
 	base_pte = IOPTE_PP_W | IOPTE_PP_R | IOPTE_M | IOPTE_SO_RW
 		    | (cell_iommu_get_ioid(np) & IOPTE_IOID_Mask);
 
-	uaddr = 0;
-	for (i = fbase; i < fbase + fsize; i++, uaddr += IOMMU_PAGE_SIZE) {
+	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
 		/* Don't touch the dynamic region */
-		if (i >= dbase && i < (dbase + dsize)) {
-			pr_debug("iommu: static/dynamic overlap, skipping\n");
+		ioaddr = uaddr + fbase;
+		if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
+			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
 			continue;
 		}
-		io_pte[i] = base_pte | (__pa(uaddr) & IOPTE_RPN_Mask);
+
+		insert_16M_pte(uaddr, ptab, base_pte);
 	}
 
 	mb();
@@ -995,7 +1018,9 @@ static int __init cell_iommu_fixed_mapping_init(void)
 			"fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
 			 dbase + dsize, fbase, fbase + fsize);
 
-		cell_iommu_setup_page_tables(iommu, dbase, dsize, fbase, fsize);
+		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
+		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
+						    IOMMU_PAGE_SHIFT);
 		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
 					     fbase, fsize);
 		cell_iommu_enable_hardware(iommu);
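The IO_PAGENO_BITS change above turns a fixed constant into a function of the page shift in use (IO_SEGMENT_SHIFT stays 28), so the 16MB fixed mapping can size its own page table. A standalone, illustrative sketch of that arithmetic — the constants come from the hunk above, the program itself is not part of the patch:

#include <stdio.h>

#define IO_SEGMENT_SHIFT 28
/* new form: page-number bits depend on the page shift being mapped */
#define IO_PAGENO_BITS(shift) (IO_SEGMENT_SHIFT - (shift))

int main(void)
{
	unsigned long shifts[] = { 12, 16, 24 };	/* 4K, 64K, 16M pages */
	for (int i = 0; i < 3; i++) {
		unsigned long ptes = 1ul << IO_PAGENO_BITS(shifts[i]);
		printf("page shift %lu: %lu PTEs per 256MB segment\n",
		       shifts[i], ptes);
	}
	return 0;
}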
arch/powerpc/platforms/cell/setup.c
@@ -149,6 +149,11 @@ static void __init cell_init_irq(void)
 	mpic_init_IRQ();
 }
 
+static void __init cell_set_dabrx(void)
+{
+	mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER);
+}
+
 static void __init cell_setup_arch(void)
 {
 #ifdef CONFIG_SPU_BASE
@@ -158,6 +163,8 @@ static void __init cell_setup_arch(void)
 	cbe_regs_init();
 
+	cell_set_dabrx();
+
 #ifdef CONFIG_CBE_RAS
 	cbe_ras_init();
 #endif
arch/powerpc/platforms/cell/spu_base.c
@@ -81,9 +81,12 @@ struct spu_slb {
 void spu_invalidate_slbs(struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
+	unsigned long flags;
 
+	spin_lock_irqsave(&spu->register_lock, flags);
 	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
 		out_be64(&priv2->slb_invalidate_all_W, 0UL);
+	spin_unlock_irqrestore(&spu->register_lock, flags);
 }
 EXPORT_SYMBOL_GPL(spu_invalidate_slbs);
@@ -148,7 +151,11 @@ static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
 			__func__, slbe, slb->vsid, slb->esid);
 
 	out_be64(&priv2->slb_index_W, slbe);
+	/* set invalid before writing vsid */
+	out_be64(&priv2->slb_esid_RW, 0);
+	/* now it's safe to write the vsid */
 	out_be64(&priv2->slb_vsid_RW, slb->vsid);
+	/* setting the new esid makes the entry valid again */
 	out_be64(&priv2->slb_esid_RW, slb->esid);
 }
@@ -290,9 +297,11 @@ void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
 		nr_slbs++;
 	}
 
+	spin_lock_irq(&spu->register_lock);
 	/* Add the set of SLBs */
 	for (i = 0; i < nr_slbs; i++)
 		spu_load_slb(spu, i, &slbs[i]);
+	spin_unlock_irq(&spu->register_lock);
 }
 EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);
@@ -337,13 +346,14 @@ spu_irq_class_1(int irq, void *data)
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		spu_mfc_dsisr_set(spu, 0ul);
 	spu_int_stat_clear(spu, 1, stat);
-	spin_unlock(&spu->register_lock);
-	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
-			dar, dsisr);
 
 	if (stat & CLASS1_SEGMENT_FAULT_INTR)
 		__spu_trap_data_seg(spu, dar);
 
+	spin_unlock(&spu->register_lock);
+	pr_debug("%s: %lx %lx %lx %lx\n", __FUNCTION__, mask, stat,
+			dar, dsisr);
+
 	if (stat & CLASS1_STORAGE_FAULT_INTR)
 		__spu_trap_data_map(spu, dar, dsisr);
arch/powerpc/platforms/cell/spufs/context.c
@@ -109,13 +109,12 @@ void spu_forget(struct spu_context *ctx)
 	/*
 	 * This is basically an open-coded spu_acquire_saved, except that
-	 * we don't acquire the state mutex interruptible.
+	 * we don't acquire the state mutex interruptible, and we don't
+	 * want this context to be rescheduled on release.
 	 */
 	mutex_lock(&ctx->state_mutex);
-	if (ctx->state != SPU_STATE_SAVED) {
-		set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags);
+	if (ctx->state != SPU_STATE_SAVED)
 		spu_deactivate(ctx);
-	}
 
 	mm = ctx->owner;
 	ctx->owner = NULL;
arch/powerpc/platforms/cell/spufs/file.c
@@ -366,6 +366,13 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	if (offset >= ps_size)
 		return NOPFN_SIGBUS;
 
+	/*
+	 * Because we release the mmap_sem, the context may be destroyed while
+	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
+	 * in the meantime.
+	 */
+	get_spu_context(ctx);
+
 	/*
 	 * We have to wait for context to be loaded before we have
 	 * pages to hand out to the user, but we don't want to wait
@@ -375,7 +382,7 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	 * hanged.
 	 */
 	if (spu_acquire(ctx))
-		return NOPFN_REFAULT;
+		goto refault;
 
 	if (ctx->state == SPU_STATE_SAVED) {
 		up_read(&current->mm->mmap_sem);
@@ -391,6 +398,9 @@ static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
 	if (!ret)
 		spu_release(ctx);
+
+refault:
+	put_spu_context(ctx);
 	return NOPFN_REFAULT;
 }
arch/powerpc/platforms/cell/spufs/sched.c
@@ -246,7 +246,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	spu_switch_notify(spu, ctx);
 	ctx->state = SPU_STATE_RUNNABLE;
 
-	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
+	spuctx_switch_state(ctx, SPU_UTIL_USER);
 }
 
 /*
arch/powerpc/platforms/cell/spufs/sputrace.c
@@ -58,12 +58,12 @@ static int sputrace_sprint(char *tbuf, int n)
 		ktime_to_timespec(ktime_sub(t->tstamp, sputrace_start));
 
 	return snprintf(tbuf, n,
-		"[%lu.%09lu] %d: %s (thread = %d, spu = %d)\n",
+		"[%lu.%09lu] %d: %s (ctx thread = %d, spu = %d)\n",
 		(unsigned long) tv.tv_sec,
 		(unsigned long) tv.tv_nsec,
-		t->owner_tid,
-		t->name,
 		t->curr_tid,
+		t->name,
+		t->owner_tid,
 		t->number);
 }
@@ -188,6 +188,7 @@ struct spu_probe spu_probes[] = {
 	{ "spufs_ps_nopfn__insert", "%p %p", spu_context_event },
 	{ "spu_acquire_saved__enter", "%p", spu_context_nospu_event },
 	{ "destroy_spu_context__enter", "%p", spu_context_nospu_event },
+	{ "spufs_stop_callback__enter", "%p %p", spu_context_event },
 };
 
 static int __init sputrace_init(void)
arch/powerpc/platforms/cell/spufs/switch.c
@@ -34,6 +34,7 @@
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
@@ -117,6 +118,8 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 	 *     Write INT_MASK_class1 with value of 0.
 	 *     Save INT_Mask_class2 in CSA.
 	 *     Write INT_MASK_class2 with value of 0.
+	 *     Synchronize all three interrupts to be sure
+	 *     we no longer execute a handler on another CPU.
 	 */
 	spin_lock_irq(&spu->register_lock);
 	if (csa) {
@@ -129,6 +132,9 @@ static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
 		spu_int_mask_set(spu, 2, 0ul);
 	eieio();
 	spin_unlock_irq(&spu->register_lock);
+	synchronize_irq(spu->irqs[0]);
+	synchronize_irq(spu->irqs[1]);
+	synchronize_irq(spu->irqs[2]);
 }
 
 static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
arch/powerpc/platforms/celleb/beat.h
@@ -21,9 +21,6 @@
 #ifndef _CELLEB_BEAT_H
 #define _CELLEB_BEAT_H
 
-#define DABRX_KERNEL		(1UL<<1)
-#define DABRX_USER		(1UL<<0)
-
 int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *);
 int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t);
 int64_t beat_repository_encode(int, const char *, uint64_t[4]);
arch/x86/mm/pageattr.c
@@ -26,7 +26,6 @@ struct cpa_data {
 	pgprot_t	mask_set;
 	pgprot_t	mask_clr;
 	int		numpages;
-	int		processed;
 	int		flushtlb;
 	unsigned long	pfn;
 };
@@ -291,8 +290,8 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 */
 	nextpage_addr = (address + psize) & pmask;
 	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
-	if (numpages < cpa->processed)
-		cpa->processed = numpages;
+	if (numpages < cpa->numpages)
+		cpa->numpages = numpages;
 
 	/*
 	 * We are safe now. Check whether the new pgprot is the same:
@@ -319,7 +318,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 */
 	addr = address + PAGE_SIZE;
 	pfn++;
-	for (i = 1; i < cpa->processed; i++, addr += PAGE_SIZE, pfn++) {
+	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
 		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);
 
 		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
@@ -343,7 +342,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
 	 * that we limited the number of possible pages already to
 	 * the number of pages in the large page.
 	 */
-	if (address == (nextpage_addr - psize) && cpa->processed == numpages) {
+	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
 		/*
 		 * The address is aligned and the number of pages
 		 * covers the full page.
@@ -573,7 +572,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 		set_pte_atomic(kpte, new_pte);
 		cpa->flushtlb = 1;
 	}
-	cpa->processed = 1;
+	cpa->numpages = 1;
 	return 0;
 }
@@ -584,7 +583,7 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 	do_split = try_preserve_large_page(kpte, address, cpa);
 	/*
 	 * When the range fits into the existing large page,
-	 * return. cp->processed and cpa->tlbflush have been updated in
+	 * return. cp->numpages and cpa->tlbflush have been updated in
 	 * try_large_page:
 	 */
 	if (do_split <= 0)
@@ -663,7 +662,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 		 * Store the remaining nr of pages for the large page
 		 * preservation check.
 		 */
-		cpa->numpages = cpa->processed = numpages;
+		cpa->numpages = numpages;
 
 		ret = __change_page_attr(cpa, checkalias);
 		if (ret)
@@ -680,9 +679,9 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 		 * CPA operation. Either a large page has been
 		 * preserved or a single page update happened.
 		 */
-		BUG_ON(cpa->processed > numpages);
-		numpages -= cpa->processed;
-		cpa->vaddr += cpa->processed * PAGE_SIZE;
+		BUG_ON(cpa->numpages > numpages);
+		numpages -= cpa->numpages;
+		cpa->vaddr += cpa->numpages * PAGE_SIZE;
 	}
 	return 0;
 }
drivers/acorn/char/defkeymap-l7200.c
@@ -347,40 +347,40 @@ char *func_table[MAX_NR_FUNC] = {
 };
 
 struct kbdiacruc accent_table[MAX_DIACR] = {
-	{'`', 'A', '\300'},	{'`', 'a', '\340'},
-	{'\'', 'A', '\301'},	{'\'', 'a', '\341'},
-	{'^', 'A', '\302'},	{'^', 'a', '\342'},
-	{'~', 'A', '\303'},	{'~', 'a', '\343'},
-	{'"', 'A', '\304'},	{'"', 'a', '\344'},
-	{'O', 'A', '\305'},	{'o', 'a', '\345'},
-	{'0', 'A', '\305'},	{'0', 'a', '\345'},
-	{'A', 'A', '\305'},	{'a', 'a', '\345'},
-	{'A', 'E', '\306'},	{'a', 'e', '\346'},
-	{',', 'C', '\307'},	{',', 'c', '\347'},
-	{'`', 'E', '\310'},	{'`', 'e', '\350'},
-	{'\'', 'E', '\311'},	{'\'', 'e', '\351'},
-	{'^', 'E', '\312'},	{'^', 'e', '\352'},
-	{'"', 'E', '\313'},	{'"', 'e', '\353'},
-	{'`', 'I', '\314'},	{'`', 'i', '\354'},
-	{'\'', 'I', '\315'},	{'\'', 'i', '\355'},
-	{'^', 'I', '\316'},	{'^', 'i', '\356'},
-	{'"', 'I', '\317'},	{'"', 'i', '\357'},
-	{'-', 'D', '\320'},	{'-', 'd', '\360'},
-	{'~', 'N', '\321'},	{'~', 'n', '\361'},
-	{'`', 'O', '\322'},	{'`', 'o', '\362'},
-	{'\'', 'O', '\323'},	{'\'', 'o', '\363'},
-	{'^', 'O', '\324'},	{'^', 'o', '\364'},
-	{'~', 'O', '\325'},	{'~', 'o', '\365'},
-	{'"', 'O', '\326'},	{'"', 'o', '\366'},
-	{'/', 'O', '\330'},	{'/', 'o', '\370'},
-	{'`', 'U', '\331'},	{'`', 'u', '\371'},
-	{'\'', 'U', '\332'},	{'\'', 'u', '\372'},
-	{'^', 'U', '\333'},	{'^', 'u', '\373'},
-	{'"', 'U', '\334'},	{'"', 'u', '\374'},
-	{'\'', 'Y', '\335'},	{'\'', 'y', '\375'},
-	{'T', 'H', '\336'},	{'t', 'h', '\376'},
-	{'s', 's', '\337'},	{'"', 'y', '\377'},
-	{'s', 'z', '\337'},	{'i', 'j', '\377'},
+	{'`', 'A', 0300},	{'`', 'a', 0340},
+	{'\'', 'A', 0301},	{'\'', 'a', 0341},
+	{'^', 'A', 0302},	{'^', 'a', 0342},
+	{'~', 'A', 0303},	{'~', 'a', 0343},
+	{'"', 'A', 0304},	{'"', 'a', 0344},
+	{'O', 'A', 0305},	{'o', 'a', 0345},
+	{'0', 'A', 0305},	{'0', 'a', 0345},
+	{'A', 'A', 0305},	{'a', 'a', 0345},
+	{'A', 'E', 0306},	{'a', 'e', 0346},
+	{',', 'C', 0307},	{',', 'c', 0347},
+	{'`', 'E', 0310},	{'`', 'e', 0350},
+	{'\'', 'E', 0311},	{'\'', 'e', 0351},
+	{'^', 'E', 0312},	{'^', 'e', 0352},
+	{'"', 'E', 0313},	{'"', 'e', 0353},
+	{'`', 'I', 0314},	{'`', 'i', 0354},
+	{'\'', 'I', 0315},	{'\'', 'i', 0355},
+	{'^', 'I', 0316},	{'^', 'i', 0356},
+	{'"', 'I', 0317},	{'"', 'i', 0357},
+	{'-', 'D', 0320},	{'-', 'd', 0360},
+	{'~', 'N', 0321},	{'~', 'n', 0361},
+	{'`', 'O', 0322},	{'`', 'o', 0362},
+	{'\'', 'O', 0323},	{'\'', 'o', 0363},
+	{'^', 'O', 0324},	{'^', 'o', 0364},
+	{'~', 'O', 0325},	{'~', 'o', 0365},
+	{'"', 'O', 0326},	{'"', 'o', 0366},
+	{'/', 'O', 0330},	{'/', 'o', 0370},
+	{'`', 'U', 0331},	{'`', 'u', 0371},
+	{'\'', 'U', 0332},	{'\'', 'u', 0372},
+	{'^', 'U', 0333},	{'^', 'u', 0373},
+	{'"', 'U', 0334},	{'"', 'u', 0374},
+	{'\'', 'Y', 0335},	{'\'', 'y', 0375},
+	{'T', 'H', 0336},	{'t', 'h', 0376},
+	{'s', 's', 0337},	{'"', 'y', 0377},
+	{'s', 'z', 0337},	{'i', 'j', 0377},
 };
 
 unsigned int accent_table_size = 68;
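Both keymap tables in this commit replace '\3xx' character escapes with plain octal integer constants. The two spellings name the same bit pattern, but a character constant goes through the (possibly signed) char type first, which changes the value it promotes to. A small, illustrative check of that difference (not part of the patch):

#include <stdio.h>

int main(void)
{
	/* same 8-bit pattern, different C types */
	signed char as_char = '\300';	/* promotes to -64 where char is signed */
	unsigned int as_octal = 0300;	/* always 192 */

	printf("'\\300' via signed char -> %d\n", (int)as_char);
	printf("0300 as an int         -> %u\n", as_octal);
	return 0;
}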
drivers/base/transport_class.c
@@ -126,9 +126,7 @@ static int transport_setup_classdev(struct attribute_container *cont,
 }
 
 /**
- * transport_setup_device - declare a new dev for transport class association
- *			    but don't make it visible yet.
- *
+ * transport_setup_device - declare a new dev for transport class association but don't make it visible yet.
  * @dev: the generic device representing the entity being added
  *
  * Usually, dev represents some component in the HBA system (either
drivers/char/defkeymap.c_shipped
@@ -223,40 +223,40 @@ char *func_table[MAX_NR_FUNC] = {
 };
 
 struct kbdiacruc accent_table[MAX_DIACR] = {
-	{'`', 'A', '\300'}, {'`', 'a', '\340'},
-	{'\'', 'A', '\301'}, {'\'', 'a', '\341'},
-	{'^', 'A', '\302'}, {'^', 'a', '\342'},
-	{'~', 'A', '\303'}, {'~', 'a', '\343'},
-	{'"', 'A', '\304'}, {'"', 'a', '\344'},
-	{'O', 'A', '\305'}, {'o', 'a', '\345'},
-	{'0', 'A', '\305'}, {'0', 'a', '\345'},
-	{'A', 'A', '\305'}, {'a', 'a', '\345'},
-	{'A', 'E', '\306'}, {'a', 'e', '\346'},
-	{',', 'C', '\307'}, {',', 'c', '\347'},
-	{'`', 'E', '\310'}, {'`', 'e', '\350'},
-	{'\'', 'E', '\311'}, {'\'', 'e', '\351'},
-	{'^', 'E', '\312'}, {'^', 'e', '\352'},
-	{'"', 'E', '\313'}, {'"', 'e', '\353'},
-	{'`', 'I', '\314'}, {'`', 'i', '\354'},
-	{'\'', 'I', '\315'}, {'\'', 'i', '\355'},
-	{'^', 'I', '\316'}, {'^', 'i', '\356'},
-	{'"', 'I', '\317'}, {'"', 'i', '\357'},
-	{'-', 'D', '\320'}, {'-', 'd', '\360'},
-	{'~', 'N', '\321'}, {'~', 'n', '\361'},
-	{'`', 'O', '\322'}, {'`', 'o', '\362'},
-	{'\'', 'O', '\323'}, {'\'', 'o', '\363'},
-	{'^', 'O', '\324'}, {'^', 'o', '\364'},
-	{'~', 'O', '\325'}, {'~', 'o', '\365'},
-	{'"', 'O', '\326'}, {'"', 'o', '\366'},
-	{'/', 'O', '\330'}, {'/', 'o', '\370'},
-	{'`', 'U', '\331'}, {'`', 'u', '\371'},
-	{'\'', 'U', '\332'}, {'\'', 'u', '\372'},
-	{'^', 'U', '\333'}, {'^', 'u', '\373'},
-	{'"', 'U', '\334'}, {'"', 'u', '\374'},
-	{'\'', 'Y', '\335'}, {'\'', 'y', '\375'},
-	{'T', 'H', '\336'}, {'t', 'h', '\376'},
-	{'s', 's', '\337'}, {'"', 'y', '\377'},
-	{'s', 'z', '\337'}, {'i', 'j', '\377'},
+	{'`', 'A', 0300}, {'`', 'a', 0340},
+	{'\'', 'A', 0301}, {'\'', 'a', 0341},
+	{'^', 'A', 0302}, {'^', 'a', 0342},
+	{'~', 'A', 0303}, {'~', 'a', 0343},
+	{'"', 'A', 0304}, {'"', 'a', 0344},
+	{'O', 'A', 0305}, {'o', 'a', 0345},
+	{'0', 'A', 0305}, {'0', 'a', 0345},
+	{'A', 'A', 0305}, {'a', 'a', 0345},
+	{'A', 'E', 0306}, {'a', 'e', 0346},
+	{',', 'C', 0307}, {',', 'c', 0347},
+	{'`', 'E', 0310}, {'`', 'e', 0350},
+	{'\'', 'E', 0311}, {'\'', 'e', 0351},
+	{'^', 'E', 0312}, {'^', 'e', 0352},
+	{'"', 'E', 0313}, {'"', 'e', 0353},
+	{'`', 'I', 0314}, {'`', 'i', 0354},
+	{'\'', 'I', 0315}, {'\'', 'i', 0355},
+	{'^', 'I', 0316}, {'^', 'i', 0356},
+	{'"', 'I', 0317}, {'"', 'i', 0357},
+	{'-', 'D', 0320}, {'-', 'd', 0360},
+	{'~', 'N', 0321}, {'~', 'n', 0361},
+	{'`', 'O', 0322}, {'`', 'o', 0362},
+	{'\'', 'O', 0323}, {'\'', 'o', 0363},
+	{'^', 'O', 0324}, {'^', 'o', 0364},
+	{'~', 'O', 0325}, {'~', 'o', 0365},
+	{'"', 'O', 0326}, {'"', 'o', 0366},
+	{'/', 'O', 0330}, {'/', 'o', 0370},
+	{'`', 'U', 0331}, {'`', 'u', 0371},
+	{'\'', 'U', 0332}, {'\'', 'u', 0372},
+	{'^', 'U', 0333}, {'^', 'u', 0373},
+	{'"', 'U', 0334}, {'"', 'u', 0374},
+	{'\'', 'Y', 0335}, {'\'', 'y', 0375},
+	{'T', 'H', 0336}, {'t', 'h', 0376},
+	{'s', 's', 0337}, {'"', 'y', 0377},
+	{'s', 'z', 0337}, {'i', 'j', 0377},
 };
 
 unsigned int accent_table_size = 68;
drivers/char/xilinx_hwicap/buffer_icap.c
@@ -73,8 +73,8 @@
 #define XHI_BUFFER_START 0
 
 /**
- * buffer_icap_get_status: Get the contents of the status register.
- * @parameter base_address: is the base address of the device
+ * buffer_icap_get_status - Get the contents of the status register.
+ * @base_address: is the base address of the device
  *
  * The status register contains the ICAP status and the done bit.
 @@ -94,9 +94,9 @@ static inline u32 buffer_icap_get_status(void __iomem *base_address)
 }
 
 /**
- * buffer_icap_get_bram: Reads data from the storage buffer bram.
- * @parameter base_address: contains the base address of the component.
- * @parameter offset: The word offset from which the data should be read.
+ * buffer_icap_get_bram - Reads data from the storage buffer bram.
+ * @base_address: contains the base address of the component.
+ * @offset: The word offset from which the data should be read.
  *
  * A bram is used as a configuration memory cache.  One frame of data can
  * be stored in this "storage buffer".
@@ -108,8 +108,8 @@ static inline u32 buffer_icap_get_bram(void __iomem *base_address,
 }
 
 /**
- * buffer_icap_busy: Return true if the icap device is busy
- * @parameter base_address: is the base address of the device
+ * buffer_icap_busy - Return true if the icap device is busy
+ * @base_address: is the base address of the device
  *
  * The queries the low order bit of the status register, which
  * indicates whether the current configuration or readback operation
@@ -121,8 +121,8 @@ static inline bool buffer_icap_busy(void __iomem *base_address)
 }
 
 /**
- * buffer_icap_busy: Return true if the icap device is not busy
- * @parameter base_address: is the base address of the device
+ * buffer_icap_busy - Return true if the icap device is not busy
+ * @base_address: is the base address of the device
  *
  * The queries the low order bit of the status register, which
  * indicates whether the current configuration or readback operation
@@ -134,9 +134,9 @@ static inline bool buffer_icap_done(void __iomem *base_address)
 }
 
 /**
- * buffer_icap_set_size: Set the size register.
- * @parameter base_address: is the base address of the device
- * @parameter data: The size in bytes.
+ * buffer_icap_set_size - Set the size register.
+ * @base_address: is the base address of the device
+ * @data: The size in bytes.
  *
  * The size register holds the number of 8 bit bytes to transfer between
  * bram and the icap (or icap to bram).
@@ -148,9 +148,9 @@ static inline void buffer_icap_set_size(void __iomem *base_address,
 }
 
 /**
- * buffer_icap_mSetoffsetReg: Set the bram offset register.
- * @parameter base_address: contains the base address of the device.
- * @parameter data: is the value to be written to the data register.
+ * buffer_icap_set_offset - Set the bram offset register.
+ * @base_address: contains the base address of the device.
+ * @data: is the value to be written to the data register.
  *
  * The bram offset register holds the starting bram address to transfer
  * data from during configuration or write data to during readback.
@@ -162,9 +162,9 @@ static inline void buffer_icap_set_offset(void __iomem *base_address,
 }
 
 /**
- * buffer_icap_set_rnc: Set the RNC (Readback not Configure) register.
- * @parameter base_address: contains the base address of the device.
- * @parameter data: is the value to be written to the data register.
+ * buffer_icap_set_rnc - Set the RNC (Readback not Configure) register.
+ * @base_address: contains the base address of the device.
+ * @data: is the value to be written to the data register.
  *
  * The RNC register determines the direction of the data transfer.  It
  * controls whether a configuration or readback take place.  Writing to
@@ -178,10 +178,10 @@ static inline void buffer_icap_set_rnc(void __iomem *base_address,
 }
 
 /**
- * buffer_icap_set_bram: Write data to the storage buffer bram.
- * @parameter base_address: contains the base address of the component.
- * @parameter offset: The word offset at which the data should be written.
- * @parameter data: The value to be written to the bram offset.
+ * buffer_icap_set_bram - Write data to the storage buffer bram.
+ * @base_address: contains the base address of the component.
+ * @offset: The word offset at which the data should be written.
+ * @data: The value to be written to the bram offset.
  *
  * A bram is used as a configuration memory cache.  One frame of data can
  * be stored in this "storage buffer".
@@ -193,10 +193,10 @@ static inline void buffer_icap_set_bram(void __iomem *base_address,
 }
 
 /**
- * buffer_icap_device_read: Transfer bytes from ICAP to the storage buffer.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter offset: The storage buffer start address.
- * @parameter count: The number of words (32 bit) to read from the
+ * buffer_icap_device_read - Transfer bytes from ICAP to the storage buffer.
+ * @drvdata: a pointer to the drvdata.
+ * @offset: The storage buffer start address.
+ * @count: The number of words (32 bit) to read from the
  *           device (ICAP).
  **/
 static int buffer_icap_device_read(struct hwicap_drvdata *drvdata,
@@ -227,10 +227,10 @@ static int buffer_icap_device_read(struct hwicap_drvdata *drvdata,
 };
 
 /**
- * buffer_icap_device_write: Transfer bytes from ICAP to the storage buffer.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter offset: The storage buffer start address.
- * @parameter count: The number of words (32 bit) to read from the
+ * buffer_icap_device_write - Transfer bytes from ICAP to the storage buffer.
+ * @drvdata: a pointer to the drvdata.
+ * @offset: The storage buffer start address.
+ * @count: The number of words (32 bit) to read from the
  *           device (ICAP).
  **/
 static int buffer_icap_device_write(struct hwicap_drvdata *drvdata,
@@ -261,8 +261,8 @@ static int buffer_icap_device_write(struct hwicap_drvdata *drvdata,
 };
 
 /**
- * buffer_icap_reset: Reset the logic of the icap device.
- * @parameter drvdata: a pointer to the drvdata.
+ * buffer_icap_reset - Reset the logic of the icap device.
+ * @drvdata: a pointer to the drvdata.
  *
  * Writing to the status register resets the ICAP logic in an internal
  * version of the core.  For the version of the core published in EDK,
@@ -274,10 +274,10 @@ void buffer_icap_reset(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * buffer_icap_set_configuration: Load a partial bitstream from system memory.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter data: Kernel address of the partial bitstream.
- * @parameter size: the size of the partial bitstream in 32 bit words.
+ * buffer_icap_set_configuration - Load a partial bitstream from system memory.
+ * @drvdata: a pointer to the drvdata.
+ * @data: Kernel address of the partial bitstream.
+ * @size: the size of the partial bitstream in 32 bit words.
  **/
 int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
 			u32 size)
@@ -333,10 +333,10 @@ int buffer_icap_set_configuration(struct hwicap_drvdata *drvdata, u32 *data,
 };
 
 /**
- * buffer_icap_get_configuration: Read configuration data from the device.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter data: Address of the data representing the partial bitstream
- * @parameter size: the size of the partial bitstream in 32 bit words.
+ * buffer_icap_get_configuration - Read configuration data from the device.
+ * @drvdata: a pointer to the drvdata.
+ * @data: Address of the data representing the partial bitstream
+ * @size: the size of the partial bitstream in 32 bit words.
  **/
 int buffer_icap_get_configuration(struct hwicap_drvdata *drvdata, u32 *data,
 			u32 size)
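Every hunk in buffer_icap.c (and in fifo_icap.c below) rewrites the comment headers from the "name: description" / "@parameter foo:" style into the form that the kernel-doc tooling expects. A minimal sketch of the target layout, using a hypothetical function purely to make the convention concrete:

/**
 * example_set_size - Set the size register.
 * @base_address: base address of the device
 * @data: the size in bytes
 *
 * kernel-doc wants "name - short description" on the first line and plain
 * "@arg:" tags, which is what the hunks above convert to.
 */
static void example_set_size(void __iomem *base_address, u32 data)
{
	iowrite32(data, base_address);
}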
drivers/char/xilinx_hwicap/fifo_icap.c
@@ -94,9 +94,9 @@
 /**
- * fifo_icap_fifo_write: Write data to the write FIFO.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter data: the 32-bit value to be written to the FIFO.
+ * fifo_icap_fifo_write - Write data to the write FIFO.
+ * @drvdata: a pointer to the drvdata.
+ * @data: the 32-bit value to be written to the FIFO.
  *
  * This function will silently fail if the fifo is full.
  **/
@@ -108,8 +108,8 @@ static inline void fifo_icap_fifo_write(struct hwicap_drvdata *drvdata,
 }
 
 /**
- * fifo_icap_fifo_read: Read data from the Read FIFO.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_fifo_read - Read data from the Read FIFO.
+ * @drvdata: a pointer to the drvdata.
  *
  * This function will silently fail if the fifo is empty.
  **/
@@ -121,9 +121,9 @@ static inline u32 fifo_icap_fifo_read(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * fifo_icap_set_read_size: Set the the size register.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter data: the size of the following read transaction, in words.
+ * fifo_icap_set_read_size - Set the the size register.
+ * @drvdata: a pointer to the drvdata.
+ * @data: the size of the following read transaction, in words.
  **/
 static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata,
 		u32 data)
@@ -132,8 +132,8 @@ static inline void fifo_icap_set_read_size(struct hwicap_drvdata *drvdata,
 }
 
 /**
- * fifo_icap_start_config: Initiate a configuration (write) to the device.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_start_config - Initiate a configuration (write) to the device.
+ * @drvdata: a pointer to the drvdata.
  **/
 static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata)
 {
@@ -142,8 +142,8 @@ static inline void fifo_icap_start_config(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * fifo_icap_start_readback: Initiate a readback from the device.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_start_readback - Initiate a readback from the device.
+ * @drvdata: a pointer to the drvdata.
  **/
 static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata)
 {
@@ -152,8 +152,8 @@ static inline void fifo_icap_start_readback(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * fifo_icap_busy: Return true if the ICAP is still processing a transaction.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_busy - Return true if the ICAP is still processing a transaction.
+ * @drvdata: a pointer to the drvdata.
  **/
 static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata)
 {
@@ -163,8 +163,8 @@ static inline u32 fifo_icap_busy(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * fifo_icap_write_fifo_vacancy: Query the write fifo available space.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_write_fifo_vacancy - Query the write fifo available space.
+ * @drvdata: a pointer to the drvdata.
  *
  * Return the number of words that can be safely pushed into the write fifo.
  **/
@@ -175,8 +175,8 @@ static inline u32 fifo_icap_write_fifo_vacancy(
 }
 
 /**
- * fifo_icap_read_fifo_occupancy: Query the read fifo available data.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_read_fifo_occupancy - Query the read fifo available data.
+ * @drvdata: a pointer to the drvdata.
  *
  * Return the number of words that can be safely read from the read fifo.
  **/
@@ -187,11 +187,11 @@ static inline u32 fifo_icap_read_fifo_occupancy(
 }
 
 /**
- * fifo_icap_set_configuration: Send configuration data to the ICAP.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter frame_buffer: a pointer to the data to be written to the
+ * fifo_icap_set_configuration - Send configuration data to the ICAP.
+ * @drvdata: a pointer to the drvdata.
+ * @frame_buffer: a pointer to the data to be written to the
  *		ICAP device.
- * @parameter num_words: the number of words (32 bit) to write to the ICAP
+ * @num_words: the number of words (32 bit) to write to the ICAP
  *		device.
 
  * This function writes the given user data to the Write FIFO in
@@ -266,10 +266,10 @@ int fifo_icap_set_configuration(struct hwicap_drvdata *drvdata,
 }
 
 /**
- * fifo_icap_get_configuration: Read configuration data from the device.
- * @parameter drvdata: a pointer to the drvdata.
- * @parameter data: Address of the data representing the partial bitstream
- * @parameter size: the size of the partial bitstream in 32 bit words.
+ * fifo_icap_get_configuration - Read configuration data from the device.
+ * @drvdata: a pointer to the drvdata.
+ * @data: Address of the data representing the partial bitstream
+ * @size: the size of the partial bitstream in 32 bit words.
  *
  * This function reads the specified number of words from the ICAP device in
  * the polled mode.
@@ -335,8 +335,8 @@ int fifo_icap_get_configuration(struct hwicap_drvdata *drvdata,
 }
 
 /**
- * buffer_icap_reset: Reset the logic of the icap device.
- * @parameter drvdata: a pointer to the drvdata.
+ * buffer_icap_reset - Reset the logic of the icap device.
+ * @drvdata: a pointer to the drvdata.
  *
  * This function forces the software reset of the complete HWICAP device.
  * All the registers will return to the default value and the FIFO is also
@@ -360,8 +360,8 @@ void fifo_icap_reset(struct hwicap_drvdata *drvdata)
 }
 
 /**
- * fifo_icap_flush_fifo: This function flushes the FIFOs in the device.
- * @parameter drvdata: a pointer to the drvdata.
+ * fifo_icap_flush_fifo - This function flushes the FIFOs in the device.
+ * @drvdata: a pointer to the drvdata.
  */
 void fifo_icap_flush_fifo(struct hwicap_drvdata *drvdata)
 {
drivers/char/xilinx_hwicap/xilinx_hwicap.c
浏览文件 @
966ea8c4
...
...
@@ -84,7 +84,7 @@
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <
asm/semaphore
.h>
#include <
linux/mutex
.h>
#include <linux/sysctl.h>
#include <linux/version.h>
#include <linux/fs.h>
...
...
@@ -119,6 +119,7 @@ module_param(xhwicap_minor, int, S_IRUGO);
/* An array, which is set to true when the device is registered. */
static
bool
probed_devices
[
HWICAP_DEVICES
];
static
struct
mutex
icap_sem
;
static
struct
class
*
icap_class
;
...
...
@@ -199,14 +200,14 @@ static const struct config_registers v5_config_registers = {
};
/**
* hwicap_command_desync
:
Send a DESYNC command to the ICAP port.
* @
parameter
drvdata: a pointer to the drvdata.
* hwicap_command_desync
-
Send a DESYNC command to the ICAP port.
* @drvdata: a pointer to the drvdata.
*
* This command desynchronizes the ICAP After this command, a
* bitstream containing a NULL packet, followed by a SYNCH packet is
* required before the ICAP will recognize commands.
*/
int
hwicap_command_desync
(
struct
hwicap_drvdata
*
drvdata
)
static
int
hwicap_command_desync
(
struct
hwicap_drvdata
*
drvdata
)
{
u32
buffer
[
4
];
u32
index
=
0
;
...
...
@@ -228,51 +229,18 @@ int hwicap_command_desync(struct hwicap_drvdata *drvdata)
}
/**
* hwicap_command_capture: Send a CAPTURE command to the ICAP port.
* @parameter drvdata: a pointer to the drvdata.
*
* This command captures all of the flip flop states so they will be
* available during readback. One can use this command instead of
* enabling the CAPTURE block in the design.
*/
int
hwicap_command_capture
(
struct
hwicap_drvdata
*
drvdata
)
{
u32
buffer
[
7
];
u32
index
=
0
;
/*
* Create the data to be written to the ICAP.
*/
buffer
[
index
++
]
=
XHI_DUMMY_PACKET
;
buffer
[
index
++
]
=
XHI_SYNC_PACKET
;
buffer
[
index
++
]
=
XHI_NOOP_PACKET
;
buffer
[
index
++
]
=
hwicap_type_1_write
(
drvdata
->
config_regs
->
CMD
)
|
1
;
buffer
[
index
++
]
=
XHI_CMD_GCAPTURE
;
buffer
[
index
++
]
=
XHI_DUMMY_PACKET
;
buffer
[
index
++
]
=
XHI_DUMMY_PACKET
;
/*
* Write the data to the FIFO and intiate the transfer of data
* present in the FIFO to the ICAP device.
*/
return
drvdata
->
config
->
set_configuration
(
drvdata
,
&
buffer
[
0
],
index
);
}
/**
* hwicap_get_configuration_register: Query a configuration register.
* @parameter drvdata: a pointer to the drvdata.
* @parameter reg: a constant which represents the configuration
* hwicap_get_configuration_register - Query a configuration register.
* @drvdata: a pointer to the drvdata.
* @reg: a constant which represents the configuration
* register value to be returned.
* Examples: XHI_IDCODE, XHI_FLR.
* @
parameter RegD
ata: returns the value of the register.
* @
reg_d
ata: returns the value of the register.
*
* Sends a query packet to the ICAP and then receives the response.
* The icap is left in Synched state.
*/
int
hwicap_get_configuration_register
(
struct
hwicap_drvdata
*
drvdata
,
u32
reg
,
u32
*
RegD
ata
)
static
int
hwicap_get_configuration_register
(
struct
hwicap_drvdata
*
drvdata
,
u32
reg
,
u32
*
reg_d
ata
)
{
int
status
;
u32
buffer
[
6
];
...
...
@@ -300,14 +268,14 @@ int hwicap_get_configuration_register(struct hwicap_drvdata *drvdata,
/*
* Read the configuration register
*/
status
=
drvdata
->
config
->
get_configuration
(
drvdata
,
RegD
ata
,
1
);
status
=
drvdata
->
config
->
get_configuration
(
drvdata
,
reg_d
ata
,
1
);
if
(
status
)
return
status
;
return
0
;
}
int
hwicap_initialize_hwicap
(
struct
hwicap_drvdata
*
drvdata
)
static
int
hwicap_initialize_hwicap
(
struct
hwicap_drvdata
*
drvdata
)
{
int
status
;
u32
idcode
;
...
...
@@ -344,7 +312,7 @@ int hwicap_initialize_hwicap(struct hwicap_drvdata *drvdata)
}
static
ssize_t
hwicap_read
(
struct
file
*
file
,
char
*
buf
,
size_t
count
,
loff_t
*
ppos
)
hwicap_read
(
struct
file
*
file
,
char
__user
*
buf
,
size_t
count
,
loff_t
*
ppos
)
{
struct
hwicap_drvdata
*
drvdata
=
file
->
private_data
;
ssize_t
bytes_to_read
=
0
;
...
...
@@ -353,8 +321,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos)
u32
bytes_remaining
;
int
status
;
if
(
down_interruptible
(
&
drvdata
->
sem
))
return
-
ERESTARTSYS
;
status
=
mutex_lock_interruptible
(
&
drvdata
->
sem
);
if
(
status
)
return
status
;
if
(
drvdata
->
read_buffer_in_use
)
{
/* If there are leftover bytes in the buffer, just */
...
...
@@ -370,8 +339,9 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos)
goto
error
;
}
drvdata
->
read_buffer_in_use
-=
bytes_to_read
;
memcpy
(
drvdata
->
read_buffer
+
bytes_to_read
,
drvdata
->
read_buffer
,
4
-
bytes_to_read
);
memmove
(
drvdata
->
read_buffer
,
drvdata
->
read_buffer
+
bytes_to_read
,
4
-
bytes_to_read
);
}
else
{
/* Get new data from the ICAP, and return was was requested. */
kbuf
=
(
u32
*
)
get_zeroed_page
(
GFP_KERNEL
);
...
...
@@ -414,18 +384,20 @@ hwicap_read(struct file *file, char *buf, size_t count, loff_t *ppos)
status
=
-
EFAULT
;
goto
error
;
}
memcpy
(
kbuf
,
drvdata
->
read_buffer
,
bytes_remaining
);
memcpy
(
drvdata
->
read_buffer
,
kbuf
,
bytes_remaining
);
drvdata
->
read_buffer_in_use
=
bytes_remaining
;
free_page
((
unsigned
long
)
kbuf
);
}
status
=
bytes_to_read
;
error:
up
(
&
drvdata
->
sem
);
mutex_unlock
(
&
drvdata
->
sem
);
return
status
;
}
static
ssize_t
hwicap_write
(
struct
file
*
file
,
const
char
*
buf
,
hwicap_write
(
struct
file
*
file
,
const
char
__user
*
buf
,
size_t
count
,
loff_t
*
ppos
)
{
struct
hwicap_drvdata
*
drvdata
=
file
->
private_data
;
...
...
@@ -435,8 +407,9 @@ hwicap_write(struct file *file, const char *buf,
ssize_t
len
;
ssize_t
status
;
if
(
down_interruptible
(
&
drvdata
->
sem
))
return
-
ERESTARTSYS
;
status
=
mutex_lock_interruptible
(
&
drvdata
->
sem
);
if
(
status
)
return
status
;
left
+=
drvdata
->
write_buffer_in_use
;
...
...
@@ -465,7 +438,7 @@ hwicap_write(struct file *file, const char *buf,
memcpy
(
kbuf
,
drvdata
->
write_buffer
,
drvdata
->
write_buffer_in_use
);
if
(
copy_from_user
(
(((
char
*
)
kbuf
)
+
(
drvdata
->
write_buffer_in_use
)
),
(((
char
*
)
kbuf
)
+
drvdata
->
write_buffer_in_use
),
buf
+
written
,
len
-
(
drvdata
->
write_buffer_in_use
)))
{
free_page
((
unsigned
long
)
kbuf
);
...
...
@@ -508,7 +481,7 @@ hwicap_write(struct file *file, const char *buf,
free_page
((
unsigned
long
)
kbuf
);
status
=
written
;
error:
up
(
&
drvdata
->
sem
);
mutex_unlock
(
&
drvdata
->
sem
);
return
status
;
}
...
...
@@ -519,8 +492,9 @@ static int hwicap_open(struct inode *inode, struct file *file)
drvdata
=
container_of
(
inode
->
i_cdev
,
struct
hwicap_drvdata
,
cdev
);
if
(
down_interruptible
(
&
drvdata
->
sem
))
return
-
ERESTARTSYS
;
status
=
mutex_lock_interruptible
(
&
drvdata
->
sem
);
if
(
status
)
return
status
;
if
(
drvdata
->
is_open
)
{
status
=
-
EBUSY
;
...
...
@@ -539,7 +513,7 @@ static int hwicap_open(struct inode *inode, struct file *file)
drvdata
->
is_open
=
1
;
error:
up
(
&
drvdata
->
sem
);
mutex_unlock
(
&
drvdata
->
sem
);
return
status
;
}
...
...
@@ -549,8 +523,7 @@ static int hwicap_release(struct inode *inode, struct file *file)
int
i
;
int
status
=
0
;
if
(
down_interruptible
(
&
drvdata
->
sem
))
return
-
ERESTARTSYS
;
mutex_lock
(
&
drvdata
->
sem
);
if
(
drvdata
->
write_buffer_in_use
)
{
/* Flush write buffer. */
...
...
@@ -569,7 +542,7 @@ static int hwicap_release(struct inode *inode, struct file *file)
error:
drvdata
->
is_open
=
0
;
up
(
&
drvdata
->
sem
);
mutex_unlock
(
&
drvdata
->
sem
);
return
status
;
}
...
...
@@ -592,31 +565,36 @@ static int __devinit hwicap_setup(struct device *dev, int id,

	dev_info(dev, "Xilinx icap port driver\n");

+	mutex_lock(&icap_sem);
+
	if (id < 0) {
		for (id = 0; id < HWICAP_DEVICES; id++)
			if (!probed_devices[id])
				break;
	}
	if (id < 0 || id >= HWICAP_DEVICES) {
+		mutex_unlock(&icap_sem);
		dev_err(dev, "%s%i too large\n", DRIVER_NAME, id);
		return -EINVAL;
	}
	if (probed_devices[id]) {
+		mutex_unlock(&icap_sem);
		dev_err(dev, "cannot assign to %s%i; it is already in use\n",
			DRIVER_NAME, id);
		return -EBUSY;
	}

	probed_devices[id] = 1;
+	mutex_unlock(&icap_sem);

	devt = MKDEV(xhwicap_major, xhwicap_minor + id);

-	drvdata = kmalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
+	drvdata = kzalloc(sizeof(struct hwicap_drvdata), GFP_KERNEL);
	if (!drvdata) {
		dev_err(dev, "Couldn't allocate device private record\n");
-		return -ENOMEM;
+		retval = -ENOMEM;
+		goto failed0;
	}
-	memset((void *)drvdata, 0, sizeof(struct hwicap_drvdata));
	dev_set_drvdata(dev, (void *)drvdata);

	if (!regs_res) {
...
...
@@ -648,7 +626,7 @@ static int __devinit hwicap_setup(struct device *dev, int id,
	drvdata->config = config;
	drvdata->config_regs = config_regs;

-	init_MUTEX(&drvdata->sem);
+	mutex_init(&drvdata->sem);
	drvdata->is_open = 0;

	dev_info(dev, "ioremap %lx to %p with size %x\n",
...
...
@@ -663,7 +641,7 @@ static int __devinit hwicap_setup(struct device *dev, int id,
		goto failed3;
	}
	/*  devfs_mk_cdev(devt, S_IFCHR|S_IRUGO|S_IWUGO, DRIVER_NAME); */
-	class_device_create(icap_class, NULL, devt, NULL, DRIVER_NAME);
+	device_create(icap_class, dev, devt, "%s%d", DRIVER_NAME, id);
	return 0;		/* success */

 failed3:
...
...
@@ -675,6 +653,11 @@ static int __devinit hwicap_setup(struct device *dev, int id,
 failed1:
	kfree(drvdata);

+ failed0:
+	mutex_lock(&icap_sem);
+	probed_devices[id] = 0;
+	mutex_unlock(&icap_sem);
+
+	return retval;
}
...
...
@@ -699,14 +682,16 @@ static int __devexit hwicap_remove(struct device *dev)
	if (!drvdata)
		return 0;

-	class_device_destroy(icap_class, drvdata->devt);
+	device_destroy(icap_class, drvdata->devt);
	cdev_del(&drvdata->cdev);
	iounmap(drvdata->base_address);
	release_mem_region(drvdata->mem_start, drvdata->mem_size);
	kfree(drvdata);
	dev_set_drvdata(dev, NULL);
-	probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0;

+	mutex_lock(&icap_sem);
+	probed_devices[MINOR(dev->devt)-xhwicap_minor] = 0;
+	mutex_unlock(&icap_sem);
	return 0;		/* success */
}
...
...
@@ -821,28 +806,29 @@ static struct of_platform_driver hwicap_of_driver = {
};

/* Registration helpers to keep the number of #ifdefs to a minimum */
-static inline int __devinit hwicap_of_register(void)
+static inline int __init hwicap_of_register(void)
{
	pr_debug("hwicap: calling of_register_platform_driver()\n");
	return of_register_platform_driver(&hwicap_of_driver);
}

-static inline void __devexit hwicap_of_unregister(void)
+static inline void __exit hwicap_of_unregister(void)
{
	of_unregister_platform_driver(&hwicap_of_driver);
}
#else /* CONFIG_OF */
/* CONFIG_OF not enabled; do nothing helpers */
-static inline int __devinit hwicap_of_register(void) { return 0; }
-static inline void __devexit hwicap_of_unregister(void) { }
+static inline int __init hwicap_of_register(void) { return 0; }
+static inline void __exit hwicap_of_unregister(void) { }
#endif /* CONFIG_OF */

-static int __devinit hwicap_module_init(void)
+static int __init hwicap_module_init(void)
{
	dev_t devt;
	int retval;

	icap_class = class_create(THIS_MODULE, "xilinx_config");
+	mutex_init(&icap_sem);

	if (xhwicap_major) {
		devt = MKDEV(xhwicap_major, xhwicap_minor);
...
...
@@ -883,7 +869,7 @@ static int __devinit hwicap_module_init(void)
	return retval;
}

-static void __devexit hwicap_module_cleanup(void)
+static void __exit hwicap_module_cleanup(void)
{
	dev_t devt = MKDEV(xhwicap_major, xhwicap_minor);
...
...
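Most hunks in this file repeat one conversion: the per-device semaphore becomes a mutex, and `if (down_interruptible(...)) return -ERESTARTSYS;` becomes `status = mutex_lock_interruptible(...); if (status) return status;`, so the caller propagates whatever error the lock primitive reports instead of hardcoding one. A hedged userspace sketch of that calling convention (lock_interruptible() below is a stand-in, not a kernel API):

```c
/*
 * Sketch of the locking convention introduced above: store and propagate
 * the lock's own return value rather than inventing -ERESTARTSYS.
 * lock_interruptible() only simulates an interruptible kernel mutex.
 */
#include <errno.h>
#include <stdio.h>

static int lock_interruptible(int simulate_signal)
{
	return simulate_signal ? -EINTR : 0;	/* interrupted vs. acquired */
}

static int demo_open(int simulate_signal)
{
	int status;

	status = lock_interruptible(simulate_signal);
	if (status)
		return status;		/* propagate the primitive's error */

	/* ... critical section would go here ... */
	return 0;
}

int main(void)
{
	printf("uncontended: %d, interrupted: %d\n", demo_open(0), demo_open(1));
	return 0;
}
```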
drivers/char/xilinx_hwicap/xilinx_hwicap.h
...
...
@@ -48,9 +48,9 @@ struct hwicap_drvdata {
	u8 write_buffer[4];
	u32 read_buffer_in_use;	/* Always in [0,3] */
	u8 read_buffer[4];
-	u32 mem_start;		/* phys. address of the control registers */
-	u32 mem_end;		/* phys. address of the control registers */
-	u32 mem_size;
+	resource_size_t mem_start;	/* phys. address of the control registers */
+	resource_size_t mem_end;	/* phys. address of the control registers */
+	resource_size_t mem_size;
	void __iomem *base_address;	/* virt. address of the control registers */

	struct device *dev;
...
...
@@ -61,7 +61,7 @@ struct hwicap_drvdata {
	const struct config_registers *config_regs;
	void *private_data;
	bool is_open;
-	struct semaphore sem;
+	struct mutex sem;
};

struct hwicap_driver_config {
...
...
@@ -164,29 +164,29 @@ struct config_registers {
#define XHI_DISABLED_AUTO_CRC 0x0000DEFCUL

/**
- * hwicap_type_1_read: Generates a Type 1 read packet header.
- * @parameter: Register is the address of the register to be read back.
+ * hwicap_type_1_read - Generates a Type 1 read packet header.
+ * @reg: is the address of the register to be read back.
 *
 * Generates a Type 1 read packet header, which is used to indirectly
 * read registers in the configuration logic.  This packet must then
 * be sent through the icap device, and a return packet received with
 * the information.
 **/
-static inline u32 hwicap_type_1_read(u32 Register)
+static inline u32 hwicap_type_1_read(u32 reg)
{
	return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
-		(Register << XHI_REGISTER_SHIFT) |
+		(reg << XHI_REGISTER_SHIFT) |
		(XHI_OP_READ << XHI_OP_SHIFT);
}

/**
- * hwicap_type_1_write: Generates a Type 1 write packet header
- * @parameter: Register is the address of the register to be read back.
+ * hwicap_type_1_write - Generates a Type 1 write packet header
+ * @reg: is the address of the register to be read back.
 **/
-static inline u32 hwicap_type_1_write(u32 Register)
+static inline u32 hwicap_type_1_write(u32 reg)
{
	return (XHI_TYPE_1 << XHI_TYPE_SHIFT) |
-		(Register << XHI_REGISTER_SHIFT) |
+		(reg << XHI_REGISTER_SHIFT) |
		(XHI_OP_WRITE << XHI_OP_SHIFT);
}
...
...
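hwicap_type_1_read()/hwicap_type_1_write() simply pack a packet type, a register address, and an opcode into one 32-bit header word by shifting and ORing. A standalone sketch of that packing, with assumed field positions (the real XHI_*_SHIFT constants live elsewhere in this header and may differ):

```c
/*
 * Illustration of building a Type 1 packet header from bit fields.
 * All DEMO_* values are assumptions for this demo only, not the
 * driver's real XHI_* constants.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_TYPE_1		1u
#define DEMO_OP_READ		1u
#define DEMO_TYPE_SHIFT		29	/* assumed */
#define DEMO_OP_SHIFT		27	/* assumed */
#define DEMO_REGISTER_SHIFT	13	/* assumed */

static inline uint32_t demo_type_1_read(uint32_t reg)
{
	return (DEMO_TYPE_1 << DEMO_TYPE_SHIFT) |
		(reg << DEMO_REGISTER_SHIFT) |
		(DEMO_OP_READ << DEMO_OP_SHIFT);
}

int main(void)
{
	/* Pack a read of (assumed) register 7 and show the resulting word. */
	printf("header = 0x%08x\n", demo_type_1_read(7));
	return 0;
}
```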
drivers/message/fusion/mptbase.c
...
...
@@ -632,8 +632,7 @@ mpt_deregister(u8 cb_idx)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
- *	mpt_event_register - Register protocol-specific event callback
- *	handler.
+ *	mpt_event_register - Register protocol-specific event callback handler.
 *	@cb_idx: previously registered (via mpt_register) callback handle
 *	@ev_cbfunc: callback function
 *
...
...
@@ -654,8 +653,7 @@ mpt_event_register(u8 cb_idx, MPT_EVHANDLER ev_cbfunc)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
- *	mpt_event_deregister - Deregister protocol-specific event callback
- *	handler.
+ *	mpt_event_deregister - Deregister protocol-specific event callback handler
 *	@cb_idx: previously registered callback handle
 *
 *	Each protocol-specific driver should call this routine
...
...
@@ -765,11 +763,13 @@ mpt_device_driver_deregister(u8 cb_idx)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
- *	mpt_get_msg_frame - Obtain a MPT request frame from the pool (of 1024)
- *	allocated per MPT adapter.
+ *	mpt_get_msg_frame - Obtain an MPT request frame from the pool
 *	@cb_idx: Handle of registered MPT protocol driver
 *	@ioc: Pointer to MPT adapter structure
 *
+ *	Obtain an MPT request frame from the pool (of 1024) that are
+ *	allocated per MPT adapter.
+ *
 *	Returns pointer to a MPT request frame or %NULL if none are available
 *	or IOC is not active.
 */
...
...
@@ -834,13 +834,12 @@ mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc)
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/**
- *	mpt_put_msg_frame - Send a protocol specific MPT request frame
- *	to a IOC.
+ *	mpt_put_msg_frame - Send a protocol-specific MPT request frame to an IOC
 *	@cb_idx: Handle of registered MPT protocol driver
 *	@ioc: Pointer to MPT adapter structure
 *	@mf: Pointer to MPT request frame
 *
- *	This routine posts a MPT request frame to the request post FIFO of a
+ *	This routine posts an MPT request frame to the request post FIFO of a
 *	specific MPT adapter.
 */
void
...
...
@@ -868,13 +867,15 @@ mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
}

/**
- *	mpt_put_msg_frame_hi_pri - Send a protocol specific MPT request frame
- *	to a IOC using hi priority request queue.
+ *	mpt_put_msg_frame_hi_pri - Send a hi-pri protocol-specific MPT request frame
 *	@cb_idx: Handle of registered MPT protocol driver
 *	@ioc: Pointer to MPT adapter structure
 *	@mf: Pointer to MPT request frame
 *
- *	This routine posts a MPT request frame to the request post FIFO of a
+ *	Send a protocol-specific MPT request frame to an IOC using
+ *	hi-priority request queue.
+ *
+ *	This routine posts an MPT request frame to the request post FIFO of a
 *	specific MPT adapter.
 **/
void
...
...
drivers/message/fusion/mptscsih.c
...
...
@@ -1533,7 +1533,7 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
 *
 *	Remark: Currently invoked from a non-interrupt thread (_bh).
 *
- *	Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC
+ *	Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC
 *	will be active.
 *
 *	Returns 0 for SUCCESS, or %FAILED.
...
...
@@ -2537,14 +2537,12 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
/**
 *	mptscsih_get_scsi_lookup
- *
- *	retrieves scmd entry from ScsiLookup[] array list
- *
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@i: index into the array
 *
- *	Returns the scsi_cmd pointer
+ *	retrieves scmd entry from ScsiLookup[] array list
 *
+ *	Returns the scsi_cmd pointer
 **/
static struct scsi_cmnd *
mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
...
...
@@ -2561,14 +2559,12 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
/**
 *	mptscsih_getclear_scsi_lookup
- *
- *	retrieves and clears scmd entry from ScsiLookup[] array list
- *
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@i: index into the array
 *
- *	Returns the scsi_cmd pointer
+ *	retrieves and clears scmd entry from ScsiLookup[] array list
 *
+ *	Returns the scsi_cmd pointer
 **/
static struct scsi_cmnd *
mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
...
...
drivers/pci/rom.c
...
...
@@ -242,8 +242,7 @@ void pci_remove_rom(struct pci_dev *pdev)
#endif  /*  0  */

/**
- * pci_cleanup_rom - internal routine for freeing the ROM copy created
- * by pci_map_rom_copy called from remove.c
+ * pci_cleanup_rom - free the ROM copy created by pci_map_rom_copy
 * @pdev: pointer to pci device struct
 *
 * Free the copied ROM if we allocated one.
...
...
drivers/rapidio/rio-driver.c
...
...
@@ -78,8 +78,7 @@ void rio_dev_put(struct rio_dev *rdev)
}

/**
- *  rio_device_probe - Tell if a RIO device structure has a matching RIO
- *  device id structure
+ *  rio_device_probe - Tell if a RIO device structure has a matching RIO device id structure
 *  @id: the RIO device id structure to match against
 *  @dev: the RIO device structure to match against
 *
...
...
@@ -137,7 +136,7 @@ static int rio_device_remove(struct device *dev)
 *  rio_register_driver - register a new RIO driver
 *  @rdrv: the RIO driver structure to register
 *
- *  Adds a &struct rio_driver to the list of registered drivers
+ *  Adds a &struct rio_driver to the list of registered drivers.
 *  Returns a negative value on error, otherwise 0. If no error
 *  occurred, the driver remains registered even if no device
 *  was claimed during registration.
...
...
@@ -167,8 +166,7 @@ void rio_unregister_driver(struct rio_driver *rdrv)
}

/**
- *  rio_match_bus - Tell if a RIO device structure has a matching RIO
- *  driver device id structure
+ *  rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure
 *  @dev: the standard device structure to match against
 *  @drv: the standard driver structure containing the ids to match against
 *
...
...
drivers/s390/char/defkeymap.c
...
...
@@ -151,8 +151,8 @@ char *func_table[MAX_NR_FUNC] = {
};

struct kbdiacruc accent_table[MAX_DIACR] = {
-	{'^', 'c', '\003'},	{'^', 'd', '\004'},
-	{'^', 'z', '\032'},	{'^', '\012', '\000'},
+	{'^', 'c', 0003},	{'^', 'd', 0004},
+	{'^', 'z', 0032},	{'^', 0012, 0000},
};

unsigned int accent_table_size = 4;
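The accent-table entries switch from character escapes to plain octal constants because the result field is now an integer type rather than a char; the values themselves are unchanged. A one-line check of that equivalence (the struct is a stand-in for kbdiacruc):

```c
/* Verify that the octal constants above equal the old character escapes. */
#include <stdio.h>

struct demo_diacr { unsigned int diacr, base, result; };

int main(void)
{
	struct demo_diacr d = { '^', 'c', 0003 };

	printf("'\\003' == 0003 ? %d, result = %u\n", '\003' == 0003, d.result);
	return 0;
}
```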
drivers/scsi/scsi_scan.c
...
...
@@ -1577,8 +1577,7 @@ static void __scsi_scan_target(struct device *parent, unsigned int channel,
}

/**
- * scsi_scan_target - scan a target id, possibly including all LUNs on the
- *     target.
+ * scsi_scan_target - scan a target id, possibly including all LUNs on the target.
 * @parent:	host to scan
 * @channel:	channel to scan
 * @id:		target id to scan
...
...
drivers/usb/core/usb.c
...
...
@@ -99,8 +99,7 @@ struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
EXPORT_SYMBOL_GPL(usb_ifnum_to_if);

/**
- * usb_altnum_to_altsetting - get the altsetting structure with a given
- *	alternate setting number.
+ * usb_altnum_to_altsetting - get the altsetting structure with a given alternate setting number.
 * @intf: the interface containing the altsetting in question
 * @altnum: the desired alternate setting number
 *
...
...
@@ -442,8 +441,7 @@ EXPORT_SYMBOL_GPL(usb_put_intf);
 */

/**
- * usb_lock_device_for_reset - cautiously acquire the lock for a
- *	usb device structure
+ * usb_lock_device_for_reset - cautiously acquire the lock for a usb device structure
 * @udev: device that's being locked
 * @iface: interface bound to the driver making the request (optional)
 *
...
...
fs/buffer.c
...
...
@@ -627,8 +627,7 @@ static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
}

/**
- * sync_mapping_buffers - write out and wait upon a mapping's "associated"
- *                        buffers
+ * sync_mapping_buffers - write out & wait upon a mapping's "associated" buffers
 * @mapping: the mapping which wants those buffers written
 *
 * Starts I/O against the buffers at mapping->private_list, and waits upon
...
...
fs/exec.c
...
...
@@ -173,8 +173,15 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		return NULL;

	if (write) {
-		struct rlimit *rlim = current->signal->rlim;
		unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
+		struct rlimit *rlim;
+
+		/*
+		 * We've historically supported up to 32 pages (ARG_MAX)
+		 * of argument strings even with small stacks
+		 */
+		if (size <= ARG_MAX)
+			return page;

		/*
		 * Limit to 1/4-th the stack size for the argv+env strings.
...
...
@@ -183,6 +190,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
		 *  - the program will have a reasonable amount of stack left
		 *    to work from.
		 */
+		rlim = current->signal->rlim;
		if (size > rlim[RLIMIT_STACK].rlim_cur / 4) {
			put_page(page);
			return NULL;
...
...
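The new check above first admits the historical ARG_MAX worth of argument pages unconditionally, and only then applies the quarter-of-stack limit for larger argv+env sets. A standalone sketch of that decision order (the constants are illustrative stand-ins for the real page size and rlimit):

```c
/*
 * Sketch of the argument-size policy added above: accept up to a fixed
 * ARG_MAX unconditionally, otherwise cap argv+env at a quarter of the
 * stack limit.  DEMO_ARG_MAX assumes 4 KiB pages.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_ARG_MAX	(32 * 4096UL)	/* 32 pages, as in the comment above */

static bool arg_size_ok(unsigned long size, unsigned long stack_rlim_cur)
{
	if (size <= DEMO_ARG_MAX)
		return true;		/* historical allowance, even with small stacks */
	return size <= stack_rlim_cur / 4;
}

int main(void)
{
	printf("%d %d %d\n",
	       arg_size_ok(64 * 1024, 8 << 20),	/* small argv: always ok */
	       arg_size_ok(3 << 20, 8 << 20),	/* over ARG_MAX and over the quarter: rejected */
	       arg_size_ok(1 << 20, 8 << 20));	/* over ARG_MAX but within the quarter: ok */
	return 0;
}
```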
fs/jbd/transaction.c
...
...
@@ -369,7 +369,7 @@ int journal_extend(handle_t *handle, int nblocks)

/**
- * int journal_restart() - restart a handle .
+ * int journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 *
...
...
@@ -844,8 +844,7 @@ int journal_get_create_access(handle_t *handle, struct buffer_head *bh)
}

/**
- * int journal_get_undo_access() -  Notify intent to modify metadata with
- *     non-rewindable consequences
+ * int journal_get_undo_access() - Notify intent to modify metadata with non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 * @credits: store the number of taken credits here (if not NULL)
...
...
@@ -921,12 +920,14 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
}

/**
- * int journal_dirty_data() -  mark a buffer as containing dirty data which
- *                             needs to be flushed before we can commit the
- *                             current transaction.
+ * int journal_dirty_data() - mark a buffer as containing dirty data to be flushed
 * @handle: transaction
 * @bh: bufferhead to mark
 *
+ * Description:
+ * Mark a buffer as containing dirty data which needs to be flushed before
+ * we can commit the current transaction.
+ *
 * The buffer is placed on the transaction's data list and is marked as
 * belonging to the transaction.
 *
...
...
@@ -1098,11 +1099,11 @@ int journal_dirty_data(handle_t *handle, struct buffer_head *bh)
}

/**
- * int journal_dirty_metadata() -  mark a buffer as containing dirty metadata
+ * int journal_dirty_metadata() - mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
- * mark dirty metadata which needs to be journaled as part of the current
+ * Mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer is placed on the transaction's metadata list and is marked
...
...
fs/mpage.c
...
...
@@ -325,16 +325,12 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
}

/**
- * mpage_readpages - populate an address space with some pages, and
- *                       start reads against them.
- *
+ * mpage_readpages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 *
 *   The page at @pages->prev has the lowest file offset, and reads should be
 *   issued in @pages->prev to @pages->next order.
 *
 * @nr_pages: The number of pages at *@pages
 * @get_block: The filesystem's block mapper function.
 *
...
...
@@ -360,6 +356,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
 * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
 * submitted in the following order:
 * 	12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
+ *
 * because the indirect block has to be read to get the mappings of blocks
 * 13,14,15,16.  Obviously, this impacts performance.
 *
...
...
@@ -656,9 +653,7 @@ static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
}

/**
- * mpage_writepages - walk the list of dirty pages of the given
- * address space and writepage() all of them.
- *
+ * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @get_block: the filesystem's block mapper function.
...
...
include/asm-powerpc/reg.h
...
...
@@ -153,6 +153,9 @@
#define   CTRL_RUNLATCH	0x1
#define SPRN_DABR	0x3F5	/* Data Address Breakpoint Register */
#define   DABR_TRANSLATION	(1UL << 2)
+#define SPRN_DABRX	0x3F7	/* Data Address Breakpoint Register Extension */
+#define   DABRX_USER	(1UL << 0)
+#define   DABRX_KERNEL	(1UL << 1)
#define SPRN_DAR	0x013	/* Data Address Register */
#define SPRN_DSISR	0x012	/* Data Storage Interrupt Status Register */
#define   DSISR_NOHPTE		0x40000000	/* no translation found */
...
...
include/asm-x86/pgtable_32.h
...
...
@@ -91,9 +91,7 @@ extern unsigned long pg0[];
/* To avoid harmful races, pmd_none(x) should check only the lower when PAE */
#define pmd_none(x)	(!(unsigned long)pmd_val(x))
#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
-#define	pmd_bad(x)	((pmd_val(x) \
-			  & ~(PAGE_MASK | _PAGE_USER | _PAGE_PSE | _PAGE_NX)) \
-			 != _KERNPG_TABLE)
+#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
...
...
include/asm-x86/pgtable_64.h
...
...
@@ -153,14 +153,12 @@ static inline unsigned long pgd_bad(pgd_t pgd)

static inline unsigned long pud_bad(pud_t pud)
{
-	return pud_val(pud) &
-		~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX);
+	return pud_val(pud) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
}

static inline unsigned long pmd_bad(pmd_t pmd)
{
-	return pmd_val(pmd) &
-		~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER | _PAGE_PSE | _PAGE_NX);
+	return pmd_val(pmd) & ~(PTE_MASK | _KERNPG_TABLE | _PAGE_USER);
}

#define pte_none(x)	(!pte_val(x))
...
...
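pud_bad()/pmd_bad() follow one idiom: mask away every bit that is allowed to be set in a valid table entry and report whatever remains, so shrinking the allowed set (as above) makes the check stricter. A standalone illustration of that idiom with made-up bit values (the real PTE_MASK, _KERNPG_TABLE and _PAGE_USER differ):

```c
/*
 * Illustration of the "clear the allowed bits, anything left is bad"
 * check used by pmd_bad()/pud_bad().  All DEMO_* values are assumptions
 * for the demo only.
 */
#include <stdio.h>

#define DEMO_PTE_MASK	0xFFFFF000UL	/* assumed: physical-address bits */
#define DEMO_KERNPG	0x063UL		/* assumed: present/rw/accessed/dirty */
#define DEMO_USER	0x004UL		/* assumed: user-accessible bit */

static unsigned long demo_pmd_bad(unsigned long pmd_val)
{
	return pmd_val & ~(DEMO_PTE_MASK | DEMO_KERNPG | DEMO_USER);
}

int main(void)
{
	printf("good entry -> %lx, stray bit -> %lx\n",
	       demo_pmd_bad(0x12345067UL),		/* only allowed bits set */
	       demo_pmd_bad(0x12345067UL | 0x100UL));	/* unexpected bit 8 set */
	return 0;
}
```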
include/linux/mm_types.h
...
...
@@ -64,10 +64,7 @@ struct page {
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
	    spinlock_t ptl;
#endif
-	    struct {
-		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
-		void *end;			/* SLUB: end marker */
-	    };
+	    struct kmem_cache *slab;	/* SLUB: Pointer to slab */
	    struct page *first_page;	/* Compound tail pages */
	};
	union {
...
...
include/linux/slub_def.h
...
...
@@ -61,7 +61,7 @@ struct kmem_cache {
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
-	int order;
+	int order;		/* Current preferred allocation order */

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
...
...
@@ -138,11 +138,11 @@ static __always_inline int kmalloc_index(size_t size)
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
+	if (size <=   4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
-	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
...
...
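kmalloc_index() maps a request size to the smallest power-of-two general cache that fits it; the hunk above only moves the 4 KiB case in front of the large-page comment. A compact standalone equivalent of the size-to-index mapping (the real function also special-cases 0, 96 and 192 bytes, which this sketch ignores):

```c
/*
 * Standalone equivalent of the kmalloc_index() mapping shown above:
 * return the exponent of the smallest power of two >= size, starting
 * from the smallest general cache of 8 bytes.
 */
#include <stddef.h>
#include <stdio.h>

static int demo_kmalloc_index(size_t size)
{
	int index = 3;			/* 2^3 = 8 bytes */
	size_t cache = 8;

	while (cache < size) {
		cache <<= 1;
		index++;
	}
	return index;
}

int main(void)
{
	printf("500 -> %d, 1024 -> %d, 4096 -> %d\n",
	       demo_kmalloc_index(500),		/* fits in 512  -> 9  */
	       demo_kmalloc_index(1024),	/* fits in 1024 -> 10 */
	       demo_kmalloc_index(4096));	/* fits in 4096 -> 12 */
	return 0;
}
```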
include/linux/usb.h
...
...
@@ -781,8 +781,7 @@ static inline int usb_endpoint_is_isoc_out(
	.idVendor = (vend), \
	.idProduct = (prod)
/**
- * USB_DEVICE_VER - macro used to describe a specific usb device with a
- *		version range
+ * USB_DEVICE_VER - describe a specific usb device with a version range
 * @vend: the 16 bit USB Vendor ID
 * @prod: the 16 bit USB Product ID
 * @lo: the bcdDevice_lo value
...
...
@@ -799,8 +798,7 @@ static inline int usb_endpoint_is_isoc_out(
	.bcdDevice_hi = (hi)

/**
- * USB_DEVICE_INTERFACE_PROTOCOL - macro used to describe a usb
- *		device with a specific interface protocol
+ * USB_DEVICE_INTERFACE_PROTOCOL - describe a usb device with a specific interface protocol
 * @vend: the 16 bit USB Vendor ID
 * @prod: the 16 bit USB Product ID
 * @pr: bInterfaceProtocol value
...
...
@@ -846,8 +844,7 @@ static inline int usb_endpoint_is_isoc_out(
	.bInterfaceProtocol = (pr)

/**
- * USB_DEVICE_AND_INTERFACE_INFO - macro used to describe a specific usb device
- *		with a class of usb interfaces
+ * USB_DEVICE_AND_INTERFACE_INFO - describe a specific usb device with a class of usb interfaces
 * @vend: the 16 bit USB Vendor ID
 * @prod: the 16 bit USB Product ID
 * @cl: bInterfaceClass value
...
...
kernel/exit.c
...
...
@@ -214,20 +214,19 @@ struct pid *session_of_pgrp(struct pid *pgrp)
static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
{
	struct task_struct *p;
-	int ret = 1;

	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		if (p == ignored_task
-				|| p->exit_state
-				|| is_global_init(p->real_parent))
+		if ((p == ignored_task) ||
+		    (p->exit_state && thread_group_empty(p)) ||
+		    is_global_init(p->real_parent))
			continue;
+
		if (task_pgrp(p->real_parent) != pgrp &&
-		    task_session(p->real_parent) == task_session(p)) {
-			ret = 0;
-			break;
-		}
+		    task_session(p->real_parent) == task_session(p))
+			return 0;
	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
-	return ret;
+
+	/* (sighing)  "Often!" */
+	return 1;
}

int is_current_pgrp_orphaned(void)
...
...
@@ -255,6 +254,37 @@ static int has_stopped_jobs(struct pid *pgrp)
	return retval;
}

+/*
+ * Check to see if any process groups have become orphaned as
+ * a result of our exiting, and if they have any stopped jobs,
+ * send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
+ */
+static void
+kill_orphaned_pgrp(struct task_struct *tsk, struct task_struct *parent)
+{
+	struct pid *pgrp = task_pgrp(tsk);
+	struct task_struct *ignored_task = tsk;
+
+	if (!parent)
+		 /* exit: our father is in a different pgrp than
+		  * we are and we were the only connection outside.
+		  */
+		parent = tsk->real_parent;
+	else
+		/* reparent: our child is in a different pgrp than
+		 * we are, and it was the only connection outside.
+		 */
+		ignored_task = NULL;
+
+	if (task_pgrp(parent) != pgrp &&
+	    task_session(parent) == task_session(tsk) &&
+	    will_become_orphaned_pgrp(pgrp, ignored_task) &&
+	    has_stopped_jobs(pgrp)) {
+		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
+		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
+	}
+}
+
/**
 * reparent_to_kthreadd - Reparent the calling kernel thread to kthreadd
 *
...
...
@@ -635,22 +665,7 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
	    p->exit_signal != -1 && thread_group_empty(p))
		do_notify_parent(p, p->exit_signal);

-	/*
-	 * process group orphan check
-	 * Case ii: Our child is in a different pgrp
-	 * than we are, and it was the only connection
-	 * outside, so the child pgrp is now orphaned.
-	 */
-	if ((task_pgrp(p) != task_pgrp(father)) &&
-	    (task_session(p) == task_session(father))) {
-		struct pid *pgrp = task_pgrp(p);
-
-		if (will_become_orphaned_pgrp(pgrp, NULL) &&
-		    has_stopped_jobs(pgrp)) {
-			__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
-			__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
-		}
-	}
+	kill_orphaned_pgrp(p, father);
}

/*
...
...
@@ -735,11 +750,9 @@ static void forget_original_parent(struct task_struct *father)
 * Send signals to all our closest relatives so that they know
 * to properly mourn us..
 */
-static void exit_notify(struct task_struct *tsk)
+static void exit_notify(struct task_struct *tsk, int group_dead)
{
	int state;
-	struct task_struct *t;
-	struct pid *pgrp;

	/*
	 * This does two things:
...
...
@@ -753,25 +766,8 @@ static void exit_notify(struct task_struct *tsk)
	exit_task_namespaces(tsk);

	write_lock_irq(&tasklist_lock);
-	/*
-	 * Check to see if any process groups have become orphaned
-	 * as a result of our exiting, and if they have any stopped
-	 * jobs, send them a SIGHUP and then a SIGCONT. (POSIX 3.2.2.2)
-	 *
-	 * Case i: Our father is in a different pgrp than we are
-	 * and we were the only connection outside, so our pgrp
-	 * is about to become orphaned.
-	 */
-	t = tsk->real_parent;
-
-	pgrp = task_pgrp(tsk);
-	if ((task_pgrp(t) != pgrp) &&
-	    (task_session(t) == task_session(tsk)) &&
-	    will_become_orphaned_pgrp(pgrp, tsk) &&
-	    has_stopped_jobs(pgrp)) {
-		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
-		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
-	}
+	if (group_dead)
+		kill_orphaned_pgrp(tsk->group_leader, NULL);

	/* Let father know we died
	 *
...
...
@@ -788,8 +784,8 @@ static void exit_notify(struct task_struct *tsk)
	 * the same after a fork.
	 */
	if (tsk->exit_signal != SIGCHLD && tsk->exit_signal != -1 &&
-	    (tsk->parent_exec_id != t->self_exec_id  ||
-	     tsk->self_exec_id != tsk->parent_exec_id)
+	    (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
+	     tsk->self_exec_id != tsk->parent_exec_id)
	    && !capable(CAP_KILL))
		tsk->exit_signal = SIGCHLD;
...
...
@@ -986,7 +982,7 @@ NORET_TYPE void do_exit(long code)
		module_put(tsk->binfmt->module);

	proc_exit_connector(tsk);
-	exit_notify(tsk);
+	exit_notify(tsk, group_dead);
#ifdef CONFIG_NUMA
	mpol_free(tsk->mempolicy);
	tsk->mempolicy = NULL;
...
...
mm/slub.c
...
...
@@ -291,32 +291,16 @@ static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
#endif
}

-/*
- * The end pointer in a slab is special. It points to the first object in the
- * slab but has bit 0 set to mark it.
- *
- * Note that SLUB relies on page_mapping returning NULL for pages with bit 0
- * in the mapping set.
- */
-static inline int is_end(void *addr)
-{
-	return (unsigned long)addr & PAGE_MAPPING_ANON;
-}
-
-static void *slab_address(struct page *page)
-{
-	return page->end - PAGE_MAPPING_ANON;
-}
-
/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, const void *object)
{
	void *base;

-	if (object == page->end)
+	if (!object)
		return 1;

-	base = slab_address(page);
+	base = page_address(page);
	if (object < base || object >= base + s->objects * s->size ||
		(object - base) % s->size) {
		return 0;
...
...
@@ -349,8 +333,7 @@ static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)

/* Scan freelist */
#define for_each_free_object(__p, __s, __free) \
-	for (__p = (__free); (__p) != page->end; __p = get_freepointer((__s),\
-		__p))
+	for (__p = (__free); __p; __p = get_freepointer((__s), __p))

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
...
...
@@ -502,7 +485,7 @@ static void slab_fix(struct kmem_cache *s, char *fmt, ...)
static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
-	u8 *addr = slab_address(page);
+	u8 *addr = page_address(page);

	print_tracking(s, p);
...
...
@@ -637,7 +620,7 @@ static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at mininum
- * 		one word if debuggin is on to be able to detect writes
+ * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
...
...
@@ -680,7 +663,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
	if (!(s->flags & SLAB_POISON))
		return 1;

-	start = slab_address(page);
+	start = page_address(page);
	end = start + (PAGE_SIZE << s->order);
	length = s->objects * s->size;
	remainder = end - (start + length);
...
...
@@ -748,7 +731,7 @@ static int check_object(struct kmem_cache *s, struct page *page,
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
-		set_freepointer(s, p, page->end);
+		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
...
...
@@ -782,18 +765,18 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
	void *fp = page->freelist;
	void *object = NULL;

-	while (fp != page->end && nr <= s->objects) {
+	while (fp && nr <= s->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
-				set_freepointer(s, object, page->end);
+				set_freepointer(s, object, NULL);
				break;
			} else {
				slab_err(s, page, "Freepointer corrupt");
-				page->freelist = page->end;
+				page->freelist = NULL;
				page->inuse = s->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
...
...
@@ -870,7 +853,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
	if (!check_slab(s, page))
		goto bad;

-	if (object && !on_freelist(s, page, object)) {
+	if (!on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already allocated");
		goto bad;
	}
...
...
@@ -880,7 +863,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
		goto bad;
	}

-	if (object && !check_object(s, page, object, 0))
+	if (!check_object(s, page, object, 0))
		goto bad;

	/* Success perform special debug activities for allocs */
...
...
@@ -899,7 +882,7 @@ static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = s->objects;
-		page->freelist = page->end;
+		page->freelist = NULL;
	}
	return 0;
}
...
...
@@ -939,7 +922,7 @@ static int free_debug_processing(struct kmem_cache *s, struct page *page,
	}

	/* Special debug activities for freeing objects */
-	if (!SlabFrozen(page) && page->freelist == page->end)
+	if (!SlabFrozen(page) && !page->freelist)
		remove_full(s, page);
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
...
...
@@ -1015,30 +998,11 @@ static unsigned long kmem_cache_flags(unsigned long objsize,
	void (*ctor)(struct kmem_cache *, void *))
{
	/*
-	 * The page->offset field is only 16 bit wide. This is an offset
-	 * in units of words from the beginning of an object. If the slab
-	 * size is bigger then we cannot move the free pointer behind the
-	 * object anymore.
-	 *
-	 * On 32 bit platforms the limit is 256k. On 64bit platforms
-	 * the limit is 512k.
-	 *
-	 * Debugging or ctor may create a need to move the free
-	 * pointer. Fail if this happens.
+	 * Enable debugging if selected on the kernel commandline.
	 */
-	if (objsize >= 65535 * sizeof(void *)) {
-		BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
-				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
-		BUG_ON(ctor);
-	} else {
-		/*
-		 * Enable debugging if selected on the kernel commandline.
-		 */
-		if (slub_debug && (!slub_debug_slabs ||
-		    strncmp(slub_debug_slabs, name,
-			strlen(slub_debug_slabs)) == 0))
-			flags |= slub_debug;
-	}
+	if (slub_debug && (!slub_debug_slabs ||
+	    strncmp(slub_debug_slabs, name,
+		strlen(slub_debug_slabs)) == 0))
+		flags |= slub_debug;

	return flags;
}
...
...
@@ -1124,7 +1088,6 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
		SetSlabDebug(page);

	start = page_address(page);
-	page->end = start + 1;

	if (unlikely(s->flags & SLAB_POISON))
		memset(start, POISON_INUSE, PAGE_SIZE << s->order);
...
...
@@ -1136,7 +1099,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
		last = p;
	}
	setup_object(s, page, last);
-	set_freepointer(s, last, page->end);
+	set_freepointer(s, last, NULL);

	page->freelist = start;
	page->inuse = 0;
...
...
@@ -1152,7 +1115,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
		void *p;

		slab_pad_check(s, page);
-		for_each_object(p, s, slab_address(page))
+		for_each_object(p, s, page_address(page))
			check_object(s, page, p, 0);
		ClearSlabDebug(page);
	}
...
...
@@ -1162,7 +1125,6 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		-pages);

-	page->mapping = NULL;
	__free_pages(page, s->order);
}
...
...
@@ -1307,7 +1269,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
	 * may return off node objects because partial slabs are obtained
	 * from other nodes and filled up.
	 *
-	 * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
+	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
	 * defrag_ratio = 1000) then every (well almost) allocation will
	 * first attempt to defrag slab caches on other nodes. This means
	 * scanning over all nodes to look for partial slabs which may be
...
...
@@ -1366,7 +1328,7 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
	ClearSlabFrozen(page);
	if (page->inuse) {

-		if (page->freelist != page->end) {
+		if (page->freelist) {
			add_partial(n, page, tail);
			stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
		} else {
...
...
@@ -1382,9 +1344,11 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
			 * Adding an empty slab to the partial slabs in order
			 * to avoid page allocator overhead. This slab needs
			 * to come after the other slabs with objects in
-			 * order to fill them up. That way the size of the
-			 * partial list stays small. kmem_cache_shrink can
-			 * reclaim empty slabs from the partial list.
+			 * so that the others get filled first. That way the
+			 * size of the partial list stays small.
+			 *
+			 * kmem_cache_shrink can reclaim any empty slabs from the
+			 * partial list.
			 */
			add_partial(n, page, 1);
			slab_unlock(page);
...
...
@@ -1407,15 +1371,11 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
	if (c->freelist)
		stat(c, DEACTIVATE_REMOTE_FREES);
	/*
-	 * Merge cpu freelist into freelist. Typically we get here
+	 * Merge cpu freelist into slab freelist. Typically we get here
	 * because both freelists are empty. So this is unlikely
	 * to occur.
-	 *
-	 * We need to use _is_end here because deactivate slab may
-	 * be called for a debug slab. Then c->freelist may contain
-	 * a dummy pointer.
	 */
-	while (unlikely(!is_end(c->freelist))) {
+	while (unlikely(c->freelist)) {
		void **object;

		tail = 0;	/* Hot objects. Put the slab first */
...
...
@@ -1442,6 +1402,7 @@ static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)

/*
 * Flush cpu slab.
 *
+ * Called from IPI handler with interrupts disabled.
 */
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
...
...
@@ -1500,7 +1461,8 @@ static inline int node_match(struct kmem_cache_cpu *c, int node)
 * rest of the freelist to the lockless freelist.
 *
 * And if we were unable to get a new slab from the partial slab lists then
- * we need to allocate a new slab. This is slowest path since we may sleep.
+ * we need to allocate a new slab. This is the slowest path since it involves
+ * a call to the page allocator and the setup of a new slab.
 */
static void *__slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
...
...
@@ -1514,18 +1476,19 @@ static void *__slab_alloc(struct kmem_cache *s,

	slab_lock(c->page);
	if (unlikely(!node_match(c, node)))
		goto another_slab;
+
+	stat(c, ALLOC_REFILL);
+
load_freelist:
	object = c->page->freelist;
-	if (unlikely(object == c->page->end))
+	if (unlikely(!object))
		goto another_slab;
	if (unlikely(SlabDebug(c->page)))
		goto debug;

	object = c->page->freelist;
	c->freelist = object[c->offset];
	c->page->inuse = s->objects;
-	c->page->freelist = c->page->end;
+	c->page->freelist = NULL;
	c->node = page_to_nid(c->page);
unlock_out:
	slab_unlock(c->page);
...
...
@@ -1578,7 +1541,6 @@ static void *__slab_alloc(struct kmem_cache *s,
	return NULL;
debug:
-	object = c->page->freelist;
	if (!alloc_debug_processing(s, c->page, object, addr))
		goto another_slab;
...
...
@@ -1607,7 +1569,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
-	if (unlikely(is_end(c->freelist) || !node_match(c, node)))
+	if (unlikely(!c->freelist || !node_match(c, node)))

		object = __slab_alloc(s, gfpflags, node, addr, c);
...
...
@@ -1659,6 +1621,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
	if (unlikely(SlabDebug(page)))
		goto debug;
checks_ok:
	prior = object[offset] = page->freelist;
	page->freelist = object;
...
...
@@ -1673,11 +1636,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
		goto slab_empty;

	/*
-	 * Objects left in the slab. If it
-	 * was not on the partial list before
+	 * Objects left in the slab. If it was not on the partial list before
	 * then add it.
	 */
-	if (unlikely(prior == page->end)) {
+	if (unlikely(!prior)) {
		add_partial(get_node(s, page_to_nid(page)), page, 1);
		stat(c, FREE_ADD_PARTIAL);
	}
...
...
@@ -1687,7 +1649,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
	return;

slab_empty:
-	if (prior != page->end) {
+	if (prior) {
		/*
		 * Slab still on the partial list.
		 */
...
...
@@ -1724,8 +1686,8 @@ static __always_inline void slab_free(struct kmem_cache *s,
	unsigned long flags;

	local_irq_save(flags);
-	debug_check_no_locks_freed(object, s->objsize);
	c = get_cpu_slab(s, smp_processor_id());
+	debug_check_no_locks_freed(object, c->objsize);
	if (likely(page == c->page && c->node >= 0)) {
		object[c->offset] = c->freelist;
		c->freelist = object;
...
...
@@ -1888,13 +1850,11 @@ static unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
-	 * If the user wants hardware cache aligned objects then
-	 * follow that suggestion if the object is sufficiently
-	 * large.
+	 * If the user wants hardware cache aligned objects then follow that
+	 * suggestion if the object is sufficiently large.
	 *
-	 * The hardware cache alignment cannot override the
-	 * specified alignment though. If that is greater
-	 * then use it.
+	 * The hardware cache alignment cannot override the specified
+	 * alignment though. If that is greater then use it.
	 */
	if ((flags & SLAB_HWCACHE_ALIGN) &&
			size > cache_line_size() / 2)
...
...
@@ -1910,7 +1870,7 @@ static void init_kmem_cache_cpu(struct kmem_cache *s,
			struct kmem_cache_cpu *c)
{
	c->page = NULL;
-	c->freelist = (void *)PAGE_MAPPING_ANON;
+	c->freelist = NULL;
	c->node = 0;
	c->offset = s->offset / sizeof(void *);
	c->objsize = s->objsize;
...
...
@@ -2092,6 +2052,7 @@ static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
#endif
	init_kmem_cache_node(n);
+	atomic_long_inc(&n->nr_slabs);

	/*
	 * lockdep requires consistent irq usage for each lock
	 * so even though there cannot be a race this early in
...
...
@@ -2172,6 +2133,14 @@ static int calculate_sizes(struct kmem_cache *s)
	unsigned long size = s->objsize;
	unsigned long align = s->align;

+	/*
+	 * Round up object size to the next word boundary. We can only
+	 * place the free pointer at word boundaries and this determines
+	 * the possible location of the free pointer.
+	 */
+	size = ALIGN(size, sizeof(void *));
+
+#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Determine if we can poison the object itself. If the user of
	 * the slab may touch the object after free or before allocation
...
...
@@ -2183,14 +2152,7 @@ static int calculate_sizes(struct kmem_cache *s)
	else
		s->flags &= ~__OBJECT_POISON;

-	/*
-	 * Round up object size to the next word boundary. We can only
-	 * place the free pointer at word boundaries and this determines
-	 * the possible location of the free pointer.
-	 */
-	size = ALIGN(size, sizeof(void *));
-
-#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If we are Redzoning then check if there is some space between the
	 * end of the object and the free pointer. If not then add an
...
...
@@ -2343,7 +2305,7 @@ int kmem_ptr_validate(struct kmem_cache *s, const void *object)
	/*
	 * We could also check if the object is on the slabs freelist.
	 * But this would be too expensive and it seems that the main
-	 * purpose of kmem_ptr_valid is to check if the object belongs
+	 * purpose of kmem_ptr_valid() is to check if the object belongs
	 * to a certain slab.
	 */
	return 1;
...
...
@@ -2630,13 +2592,24 @@ void *__kmalloc(size_t size, gfp_t flags)
}
EXPORT_SYMBOL(__kmalloc);

+static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+{
+	struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
+						get_order(size));
+
+	if (page)
+		return page_address(page);
+	else
+		return NULL;
+}
+
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *s;

	if (unlikely(size > PAGE_SIZE))
-		return kmalloc_large(size, flags);
+		return kmalloc_large_node(size, flags, node);

	s = get_slab(size, flags);
...
...
@@ -2653,19 +2626,17 @@ size_t ksize(const void *object)
	struct page *page;
	struct kmem_cache *s;

	BUG_ON(!object);
	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	page = virt_to_head_page(object);
-	BUG_ON(!page);
+
+	if (unlikely(!PageSlab(page)))
+		return PAGE_SIZE << compound_order(page);

	s = page->slab;
-	BUG_ON(!s);

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
...
...
@@ -2673,6 +2644,7 @@ size_t ksize(const void *object)
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->objsize;

#endif
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
...
...
@@ -2680,7 +2652,6 @@ size_t ksize(const void *object)
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;

	/*
	 * Else we can use all the padding etc for the allocation
	 */
...
...
@@ -2957,7 +2928,7 @@ void __init kmem_cache_init(void)
	/*
	 * Patch up the size_index table if we have strange large alignment
	 * requirements for the kmalloc array. This is only the case for
-	 * mips it seems. The standard arches will not generate any code here.
+	 * MIPS it seems. The standard arches will not generate any code here.
	 *
	 * Largest permitted alignment is 256 bytes due to the way we
	 * handle the index determination for the smaller caches.
...
...
@@ -2986,7 +2957,6 @@ void __init kmem_cache_init(void)
	kmem_size = sizeof(struct kmem_cache);
#endif

	printk(KERN_INFO
		"SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
		" CPUs=%d, Nodes=%d\n",
...
...
@@ -3083,12 +3053,15 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
		 */
+		for_each_online_cpu(cpu)
+			get_cpu_slab(s, cpu)->objsize = s->objsize;
		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
		up_write(&slub_lock);
+
		if (sysfs_slab_alias(s, name))
			goto err;
		return s;
	}
+
	s = kmalloc(kmem_size, GFP_KERNEL);
	if (s) {
		if (kmem_cache_open(s, GFP_KERNEL, name,
...
...
@@ -3184,7 +3157,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
	struct kmem_cache *s;

	if (unlikely(size > PAGE_SIZE))
-		return kmalloc_large(size, gfpflags);
+		return kmalloc_large_node(size, gfpflags, node);

	s = get_slab(size, gfpflags);
...
...
@@ -3199,7 +3172,7 @@ static int validate_slab(struct kmem_cache *s, struct page *page,
						unsigned long *map)
{
	void *p;
-	void *addr = slab_address(page);
+	void *addr = page_address(page);

	if (!check_slab(s, page) ||
			!on_freelist(s, page, NULL))
...
...
@@ -3482,7 +3455,7 @@ static int add_location(struct loc_track *t, struct kmem_cache *s,
static void process_slab(struct loc_track *t, struct kmem_cache *s,
		struct page *page, enum track_item alloc)
{
-	void *addr = slab_address(page);
+	void *addr = page_address(page);
	DECLARE_BITMAP(map, s->objects);
	void *p;
...
...
@@ -3591,8 +3564,8 @@ enum slab_stat_type {
#define SO_CPU		(1 << SL_CPU)
#define SO_OBJECTS	(1 << SL_OBJECTS)

-static unsigned long slab_objects(struct kmem_cache *s,
-			char *buf, unsigned long flags)
+static ssize_t show_slab_objects(struct kmem_cache *s,
+			char *buf, unsigned long flags)
{
	unsigned long total = 0;
	int cpu;
...
...
@@ -3602,6 +3575,8 @@ static unsigned long slab_objects(struct kmem_cache *s,
	unsigned long *per_cpu;

	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
+	if (!nodes)
+		return -ENOMEM;
	per_cpu = nodes + nr_node_ids;

	for_each_possible_cpu(cpu) {
...
...
@@ -3754,25 +3729,25 @@ SLAB_ATTR_RO(aliases);

static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
-	return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
+	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
}
SLAB_ATTR_RO(slabs);

static ssize_t partial_show(struct kmem_cache *s, char *buf)
{
-	return slab_objects(s, buf, SO_PARTIAL);
+	return show_slab_objects(s, buf, SO_PARTIAL);
}
SLAB_ATTR_RO(partial);

static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
{
-	return slab_objects(s, buf, SO_CPU);
+	return show_slab_objects(s, buf, SO_CPU);
}
SLAB_ATTR_RO(cpu_slabs);

static ssize_t objects_show(struct kmem_cache *s, char *buf)
{
-	return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
+	return show_slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
}
SLAB_ATTR_RO(objects);
...
...
@@ -3971,7 +3946,6 @@ SLAB_ATTR(remote_node_defrag_ratio);
#endif

#ifdef CONFIG_SLUB_STATS
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
	unsigned long sum = 0;
...
...
@@ -4155,8 +4129,8 @@ static struct kset *slab_kset;
#define ID_STR_LENGTH 64

/* Create a unique string id for a slab cache:
- * format
- * :[flags-]size:[memory address of kmemcache]
+ *
+ * Format	:[flags-]size
 */
static char *create_unique_id(struct kmem_cache *s)
{
...
...
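The change that runs through this whole file is the removal of the special "end" sentinel pointer: a slab's freelist is now simply NULL-terminated, so every `fp != page->end` comparison becomes a plain truth test and walks become `while (p)` loops. A userspace sketch of the resulting freelist walk (the structures are stand-ins, not the kernel's):

```c
/*
 * Sketch of a NULL-terminated freelist walk, mirroring the loops that
 * replace the old end-sentinel comparisons above.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_object {
	struct demo_object *free_next;	/* plays the role of the free pointer */
	int id;
};

/* Count free objects by walking a NULL-terminated freelist. */
static int count_free(struct demo_object *freelist)
{
	int nr = 0;

	while (freelist) {		/* no sentinel compare needed anymore */
		nr++;
		freelist = freelist->free_next;
	}
	return nr;
}

int main(void)
{
	struct demo_object a = { NULL, 0 }, b = { &a, 1 }, c = { &b, 2 };

	printf("free objects: %d\n", count_free(&c));	/* prints 3 */
	return 0;
}
```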
mm/truncate.c
...
...
@@ -134,8 +134,7 @@ invalidate_complete_page(struct address_space *mapping, struct page *page)
}

/**
- * truncate_inode_pages - truncate range of pages specified by start and
- * end byte offsets
+ * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
...
...