openanolis / cloud-kernel
Commit e13e75b8
Authored April 09, 2018 by Dan Williams

    Merge branch 'for-4.17/dax' into libnvdimm-for-next

Parents: 1ed41b56, 976431b0
Showing 60 changed files with 1637 additions and 1298 deletions (+1637 -1298)
Changed files:

Documentation/admin-guide/kernel-parameters.txt (+11 -0)
drivers/dax/Kconfig (+4 -1)
drivers/dax/super.c (+12 -3)
drivers/md/Kconfig (+1 -1)
drivers/md/dm-linear.c (+6 -0)
drivers/md/dm-log-writes.c (+50 -45)
drivers/md/dm-stripe.c (+6 -0)
drivers/md/dm.c (+6 -4)
drivers/nvdimm/Kconfig (+1 -1)
drivers/s390/block/Kconfig (+1 -1)
fs/block_dev.c (+0 -5)
fs/dax.c (+94 -52)
fs/ext2/ext2.h (+1 -0)
fs/ext2/inode.c (+27 -19)
fs/ext2/namei.c (+2 -16)
fs/ext4/inode.c (+31 -11)
fs/libfs.c (+39 -0)
fs/xfs/xfs_aops.c (+18 -16)
fs/xfs/xfs_aops.h (+1 -0)
fs/xfs/xfs_iops.c (+4 -1)
include/linux/dax.h (+33 -9)
include/linux/fs.h (+4 -0)
include/linux/sched/deadline.h (+0 -6)
include/linux/sched/isolation.h (+1 -0)
include/linux/sched/nohz.h (+0 -4)
include/linux/tick.h (+3 -1)
include/linux/wait_bit.h (+70 -0)
kernel/sched/Makefile (+3 -2)
kernel/sched/autogroup.c (+9 -12)
kernel/sched/autogroup.h (+3 -9)
kernel/sched/clock.c (+12 -24)
kernel/sched/completion.c (+1 -4)
kernel/sched/core.c (+108 -57)
kernel/sched/cpuacct.c (+11 -22)
kernel/sched/cpudeadline.c (+11 -12)
kernel/sched/cpudeadline.h (+10 -19)
kernel/sched/cpufreq.c (+0 -1)
kernel/sched/cpufreq_schedutil.c (+67 -70)
kernel/sched/cpupri.c (+5 -10)
kernel/sched/cpupri.h (+9 -16)
kernel/sched/cputime.c (+27 -31)
kernel/sched/deadline.c (+43 -35)
kernel/sched/debug.c (+35 -64)
kernel/sched/fair.c (+180 -119)
kernel/sched/idle.c (+125 -17)
kernel/sched/idle_task.c (+0 -110)
kernel/sched/isolation.c (+6 -8)
kernel/sched/loadavg.c (+15 -19)
kernel/sched/membarrier.c (+12 -15)
kernel/sched/rt.c (+29 -22)
kernel/sched/sched.h (+325 -298)
kernel/sched/stats.c (+11 -9)
kernel/sched/stats.h (+40 -46)
kernel/sched/stop_task.c (+9 -2)
kernel/sched/swait.c (+4 -2)
kernel/sched/topology.c (+21 -25)
kernel/sched/wait.c (+5 -8)
kernel/sched/wait_bit.c (+65 -6)
kernel/time/tick-sched.c (+8 -7)
kernel/workqueue.c (+2 -1)
Documentation/admin-guide/kernel-parameters.txt  (+11 -0)

@@ -1766,6 +1766,17 @@
 			nohz
 			  Disable the tick when a single task runs.
+
+			  A residual 1Hz tick is offloaded to workqueues, which you
+			  need to affine to housekeeping through the global
+			  workqueue's affinity configured via the
+			  /sys/devices/virtual/workqueue/cpumask sysfs file, or
+			  by using the 'domain' flag described below.
+
+			  NOTE: by default the global workqueue runs on all CPUs,
+			  so to protect individual CPUs the 'cpumask' file has to
+			  be configured manually after bootup.
+
 			domain
 			  Isolate from the general SMP balancing and scheduling
 			  algorithms. Note that performing domain isolation this way
drivers/dax/Kconfig  (+4 -1)

+config DAX_DRIVER
+	select DAX
+	bool
+
 menuconfig DAX
 	tristate "DAX: direct access to differentiated memory"
 	select SRCU

@@ -16,7 +20,6 @@ config DEV_DAX
 	  baseline memory pool.  Mappings of a /dev/daxX.Y device impose
 	  restrictions that make the mapping behavior deterministic.

 config DEV_DAX_PMEM
 	tristate "PMEM DAX: direct access to persistent memory"
 	depends on LIBNVDIMM && NVDIMM_DAX && DEV_DAX
drivers/dax/super.c  (+12 -3)

@@ -124,10 +124,19 @@ int __bdev_dax_supported(struct super_block *sb, int blocksize)
 		return len < 0 ? len : -EIO;
 	}

-	if ((IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn))
-			|| pfn_t_devmap(pfn))
+	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED) && pfn_t_special(pfn)) {
+		/*
+		 * An arch that has enabled the pmem api should also
+		 * have its drivers support pfn_t_devmap()
+		 *
+		 * This is a developer warning and should not trigger in
+		 * production. dax_flush() will crash since it depends
+		 * on being able to do (page_address(pfn_to_page())).
+		 */
+		WARN_ON(IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API));
+	} else if (pfn_t_devmap(pfn)) {
 		/* pass */;
-	else {
+	} else {
 		pr_debug("VFS (%s): error: dax support not enabled\n",
 				sb->s_id);
 		return -EOPNOTSUPP;
drivers/md/Kconfig  (+1 -1)

@@ -201,7 +201,7 @@ config BLK_DEV_DM_BUILTIN
 config BLK_DEV_DM
 	tristate "Device mapper support"
 	select BLK_DEV_DM_BUILTIN
-	select DAX
+	depends on DAX || DAX=n
 	---help---
 	  Device-mapper is a low level volume manager.  It works by allowing
 	  people to specify mappings for ranges of logical sectors. Various
drivers/md/dm-linear.c  (+6 -0)

@@ -154,6 +154,7 @@ static int linear_iterate_devices(struct dm_target *ti,
 	return fn(ti, lc->dev, lc->start, ti->len, data);
 }

+#if IS_ENABLED(CONFIG_DAX_DRIVER)
 static long linear_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn)
 {

@@ -184,6 +185,11 @@ static size_t linear_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
 	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
 }

+#else
+#define linear_dax_direct_access NULL
+#define linear_dax_copy_from_iter NULL
+#endif
+
 static struct target_type linear_target = {
 	.name   = "linear",
 	.version = {1, 4, 0},
drivers/md/dm-log-writes.c  (+50 -45)

The first hunk removes log_dax() from its old location:

@@ -610,51 +610,6 @@ static int log_mark(struct log_writes_c *lc, char *data)
 	return 0;
 }

-static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
-		   struct iov_iter *i)
-{
-	struct pending_block *block;
-
-	if (!bytes)
-		return 0;
-
-	block = kzalloc(sizeof(struct pending_block), GFP_KERNEL);
-	if (!block) {
-		DMERR("Error allocating dax pending block");
-		return -ENOMEM;
-	}
-
-	block->data = kzalloc(bytes, GFP_KERNEL);
-	if (!block->data) {
-		DMERR("Error allocating dax data space");
-		kfree(block);
-		return -ENOMEM;
-	}
-
-	/* write data provided via the iterator */
-	if (!copy_from_iter(block->data, bytes, i)) {
-		DMERR("Error copying dax data");
-		kfree(block->data);
-		kfree(block);
-		return -EIO;
-	}
-
-	/* rewind the iterator so that the block driver can use it */
-	iov_iter_revert(i, bytes);
-
-	block->datalen = bytes;
-	block->sector = bio_to_dev_sectors(lc, sector);
-	block->nr_sectors = ALIGN(bytes, lc->sectorsize) >> lc->sectorshift;
-
-	atomic_inc(&lc->pending_blocks);
-	spin_lock_irq(&lc->blocks_lock);
-	list_add_tail(&block->list, &lc->unflushed_blocks);
-	spin_unlock_irq(&lc->blocks_lock);
-	wake_up_process(lc->log_kthread);
-
-	return 0;
-}
-
 static void log_writes_dtr(struct dm_target *ti)
 {
 	struct log_writes_c *lc = ti->private;

The second hunk re-adds the same log_dax() body verbatim, now under a CONFIG_DAX_DRIVER guard next to the dax callbacks:

@@ -920,6 +875,52 @@ static void log_writes_io_hints(struct dm_target *ti, struct queue_limits *limits)
 	limits->io_min = limits->physical_block_size;
 }

+#if IS_ENABLED(CONFIG_DAX_DRIVER)
+static int log_dax(struct log_writes_c *lc, sector_t sector, size_t bytes,
+		   struct iov_iter *i)
+{
+	[body identical to the log_dax() shown above]
+}
+
 static long log_writes_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
 					 long nr_pages, void **kaddr, pfn_t *pfn)
 {

@@ -956,6 +957,10 @@ static size_t log_writes_dax_copy_from_iter(struct dm_target *ti,
 dax_copy:
 	return dax_copy_from_iter(lc->dev->dax_dev, pgoff, addr, bytes, i);
 }
+#else
+#define log_writes_dax_direct_access NULL
+#define log_writes_dax_copy_from_iter NULL
+#endif

 static struct target_type log_writes_target = {
 	.name   = "log-writes",
drivers/md/dm-stripe.c  (+6 -0)

@@ -311,6 +311,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
 	return DM_MAPIO_REMAPPED;
 }

+#if IS_ENABLED(CONFIG_DAX_DRIVER)
 static long stripe_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
 		long nr_pages, void **kaddr, pfn_t *pfn)
 {

@@ -351,6 +352,11 @@ static size_t stripe_dax_copy_from_iter(struct dm_target *ti, pgoff_t pgoff,
 	return dax_copy_from_iter(dax_dev, pgoff, addr, bytes, i);
 }

+#else
+#define stripe_dax_direct_access NULL
+#define stripe_dax_copy_from_iter NULL
+#endif
+
 /*
  * Stripe status:
  *
drivers/md/dm.c  (+6 -4)

@@ -1805,7 +1805,7 @@ static void cleanup_mapped_device(struct mapped_device *md)
 static struct mapped_device *alloc_dev(int minor)
 {
 	int r, numa_node_id = dm_get_numa_node();
-	struct dax_device *dax_dev;
+	struct dax_device *dax_dev = NULL;
 	struct mapped_device *md;
 	void *old_md;

@@ -1871,9 +1871,11 @@ static struct mapped_device *alloc_dev(int minor)
 	md->disk->private_data = md;
 	sprintf(md->disk->disk_name, "dm-%d", minor);

-	dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
-	if (!dax_dev)
-		goto bad;
+	if (IS_ENABLED(CONFIG_DAX_DRIVER)) {
+		dax_dev = alloc_dax(md, md->disk->disk_name, &dm_dax_ops);
+		if (!dax_dev)
+			goto bad;
+	}
 	md->dax_dev = dax_dev;

 	add_disk_no_queue_reg(md->disk);
drivers/nvdimm/Kconfig  (+1 -1)

@@ -20,7 +20,7 @@ if LIBNVDIMM
 config BLK_DEV_PMEM
 	tristate "PMEM: Persistent memory block device support"
 	default LIBNVDIMM
-	select DAX
+	select DAX_DRIVER
 	select ND_BTT if BTT
 	select ND_PFN if NVDIMM_PFN
 	help
drivers/s390/block/Kconfig  (+1 -1)

@@ -15,8 +15,8 @@ config BLK_DEV_XPRAM
 config DCSSBLK
 	def_tristate m
-	select DAX
 	select FS_DAX_LIMITED
+	select DAX_DRIVER
 	prompt "DCSSBLK support"
 	depends on S390 && BLOCK
 	help
fs/block_dev.c  (+0 -5)

@@ -1946,11 +1946,6 @@ static int blkdev_releasepage(struct page *page, gfp_t wait)
 static int blkdev_writepages(struct address_space *mapping,
 			     struct writeback_control *wbc)
 {
-	if (dax_mapping(mapping)) {
-		struct block_device *bdev = I_BDEV(mapping->host);
-
-		return dax_writeback_mapping_range(mapping, bdev, wbc);
-	}
 	return generic_writepages(mapping, wbc);
 }
fs/dax.c  (+94 -52)

@@ -73,16 +73,15 @@ fs_initcall(init_dax_wait_table);
 #define RADIX_DAX_ZERO_PAGE	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 2))
 #define RADIX_DAX_EMPTY	(1 << (RADIX_TREE_EXCEPTIONAL_SHIFT + 3))

-static unsigned long dax_radix_sector(void *entry)
+static unsigned long dax_radix_pfn(void *entry)
 {
 	return (unsigned long)entry >> RADIX_DAX_SHIFT;
 }

-static void *dax_radix_locked_entry(sector_t sector, unsigned long flags)
+static void *dax_radix_locked_entry(unsigned long pfn, unsigned long flags)
 {
 	return (void *)(RADIX_TREE_EXCEPTIONAL_ENTRY | flags |
-			((unsigned long)sector << RADIX_DAX_SHIFT) |
-			RADIX_DAX_ENTRY_LOCK);
+			(pfn << RADIX_DAX_SHIFT) | RADIX_DAX_ENTRY_LOCK);
 }

 static unsigned int dax_radix_order(void *entry)

@@ -299,6 +298,63 @@ static void put_unlocked_mapping_entry(struct address_space *mapping,
 		dax_wake_mapping_entry_waiter(mapping, index, entry, false);
 }

+static unsigned long dax_entry_size(void *entry)
+{
+	if (dax_is_zero_entry(entry))
+		return 0;
+	else if (dax_is_empty_entry(entry))
+		return 0;
+	else if (dax_is_pmd_entry(entry))
+		return PMD_SIZE;
+	else
+		return PAGE_SIZE;
+}
+
+static unsigned long dax_radix_end_pfn(void *entry)
+{
+	return dax_radix_pfn(entry) + dax_entry_size(entry) / PAGE_SIZE;
+}
+
+/*
+ * Iterate through all mapped pfns represented by an entry, i.e. skip
+ * 'empty' and 'zero' entries.
+ */
+#define for_each_mapped_pfn(entry, pfn) \
+	for (pfn = dax_radix_pfn(entry); \
+			pfn < dax_radix_end_pfn(entry); pfn++)
+
+static void dax_associate_entry(void *entry, struct address_space *mapping)
+{
+	unsigned long pfn;
+
+	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
+		return;
+
+	for_each_mapped_pfn(entry, pfn) {
+		struct page *page = pfn_to_page(pfn);
+
+		WARN_ON_ONCE(page->mapping);
+		page->mapping = mapping;
+	}
+}
+
+static void dax_disassociate_entry(void *entry, struct address_space *mapping,
+		bool trunc)
+{
+	unsigned long pfn;
+
+	if (IS_ENABLED(CONFIG_FS_DAX_LIMITED))
+		return;
+
+	for_each_mapped_pfn(entry, pfn) {
+		struct page *page = pfn_to_page(pfn);
+
+		WARN_ON_ONCE(trunc && page_ref_count(page) > 1);
+		WARN_ON_ONCE(page->mapping && page->mapping != mapping);
+		page->mapping = NULL;
+	}
+}
+
 /*
  * Find radix tree entry at given index. If it points to an exceptional entry,
  * return it with the radix tree entry locked. If the radix tree doesn't

@@ -405,6 +461,7 @@ static void *grab_mapping_entry(struct address_space *mapping, pgoff_t index,
 		}

 		if (pmd_downgrade) {
+			dax_disassociate_entry(entry, mapping, false);
 			radix_tree_delete(&mapping->page_tree, index);
 			mapping->nrexceptional--;
 			dax_wake_mapping_entry_waiter(mapping, index, entry,

@@ -454,6 +511,7 @@ static int __dax_invalidate_mapping_entry(struct address_space *mapping,
 	    (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
 	     radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
 		goto out;
+	dax_disassociate_entry(entry, mapping, trunc);
 	radix_tree_delete(page_tree, index);
 	mapping->nrexceptional--;
 	ret = 1;

@@ -526,12 +584,13 @@ static int copy_user_dax(struct block_device *bdev, struct dax_device *dax_dev,
  */
 static void *dax_insert_mapping_entry(struct address_space *mapping,
 				      struct vm_fault *vmf,
-				      void *entry, sector_t sector,
+				      void *entry, pfn_t pfn_t,
 				      unsigned long flags, bool dirty)
 {
 	struct radix_tree_root *page_tree = &mapping->page_tree;
-	void *new_entry;
+	unsigned long pfn = pfn_t_to_pfn(pfn_t);
 	pgoff_t index = vmf->pgoff;
+	void *new_entry;

 	if (dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);

@@ -546,7 +605,11 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 	}

 	spin_lock_irq(&mapping->tree_lock);
-	new_entry = dax_radix_locked_entry(sector, flags);
+	new_entry = dax_radix_locked_entry(pfn, flags);
+	if (dax_entry_size(entry) != dax_entry_size(new_entry)) {
+		dax_disassociate_entry(entry, mapping, false);
+		dax_associate_entry(new_entry, mapping);
+	}

 	if (dax_is_zero_entry(entry) || dax_is_empty_entry(entry)) {
 		/*

@@ -657,17 +720,14 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
 	i_mmap_unlock_read(mapping);
 }

-static int dax_writeback_one(struct block_device *bdev,
-		struct dax_device *dax_dev, struct address_space *mapping,
-		pgoff_t index, void *entry)
+static int dax_writeback_one(struct dax_device *dax_dev,
+		struct address_space *mapping, pgoff_t index, void *entry)
 {
 	struct radix_tree_root *page_tree = &mapping->page_tree;
-	void *entry2, **slot, *kaddr;
-	long ret = 0, id;
-	sector_t sector;
-	pgoff_t pgoff;
+	void *entry2, **slot;
+	unsigned long pfn;
+	long ret = 0;
 	size_t size;
-	pfn_t pfn;

 	/*
 	 * A page got tagged dirty in DAX mapping? Something is seriously

@@ -683,10 +743,10 @@ static int dax_writeback_one(struct block_device *bdev,
 		goto put_unlocked;
 	/*
 	 * Entry got reallocated elsewhere? No need to writeback. We have to
-	 * compare sectors as we must not bail out due to difference in lockbit
+	 * compare pfns as we must not bail out due to difference in lockbit
 	 * or entry type.
 	 */
-	if (dax_radix_sector(entry2) != dax_radix_sector(entry))
+	if (dax_radix_pfn(entry2) != dax_radix_pfn(entry))
 		goto put_unlocked;
 	if (WARN_ON_ONCE(dax_is_empty_entry(entry) ||
 				dax_is_zero_entry(entry))) {

@@ -712,33 +772,15 @@ static int dax_writeback_one(struct block_device *bdev,
 	/*
 	 * Even if dax_writeback_mapping_range() was given a wbc->range_start
 	 * in the middle of a PMD, the 'index' we are given will be aligned to
-	 * the start index of the PMD, as will the sector we pull from
-	 * 'entry'. This allows us to flush for PMD_SIZE and not have to
-	 * worry about partial PMD writebacks.
+	 * the start index of the PMD, as will the pfn we pull from 'entry'.
+	 * This allows us to flush for PMD_SIZE and not have to worry about
+	 * partial PMD writebacks.
 	 */
-	sector = dax_radix_sector(entry);
+	pfn = dax_radix_pfn(entry);
 	size = PAGE_SIZE << dax_radix_order(entry);

-	id = dax_read_lock();
-	ret = bdev_dax_pgoff(bdev, sector, size, &pgoff);
-	if (ret)
-		goto dax_unlock;
-
-	/*
-	 * dax_direct_access() may sleep, so cannot hold tree_lock over
-	 * its invocation.
-	 */
-	ret = dax_direct_access(dax_dev, pgoff, size / PAGE_SIZE, &kaddr, &pfn);
-	if (ret < 0)
-		goto dax_unlock;
-
-	if (WARN_ON_ONCE(ret < size / PAGE_SIZE)) {
-		ret = -EIO;
-		goto dax_unlock;
-	}
-
-	dax_mapping_entry_mkclean(mapping, index, pfn_t_to_pfn(pfn));
-	dax_flush(dax_dev, kaddr, size);
+	dax_mapping_entry_mkclean(mapping, index, pfn);
+	dax_flush(dax_dev, page_address(pfn_to_page(pfn)), size);
 	/*
 	 * After we have flushed the cache, we can clear the dirty tag. There
 	 * cannot be new dirty data in the pfn after the flush has completed as

@@ -749,8 +791,6 @@ static int dax_writeback_one(struct block_device *bdev,
 	radix_tree_tag_clear(page_tree, index, PAGECACHE_TAG_DIRTY);
 	spin_unlock_irq(&mapping->tree_lock);
 	trace_dax_writeback_one(mapping->host, index, size >> PAGE_SHIFT);
- dax_unlock:
-	dax_read_unlock(id);
 	put_locked_mapping_entry(mapping, index);
 	return ret;

@@ -808,8 +848,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
 			break;
 		}

-		ret = dax_writeback_one(bdev, dax_dev, mapping, indices[i],
-				pvec.pages[i]);
+		ret = dax_writeback_one(dax_dev, mapping, indices[i],
+				pvec.pages[i]);
 		if (ret < 0) {
 			mapping_set_error(mapping, ret);
 			goto out;

@@ -877,6 +917,7 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 	int ret = VM_FAULT_NOPAGE;
 	struct page *zero_page;
 	void *entry2;
+	pfn_t pfn;

 	zero_page = ZERO_PAGE(0);
 	if (unlikely(!zero_page)) {

@@ -884,14 +925,15 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
 		goto out;
 	}

-	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, 0,
+	pfn = page_to_pfn_t(zero_page);
+	entry2 = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 			RADIX_DAX_ZERO_PAGE, false);
 	if (IS_ERR(entry2)) {
 		ret = VM_FAULT_SIGBUS;
 		goto out;
 	}

-	vm_insert_mixed(vmf->vma, vaddr, page_to_pfn_t(zero_page));
+	vm_insert_mixed(vmf->vma, vaddr, pfn);
 out:
 	trace_dax_load_hole(inode, vmf, ret);
 	return ret;

@@ -1200,8 +1242,7 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		if (error < 0)
 			goto error_finish_iomap;

-		entry = dax_insert_mapping_entry(mapping, vmf, entry,
-						 dax_iomap_sector(&iomap, pos),
+		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 						 0, write && !sync);
 		if (IS_ERR(entry)) {
 			error = PTR_ERR(entry);

@@ -1280,13 +1321,15 @@ static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 	void *ret = NULL;
 	spinlock_t *ptl;
 	pmd_t pmd_entry;
+	pfn_t pfn;

 	zero_page = mm_get_huge_zero_page(vmf->vma->vm_mm);

 	if (unlikely(!zero_page))
 		goto fallback;

-	ret = dax_insert_mapping_entry(mapping, vmf, entry, 0,
+	pfn = page_to_pfn_t(zero_page);
+	ret = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 			RADIX_DAX_PMD | RADIX_DAX_ZERO_PAGE, false);
 	if (IS_ERR(ret))
 		goto fallback;

@@ -1409,8 +1452,7 @@ static int dax_iomap_pmd_fault(struct vm_fault *vmf, pfn_t *pfnp,
 		if (error < 0)
 			goto finish_iomap;

-		entry = dax_insert_mapping_entry(mapping, vmf, entry,
-						dax_iomap_sector(&iomap, pos),
+		entry = dax_insert_mapping_entry(mapping, vmf, entry, pfn,
 						RADIX_DAX_PMD, write && !sync);
 		if (IS_ERR(entry))
 			goto finish_iomap;
fs/ext2/ext2.h  (+1 -0)

@@ -814,6 +814,7 @@ extern const struct inode_operations ext2_file_inode_operations;
 extern const struct file_operations ext2_file_operations;

 /* inode.c */
+extern void ext2_set_file_ops(struct inode *inode);
 extern const struct address_space_operations ext2_aops;
 extern const struct address_space_operations ext2_nobh_aops;
 extern const struct iomap_ops ext2_iomap_ops;
fs/ext2/inode.c  (+27 -19)

@@ -940,9 +940,6 @@ ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	loff_t offset = iocb->ki_pos;
 	ssize_t ret;

-	if (WARN_ON_ONCE(IS_DAX(inode)))
-		return -EIO;
-
 	ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
 	if (ret < 0 && iov_iter_rw(iter) == WRITE)
 		ext2_write_failed(mapping, offset + count);

@@ -952,17 +949,16 @@ ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 static int
 ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
 {
-#ifdef CONFIG_FS_DAX
-	if (dax_mapping(mapping)) {
-		return dax_writeback_mapping_range(mapping,
-						   mapping->host->i_sb->s_bdev,
-						   wbc);
-	}
-#endif
-
 	return mpage_writepages(mapping, wbc, ext2_get_block);
 }

+static int
+ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc)
+{
+	return dax_writeback_mapping_range(mapping,
+					   mapping->host->i_sb->s_bdev, wbc);
+}
+
 const struct address_space_operations ext2_aops = {
 	.readpage		= ext2_readpage,
 	.readpages		= ext2_readpages,

@@ -990,6 +986,13 @@ const struct address_space_operations ext2_nobh_aops = {
 	.error_remove_page	= generic_error_remove_page,
 };

+static const struct address_space_operations ext2_dax_aops = {
+	.writepages		= ext2_dax_writepages,
+	.direct_IO		= noop_direct_IO,
+	.set_page_dirty		= noop_set_page_dirty,
+	.invalidatepage		= noop_invalidatepage,
+};
+
 /*
  * Probably it should be a library function... search for first non-zero word
  * or memcmp with zero_page, whatever is better for particular architecture.

@@ -1388,6 +1391,18 @@ void ext2_set_inode_flags(struct inode *inode)
 		inode->i_flags |= S_DAX;
 }

+void ext2_set_file_ops(struct inode *inode)
+{
+	inode->i_op = &ext2_file_inode_operations;
+	inode->i_fop = &ext2_file_operations;
+	if (IS_DAX(inode))
+		inode->i_mapping->a_ops = &ext2_dax_aops;
+	else if (test_opt(inode->i_sb, NOBH))
+		inode->i_mapping->a_ops = &ext2_nobh_aops;
+	else
+		inode->i_mapping->a_ops = &ext2_aops;
+}
+
 struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
 {
 	struct ext2_inode_info *ei;

@@ -1480,14 +1495,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
 		ei->i_data[n] = raw_inode->i_block[n];

 	if (S_ISREG(inode->i_mode)) {
-		inode->i_op = &ext2_file_inode_operations;
-		if (test_opt(inode->i_sb, NOBH)) {
-			inode->i_mapping->a_ops = &ext2_nobh_aops;
-			inode->i_fop = &ext2_file_operations;
-		} else {
-			inode->i_mapping->a_ops = &ext2_aops;
-			inode->i_fop = &ext2_file_operations;
-		}
+		ext2_set_file_ops(inode);
 	} else if (S_ISDIR(inode->i_mode)) {
 		inode->i_op = &ext2_dir_inode_operations;
 		inode->i_fop = &ext2_dir_operations;
fs/ext2/namei.c  (+2 -16)

@@ -107,14 +107,7 @@ static int ext2_create (struct inode * dir, struct dentry * dentry, umode_t mode
 	if (IS_ERR(inode))
 		return PTR_ERR(inode);

-	inode->i_op = &ext2_file_inode_operations;
-	if (test_opt(inode->i_sb, NOBH)) {
-		inode->i_mapping->a_ops = &ext2_nobh_aops;
-		inode->i_fop = &ext2_file_operations;
-	} else {
-		inode->i_mapping->a_ops = &ext2_aops;
-		inode->i_fop = &ext2_file_operations;
-	}
+	ext2_set_file_ops(inode);
 	mark_inode_dirty(inode);
 	return ext2_add_nondir(dentry, inode);
 }

@@ -125,14 +118,7 @@ static int ext2_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
 	if (IS_ERR(inode))
 		return PTR_ERR(inode);

-	inode->i_op = &ext2_file_inode_operations;
-	if (test_opt(inode->i_sb, NOBH)) {
-		inode->i_mapping->a_ops = &ext2_nobh_aops;
-		inode->i_fop = &ext2_file_operations;
-	} else {
-		inode->i_mapping->a_ops = &ext2_aops;
-		inode->i_fop = &ext2_file_operations;
-	}
+	ext2_set_file_ops(inode);
 	mark_inode_dirty(inode);
 	d_tmpfile(dentry, inode);
 	unlock_new_inode(inode);
fs/ext4/inode.c  (+31 -11)

@@ -2725,12 +2725,6 @@ static int ext4_writepages(struct address_space *mapping,
 	percpu_down_read(&sbi->s_journal_flag_rwsem);
 	trace_ext4_writepages(inode, wbc);

-	if (dax_mapping(mapping)) {
-		ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
-						  wbc);
-		goto out_writepages;
-	}
-
 	/*
 	 * No pages to write? This is mainly a kludge to avoid starting
 	 * a transaction for special inodes like journal inode on last iput()

@@ -2955,6 +2949,27 @@ static int ext4_writepages(struct address_space *mapping,
 	return ret;
 }

+static int ext4_dax_writepages(struct address_space *mapping,
+			       struct writeback_control *wbc)
+{
+	int ret;
+	long nr_to_write = wbc->nr_to_write;
+	struct inode *inode = mapping->host;
+	struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
+
+	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
+		return -EIO;
+
+	percpu_down_read(&sbi->s_journal_flag_rwsem);
+	trace_ext4_writepages(inode, wbc);
+
+	ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
+	trace_ext4_writepages_result(inode, wbc, ret,
+				     nr_to_write - wbc->nr_to_write);
+	percpu_up_read(&sbi->s_journal_flag_rwsem);
+	return ret;
+}
+
 static int ext4_nonda_switch(struct super_block *sb)
 {
 	s64 free_clusters, dirty_clusters;

@@ -3857,10 +3872,6 @@ static ssize_t ext4_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
 	if (ext4_has_inline_data(inode))
 		return 0;

-	/* DAX uses iomap path now */
-	if (WARN_ON_ONCE(IS_DAX(inode)))
-		return 0;
-
 	trace_ext4_direct_IO_enter(inode, offset, count, iov_iter_rw(iter));
 	if (iov_iter_rw(iter) == READ)
 		ret = ext4_direct_IO_read(iocb, iter);

@@ -3946,6 +3957,13 @@ static const struct address_space_operations ext4_da_aops = {
 	.error_remove_page	= generic_error_remove_page,
 };

+static const struct address_space_operations ext4_dax_aops = {
+	.writepages		= ext4_dax_writepages,
+	.direct_IO		= noop_direct_IO,
+	.set_page_dirty		= noop_set_page_dirty,
+	.invalidatepage		= noop_invalidatepage,
+};
+
 void ext4_set_aops(struct inode *inode)
 {
 	switch (ext4_inode_journal_mode(inode)) {

@@ -3958,7 +3976,9 @@ void ext4_set_aops(struct inode *inode)
 	default:
 		BUG();
 	}
-	if (test_opt(inode->i_sb, DELALLOC))
+	if (IS_DAX(inode))
+		inode->i_mapping->a_ops = &ext4_dax_aops;
+	else if (test_opt(inode->i_sb, DELALLOC))
 		inode->i_mapping->a_ops = &ext4_da_aops;
 	else
 		inode->i_mapping->a_ops = &ext4_aops;
fs/libfs.c  (+39 -0)

@@ -1060,6 +1060,45 @@ int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 }
 EXPORT_SYMBOL(noop_fsync);

+int noop_set_page_dirty(struct page *page)
+{
+	/*
+	 * Unlike __set_page_dirty_no_writeback that handles dirty page
+	 * tracking in the page object, dax does all dirty tracking in
+	 * the inode address_space in response to mkwrite faults. In the
+	 * dax case we only need to worry about potentially dirty CPU
+	 * caches, not dirty page cache pages to write back.
+	 *
+	 * This callback is defined to prevent fallback to
+	 * __set_page_dirty_buffers() in set_page_dirty().
+	 */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(noop_set_page_dirty);
+
+void noop_invalidatepage(struct page *page, unsigned int offset,
+		unsigned int length)
+{
+	/*
+	 * There is no page cache to invalidate in the dax case, however
+	 * we need this callback defined to prevent falling back to
+	 * block_invalidatepage() in do_invalidatepage().
+	 */
+}
+EXPORT_SYMBOL_GPL(noop_invalidatepage);
+
+ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+{
+	/*
+	 * iomap based filesystems support direct I/O without need for
+	 * this callback. However, it still needs to be set in
+	 * inode->a_ops so that open/fcntl know that direct I/O is
+	 * generally supported.
+	 */
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(noop_direct_IO);
+
 /* Because kfree isn't assignment-compatible with void(void*) ;-/ */
 void kfree_link(void *p)
 {
fs/xfs/xfs_aops.c  (+18 -16)

@@ -1194,16 +1194,22 @@ xfs_vm_writepages(
 	int			ret;

 	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
-	if (dax_mapping(mapping))
-		return dax_writeback_mapping_range(mapping,
-				xfs_find_bdev_for_inode(mapping->host), wbc);
-
 	ret = write_cache_pages(mapping, wbc, xfs_do_writepage, &wpc);
 	if (wpc.ioend)
 		ret = xfs_submit_ioend(wbc, wpc.ioend, ret);
 	return ret;
 }

+STATIC int
+xfs_dax_writepages(
+	struct address_space	*mapping,
+	struct writeback_control *wbc)
+{
+	xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
+	return dax_writeback_mapping_range(mapping,
+			xfs_find_bdev_for_inode(mapping->host), wbc);
+}
+
 /*
  * Called to move a page into cleanable state - and from there
  * to be released. The page should already be clean. We always

@@ -1367,17 +1373,6 @@ xfs_get_blocks(
 	return error;
 }

-STATIC ssize_t
-xfs_vm_direct_IO(
-	struct kiocb		*iocb,
-	struct iov_iter		*iter)
-{
-	/*
-	 * We just need the method present so that open/fcntl allow direct I/O.
-	 */
-	return -EINVAL;
-}
-
 STATIC sector_t
 xfs_vm_bmap(
 	struct address_space	*mapping,

@@ -1500,8 +1495,15 @@ const struct address_space_operations xfs_address_space_operations = {
 	.releasepage		= xfs_vm_releasepage,
 	.invalidatepage		= xfs_vm_invalidatepage,
 	.bmap			= xfs_vm_bmap,
-	.direct_IO		= xfs_vm_direct_IO,
+	.direct_IO		= noop_direct_IO,
 	.migratepage		= buffer_migrate_page,
 	.is_partially_uptodate  = block_is_partially_uptodate,
 	.error_remove_page	= generic_error_remove_page,
 };
+
+const struct address_space_operations xfs_dax_aops = {
+	.writepages		= xfs_dax_writepages,
+	.direct_IO		= noop_direct_IO,
+	.set_page_dirty		= noop_set_page_dirty,
+	.invalidatepage		= noop_invalidatepage,
+};
fs/xfs/xfs_aops.h  (+1 -0)

@@ -54,6 +54,7 @@ struct xfs_ioend {
 };

 extern const struct address_space_operations xfs_address_space_operations;
+extern const struct address_space_operations xfs_dax_aops;

 int	xfs_setfilesize(struct xfs_inode *ip, xfs_off_t offset, size_t size);
fs/xfs/xfs_iops.c  (+4 -1)

@@ -1272,7 +1272,10 @@ xfs_setup_iops(
 	case S_IFREG:
 		inode->i_op = &xfs_inode_operations;
 		inode->i_fop = &xfs_file_operations;
-		inode->i_mapping->a_ops = &xfs_address_space_operations;
+		if (IS_DAX(inode))
+			inode->i_mapping->a_ops = &xfs_dax_aops;
+		else
+			inode->i_mapping->a_ops = &xfs_address_space_operations;
 		break;
 	case S_IFDIR:
 		if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
include/linux/dax.h  (+33 -9)

@@ -26,18 +26,42 @@ extern struct attribute_group dax_attribute_group;
 #if IS_ENABLED(CONFIG_DAX)
 struct dax_device *dax_get_by_host(const char *host);
+struct dax_device *alloc_dax(void *private, const char *host,
+		const struct dax_operations *ops);
 void put_dax(struct dax_device *dax_dev);
+void kill_dax(struct dax_device *dax_dev);
+void dax_write_cache(struct dax_device *dax_dev, bool wc);
+bool dax_write_cache_enabled(struct dax_device *dax_dev);
 #else
 static inline struct dax_device *dax_get_by_host(const char *host)
 {
 	return NULL;
 }
+static inline struct dax_device *alloc_dax(void *private, const char *host,
+		const struct dax_operations *ops)
+{
+	/*
+	 * Callers should check IS_ENABLED(CONFIG_DAX) to know if this
+	 * NULL is an error or expected.
+	 */
+	return NULL;
+}
 static inline void put_dax(struct dax_device *dax_dev)
 {
 }
+static inline void kill_dax(struct dax_device *dax_dev)
+{
+}
+static inline void dax_write_cache(struct dax_device *dax_dev, bool wc)
+{
+}
+static inline bool dax_write_cache_enabled(struct dax_device *dax_dev)
+{
+	return false;
+}
 #endif

+struct writeback_control;
 int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
 #if IS_ENABLED(CONFIG_FS_DAX)
 int __bdev_dax_supported(struct super_block *sb, int blocksize);

@@ -57,6 +81,8 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
 }

 struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
+int dax_writeback_mapping_range(struct address_space *mapping,
+		struct block_device *bdev, struct writeback_control *wbc);
 #else
 static inline int bdev_dax_supported(struct super_block *sb, int blocksize)
 {

@@ -76,22 +102,23 @@ static inline struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
 {
 	return NULL;
 }
+
+static inline int dax_writeback_mapping_range(struct address_space *mapping,
+		struct block_device *bdev, struct writeback_control *wbc)
+{
+	return -EOPNOTSUPP;
+}
 #endif

 int dax_read_lock(void);
 void dax_read_unlock(int id);
-struct dax_device *alloc_dax(void *private, const char *host,
-		const struct dax_operations *ops);
 bool dax_alive(struct dax_device *dax_dev);
-void kill_dax(struct dax_device *dax_dev);
 void *dax_get_private(struct dax_device *dax_dev);
 long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
 		void **kaddr, pfn_t *pfn);
 size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
 		size_t bytes, struct iov_iter *i);
 void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
-void dax_write_cache(struct dax_device *dax_dev, bool wc);
-bool dax_write_cache_enabled(struct dax_device *dax_dev);

 ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
 		const struct iomap_ops *ops);

@@ -121,7 +148,4 @@ static inline bool dax_mapping(struct address_space *mapping)
 	return mapping->host && IS_DAX(mapping->host);
 }

-struct writeback_control;
-int dax_writeback_mapping_range(struct address_space *mapping,
-		struct block_device *bdev, struct writeback_control *wbc);
 #endif
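As a side note on the dax_device API reshuffled above, the fragment below is a minimal, hypothetical sketch of the dax_direct_access() calling convention that fs/dax.c relies on in this series. It is illustrative only: peek_dax_page(), my_bdev and my_dax_dev are assumed placeholder names, not symbols from this commit.

/*
 * Hypothetical sketch: resolve a block-device sector to a kernel
 * address and pfn on a dax device. Placeholder names throughout.
 */
#include <linux/dax.h>
#include <linux/pfn_t.h>

static int peek_dax_page(struct block_device *my_bdev,
			 struct dax_device *my_dax_dev, sector_t sector)
{
	pgoff_t pgoff;
	void *kaddr;
	pfn_t pfn;
	long nr;
	int id, rc;

	/* Translate the device sector into a page offset on the dax device. */
	rc = bdev_dax_pgoff(my_bdev, sector, PAGE_SIZE, &pgoff);
	if (rc)
		return rc;

	/* dax_direct_access() must be called under the dax read lock. */
	id = dax_read_lock();
	nr = dax_direct_access(my_dax_dev, pgoff, 1, &kaddr, &pfn);
	dax_read_unlock(id);
	if (nr < 0)
		return nr;

	/* kaddr now points at the backing memory; pfn describes the page. */
	pr_debug("dax mapping at %p (pfn %#lx)\n", kaddr, pfn_t_to_pfn(pfn));
	return 0;
}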
include/linux/fs.h  (+4 -0)

@@ -3130,6 +3130,10 @@ extern int simple_rmdir(struct inode *, struct dentry *);
 extern int simple_rename(struct inode *, struct dentry *,
 			 struct inode *, struct dentry *, unsigned int);
 extern int noop_fsync(struct file *, loff_t, loff_t, int);
+extern int noop_set_page_dirty(struct page *page);
+extern void noop_invalidatepage(struct page *page, unsigned int offset,
+		unsigned int length);
+extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter);
 extern int simple_empty(struct dentry *);
 extern int simple_readpage(struct file *file, struct page *page);
 extern int simple_write_begin(struct file *file, struct address_space *mapping,
include/linux/sched/deadline.h  (+0 -6)

 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_SCHED_DEADLINE_H
-#define _LINUX_SCHED_DEADLINE_H
-
-#include <linux/sched.h>

 /*
  * SCHED_DEADLINE tasks has negative priorities, reflecting

@@ -28,5 +24,3 @@ static inline bool dl_time_before(u64 a, u64 b)
 {
 	return (s64)(a - b) < 0;
 }
-
-#endif /* _LINUX_SCHED_DEADLINE_H */
include/linux/sched/isolation.h  (+1 -0)

@@ -12,6 +12,7 @@ enum hk_flags {
 	HK_FLAG_SCHED		= (1 << 3),
 	HK_FLAG_TICK		= (1 << 4),
 	HK_FLAG_DOMAIN		= (1 << 5),
+	HK_FLAG_WQ		= (1 << 6),
 };

 #ifdef CONFIG_CPU_ISOLATION
include/linux/sched/nohz.h  (+0 -4)

@@ -37,8 +37,4 @@ extern void wake_up_nohz_cpu(int cpu);
 static inline void wake_up_nohz_cpu(int cpu) { }
 #endif

-#ifdef CONFIG_NO_HZ_FULL
-extern u64 scheduler_tick_max_deferment(void);
-#endif
-
 #endif /* _LINUX_SCHED_NOHZ_H */
include/linux/tick.h  (+3 -1)

@@ -113,7 +113,8 @@ enum tick_dep_bits {
 #ifdef CONFIG_NO_HZ_COMMON
 extern bool tick_nohz_enabled;
-extern int tick_nohz_tick_stopped(void);
+extern bool tick_nohz_tick_stopped(void);
+extern bool tick_nohz_tick_stopped_cpu(int cpu);
 extern void tick_nohz_idle_enter(void);
 extern void tick_nohz_idle_exit(void);
 extern void tick_nohz_irq_exit(void);

@@ -125,6 +126,7 @@ extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
 #else /* !CONFIG_NO_HZ_COMMON */
 #define tick_nohz_enabled (0)
 static inline int tick_nohz_tick_stopped(void) { return 0; }
+static inline int tick_nohz_tick_stopped_cpu(int cpu) { return 0; }
 static inline void tick_nohz_idle_enter(void) { }
 static inline void tick_nohz_idle_exit(void) { }
include/linux/wait_bit.h  (+70 -0)

@@ -262,4 +262,74 @@ int wait_on_atomic_t(atomic_t *val, wait_atomic_t_action_f action, unsigned mode
 	return out_of_line_wait_on_atomic_t(val, action, mode);
 }

+extern void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags);
+extern void wake_up_var(void *var);
+extern wait_queue_head_t *__var_waitqueue(void *p);
+
+#define ___wait_var_event(var, condition, state, exclusive, ret, cmd)	\
+({									\
+	__label__ __out;						\
+	struct wait_queue_head *__wq_head = __var_waitqueue(var);	\
+	struct wait_bit_queue_entry __wbq_entry;			\
+	long __ret = ret; /* explicit shadow */				\
+									\
+	init_wait_var_entry(&__wbq_entry, var,				\
+			    exclusive ? WQ_FLAG_EXCLUSIVE : 0);		\
+	for (;;) {							\
+		long __int = prepare_to_wait_event(__wq_head,		\
+						   &__wbq_entry.wq_entry, \
+						   state);		\
+		if (condition)						\
+			break;						\
+									\
+		if (___wait_is_interruptible(state) && __int) {		\
+			__ret = __int;					\
+			goto __out;					\
+		}							\
+									\
+		cmd;							\
+	}								\
+	finish_wait(__wq_head, &__wbq_entry.wq_entry);			\
+__out:	__ret;								\
+})
+
+#define __wait_var_event(var, condition)				\
+	___wait_var_event(var, condition, TASK_UNINTERRUPTIBLE, 0, 0,	\
+			  schedule())
+
+#define wait_var_event(var, condition)					\
+do {									\
+	might_sleep();							\
+	if (condition)							\
+		break;							\
+	__wait_var_event(var, condition);				\
+} while (0)
+
+#define __wait_var_event_killable(var, condition)			\
+	___wait_var_event(var, condition, TASK_KILLABLE, 0, 0,		\
+			  schedule())
+
+#define wait_var_event_killable(var, condition)				\
+({									\
+	int __ret = 0;							\
+	might_sleep();							\
+	if (!(condition))						\
+		__ret = __wait_var_event_killable(var, condition);	\
+	__ret;								\
+})
+
+#define __wait_var_event_timeout(var, condition, timeout)		\
+	___wait_var_event(var, ___wait_cond_timeout(condition),		\
+			  TASK_UNINTERRUPTIBLE, 0, timeout,		\
+			  __ret = schedule_timeout(__ret))
+
+#define wait_var_event_timeout(var, condition, timeout)			\
+({									\
+	long __ret = timeout;						\
+	might_sleep();							\
+	if (!___wait_cond_timeout(condition))				\
+		__ret = __wait_var_event_timeout(var, condition, timeout); \
+	__ret;								\
+})
+
 #endif /* _LINUX_WAIT_BIT_H */
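To make the intent of the new wait_var_event()/wake_up_var() helpers concrete, here is a minimal usage sketch of the API declared above. The 'struct widget' type and its 'users' counter are hypothetical placeholders, not code from this commit; only the wait_bit.h calls themselves come from the diff.

/*
 * Hypothetical sketch: sleep until a reference counter embedded in an
 * arbitrary object drops to zero, keyed on the variable's address.
 */
#include <linux/wait_bit.h>
#include <linux/atomic.h>

struct widget {
	atomic_t users;
};

static void widget_put(struct widget *w)
{
	/* Wake anyone sleeping on &w->users once the last user is gone. */
	if (atomic_dec_and_test(&w->users))
		wake_up_var(&w->users);
}

static void widget_wait_for_idle(struct widget *w)
{
	/* Block until the condition on the watched variable becomes true. */
	wait_var_event(&w->users, atomic_read(&w->users) == 0);
}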
kernel/sched/Makefile  (+3 -2)

@@ -17,8 +17,9 @@ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif

 obj-y += core.o loadavg.o clock.o cputime.o
-obj-y += idle_task.o fair.o rt.o deadline.o
-obj-y += wait.o wait_bit.o swait.o completion.o idle.o
+obj-y += idle.o fair.o rt.o deadline.o
+obj-y += wait.o wait_bit.o swait.o completion.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
kernel/sched/autogroup.c  (+9 -12)

 // SPDX-License-Identifier: GPL-2.0
-#include <linux/proc_fs.h>
-#include <linux/seq_file.h>
-#include <linux/utsname.h>
-#include <linux/security.h>
-#include <linux/export.h>
-
+/*
+ * Auto-group scheduling implementation:
+ */
 #include "sched.h"

 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;

@@ -168,18 +165,19 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 	autogroup_kref_put(prev);
 }

-/* Allocates GFP_KERNEL, cannot be called under any spinlock */
+/* Allocates GFP_KERNEL, cannot be called under any spinlock: */
 void sched_autogroup_create_attach(struct task_struct *p)
 {
 	struct autogroup *ag = autogroup_create();

 	autogroup_move_group(p, ag);
-	/* drop extra reference added by autogroup_create() */
+
+	/* Drop extra reference added by autogroup_create(): */
 	autogroup_kref_put(ag);
 }
 EXPORT_SYMBOL(sched_autogroup_create_attach);

-/* Cannot be called under siglock.  Currently has no users */
+/* Cannot be called under siglock. Currently has no users: */
 void sched_autogroup_detach(struct task_struct *p)
 {
 	autogroup_move_group(p, &autogroup_default);

@@ -202,7 +200,6 @@ static int __init setup_autogroup(char *str)
 	return 1;
 }
 __setup("noautogroup", setup_autogroup);

 #ifdef CONFIG_PROC_FS

@@ -224,7 +221,7 @@ int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
 	if (nice < 0 && !can_nice(current, nice))
 		return -EPERM;

-	/* this is a heavy operation taking global locks.. */
+	/* This is a heavy operation, taking global locks.. */
 	if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
 		return -EAGAIN;

@@ -267,4 +264,4 @@ int autogroup_path(struct task_group *tg, char *buf, int buflen)
 	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
 }
 #endif /* CONFIG_SCHED_DEBUG */
 #endif
kernel/sched/autogroup.h  (+3 -9)

 /* SPDX-License-Identifier: GPL-2.0 */
 #ifdef CONFIG_SCHED_AUTOGROUP

-#include <linux/kref.h>
-#include <linux/rwsem.h>
-#include <linux/sched/autogroup.h>
-
 struct autogroup {
 	/*
-	 * reference doesn't mean how many thread attach to this
-	 * autogroup now. It just stands for the number of task
-	 * could use this autogroup.
+	 * Reference doesn't mean how many threads attach to this
+	 * autogroup now. It just stands for the number of tasks
+	 * which could use this autogroup.
 	 */
 	struct kref		kref;
 	struct task_group	*tg;

@@ -56,11 +52,9 @@ autogroup_task_group(struct task_struct *p, struct task_group *tg)
 	return tg;
 }

-#ifdef CONFIG_SCHED_DEBUG
 static inline int autogroup_path(struct task_group *tg, char *buf, int buflen)
 {
 	return 0;
 }
-#endif

 #endif /* CONFIG_SCHED_AUTOGROUP */
kernel/sched/clock.c  (+12 -24)

 /*
- * sched_clock for unstable cpu clocks
+ * sched_clock() for unstable CPU clocks
  *
  *  Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
  *

@@ -11,7 +11,7 @@
  *   Guillaume Chazarain <guichaz@gmail.com>
  *
  *
- * What:
+ * What this file implements:
  *
  * cpu_clock(i) provides a fast (execution time) high resolution
  * clock with bounded drift between CPUs. The value of cpu_clock(i)

@@ -26,11 +26,11 @@
  * at 0 on boot (but people really shouldn't rely on that).
  *
  * cpu_clock(i)       -- can be used from any context, including NMI.
- * local_clock()      -- is cpu_clock() on the current cpu.
+ * local_clock()      -- is cpu_clock() on the current CPU.
  *
  * sched_clock_cpu(i)
  *
- * How:
+ * How it is implemented:
  *
  * The implementation either uses sched_clock() when
  * !CONFIG_HAVE_UNSTABLE_SCHED_CLOCK, which means in that case the

@@ -52,19 +52,7 @@
  * that is otherwise invisible (TSC gets stopped).
  *
  */
-#include <linux/spinlock.h>
-#include <linux/hardirq.h>
-#include <linux/export.h>
-#include <linux/percpu.h>
-#include <linux/ktime.h>
-#include <linux/sched.h>
-#include <linux/nmi.h>
-#include <linux/sched/clock.h>
-#include <linux/static_key.h>
-#include <linux/workqueue.h>
-#include <linux/compiler.h>
-#include <linux/tick.h>
-#include <linux/init.h>
+#include "sched.h"

 /*
  * Scheduler clock - returns current time in nanosec units.

@@ -302,21 +290,21 @@ static u64 sched_clock_remote(struct sched_clock_data *scd)
 	 * cmpxchg64 below only protects one readout.
	 *
 	 * We must reread via sched_clock_local() in the retry case on
-	 * 32bit as an NMI could use sched_clock_local() via the
+	 * 32-bit kernels as an NMI could use sched_clock_local() via the
 	 * tracer and hit between the readout of
-	 * the low 32bit and the high 32bit portion.
+	 * the low 32-bit and the high 32-bit portion.
 	 */
 	this_clock = sched_clock_local(my_scd);
 	/*
-	 * We must enforce atomic readout on 32bit, otherwise the
-	 * update on the remote cpu can hit inbetween the readout of
-	 * the low 32bit and the high 32bit portion.
+	 * We must enforce atomic readout on 32-bit, otherwise the
+	 * update on the remote CPU can hit inbetween the readout of
+	 * the low 32-bit and the high 32-bit portion.
 	 */
 	remote_clock = cmpxchg64(&scd->clock, 0, 0);
 #else
 	/*
-	 * On 64bit the read of [my]scd->clock is atomic versus the
-	 * update, so we can avoid the above 32bit dance.
+	 * On 64-bit kernels the read of [my]scd->clock is atomic versus the
+	 * update, so we can avoid the above 32-bit dance.
 	 */
 	sched_clock_local(my_scd);
 again:
kernel/sched/completion.c  (+1 -4)

@@ -11,10 +11,7 @@
  * typically be used for exclusion which gives rise to priority inversion.
  * Waiting for completion is a typically sync point, but not an exclusion point.
  */
-#include <linux/sched/signal.h>
-#include <linux/sched/debug.h>
-#include <linux/completion.h>
+#include "sched.h"

 /**
  * complete: - signals a single thread waiting on this completion
kernel/sched/core.c  (+108 -57)

@@ -5,37 +5,11 @@
 *
 *  Copyright (C) 1991-2002  Linus Torvalds
 */
-#include <linux/sched.h>
-#include <linux/sched/clock.h>
-#include <uapi/linux/sched/types.h>
-#include <linux/sched/loadavg.h>
-#include <linux/sched/hotplug.h>
-#include <linux/wait_bit.h>
-#include <linux/cpuset.h>
-#include <linux/delayacct.h>
-#include <linux/init_task.h>
-#include <linux/context_tracking.h>
-#include <linux/rcupdate_wait.h>
-#include <linux/compat.h>
-#include <linux/blkdev.h>
-#include <linux/kprobes.h>
-#include <linux/mmu_context.h>
-#include <linux/module.h>
-#include <linux/nmi.h>
-#include <linux/prefetch.h>
-#include <linux/profile.h>
-#include <linux/security.h>
-#include <linux/syscalls.h>
-#include <linux/sched/isolation.h>
+#include "sched.h"

 #include <asm/switch_to.h>
 #include <asm/tlb.h>
-#ifdef CONFIG_PARAVIRT
-#include <asm/paravirt.h>
-#endif

-#include "sched.h"
 #include "../workqueue_internal.h"
 #include "../smpboot.h"

@@ -135,7 +109,7 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
	 *					[L] ->on_rq
	 *	RELEASE (rq->lock)
	 *
-	 * If we observe the old cpu in task_rq_lock, the acquire of
+	 * If we observe the old CPU in task_rq_lock, the acquire of
	 * the old rq->lock will fully serialize against the stores.
	 *
	 * If we observe the new CPU in task_rq_lock, the acquire will

@@ -333,7 +307,7 @@ void hrtick_start(struct rq *rq, u64 delay)
 }
 #endif /* CONFIG_SMP */

-static void init_rq_hrtick(struct rq *rq)
+static void hrtick_rq_init(struct rq *rq)
 {
 #ifdef CONFIG_SMP
	rq->hrtick_csd_pending = 0;

@@ -351,7 +325,7 @@ static inline void hrtick_clear(struct rq *rq)
 {
 }

-static inline void init_rq_hrtick(struct rq *rq)
+static inline void hrtick_rq_init(struct rq *rq)
 {
 }
 #endif	/* CONFIG_SCHED_HRTICK */

@@ -1457,7 +1431,7 @@ EXPORT_SYMBOL_GPL(kick_process);
 *
 *  - cpu_active must be a subset of cpu_online
 *
- *  - on cpu-up we allow per-cpu kthreads on the online && !active cpu,
+ *  - on CPU-up we allow per-CPU kthreads on the online && !active CPU,
 *    see __set_cpus_allowed_ptr(). At this point the newly online
 *    CPU isn't yet part of the sched domains, and balancing will not
 *    see it.

@@ -2629,6 +2603,18 @@ static inline void finish_lock_switch(struct rq *rq)
	raw_spin_unlock_irq(&rq->lock);
 }

+/*
+ * NOP if the arch has not defined these:
+ */
+
+#ifndef prepare_arch_switch
+# define prepare_arch_switch(next)	do { } while (0)
+#endif
+
+#ifndef finish_arch_post_lock_switch
+# define finish_arch_post_lock_switch()	do { } while (0)
+#endif
+
 /**
 * prepare_task_switch - prepare to switch tasks
 * @rq: the runqueue preparing to switch

@@ -3037,7 +3023,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
	/*
-	 * 64-bit doesn't need locks to atomically read a 64bit value.
+	 * 64-bit doesn't need locks to atomically read a 64-bit value.
	 * So we have a optimization chance when the task's delta_exec is 0.
	 * Reading ->on_cpu is racy, but this is ok.
	 *

@@ -3096,35 +3082,99 @@ void scheduler_tick(void)
	rq->idle_balance = idle_cpu(cpu);
	trigger_load_balance(rq);
 #endif
-	rq_last_tick_reset(rq);
 }

 #ifdef CONFIG_NO_HZ_FULL
-/**
- * scheduler_tick_max_deferment
- *
- * Keep at least one tick per second when a single
- * active task is running because the scheduler doesn't
- * yet completely support full dynticks environment.
- *
- * This makes sure that uptime, CFS vruntime, load
- * balancing, etc... continue to move forward, even
- * with a very low granularity.
- *
- * Return: Maximum deferment in nanoseconds.
- */
-u64 scheduler_tick_max_deferment(void)
-{
-	struct rq *rq = this_rq();
-	unsigned long next, now = READ_ONCE(jiffies);
-
-	next = rq->last_sched_tick + HZ;
-
-	if (time_before_eq(next, now))
-		return 0;
-
-	return jiffies_to_nsecs(next - now);
-}
+
+struct tick_work {
+	int			cpu;
+	struct delayed_work	work;
+};
+
+static struct tick_work __percpu *tick_work_cpu;
+
+static void sched_tick_remote(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct tick_work *twork = container_of(dwork, struct tick_work, work);
+	int cpu = twork->cpu;
+	struct rq *rq = cpu_rq(cpu);
+	struct rq_flags rf;
+
+	/*
+	 * Handle the tick only if it appears the remote CPU is running in full
+	 * dynticks mode. The check is racy by nature, but missing a tick or
+	 * having one too much is no big deal because the scheduler tick updates
+	 * statistics and checks timeslices in a time-independent way, regardless
+	 * of when exactly it is running.
+	 */
+	if (!idle_cpu(cpu) && tick_nohz_tick_stopped_cpu(cpu)) {
+		struct task_struct *curr;
+		u64 delta;
+
+		rq_lock_irq(rq, &rf);
+		update_rq_clock(rq);
+		curr = rq->curr;
+		delta = rq_clock_task(rq) - curr->se.exec_start;
+
+		/*
+		 * Make sure the next tick runs within a reasonable
+		 * amount of time.
+		 */
+		WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+		curr->sched_class->task_tick(rq, curr, 0);
+		rq_unlock_irq(rq, &rf);
+	}
+
+	/*
+	 * Run the remote tick once per second (1Hz). This arbitrary
+	 * frequency is large enough to avoid overload but short enough
+	 * to keep scheduler internal stats reasonably up to date.
+	 */
+	queue_delayed_work(system_unbound_wq, dwork, HZ);
+}
+
+static void sched_tick_start(int cpu)
+{
+	struct tick_work *twork;
+
+	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
+		return;
+
+	WARN_ON_ONCE(!tick_work_cpu);
+
+	twork = per_cpu_ptr(tick_work_cpu, cpu);
+	twork->cpu = cpu;
+	INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
+	queue_delayed_work(system_unbound_wq, &twork->work, HZ);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static void sched_tick_stop(int cpu)
+{
+	struct tick_work *twork;
+
+	if (housekeeping_cpu(cpu, HK_FLAG_TICK))
+		return;
+
+	WARN_ON_ONCE(!tick_work_cpu);
+
+	twork = per_cpu_ptr(tick_work_cpu, cpu);
+	cancel_delayed_work_sync(&twork->work);
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+int __init sched_tick_offload_init(void)
+{
+	tick_work_cpu = alloc_percpu(struct tick_work);
+	BUG_ON(!tick_work_cpu);
+
+	return 0;
+}
+
+#else /* !CONFIG_NO_HZ_FULL */
+static inline void sched_tick_start(int cpu) { }
+static inline void sched_tick_stop(int cpu) { }
 #endif

 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \

@@ -5786,6 +5836,7 @@ int sched_cpu_starting(unsigned int cpu)
 {
	set_cpu_rq_start_time(cpu);
	sched_rq_cpu_starting(cpu);
+	sched_tick_start(cpu);
	return 0;
 }

@@ -5797,6 +5848,7 @@ int sched_cpu_dying(unsigned int cpu)
	/* Handle pending wakeups and then migrate everything off */
	sched_ttwu_pending();
+	sched_tick_stop(cpu);

	rq_lock_irqsave(rq, &rf);
	if (rq->rd) {

@@ -6024,11 +6076,8 @@ void __init sched_init(void)
		rq->last_load_update_tick = jiffies;
		rq->nohz_flags = 0;
 #endif
-#ifdef CONFIG_NO_HZ_FULL
-		rq->last_sched_tick = 0;
-#endif
 #endif /* CONFIG_SMP */
-		init_rq_hrtick(rq);
+		hrtick_rq_init(rq);
		atomic_set(&rq->nr_iowait, 0);
	}

@@ -7027,3 +7076,5 @@ const u32 sched_prio_to_wmult[40] = {
 /*  10 */  39045157,  49367440,  61356676,  76695844,  95443717,
 /*  15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
 };
+
+#undef CREATE_TRACE_POINTS
kernel/sched/cpuacct.c  (+11 -22)

 // SPDX-License-Identifier: GPL-2.0
-#include <linux/cgroup.h>
-#include <linux/slab.h>
-#include <linux/percpu.h>
-#include <linux/spinlock.h>
-#include <linux/cpumask.h>
-#include <linux/seq_file.h>
-#include <linux/rcupdate.h>
-#include <linux/kernel_stat.h>
-#include <linux/err.h>
-
-#include "sched.h"
-
 /*
  * CPU accounting code for task groups.
  *
  * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
  * (balbir@in.ibm.com).
  */
+#include "sched.h"

-/* Time spent by the tasks of the cpu accounting group executing in ... */
+/* Time spent by the tasks of the CPU accounting group executing in ... */
 enum cpuacct_stat_index {
 	CPUACCT_STAT_USER,	/* ... user mode */
 	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

@@ -35,12 +24,12 @@ struct cpuacct_usage {
 	u64	usages[CPUACCT_STAT_NSTATS];
 };

-/* track cpu usage of a group of tasks and its child groups */
+/* track CPU usage of a group of tasks and its child groups */
 struct cpuacct {
-	struct cgroup_subsys_state css;
-	/* cpuusage holds pointer to a u64-type object on every cpu */
-	struct cpuacct_usage __percpu *cpuusage;
-	struct kernel_cpustat __percpu *cpustat;
+	struct cgroup_subsys_state	css;
+	/* cpuusage holds pointer to a u64-type object on every CPU */
+	struct cpuacct_usage __percpu	*cpuusage;
+	struct kernel_cpustat __percpu	*cpustat;
 };

 static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)

@@ -48,7 +37,7 @@ static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
 	return css ? container_of(css, struct cpuacct, css) : NULL;
 }

-/* return cpu accounting group to which this task belongs */
+/* Return CPU accounting group to which this task belongs */
 static inline struct cpuacct *task_ca(struct task_struct *tsk)
 {
 	return css_ca(task_css(tsk, cpuacct_cgrp_id));

@@ -65,7 +54,7 @@ static struct cpuacct root_cpuacct = {
 	.cpuusage	= &root_cpuacct_cpuusage,
 };

-/* create a new cpu accounting group */
+/* Create a new CPU accounting group */
 static struct cgroup_subsys_state *
 cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
 {

@@ -96,7 +85,7 @@ cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
 	return ERR_PTR(-ENOMEM);
 }

-/* destroy an existing cpu accounting group */
+/* Destroy an existing CPU accounting group */
 static void cpuacct_css_free(struct cgroup_subsys_state *css)
 {
 	struct cpuacct *ca = css_ca(css);

@@ -162,7 +151,7 @@ static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
 #endif
 }

-/* return total cpu usage (in nanoseconds) of a group */
+/* Return total CPU usage (in nanoseconds) of a group */
 static u64 __cpuusage_read(struct cgroup_subsys_state *css,
 			   enum cpuacct_stat_index index)
 {
kernel/sched/cpudeadline.c  (+11 -12)

@@ -10,11 +10,7 @@
  * as published by the Free Software Foundation; version 2
  * of the License.
  */
-#include <linux/gfp.h>
-#include <linux/kernel.h>
-#include <linux/slab.h>
-#include "cpudeadline.h"
+#include "sched.h"

 static inline int parent(int i)
 {

@@ -42,8 +38,9 @@ static void cpudl_heapify_down(struct cpudl *cp, int idx)
 		return;

 	/* adapted from lib/prio_heap.c */
-	while(1) {
+	while (1) {
 		u64 largest_dl;
+
 		l = left_child(idx);
 		r = right_child(idx);
 		largest = idx;

@@ -131,6 +128,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 		return 1;
 	} else {
 		int best_cpu = cpudl_maximum(cp);
+
 		WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));

 		if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) &&

@@ -145,9 +143,9 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
 }

 /*
- * cpudl_clear - remove a cpu from the cpudl max-heap
+ * cpudl_clear - remove a CPU from the cpudl max-heap
  * @cp: the cpudl max-heap context
- * @cpu: the target cpu
+ * @cpu: the target CPU
  *
  * Notes: assumes cpu_rq(cpu)->lock is locked
  *

@@ -186,8 +184,8 @@ void cpudl_clear(struct cpudl *cp, int cpu)
 /*
  * cpudl_set - update the cpudl max-heap
  * @cp: the cpudl max-heap context
- * @cpu: the target cpu
- * @dl: the new earliest deadline for this cpu
+ * @cpu: the target CPU
+ * @dl: the new earliest deadline for this CPU
  *
  * Notes: assumes cpu_rq(cpu)->lock is locked
  *

@@ -205,6 +203,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
 	old_idx = cp->elements[cpu].idx;
 	if (old_idx == IDX_INVALID) {
 		int new_idx = cp->size++;
+
 		cp->elements[new_idx].dl = dl;
 		cp->elements[new_idx].cpu = cpu;
 		cp->elements[cpu].idx = new_idx;

@@ -221,7 +220,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl)
 /*
  * cpudl_set_freecpu - Set the cpudl.free_cpus
  * @cp: the cpudl max-heap context
- * @cpu: rd attached cpu
+ * @cpu: rd attached CPU
  */
 void cpudl_set_freecpu(struct cpudl *cp, int cpu)
 {

@@ -231,7 +230,7 @@ void cpudl_set_freecpu(struct cpudl *cp, int cpu)
 /*
  * cpudl_clear_freecpu - Clear the cpudl.free_cpus
  * @cp: the cpudl max-heap context
- * @cpu: rd attached cpu
+ * @cpu: rd attached CPU
  */
 void cpudl_clear_freecpu(struct cpudl *cp, int cpu)
 {
kernel/sched/cpudeadline.h
浏览文件 @
e13e75b8
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUDL_H
#define _LINUX_CPUDL_H
#include <linux/sched.h>
#include <linux/sched/deadline.h>
#define IDX_INVALID -1
#define IDX_INVALID -1
struct
cpudl_item
{
u64
dl
;
int
cpu
;
int
idx
;
u64
dl
;
int
cpu
;
int
idx
;
};
struct
cpudl
{
raw_spinlock_t
lock
;
int
size
;
cpumask_var_t
free_cpus
;
struct
cpudl_item
*
elements
;
raw_spinlock_t
lock
;
int
size
;
cpumask_var_t
free_cpus
;
struct
cpudl_item
*
elements
;
};
#ifdef CONFIG_SMP
int
cpudl_find
(
struct
cpudl
*
cp
,
struct
task_struct
*
p
,
struct
cpumask
*
later_mask
);
int
cpudl_find
(
struct
cpudl
*
cp
,
struct
task_struct
*
p
,
struct
cpumask
*
later_mask
);
void
cpudl_set
(
struct
cpudl
*
cp
,
int
cpu
,
u64
dl
);
void
cpudl_clear
(
struct
cpudl
*
cp
,
int
cpu
);
int
cpudl_init
(
struct
cpudl
*
cp
);
int
cpudl_init
(
struct
cpudl
*
cp
);
void
cpudl_set_freecpu
(
struct
cpudl
*
cp
,
int
cpu
);
void
cpudl_clear_freecpu
(
struct
cpudl
*
cp
,
int
cpu
);
void
cpudl_cleanup
(
struct
cpudl
*
cp
);
#endif
/* CONFIG_SMP */
#endif
/* _LINUX_CPUDL_H */
kernel/sched/cpufreq.c
浏览文件 @
e13e75b8
...
...
@@ -8,7 +8,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include "sched.h"
DEFINE_PER_CPU
(
struct
update_util_data
*
,
cpufreq_update_util_data
);
...
...
kernel/sched/cpufreq_schedutil.c
浏览文件 @
e13e75b8
...
...
@@ -11,61 +11,57 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/cpufreq.h>
#include <linux/kthread.h>
#include <uapi/linux/sched/types.h>
#include <linux/slab.h>
#include <trace/events/power.h>
#include "sched.h"
#include <trace/events/power.h>
struct
sugov_tunables
{
struct
gov_attr_set
attr_set
;
unsigned
int
rate_limit_us
;
struct
gov_attr_set
attr_set
;
unsigned
int
rate_limit_us
;
};
struct
sugov_policy
{
struct
cpufreq_policy
*
policy
;
struct
sugov_tunables
*
tunables
;
struct
list_head
tunables_hook
;
raw_spinlock_t
update_lock
;
/* For shared policies */
u64
last_freq_update_time
;
s64
freq_update_delay_ns
;
unsigned
int
next_freq
;
unsigned
int
cached_raw_freq
;
/* The next fields are only needed if fast switch cannot be used
.
*/
struct
irq_work
irq_work
;
struct
kthread_work
work
;
struct
mutex
work_lock
;
struct
kthread_worker
worker
;
struct
task_struct
*
thread
;
bool
work_in_progress
;
bool
need_freq_update
;
struct
cpufreq_policy
*
policy
;
struct
sugov_tunables
*
tunables
;
struct
list_head
tunables_hook
;
raw_spinlock_t
update_lock
;
/* For shared policies */
u64
last_freq_update_time
;
s64
freq_update_delay_ns
;
unsigned
int
next_freq
;
unsigned
int
cached_raw_freq
;
/* The next fields are only needed if fast switch cannot be used
:
*/
struct
irq_work
irq_work
;
struct
kthread_work
work
;
struct
mutex
work_lock
;
struct
kthread_worker
worker
;
struct
task_struct
*
thread
;
bool
work_in_progress
;
bool
need_freq_update
;
};
struct
sugov_cpu
{
struct
update_util_data
update_util
;
struct
sugov_policy
*
sg_policy
;
unsigned
int
cpu
;
struct
update_util_data
update_util
;
struct
sugov_policy
*
sg_policy
;
unsigned
int
cpu
;
bool
iowait_boost_pending
;
unsigned
int
iowait_boost
;
unsigned
int
iowait_boost_max
;
bool
iowait_boost_pending
;
unsigned
int
iowait_boost
;
unsigned
int
iowait_boost_max
;
u64
last_update
;
/* The fields below are only needed when sharing a policy
.
*/
unsigned
long
util_cfs
;
unsigned
long
util_dl
;
unsigned
long
max
;
unsigned
int
flags
;
/* The fields below are only needed when sharing a policy
:
*/
unsigned
long
util_cfs
;
unsigned
long
util_dl
;
unsigned
long
max
;
unsigned
int
flags
;
/* The field below is for single-CPU policies only
.
*/
/* The field below is for single-CPU policies only
:
*/
#ifdef CONFIG_NO_HZ_COMMON
unsigned
long
saved_idle_calls
;
unsigned
long
saved_idle_calls
;
#endif
};
...
...
@@ -79,9 +75,9 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
/*
* Since cpufreq_update_util() is called with rq->lock held for
* the @target_cpu, our per-
cpu
data is fully serialized.
* the @target_cpu, our per-
CPU
data is fully serialized.
*
* However, drivers cannot in general deal with cross-
cpu
* However, drivers cannot in general deal with cross-
CPU
* requests, so while get_next_freq() will work, our
* sugov_update_commit() call may not for the fast switching platforms.
*
...
...
@@ -111,6 +107,7 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)
}
delta_ns
=
time
-
sg_policy
->
last_freq_update_time
;
return
delta_ns
>=
sg_policy
->
freq_update_delay_ns
;
}
...
...
@@ -345,8 +342,8 @@ static unsigned int sugov_next_freq_shared(struct sugov_cpu *sg_cpu, u64 time)
return
get_next_freq
(
sg_policy
,
util
,
max
);
}
static
void
sugov_update_shared
(
struct
update_util_data
*
hook
,
u64
time
,
unsigned
int
flags
)
static
void
sugov_update_shared
(
struct
update_util_data
*
hook
,
u64
time
,
unsigned
int
flags
)
{
struct
sugov_cpu
*
sg_cpu
=
container_of
(
hook
,
struct
sugov_cpu
,
update_util
);
struct
sugov_policy
*
sg_policy
=
sg_cpu
->
sg_policy
;
...
...
@@ -423,8 +420,8 @@ static ssize_t rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
return
sprintf
(
buf
,
"%u
\n
"
,
tunables
->
rate_limit_us
);
}
static
ssize_t
rate_limit_us_store
(
struct
gov_attr_set
*
attr_set
,
const
char
*
buf
,
size_t
count
)
static
ssize_t
rate_limit_us_store
(
struct
gov_attr_set
*
attr_set
,
const
char
*
buf
,
size_t
count
)
{
struct
sugov_tunables
*
tunables
=
to_sugov_tunables
(
attr_set
);
struct
sugov_policy
*
sg_policy
;
...
...
@@ -479,11 +476,11 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy)
{
struct
task_struct
*
thread
;
struct
sched_attr
attr
=
{
.
size
=
sizeof
(
struct
sched_attr
),
.
sched_policy
=
SCHED_DEADLINE
,
.
sched_flags
=
SCHED_FLAG_SUGOV
,
.
sched_nice
=
0
,
.
sched_priority
=
0
,
.
size
=
sizeof
(
struct
sched_attr
),
.
sched_policy
=
SCHED_DEADLINE
,
.
sched_flags
=
SCHED_FLAG_SUGOV
,
.
sched_nice
=
0
,
.
sched_priority
=
0
,
/*
* Fake (unused) bandwidth; workaround to "fix"
* priority inheritance.
...
...
@@ -663,21 +660,21 @@ static int sugov_start(struct cpufreq_policy *policy)
struct
sugov_policy
*
sg_policy
=
policy
->
governor_data
;
unsigned
int
cpu
;
sg_policy
->
freq_update_delay_ns
=
sg_policy
->
tunables
->
rate_limit_us
*
NSEC_PER_USEC
;
sg_policy
->
last_freq_update_time
=
0
;
sg_policy
->
next_freq
=
UINT_MAX
;
sg_policy
->
work_in_progress
=
false
;
sg_policy
->
need_freq_update
=
false
;
sg_policy
->
cached_raw_freq
=
0
;
sg_policy
->
freq_update_delay_ns
=
sg_policy
->
tunables
->
rate_limit_us
*
NSEC_PER_USEC
;
sg_policy
->
last_freq_update_time
=
0
;
sg_policy
->
next_freq
=
UINT_MAX
;
sg_policy
->
work_in_progress
=
false
;
sg_policy
->
need_freq_update
=
false
;
sg_policy
->
cached_raw_freq
=
0
;
for_each_cpu
(
cpu
,
policy
->
cpus
)
{
struct
sugov_cpu
*
sg_cpu
=
&
per_cpu
(
sugov_cpu
,
cpu
);
memset
(
sg_cpu
,
0
,
sizeof
(
*
sg_cpu
));
sg_cpu
->
cpu
=
cpu
;
sg_cpu
->
sg_policy
=
sg_policy
;
sg_cpu
->
flags
=
0
;
sg_cpu
->
iowait_boost_max
=
policy
->
cpuinfo
.
max_freq
;
sg_cpu
->
cpu
=
cpu
;
sg_cpu
->
sg_policy
=
sg_policy
;
sg_cpu
->
flags
=
0
;
sg_cpu
->
iowait_boost_max
=
policy
->
cpuinfo
.
max_freq
;
}
for_each_cpu
(
cpu
,
policy
->
cpus
)
{
...
...
@@ -721,14 +718,14 @@ static void sugov_limits(struct cpufreq_policy *policy)
}
static
struct
cpufreq_governor
schedutil_gov
=
{
.
name
=
"schedutil"
,
.
owner
=
THIS_MODULE
,
.
dynamic_switching
=
true
,
.
init
=
sugov_init
,
.
exit
=
sugov_exit
,
.
start
=
sugov_start
,
.
stop
=
sugov_stop
,
.
limits
=
sugov_limits
,
.
name
=
"schedutil"
,
.
owner
=
THIS_MODULE
,
.
dynamic_switching
=
true
,
.
init
=
sugov_init
,
.
exit
=
sugov_exit
,
.
start
=
sugov_start
,
.
stop
=
sugov_stop
,
.
limits
=
sugov_limits
,
};
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL
...
...
kernel/sched/cpupri.c
浏览文件 @
e13e75b8
...
...
@@ -14,7 +14,7 @@
*
* going from the lowest priority to the highest. CPUs in the INVALID state
* are not eligible for routing. The system maintains this state with
* a 2 dimensional bitmap (the first for priority class, the second for
cpu
s
* a 2 dimensional bitmap (the first for priority class, the second for
CPU
s
* in that class). Therefore a typical application without affinity
* restrictions can find a suitable CPU with O(1) complexity (e.g. two bit
* searches). For tasks with affinity restrictions, the algorithm has a
...
...
@@ -26,12 +26,7 @@
* as published by the Free Software Foundation; version 2
* of the License.
*/
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/slab.h>
#include "cpupri.h"
#include "sched.h"
/* Convert between a 140 based task->prio, and our 102 based cpupri */
static
int
convert_prio
(
int
prio
)
...
...
@@ -128,9 +123,9 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
}
/**
* cpupri_set - update the
cpu
priority setting
* cpupri_set - update the
CPU
priority setting
* @cp: The cpupri context
* @cpu: The target
cpu
* @cpu: The target
CPU
* @newpri: The priority (INVALID-RT99) to assign to this CPU
*
* Note: Assumes cpu_rq(cpu)->lock is locked
...
...
@@ -151,7 +146,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
return
;
/*
* If the
cpu
was currently mapped to a different value, we
* If the
CPU
was currently mapped to a different value, we
* need to map it to the new value then remove the old value.
* Note, we must add the new value first, otherwise we risk the
* cpu being missed by the priority loop in cpupri_find.
...
...
kernel/sched/cpupri.h
浏览文件 @
e13e75b8
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUPRI_H
#define _LINUX_CPUPRI_H
#include <linux/sched.h>
#define CPUPRI_NR_PRIORITIES (MAX_RT_PRIO + 2)
#define CPUPRI_INVALID
-1
#define CPUPRI_IDLE
0
#define CPUPRI_NORMAL
1
#define CPUPRI_INVALID
-1
#define CPUPRI_IDLE
0
#define CPUPRI_NORMAL
1
/* values 2-101 are RT priorities 0-99 */
struct
cpupri_vec
{
atomic_t
count
;
cpumask_var_t
mask
;
atomic_t
count
;
cpumask_var_t
mask
;
};
struct
cpupri
{
struct
cpupri_vec
pri_to_cpu
[
CPUPRI_NR_PRIORITIES
];
int
*
cpu_to_pri
;
struct
cpupri_vec
pri_to_cpu
[
CPUPRI_NR_PRIORITIES
];
int
*
cpu_to_pri
;
};
#ifdef CONFIG_SMP
int
cpupri_find
(
struct
cpupri
*
cp
,
struct
task_struct
*
p
,
struct
cpumask
*
lowest_mask
);
int
cpupri_find
(
struct
cpupri
*
cp
,
struct
task_struct
*
p
,
struct
cpumask
*
lowest_mask
);
void
cpupri_set
(
struct
cpupri
*
cp
,
int
cpu
,
int
pri
);
int
cpupri_init
(
struct
cpupri
*
cp
);
int
cpupri_init
(
struct
cpupri
*
cp
);
void
cpupri_cleanup
(
struct
cpupri
*
cp
);
#endif
#endif
/* _LINUX_CPUPRI_H */
kernel/sched/cputime.c
浏览文件 @
e13e75b8
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include <linux/sched/cputime.h>
/*
* Simple CPU accounting cgroup controller
*/
#include "sched.h"
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
...
...
@@ -113,9 +109,9 @@ static inline void task_group_account_field(struct task_struct *p, int index,
}
/*
* Account user
cpu
time to a process.
* @p: the process that the
cpu
time gets accounted to
* @cputime: the
cpu
time spent in user space since the last update
* Account user
CPU
time to a process.
* @p: the process that the
CPU
time gets accounted to
* @cputime: the
CPU
time spent in user space since the last update
*/
void
account_user_time
(
struct
task_struct
*
p
,
u64
cputime
)
{
...
...
@@ -135,9 +131,9 @@ void account_user_time(struct task_struct *p, u64 cputime)
}
/*
* Account guest
cpu
time to a process.
* @p: the process that the
cpu
time gets accounted to
* @cputime: the
cpu
time spent in virtual machine since the last update
* Account guest
CPU
time to a process.
* @p: the process that the
CPU
time gets accounted to
* @cputime: the
CPU
time spent in virtual machine since the last update
*/
void
account_guest_time
(
struct
task_struct
*
p
,
u64
cputime
)
{
...
...
@@ -159,9 +155,9 @@ void account_guest_time(struct task_struct *p, u64 cputime)
}
/*
* Account system
cpu
time to a process and desired cpustat field
* @p: the process that the
cpu
time gets accounted to
* @cputime: the
cpu
time spent in kernel space since the last update
* Account system
CPU
time to a process and desired cpustat field
* @p: the process that the
CPU
time gets accounted to
* @cputime: the
CPU
time spent in kernel space since the last update
* @index: pointer to cpustat field that has to be updated
*/
void
account_system_index_time
(
struct
task_struct
*
p
,
...
...
@@ -179,10 +175,10 @@ void account_system_index_time(struct task_struct *p,
}
/*
* Account system
cpu
time to a process.
* @p: the process that the
cpu
time gets accounted to
* Account system
CPU
time to a process.
* @p: the process that the
CPU
time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count()
* @cputime: the
cpu
time spent in kernel space since the last update
* @cputime: the
CPU
time spent in kernel space since the last update
*/
void
account_system_time
(
struct
task_struct
*
p
,
int
hardirq_offset
,
u64
cputime
)
{
...
...
@@ -205,7 +201,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset, u64 cputime)
/*
* Account for involuntary wait time.
* @cputime: the
cpu
time spent in involuntary wait
* @cputime: the
CPU
time spent in involuntary wait
*/
void
account_steal_time
(
u64
cputime
)
{
...
...
@@ -216,7 +212,7 @@ void account_steal_time(u64 cputime)
/*
* Account for idle time.
* @cputime: the
cpu
time spent in idle wait
* @cputime: the
CPU
time spent in idle wait
*/
void
account_idle_time
(
u64
cputime
)
{
...
...
@@ -338,7 +334,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
* Account a tick to a process and cpustat
* @p: the process that the
cpu
time gets accounted to
* @p: the process that the
CPU
time gets accounted to
* @user_tick: is the tick from userspace
* @rq: the pointer to rq
*
...
...
@@ -400,17 +396,16 @@ static void irqtime_account_idle_ticks(int ticks)
irqtime_account_process_tick
(
current
,
0
,
rq
,
ticks
);
}
#else
/* CONFIG_IRQ_TIME_ACCOUNTING */
static
inline
void
irqtime_account_idle_ticks
(
int
ticks
)
{}
static
inline
void
irqtime_account_idle_ticks
(
int
ticks
)
{
}
static
inline
void
irqtime_account_process_tick
(
struct
task_struct
*
p
,
int
user_tick
,
struct
rq
*
rq
,
int
nr_ticks
)
{}
struct
rq
*
rq
,
int
nr_ticks
)
{
}
#endif
/* CONFIG_IRQ_TIME_ACCOUNTING */
/*
* Use precise platform statistics if available:
*/
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
# ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void
vtime_common_task_switch
(
struct
task_struct
*
prev
)
{
if
(
is_idle_task
(
prev
))
...
...
@@ -421,8 +416,7 @@ void vtime_common_task_switch(struct task_struct *prev)
vtime_flush
(
prev
);
arch_vtime_task_switch
(
prev
);
}
#endif
# endif
#endif
/* CONFIG_VIRT_CPU_ACCOUNTING */
...
...
@@ -469,10 +463,12 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
*
ut
=
cputime
.
utime
;
*
st
=
cputime
.
stime
;
}
#else
/* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#else
/* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
/*
* Account a single tick of
cpu
time.
* @p: the process that the
cpu
time gets accounted to
* Account a single tick of
CPU
time.
* @p: the process that the
CPU
time gets accounted to
* @user_tick: indicates if the tick is a user or a system tick
*/
void
account_process_tick
(
struct
task_struct
*
p
,
int
user_tick
)
...
...
kernel/sched/deadline.c
浏览文件 @
e13e75b8
...
...
@@ -17,9 +17,6 @@
*/
#include "sched.h"
#include <linux/slab.h>
#include <uapi/linux/sched/types.h>
struct
dl_bandwidth
def_dl_bandwidth
;
static
inline
struct
task_struct
*
dl_task_of
(
struct
sched_dl_entity
*
dl_se
)
...
...
@@ -514,7 +511,7 @@ static DEFINE_PER_CPU(struct callback_head, dl_pull_head);
static
void
push_dl_tasks
(
struct
rq
*
);
static
void
pull_dl_task
(
struct
rq
*
);
static
inline
void
queue_push_tasks
(
struct
rq
*
rq
)
static
inline
void
deadline_
queue_push_tasks
(
struct
rq
*
rq
)
{
if
(
!
has_pushable_dl_tasks
(
rq
))
return
;
...
...
@@ -522,7 +519,7 @@ static inline void queue_push_tasks(struct rq *rq)
queue_balance_callback
(
rq
,
&
per_cpu
(
dl_push_head
,
rq
->
cpu
),
push_dl_tasks
);
}
static
inline
void
queue_pull_task
(
struct
rq
*
rq
)
static
inline
void
deadline_
queue_pull_task
(
struct
rq
*
rq
)
{
queue_balance_callback
(
rq
,
&
per_cpu
(
dl_pull_head
,
rq
->
cpu
),
pull_dl_task
);
}
...
...
@@ -539,12 +536,12 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
/*
* If we cannot preempt any rq, fall back to pick any
* online
cpu.
* online
CPU:
*/
cpu
=
cpumask_any_and
(
cpu_active_mask
,
&
p
->
cpus_allowed
);
if
(
cpu
>=
nr_cpu_ids
)
{
/*
* Fail
to find any suitable cpu
.
* Fail
ed to find any suitable CPU
.
* The task will never come back!
*/
BUG_ON
(
dl_bandwidth_enabled
());
...
...
@@ -597,19 +594,18 @@ static inline void pull_dl_task(struct rq *rq)
{
}
static
inline
void
queue_push_tasks
(
struct
rq
*
rq
)
static
inline
void
deadline_
queue_push_tasks
(
struct
rq
*
rq
)
{
}
static
inline
void
queue_pull_task
(
struct
rq
*
rq
)
static
inline
void
deadline_
queue_pull_task
(
struct
rq
*
rq
)
{
}
#endif
/* CONFIG_SMP */
static
void
enqueue_task_dl
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
flags
);
static
void
__dequeue_task_dl
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
flags
);
static
void
check_preempt_curr_dl
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
flags
);
static
void
check_preempt_curr_dl
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
flags
);
/*
* We are being explicitly informed that a new instance is starting,
...
...
@@ -1763,7 +1759,7 @@ pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
if
(
hrtick_enabled
(
rq
))
start_hrtick_dl
(
rq
,
p
);
queue_push_tasks
(
rq
);
deadline_
queue_push_tasks
(
rq
);
return
p
;
}
...
...
@@ -1776,6 +1772,14 @@ static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
enqueue_pushable_dl_task
(
rq
,
p
);
}
/*
* scheduler tick hitting a task of our scheduling class.
*
* NOTE: This function can be called remotely by the tick offload that
* goes along full dynticks. Therefore no local assumption can be made
* and everything must be accessed through the @rq and @curr passed in
* parameters.
*/
static
void
task_tick_dl
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
queued
)
{
update_curr_dl
(
rq
);
...
...
@@ -1865,7 +1869,7 @@ static int find_later_rq(struct task_struct *task)
/*
* We have to consider system topology and task affinity
* first, then we can look for a suitable
cpu
.
* first, then we can look for a suitable
CPU
.
*/
if
(
!
cpudl_find
(
&
task_rq
(
task
)
->
rd
->
cpudl
,
task
,
later_mask
))
return
-
1
;
...
...
@@ -1879,7 +1883,7 @@ static int find_later_rq(struct task_struct *task)
* Now we check how well this matches with task's
* affinity and system topology.
*
* The last
cpu
where the task run is our first
* The last
CPU
where the task run is our first
* guess, since it is most likely cache-hot there.
*/
if
(
cpumask_test_cpu
(
cpu
,
later_mask
))
...
...
@@ -1909,9 +1913,9 @@ static int find_later_rq(struct task_struct *task)
best_cpu
=
cpumask_first_and
(
later_mask
,
sched_domain_span
(
sd
));
/*
* Last chance: if a
cpu
being in both later_mask
* Last chance: if a
CPU
being in both later_mask
* and current sd span is valid, that becomes our
* choice. Of course, the latest possible
cpu
is
* choice. Of course, the latest possible
CPU
is
* already under consideration through later_mask.
*/
if
(
best_cpu
<
nr_cpu_ids
)
{
...
...
@@ -2067,7 +2071,7 @@ static int push_dl_task(struct rq *rq)
if
(
task
==
next_task
)
{
/*
* The task is still there. We don't try
* again, some other
cpu
will pull it when ready.
* again, some other
CPU
will pull it when ready.
*/
goto
out
;
}
...
...
@@ -2300,12 +2304,12 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
/*
* Since this might be the only -deadline task on the rq,
* this is the right place to try to pull some other one
* from an overloaded
cpu
, if any.
* from an overloaded
CPU
, if any.
*/
if
(
!
task_on_rq_queued
(
p
)
||
rq
->
dl
.
dl_nr_running
)
return
;
queue_pull_task
(
rq
);
deadline_
queue_pull_task
(
rq
);
}
/*
...
...
@@ -2327,7 +2331,7 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
if
(
rq
->
curr
!=
p
)
{
#ifdef CONFIG_SMP
if
(
p
->
nr_cpus_allowed
>
1
&&
rq
->
dl
.
overloaded
)
queue_push_tasks
(
rq
);
deadline_
queue_push_tasks
(
rq
);
#endif
if
(
dl_task
(
rq
->
curr
))
check_preempt_curr_dl
(
rq
,
p
,
0
);
...
...
@@ -2352,7 +2356,7 @@ static void prio_changed_dl(struct rq *rq, struct task_struct *p,
* or lowering its prio, so...
*/
if
(
!
rq
->
dl
.
overloaded
)
queue_pull_task
(
rq
);
deadline_
queue_pull_task
(
rq
);
/*
* If we now have a earlier deadline task than p,
...
...
@@ -2626,17 +2630,17 @@ void __dl_clear_params(struct task_struct *p)
{
struct
sched_dl_entity
*
dl_se
=
&
p
->
dl
;
dl_se
->
dl_runtime
=
0
;
dl_se
->
dl_deadline
=
0
;
dl_se
->
dl_period
=
0
;
dl_se
->
flags
=
0
;
dl_se
->
dl_bw
=
0
;
dl_se
->
dl_density
=
0
;
dl_se
->
dl_runtime
=
0
;
dl_se
->
dl_deadline
=
0
;
dl_se
->
dl_period
=
0
;
dl_se
->
flags
=
0
;
dl_se
->
dl_bw
=
0
;
dl_se
->
dl_density
=
0
;
dl_se
->
dl_throttled
=
0
;
dl_se
->
dl_yielded
=
0
;
dl_se
->
dl_non_contending
=
0
;
dl_se
->
dl_overrun
=
0
;
dl_se
->
dl_throttled
=
0
;
dl_se
->
dl_yielded
=
0
;
dl_se
->
dl_non_contending
=
0
;
dl_se
->
dl_overrun
=
0
;
}
bool
dl_param_changed
(
struct
task_struct
*
p
,
const
struct
sched_attr
*
attr
)
...
...
@@ -2655,21 +2659,22 @@ bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr)
#ifdef CONFIG_SMP
int
dl_task_can_attach
(
struct
task_struct
*
p
,
const
struct
cpumask
*
cs_cpus_allowed
)
{
unsigned
int
dest_cpu
=
cpumask_any_and
(
cpu_active_mask
,
cs_cpus_allowed
);
unsigned
int
dest_cpu
;
struct
dl_bw
*
dl_b
;
bool
overflow
;
int
cpus
,
ret
;
unsigned
long
flags
;
dest_cpu
=
cpumask_any_and
(
cpu_active_mask
,
cs_cpus_allowed
);
rcu_read_lock_sched
();
dl_b
=
dl_bw_of
(
dest_cpu
);
raw_spin_lock_irqsave
(
&
dl_b
->
lock
,
flags
);
cpus
=
dl_bw_cpus
(
dest_cpu
);
overflow
=
__dl_overflow
(
dl_b
,
cpus
,
0
,
p
->
dl
.
dl_bw
);
if
(
overflow
)
if
(
overflow
)
{
ret
=
-
EBUSY
;
else
{
}
else
{
/*
* We reserve space for this task in the destination
* root_domain, as we can't fail after this point.
...
...
@@ -2681,6 +2686,7 @@ int dl_task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allo
}
raw_spin_unlock_irqrestore
(
&
dl_b
->
lock
,
flags
);
rcu_read_unlock_sched
();
return
ret
;
}
...
...
@@ -2701,6 +2707,7 @@ int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur,
ret
=
0
;
raw_spin_unlock_irqrestore
(
&
cur_dl_b
->
lock
,
flags
);
rcu_read_unlock_sched
();
return
ret
;
}
...
...
@@ -2718,6 +2725,7 @@ bool dl_cpu_busy(unsigned int cpu)
overflow
=
__dl_overflow
(
dl_b
,
cpus
,
0
,
0
);
raw_spin_unlock_irqrestore
(
&
dl_b
->
lock
,
flags
);
rcu_read_unlock_sched
();
return
overflow
;
}
#endif
...
...
kernel/sched/debug.c
浏览文件 @
e13e75b8
/*
* kernel/sched/debug.c
*
* Print the CFS rbtree
* Print the CFS rbtree
and other debugging details
*
* Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
*
...
...
@@ -9,16 +9,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/proc_fs.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/mempolicy.h>
#include <linux/debugfs.h>
#include "sched.h"
static
DEFINE_SPINLOCK
(
sched_debug_lock
);
...
...
@@ -274,34 +264,19 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
if
(
table
==
NULL
)
return
NULL
;
set_table_entry
(
&
table
[
0
],
"min_interval"
,
&
sd
->
min_interval
,
sizeof
(
long
),
0644
,
proc_doulongvec_minmax
,
false
);
set_table_entry
(
&
table
[
1
],
"max_interval"
,
&
sd
->
max_interval
,
sizeof
(
long
),
0644
,
proc_doulongvec_minmax
,
false
);
set_table_entry
(
&
table
[
2
],
"busy_idx"
,
&
sd
->
busy_idx
,
sizeof
(
int
),
0644
,
proc_dointvec_minmax
,
true
);
set_table_entry
(
&
table
[
3
],
"idle_idx"
,
&
sd
->
idle_idx
,
sizeof
(
int
),
0644
,
proc_dointvec_minmax
,
true
);
set_table_entry
(
&
table
[
4
],
"newidle_idx"
,
&
sd
->
newidle_idx
,
sizeof
(
int
),
0644
,
proc_dointvec_minmax
,
true
);
set_table_entry
(
&
table
[
5
],
"wake_idx"
,
&
sd
->
wake_idx
,
sizeof
(
int
),
0644
,
proc_dointvec_minmax
,
true
);
set_table_entry
(
&
table
[
6
],
"forkexec_idx"
,
&
sd
->
forkexec_idx
,
sizeof
(
int
),
0644
,
proc_dointvec_minmax
,
true
);
set_table_entry
(
&
table
[
7
],
"busy_factor"
,
&
sd
->
busy_factor
,
sizeof
(
int
),
0644
,
proc_dointvec_minmax
,
false
);
set_table_entry
(
&
table
[
8
],
"imbalance_pct"
,
&
sd
->
imbalance_pct
,
sizeof
(
int
),
0644
,
proc_dointvec_minmax
,
false
);
set_table_entry
(
&
table
[
9
],
"cache_nice_tries"
,
&
sd
->
cache_nice_tries
,
sizeof
(
int
),
0644
,
proc_dointvec_minmax
,
false
);
set_table_entry
(
&
table
[
10
],
"flags"
,
&
sd
->
flags
,
sizeof
(
int
),
0644
,
proc_dointvec_minmax
,
false
);
set_table_entry
(
&
table
[
11
],
"max_newidle_lb_cost"
,
&
sd
->
max_newidle_lb_cost
,
sizeof
(
long
),
0644
,
proc_doulongvec_minmax
,
false
);
set_table_entry
(
&
table
[
12
],
"name"
,
sd
->
name
,
CORENAME_MAX_SIZE
,
0444
,
proc_dostring
,
false
);
set_table_entry
(
&
table
[
0
]
,
"min_interval"
,
&
sd
->
min_interval
,
sizeof
(
long
),
0644
,
proc_doulongvec_minmax
,
false
);
set_table_entry
(
&
table
[
1
]
,
"max_interval"
,
&
sd
->
max_interval
,
sizeof
(
long
),
0644
,
proc_doulongvec_minmax
,
false
);
set_table_entry
(
&
table
[
2
]
,
"busy_idx"
,
&
sd
->
busy_idx
,
sizeof
(
int
)
,
0644
,
proc_dointvec_minmax
,
true
);
set_table_entry
(
&
table
[
3
]
,
"idle_idx"
,
&
sd
->
idle_idx
,
sizeof
(
int
)
,
0644
,
proc_dointvec_minmax
,
true
);
set_table_entry
(
&
table
[
4
]
,
"newidle_idx"
,
&
sd
->
newidle_idx
,
sizeof
(
int
)
,
0644
,
proc_dointvec_minmax
,
true
);
set_table_entry
(
&
table
[
5
]
,
"wake_idx"
,
&
sd
->
wake_idx
,
sizeof
(
int
)
,
0644
,
proc_dointvec_minmax
,
true
);
set_table_entry
(
&
table
[
6
]
,
"forkexec_idx"
,
&
sd
->
forkexec_idx
,
sizeof
(
int
)
,
0644
,
proc_dointvec_minmax
,
true
);
set_table_entry
(
&
table
[
7
]
,
"busy_factor"
,
&
sd
->
busy_factor
,
sizeof
(
int
)
,
0644
,
proc_dointvec_minmax
,
false
);
set_table_entry
(
&
table
[
8
]
,
"imbalance_pct"
,
&
sd
->
imbalance_pct
,
sizeof
(
int
)
,
0644
,
proc_dointvec_minmax
,
false
);
set_table_entry
(
&
table
[
9
]
,
"cache_nice_tries"
,
&
sd
->
cache_nice_tries
,
sizeof
(
int
)
,
0644
,
proc_dointvec_minmax
,
false
);
set_table_entry
(
&
table
[
10
],
"flags"
,
&
sd
->
flags
,
sizeof
(
int
)
,
0644
,
proc_dointvec_minmax
,
false
);
set_table_entry
(
&
table
[
11
],
"max_newidle_lb_cost"
,
&
sd
->
max_newidle_lb_cost
,
sizeof
(
long
),
0644
,
proc_doulongvec_minmax
,
false
);
set_table_entry
(
&
table
[
12
],
"name"
,
sd
->
name
,
CORENAME_MAX_SIZE
,
0444
,
proc_dostring
,
false
);
/* &table[13] is terminator */
return
table
;
...
...
@@ -332,8 +307,8 @@ static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
return
table
;
}
static
cpumask_var_t
sd_sysctl_cpus
;
static
struct
ctl_table_header
*
sd_sysctl_header
;
static
cpumask_var_t
sd_sysctl_cpus
;
static
struct
ctl_table_header
*
sd_sysctl_header
;
void
register_sched_domain_sysctl
(
void
)
{
...
...
@@ -413,14 +388,10 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
{
struct
sched_entity
*
se
=
tg
->
se
[
cpu
];
#define P(F) \
SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) \
SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F) \
SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) \
SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
#define P(F) SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
#define P_SCHEDSTAT(F) SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)schedstat_val(F))
#define PN(F) SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN_SCHEDSTAT(F) SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)schedstat_val(F)))
if
(
!
se
)
return
;
...
...
@@ -428,6 +399,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
PN
(
se
->
exec_start
);
PN
(
se
->
vruntime
);
PN
(
se
->
sum_exec_runtime
);
if
(
schedstat_enabled
())
{
PN_SCHEDSTAT
(
se
->
statistics
.
wait_start
);
PN_SCHEDSTAT
(
se
->
statistics
.
sleep_start
);
...
...
@@ -440,6 +412,7 @@ static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group
PN_SCHEDSTAT
(
se
->
statistics
.
wait_sum
);
P_SCHEDSTAT
(
se
->
statistics
.
wait_count
);
}
P
(
se
->
load
.
weight
);
P
(
se
->
runnable_weight
);
#ifdef CONFIG_SMP
...
...
@@ -464,6 +437,7 @@ static char *task_group_path(struct task_group *tg)
return
group_path
;
cgroup_path
(
tg
->
css
.
cgroup
,
group_path
,
PATH_MAX
);
return
group_path
;
}
#endif
...
...
@@ -804,9 +778,9 @@ void sysrq_sched_debug_show(void)
/*
* This itererator needs some explanation.
* It returns 1 for the header position.
* This means 2 is
cpu
0.
* In a hotplugged system some
cpus, including cpu
0, may be missing so we have
* to use cpumask_* to iterate over the
cpu
s.
* This means 2 is
CPU
0.
* In a hotplugged system some
CPUs, including CPU
0, may be missing so we have
* to use cpumask_* to iterate over the
CPU
s.
*/
static
void
*
sched_debug_start
(
struct
seq_file
*
file
,
loff_t
*
offset
)
{
...
...
@@ -826,6 +800,7 @@ static void *sched_debug_start(struct seq_file *file, loff_t *offset)
if
(
n
<
nr_cpu_ids
)
return
(
void
*
)(
unsigned
long
)(
n
+
2
);
return
NULL
;
}
...
...
@@ -840,10 +815,10 @@ static void sched_debug_stop(struct seq_file *file, void *data)
}
static
const
struct
seq_operations
sched_debug_sops
=
{
.
start
=
sched_debug_start
,
.
next
=
sched_debug_next
,
.
stop
=
sched_debug_stop
,
.
show
=
sched_debug_show
,
.
start
=
sched_debug_start
,
.
next
=
sched_debug_next
,
.
stop
=
sched_debug_stop
,
.
show
=
sched_debug_show
,
};
static
int
sched_debug_release
(
struct
inode
*
inode
,
struct
file
*
file
)
...
...
@@ -881,14 +856,10 @@ static int __init init_sched_debug_procfs(void)
__initcall
(
init_sched_debug_procfs
);
#define __P(F) \
SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) \
SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) \
SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) \
SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#define __P(F) SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
#define P(F) SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
#define __PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
#define PN(F) SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
#ifdef CONFIG_NUMA_BALANCING
...
...
kernel/sched/fair.c
浏览文件 @
e13e75b8
此差异已折叠。
点击以展开。
kernel/sched/idle.c
浏览文件 @
e13e75b8
/*
* Generic entry point for the idle threads
* Generic entry points for the idle threads and
* implementation of the idle task scheduling class.
*
* (NOTE: these are not related to SCHED_IDLE batch scheduled
* tasks which are handled in sched/fair.c )
*/
#include <linux/sched.h>
#include <linux/sched/idle.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/cpuhotplug.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/stackprotector.h>
#include <linux/suspend.h>
#include <linux/livepatch.h>
#include <asm/tlb.h>
#include "sched.h"
#include <trace/events/power.h>
#include "sched.h"
/* Linker adds these: start and end of __cpuidle functions */
extern
char
__cpuidle_text_start
[],
__cpuidle_text_end
[];
...
...
@@ -46,6 +37,7 @@ void cpu_idle_poll_ctrl(bool enable)
static
int
__init
cpu_idle_poll_setup
(
char
*
__unused
)
{
cpu_idle_force_poll
=
1
;
return
1
;
}
__setup
(
"nohlt"
,
cpu_idle_poll_setup
);
...
...
@@ -53,6 +45,7 @@ __setup("nohlt", cpu_idle_poll_setup);
static
int
__init
cpu_idle_nopoll_setup
(
char
*
__unused
)
{
cpu_idle_force_poll
=
0
;
return
1
;
}
__setup
(
"hlt"
,
cpu_idle_nopoll_setup
);
...
...
@@ -64,12 +57,14 @@ static noinline int __cpuidle cpu_idle_poll(void)
trace_cpu_idle_rcuidle
(
0
,
smp_processor_id
());
local_irq_enable
();
stop_critical_timings
();
while
(
!
tif_need_resched
()
&&
(
cpu_idle_force_poll
||
tick_check_broadcast_expired
()))
cpu_relax
();
start_critical_timings
();
trace_cpu_idle_rcuidle
(
PWR_EVENT_EXIT
,
smp_processor_id
());
rcu_idle_exit
();
return
1
;
}
...
...
@@ -332,8 +327,8 @@ void cpu_startup_entry(enum cpuhp_state state)
{
/*
* This #ifdef needs to die, but it's too late in the cycle to
* make this generic (
arm and sh
have never invoked the canary
* init for the non boot
cpu
s!). Will be fixed in 3.11
* make this generic (
ARM and SH
have never invoked the canary
* init for the non boot
CPU
s!). Will be fixed in 3.11
*/
#ifdef CONFIG_X86
/*
...
...
@@ -350,3 +345,116 @@ void cpu_startup_entry(enum cpuhp_state state)
while
(
1
)
do_idle
();
}
/*
* idle-task scheduling class.
*/
#ifdef CONFIG_SMP
static
int
select_task_rq_idle
(
struct
task_struct
*
p
,
int
cpu
,
int
sd_flag
,
int
flags
)
{
return
task_cpu
(
p
);
/* IDLE tasks as never migrated */
}
#endif
/*
* Idle tasks are unconditionally rescheduled:
*/
static
void
check_preempt_curr_idle
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
flags
)
{
resched_curr
(
rq
);
}
static
struct
task_struct
*
pick_next_task_idle
(
struct
rq
*
rq
,
struct
task_struct
*
prev
,
struct
rq_flags
*
rf
)
{
put_prev_task
(
rq
,
prev
);
update_idle_core
(
rq
);
schedstat_inc
(
rq
->
sched_goidle
);
return
rq
->
idle
;
}
/*
* It is not legal to sleep in the idle task - print a warning
* message if some code attempts to do it:
*/
static
void
dequeue_task_idle
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
flags
)
{
raw_spin_unlock_irq
(
&
rq
->
lock
);
printk
(
KERN_ERR
"bad: scheduling from the idle thread!
\n
"
);
dump_stack
();
raw_spin_lock_irq
(
&
rq
->
lock
);
}
static
void
put_prev_task_idle
(
struct
rq
*
rq
,
struct
task_struct
*
prev
)
{
}
/*
* scheduler tick hitting a task of our scheduling class.
*
* NOTE: This function can be called remotely by the tick offload that
* goes along full dynticks. Therefore no local assumption can be made
* and everything must be accessed through the @rq and @curr passed in
* parameters.
*/
static
void
task_tick_idle
(
struct
rq
*
rq
,
struct
task_struct
*
curr
,
int
queued
)
{
}
static
void
set_curr_task_idle
(
struct
rq
*
rq
)
{
}
static
void
switched_to_idle
(
struct
rq
*
rq
,
struct
task_struct
*
p
)
{
BUG
();
}
static
void
prio_changed_idle
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
oldprio
)
{
BUG
();
}
static
unsigned
int
get_rr_interval_idle
(
struct
rq
*
rq
,
struct
task_struct
*
task
)
{
return
0
;
}
static
void
update_curr_idle
(
struct
rq
*
rq
)
{
}
/*
* Simple, special scheduling class for the per-CPU idle tasks:
*/
const
struct
sched_class
idle_sched_class
=
{
/* .next is NULL */
/* no enqueue/yield_task for idle tasks */
/* dequeue is not valid, we print a debug message there: */
.
dequeue_task
=
dequeue_task_idle
,
.
check_preempt_curr
=
check_preempt_curr_idle
,
.
pick_next_task
=
pick_next_task_idle
,
.
put_prev_task
=
put_prev_task_idle
,
#ifdef CONFIG_SMP
.
select_task_rq
=
select_task_rq_idle
,
.
set_cpus_allowed
=
set_cpus_allowed_common
,
#endif
.
set_curr_task
=
set_curr_task_idle
,
.
task_tick
=
task_tick_idle
,
.
get_rr_interval
=
get_rr_interval_idle
,
.
prio_changed
=
prio_changed_idle
,
.
switched_to
=
switched_to_idle
,
.
update_curr
=
update_curr_idle
,
};
kernel/sched/idle_task.c
已删除
100644 → 0
浏览文件 @
1ed41b56
// SPDX-License-Identifier: GPL-2.0
#include "sched.h"
/*
* idle-task scheduling class.
*
* (NOTE: these are not related to SCHED_IDLE tasks which are
* handled in sched/fair.c)
*/
#ifdef CONFIG_SMP
static
int
select_task_rq_idle
(
struct
task_struct
*
p
,
int
cpu
,
int
sd_flag
,
int
flags
)
{
return
task_cpu
(
p
);
/* IDLE tasks as never migrated */
}
#endif
/* CONFIG_SMP */
/*
* Idle tasks are unconditionally rescheduled:
*/
static
void
check_preempt_curr_idle
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
flags
)
{
resched_curr
(
rq
);
}
static
struct
task_struct
*
pick_next_task_idle
(
struct
rq
*
rq
,
struct
task_struct
*
prev
,
struct
rq_flags
*
rf
)
{
put_prev_task
(
rq
,
prev
);
update_idle_core
(
rq
);
schedstat_inc
(
rq
->
sched_goidle
);
return
rq
->
idle
;
}
/*
* It is not legal to sleep in the idle task - print a warning
* message if some code attempts to do it:
*/
static
void
dequeue_task_idle
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
flags
)
{
raw_spin_unlock_irq
(
&
rq
->
lock
);
printk
(
KERN_ERR
"bad: scheduling from the idle thread!
\n
"
);
dump_stack
();
raw_spin_lock_irq
(
&
rq
->
lock
);
}
static
void
put_prev_task_idle
(
struct
rq
*
rq
,
struct
task_struct
*
prev
)
{
rq_last_tick_reset
(
rq
);
}
static
void
task_tick_idle
(
struct
rq
*
rq
,
struct
task_struct
*
curr
,
int
queued
)
{
}
static
void
set_curr_task_idle
(
struct
rq
*
rq
)
{
}
static
void
switched_to_idle
(
struct
rq
*
rq
,
struct
task_struct
*
p
)
{
BUG
();
}
static
void
prio_changed_idle
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
oldprio
)
{
BUG
();
}
static
unsigned
int
get_rr_interval_idle
(
struct
rq
*
rq
,
struct
task_struct
*
task
)
{
return
0
;
}
static
void
update_curr_idle
(
struct
rq
*
rq
)
{
}
/*
* Simple, special scheduling class for the per-CPU idle tasks:
*/
const
struct
sched_class
idle_sched_class
=
{
/* .next is NULL */
/* no enqueue/yield_task for idle tasks */
/* dequeue is not valid, we print a debug message there: */
.
dequeue_task
=
dequeue_task_idle
,
.
check_preempt_curr
=
check_preempt_curr_idle
,
.
pick_next_task
=
pick_next_task_idle
,
.
put_prev_task
=
put_prev_task_idle
,
#ifdef CONFIG_SMP
.
select_task_rq
=
select_task_rq_idle
,
.
set_cpus_allowed
=
set_cpus_allowed_common
,
#endif
.
set_curr_task
=
set_curr_task_idle
,
.
task_tick
=
task_tick_idle
,
.
get_rr_interval
=
get_rr_interval_idle
,
.
prio_changed
=
prio_changed_idle
,
.
switched_to
=
switched_to_idle
,
.
update_curr
=
update_curr_idle
,
};
kernel/sched/isolation.c
浏览文件 @
e13e75b8
...
...
@@ -3,15 +3,10 @@
* any CPU: unbound workqueues, timers, kthreads and any offloadable work.
*
* Copyright (C) 2017 Red Hat, Inc., Frederic Weisbecker
* Copyright (C) 2017-2018 SUSE, Frederic Weisbecker
*
*/
#include <linux/sched/isolation.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/ctype.h>
#include "sched.h"
DEFINE_STATIC_KEY_FALSE
(
housekeeping_overriden
);
EXPORT_SYMBOL_GPL
(
housekeeping_overriden
);
...
...
@@ -60,6 +55,9 @@ void __init housekeeping_init(void)
static_branch_enable
(
&
housekeeping_overriden
);
if
(
housekeeping_flags
&
HK_FLAG_TICK
)
sched_tick_offload_init
();
/* We need at least one CPU to handle housekeeping work */
WARN_ON_ONCE
(
cpumask_empty
(
housekeeping_mask
));
}
...
...
@@ -119,7 +117,7 @@ static int __init housekeeping_nohz_full_setup(char *str)
{
unsigned
int
flags
;
flags
=
HK_FLAG_TICK
|
HK_FLAG_TIMER
|
HK_FLAG_RCU
|
HK_FLAG_MISC
;
flags
=
HK_FLAG_TICK
|
HK_FLAG_
WQ
|
HK_FLAG_
TIMER
|
HK_FLAG_RCU
|
HK_FLAG_MISC
;
return
housekeeping_setup
(
str
,
flags
);
}
...
...
kernel/sched/loadavg.c
浏览文件 @
e13e75b8
...
...
@@ -6,10 +6,6 @@
* figure. Its a silly number but people think its important. We go through
* great pains to make it work on big machines and tickless kernels.
*/
#include <linux/export.h>
#include <linux/sched/loadavg.h>
#include "sched.h"
/*
...
...
@@ -32,29 +28,29 @@
* Due to a number of reasons the above turns in the mess below:
*
* - for_each_possible_cpu() is prohibitively expensive on machines with
* serious number of
cpu
s, therefore we need to take a distributed approach
* serious number of
CPU
s, therefore we need to take a distributed approach
* to calculating nr_active.
*
* \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
* = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
*
* So assuming nr_active := 0 when we start out -- true per definition, we
* can simply take per-
cpu
deltas and fold those into a global accumulate
* can simply take per-
CPU
deltas and fold those into a global accumulate
* to obtain the same result. See calc_load_fold_active().
*
* Furthermore, in order to avoid synchronizing all per-
cpu
delta folding
* Furthermore, in order to avoid synchronizing all per-
CPU
delta folding
* across the machine, we assume 10 ticks is sufficient time for every
*
cpu
to have completed this task.
*
CPU
to have completed this task.
*
* This places an upper-bound on the IRQ-off latency of the machine. Then
* again, being late doesn't loose the delta, just wrecks the sample.
*
* - cpu_rq()->nr_uninterruptible isn't accurately tracked per-
cpu
because
* this would add another cross-
cpu
cacheline miss and atomic operation
* to the wakeup path. Instead we increment on whatever
cpu
the task ran
* when it went into uninterruptible state and decrement on whatever
cpu
* - cpu_rq()->nr_uninterruptible isn't accurately tracked per-
CPU
because
* this would add another cross-
CPU
cacheline miss and atomic operation
* to the wakeup path. Instead we increment on whatever
CPU
the task ran
* when it went into uninterruptible state and decrement on whatever
CPU
* did the wakeup. This means that only the sum of nr_uninterruptible over
* all
cpu
s yields the correct result.
* all
CPU
s yields the correct result.
*
* This covers the NO_HZ=n code, for extra head-aches, see the comment below.
*/
...
...
@@ -115,11 +111,11 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
* Handle NO_HZ for the global load-average.
*
* Since the above described distributed algorithm to compute the global
* load-average relies on per-
cpu
sampling from the tick, it is affected by
* load-average relies on per-
CPU
sampling from the tick, it is affected by
* NO_HZ.
*
* The basic idea is to fold the nr_active delta into a global NO_HZ-delta upon
* entering NO_HZ state such that we can include this as an 'extra'
cpu
delta
* entering NO_HZ state such that we can include this as an 'extra'
CPU
delta
* when we read the global state.
*
* Obviously reality has to ruin such a delightfully simple scheme:
...
...
@@ -146,9 +142,9 @@ calc_load(unsigned long load, unsigned long exp, unsigned long active)
* busy state.
*
* This is solved by pushing the window forward, and thus skipping the
* sample, for this
cpu (effectively using the NO_HZ-delta for this cpu
which
* sample, for this
CPU (effectively using the NO_HZ-delta for this CPU
which
* was in effect at the time the window opened). This also solves the issue
* of having to deal with a
cpu
having been in NO_HZ for multiple LOAD_FREQ
* of having to deal with a
CPU
having been in NO_HZ for multiple LOAD_FREQ
* intervals.
*
* When making the ILB scale, we should try to pull this in as well.
...
...
@@ -299,7 +295,7 @@ calc_load_n(unsigned long load, unsigned long exp,
}
/*
* NO_HZ can leave us missing all per-
cpu
ticks calling
* NO_HZ can leave us missing all per-
CPU
ticks calling
* calc_load_fold_active(), but since a NO_HZ CPU folds its delta into
* calc_load_nohz per calc_load_nohz_start(), all we need to do is fold
* in the pending NO_HZ delta if our NO_HZ period crossed a load cycle boundary.
...
...
@@ -363,7 +359,7 @@ void calc_global_load(unsigned long ticks)
return
;
/*
* Fold the 'old' NO_HZ-delta to include all NO_HZ
cpu
s.
* Fold the 'old' NO_HZ-delta to include all NO_HZ
CPU
s.
*/
delta
=
calc_load_nohz_fold
();
if
(
delta
)
...
...
kernel/sched/membarrier.c
浏览文件 @
e13e75b8
...
...
@@ -13,32 +13,25 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/syscalls.h>
#include <linux/membarrier.h>
#include <linux/tick.h>
#include <linux/cpumask.h>
#include <linux/atomic.h>
#include "sched.h"
/* for cpu_rq(). */
#include "sched.h"
/*
* Bitmask made from a "or" of all commands within enum membarrier_cmd,
* except MEMBARRIER_CMD_QUERY.
*/
#ifdef CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK \
(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE
\
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK
\
(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE
\
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE)
#else
#define MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK 0
#endif
#define MEMBARRIER_CMD_BITMASK \
(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED
\
| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED
\
| MEMBARRIER_CMD_PRIVATE_EXPEDITED \
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED \
#define MEMBARRIER_CMD_BITMASK
\
(MEMBARRIER_CMD_GLOBAL | MEMBARRIER_CMD_GLOBAL_EXPEDITED
\
| MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED
\
| MEMBARRIER_CMD_PRIVATE_EXPEDITED
\
| MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED
\
| MEMBARRIER_PRIVATE_EXPEDITED_SYNC_CORE_BITMASK)
static
void
ipi_mb
(
void
*
info
)
...
...
@@ -85,6 +78,7 @@ static int membarrier_global_expedited(void)
*/
if
(
cpu
==
raw_smp_processor_id
())
continue
;
rcu_read_lock
();
p
=
task_rcu_dereference
(
&
cpu_rq
(
cpu
)
->
curr
);
if
(
p
&&
p
->
mm
&&
(
atomic_read
(
&
p
->
mm
->
membarrier_state
)
&
...
...
@@ -188,6 +182,7 @@ static int membarrier_private_expedited(int flags)
* rq->curr modification in scheduler.
*/
smp_mb
();
/* exit from system call is not a mb */
return
0
;
}
...
...
@@ -219,6 +214,7 @@ static int membarrier_register_global_expedited(void)
}
atomic_or
(
MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY
,
&
mm
->
membarrier_state
);
return
0
;
}
...
...
@@ -253,6 +249,7 @@ static int membarrier_register_private_expedited(int flags)
synchronize_sched
();
}
atomic_or
(
state
,
&
mm
->
membarrier_state
);
return
0
;
}
...
...
kernel/sched/rt.c
浏览文件 @
e13e75b8
...
...
@@ -3,12 +3,8 @@
* Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
* policies)
*/
#include "sched.h"
#include <linux/slab.h>
#include <linux/irq_work.h>
int
sched_rr_timeslice
=
RR_TIMESLICE
;
int
sysctl_sched_rr_timeslice
=
(
MSEC_PER_SEC
/
HZ
)
*
RR_TIMESLICE
;
...
...
@@ -359,7 +355,7 @@ static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
static
void
push_rt_tasks
(
struct
rq
*
);
static
void
pull_rt_task
(
struct
rq
*
);
static
inline
void
queue_push_tasks
(
struct
rq
*
rq
)
static
inline
void
rt_
queue_push_tasks
(
struct
rq
*
rq
)
{
if
(
!
has_pushable_tasks
(
rq
))
return
;
...
...
@@ -367,7 +363,7 @@ static inline void queue_push_tasks(struct rq *rq)
queue_balance_callback
(
rq
,
&
per_cpu
(
rt_push_head
,
rq
->
cpu
),
push_rt_tasks
);
}
static
inline
void
queue_pull_task
(
struct
rq
*
rq
)
static
inline
void
rt_
queue_pull_task
(
struct
rq
*
rq
)
{
queue_balance_callback
(
rq
,
&
per_cpu
(
rt_pull_head
,
rq
->
cpu
),
pull_rt_task
);
}
...
...
@@ -425,7 +421,7 @@ static inline void pull_rt_task(struct rq *this_rq)
{
}
static
inline
void
queue_push_tasks
(
struct
rq
*
rq
)
static
inline
void
rt_
queue_push_tasks
(
struct
rq
*
rq
)
{
}
#endif
/* CONFIG_SMP */
...
...
@@ -1453,9 +1449,9 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
return
;
/*
* There appear
s to be other cpu
s that can accept
*
current and none to
run 'p', so lets reschedule
* to try and push
current
away:
* There appear
to be other CPU
s that can accept
*
the current task but none can
run 'p', so lets reschedule
* to try and push
the current task
away:
*/
requeue_task_rt
(
rq
,
p
,
1
);
resched_curr
(
rq
);
...
...
@@ -1569,7 +1565,7 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
/* The running task is never eligible for pushing */
dequeue_pushable_task
(
rq
,
p
);
queue_push_tasks
(
rq
);
rt_
queue_push_tasks
(
rq
);
return
p
;
}
...
...
@@ -1596,12 +1592,13 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
if
(
!
task_running
(
rq
,
p
)
&&
cpumask_test_cpu
(
cpu
,
&
p
->
cpus_allowed
))
return
1
;
return
0
;
}
/*
* Return the highest pushable rq's task, which is suitable to be executed
* on the
cpu
, NULL otherwise
* on the
CPU
, NULL otherwise
*/
static
struct
task_struct
*
pick_highest_pushable_task
(
struct
rq
*
rq
,
int
cpu
)
{
...
...
@@ -1639,11 +1636,11 @@ static int find_lowest_rq(struct task_struct *task)
return
-
1
;
/* No targets found */
/*
* At this point we have built a mask of
cpu
s representing the
* At this point we have built a mask of
CPU
s representing the
* lowest priority tasks in the system. Now we want to elect
* the best one based on our affinity and topology.
*
* We prioritize the last
cpu
that the task executed on since
* We prioritize the last
CPU
that the task executed on since
* it is most likely cache-hot in that location.
*/
if
(
cpumask_test_cpu
(
cpu
,
lowest_mask
))
...
...
@@ -1651,7 +1648,7 @@ static int find_lowest_rq(struct task_struct *task)
/*
* Otherwise, we consult the sched_domains span maps to figure
* out which
cpu
is logically closest to our hot cache data.
* out which
CPU
is logically closest to our hot cache data.
*/
if
(
!
cpumask_test_cpu
(
this_cpu
,
lowest_mask
))
this_cpu
=
-
1
;
/* Skip this_cpu opt if not among lowest */
...
...
@@ -1692,6 +1689,7 @@ static int find_lowest_rq(struct task_struct *task)
cpu
=
cpumask_any
(
lowest_mask
);
if
(
cpu
<
nr_cpu_ids
)
return
cpu
;
return
-
1
;
}
...
...
@@ -1827,7 +1825,7 @@ static int push_rt_task(struct rq *rq)
* The task hasn't migrated, and is still the next
* eligible task, but we failed to find a run-queue
* to push it to. Do not retry in this case, since
* other
cpu
s will pull from us when ready.
* other
CPU
s will pull from us when ready.
*/
goto
out
;
}
...
...
@@ -1919,7 +1917,7 @@ static int rto_next_cpu(struct root_domain *rd)
* rt_next_cpu() will simply return the first CPU found in
* the rto_mask.
*
* If rto_next_cpu() is called with rto_cpu is a valid
cpu
, it
* If rto_next_cpu() is called with rto_cpu is a valid
CPU
, it
* will return the next CPU found in the rto_mask.
*
* If there are no more CPUs left in the rto_mask, then a check is made
...
...
@@ -1980,7 +1978,7 @@ static void tell_cpu_to_push(struct rq *rq)
raw_spin_lock
(
&
rq
->
rd
->
rto_lock
);
/*
* The rto_cpu is updated under the lock, if it has a valid
cpu
* The rto_cpu is updated under the lock, if it has a valid
CPU
* then the IPI is still running and will continue due to the
* update to loop_next, and nothing needs to be done here.
* Otherwise it is finishing up and an ipi needs to be sent.
...
...
@@ -2105,7 +2103,7 @@ static void pull_rt_task(struct rq *this_rq)
/*
* There's a chance that p is higher in priority
* than what's currently running on its
cpu
.
* than what's currently running on its
CPU
.
* This is just that p is wakeing up and hasn't
* had a chance to schedule. We only pull
* p if it is lower in priority than the
...
...
@@ -2187,7 +2185,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
if
(
!
task_on_rq_queued
(
p
)
||
rq
->
rt
.
rt_nr_running
)
return
;
queue_pull_task
(
rq
);
rt_
queue_pull_task
(
rq
);
}
void
__init
init_sched_rt_class
(
void
)
...
...
@@ -2218,7 +2216,7 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
if
(
task_on_rq_queued
(
p
)
&&
rq
->
curr
!=
p
)
{
#ifdef CONFIG_SMP
if
(
p
->
nr_cpus_allowed
>
1
&&
rq
->
rt
.
overloaded
)
queue_push_tasks
(
rq
);
rt_
queue_push_tasks
(
rq
);
#endif
/* CONFIG_SMP */
if
(
p
->
prio
<
rq
->
curr
->
prio
&&
cpu_online
(
cpu_of
(
rq
)))
resched_curr
(
rq
);
...
...
@@ -2242,7 +2240,7 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
* may need to pull tasks to this runqueue.
*/
if
(
oldprio
<
p
->
prio
)
queue_pull_task
(
rq
);
rt_
queue_pull_task
(
rq
);
/*
* If there's a higher priority task waiting to run
...
...
@@ -2292,6 +2290,14 @@ static void watchdog(struct rq *rq, struct task_struct *p)
static
inline
void
watchdog
(
struct
rq
*
rq
,
struct
task_struct
*
p
)
{
}
#endif
/*
* scheduler tick hitting a task of our scheduling class.
*
* NOTE: This function can be called remotely by the tick offload that
* goes along full dynticks. Therefore no local assumption can be made
* and everything must be accessed through the @rq and @curr passed in
* parameters.
*/
static
void
task_tick_rt
(
struct
rq
*
rq
,
struct
task_struct
*
p
,
int
queued
)
{
struct
sched_rt_entity
*
rt_se
=
&
p
->
rt
;
...
...
@@ -2685,6 +2691,7 @@ int sched_rr_handler(struct ctl_table *table, int write,
msecs_to_jiffies
(
sysctl_sched_rr_timeslice
);
}
mutex_unlock
(
&
mutex
);
return
ret
;
}
...
...
kernel/sched/sched.h
浏览文件 @
e13e75b8
此差异已折叠。
点击以展开。
kernel/sched/stats.c
浏览文件 @
e13e75b8
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
/*
* /proc/schedstat implementation
*/
#include "sched.h"
/*
* bump this up when changing the output format or the meaning of an existing
* Current schedstat API version.
*
* Bump this up when changing the output format or the meaning of an existing
* format, so that tools can adapt (or abort)
*/
#define SCHEDSTAT_VERSION 15
...
...
@@ -78,8 +77,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
* This itererator needs some explanation.
* It returns 1 for the header position.
* This means 2 is cpu 0.
* In a hotplugged system some
cpu
s, including cpu 0, may be missing so we have
* to use cpumask_* to iterate over the
cpu
s.
* In a hotplugged system some
CPU
s, including cpu 0, may be missing so we have
* to use cpumask_* to iterate over the
CPU
s.
*/
static
void
*
schedstat_start
(
struct
seq_file
*
file
,
loff_t
*
offset
)
{
...
...
@@ -99,12 +98,14 @@ static void *schedstat_start(struct seq_file *file, loff_t *offset)
if
(
n
<
nr_cpu_ids
)
return
(
void
*
)(
unsigned
long
)(
n
+
2
);
return
NULL
;
}
static
void
*
schedstat_next
(
struct
seq_file
*
file
,
void
*
data
,
loff_t
*
offset
)
{
(
*
offset
)
++
;
return
schedstat_start
(
file
,
offset
);
}
...
...
@@ -134,6 +135,7 @@ static const struct file_operations proc_schedstat_operations = {
static
int
__init
proc_schedstat_init
(
void
)
{
proc_create
(
"schedstat"
,
0
,
NULL
,
&
proc_schedstat_operations
);
return
0
;
}
subsys_initcall
(
proc_schedstat_init
);
kernel/sched/stats.h
浏览文件 @
e13e75b8
...
...
@@ -30,35 +30,29 @@ rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
 	if (rq)
 		rq->rq_sched_info.run_delay += delta;
 }

 #define   schedstat_enabled()		static_branch_unlikely(&sched_schedstats)
 #define __schedstat_inc(var)		do { var++; } while (0)
 #define   schedstat_inc(var)		do { if (schedstat_enabled()) { var++; } } while (0)
 #define __schedstat_add(var, amt)	do { var += (amt); } while (0)
 #define   schedstat_add(var, amt)	do { if (schedstat_enabled()) { var += (amt); } } while (0)
 #define __schedstat_set(var, val)	do { var = (val); } while (0)
 #define   schedstat_set(var, val)	do { if (schedstat_enabled()) { var = (val); } } while (0)
 #define   schedstat_val(var)		(var)
 #define   schedstat_val_or_zero(var)	((schedstat_enabled()) ? (var) : 0)

-#else /* !CONFIG_SCHEDSTATS */
-static inline void
-rq_sched_info_arrive(struct rq *rq, unsigned long long delta)
-{}
-static inline void
-rq_sched_info_dequeued(struct rq *rq, unsigned long long delta)
-{}
-static inline void
-rq_sched_info_depart(struct rq *rq, unsigned long long delta)
-{}
-#define schedstat_enabled()		0
-#define __schedstat_inc(var)		do { } while (0)
-#define schedstat_inc(var)		do { } while (0)
-#define __schedstat_add(var, amt)	do { } while (0)
-#define schedstat_add(var, amt)		do { } while (0)
-#define __schedstat_set(var, val)	do { } while (0)
-#define schedstat_set(var, val)		do { } while (0)
-#define schedstat_val(var)		0
-#define schedstat_val_or_zero(var)	0
+#else /* !CONFIG_SCHEDSTATS: */
+static inline void rq_sched_info_arrive  (struct rq *rq, unsigned long long delta) { }
+static inline void rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) { }
+static inline void rq_sched_info_depart  (struct rq *rq, unsigned long long delta) { }
+# define   schedstat_enabled()		0
+# define __schedstat_inc(var)		do { } while (0)
+# define   schedstat_inc(var)		do { } while (0)
+# define __schedstat_add(var, amt)	do { } while (0)
+# define   schedstat_add(var, amt)	do { } while (0)
+# define __schedstat_set(var, val)	do { } while (0)
+# define   schedstat_set(var, val)	do { } while (0)
+# define   schedstat_val(var)		0
+# define   schedstat_val_or_zero(var)	0
 #endif /* CONFIG_SCHEDSTATS */

 #ifdef CONFIG_SCHED_INFO
...
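For context, a quick illustration of how the schedstat_*() wrappers above are meant to be used from scheduler code. This is a sketch only: the surrounding function is hypothetical, while rq->yld_count and the p->se.statistics fields are existing CONFIG_SCHEDSTATS members.

/* Sketch only: intended usage of the schedstat helpers inside kernel/sched. */
static void example_account_wait(struct rq *rq, struct task_struct *p, u64 delta)
{
        /* These compile away unless CONFIG_SCHEDSTATS=y and schedstats is on. */
        schedstat_inc(rq->yld_count);
        schedstat_add(p->se.statistics.wait_sum, delta);
        schedstat_set(p->se.statistics.wait_start, 0);

        /*
         * The __schedstat_*() variants skip the static-branch test; use them
         * only when schedstat_enabled() has already been checked.
         */
        if (schedstat_enabled())
                __schedstat_inc(p->se.statistics.wait_count);
}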
@@ -69,9 +63,9 @@ static inline void sched_info_reset_dequeued(struct task_struct *t)
 /*
  * We are interested in knowing how long it was from the *first* time a
- * task was queued to the time that it finally hit a cpu, we call this routine
- * from dequeue_task() to account for possible rq->clock skew across cpus. The
- * delta taken on each cpu would annul the skew.
+ * task was queued to the time that it finally hit a CPU, we call this routine
+ * from dequeue_task() to account for possible rq->clock skew across CPUs. The
+ * delta taken on each CPU would annul the skew.
  */
 static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
 {
...
@@ -87,7 +81,7 @@ static inline void sched_info_dequeued(struct rq *rq, struct task_struct *t)
 }

 /*
- * Called when a task finally hits the cpu. We can now calculate how
+ * Called when a task finally hits the CPU. We can now calculate how
  * long it was waiting to run. We also note when it began so that we
  * can keep stats on how long its timeslice is.
  */
...
@@ -112,9 +106,10 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t)
  */
 static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
 {
-	if (unlikely(sched_info_on()))
+	if (unlikely(sched_info_on())) {
 		if (!t->sched_info.last_queued)
 			t->sched_info.last_queued = rq_clock(rq);
+	}
 }

 /*
...
@@ -127,8 +122,7 @@ static inline void sched_info_queued(struct rq *rq, struct task_struct *t)
  */
 static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
 {
-	unsigned long long delta = rq_clock(rq) -
-				   t->sched_info.last_arrival;
+	unsigned long long delta = rq_clock(rq) - t->sched_info.last_arrival;

 	rq_sched_info_depart(rq, delta);
...
@@ -142,11 +136,10 @@ static inline void sched_info_depart(struct rq *rq, struct task_struct *t)
  * the idle task.) We are only called when prev != next.
  */
 static inline void
-__sched_info_switch(struct rq *rq,
-		    struct task_struct *prev, struct task_struct *next)
+__sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
 	/*
-	 * prev now departs the cpu. It's not interesting to record
+	 * prev now departs the CPU. It's not interesting to record
 	 * stats about how efficient we were at scheduling the idle
 	 * process, however.
 	 */
...
@@ -156,18 +149,19 @@ __sched_info_switch(struct rq *rq,
 	if (next != rq->idle)
 		sched_info_arrive(rq, next);
 }

 static inline void
-sched_info_switch(struct rq *rq,
-		  struct task_struct *prev, struct task_struct *next)
+sched_info_switch(struct rq *rq, struct task_struct *prev, struct task_struct *next)
 {
 	if (unlikely(sched_info_on()))
 		__sched_info_switch(rq, prev, next);
 }
-#else
-#define sched_info_queued(rq, t)		do { } while (0)
-#define sched_info_reset_dequeued(t)		do { } while (0)
-#define sched_info_dequeued(rq, t)		do { } while (0)
-#define sched_info_depart(rq, t)		do { } while (0)
-#define sched_info_arrive(rq, next)		do { } while (0)
-#define sched_info_switch(rq, t, next)		do { } while (0)
+#else /* !CONFIG_SCHED_INFO: */
+# define sched_info_queued(rq, t)	do { } while (0)
+# define sched_info_reset_dequeued(t)	do { } while (0)
+# define sched_info_dequeued(rq, t)	do { } while (0)
+# define sched_info_depart(rq, t)	do { } while (0)
+# define sched_info_arrive(rq, next)	do { } while (0)
+# define sched_info_switch(rq, t, next)	do { } while (0)
 #endif /* CONFIG_SCHED_INFO */
kernel/sched/stop_task.c
 // SPDX-License-Identifier: GPL-2.0
-#include "sched.h"
-
 /*
  * stop-task scheduling class.
  *
...
@@ -9,6 +7,7 @@
  *
  * See kernel/stop_machine.c
  */
+#include "sched.h"

 #ifdef CONFIG_SMP
 static int
...
@@ -75,6 +74,14 @@ static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 	cgroup_account_cputime(curr, delta_exec);
 }

+/*
+ * scheduler tick hitting a task of our scheduling class.
+ *
+ * NOTE: This function can be called remotely by the tick offload that
+ * goes along full dynticks. Therefore no local assumption can be made
+ * and everything must be accessed through the @rq and @curr passed in
+ * parameters.
+ */
 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
 {
 }
...
kernel/sched/swait.c
 // SPDX-License-Identifier: GPL-2.0
-#include <linux/sched/signal.h>
-#include <linux/swait.h>
+/*
+ * <linux/swait.h> (simple wait queues ) implementation:
+ */
+#include "sched.h"

 void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
 			     struct lock_class_key *key)
...
kernel/sched/topology.c
...
@@ -2,10 +2,6 @@
 /*
  * Scheduler topology setup/handling methods
  */
-#include <linux/sched.h>
-#include <linux/mutex.h>
-#include <linux/sched/isolation.h>
-
 #include "sched.h"

 DEFINE_MUTEX(sched_domains_mutex);
...
@@ -41,8 +37,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 	if (!(sd->flags & SD_LOAD_BALANCE)) {
 		printk("does not load-balance\n");
 		if (sd->parent)
-			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain"
-					" has parent");
+			printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain has parent");
 		return -1;
 	}
...
@@ -50,12 +45,10 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 	       cpumask_pr_args(sched_domain_span(sd)), sd->name);

 	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
-		printk(KERN_ERR "ERROR: domain->span does not contain "
-				"CPU%d\n", cpu);
+		printk(KERN_ERR "ERROR: domain->span does not contain CPU%d\n", cpu);
 	}
 	if (!cpumask_test_cpu(cpu, sched_group_span(group))) {
-		printk(KERN_ERR "ERROR: domain->groups does not contain"
-				" CPU%d\n", cpu);
+		printk(KERN_ERR "ERROR: domain->groups does not contain CPU%d\n", cpu);
 	}

 	printk(KERN_DEBUG "%*s groups:", level + 1, "");
...
@@ -115,8 +108,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 	if (sd->parent &&
 	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
-		printk(KERN_ERR "ERROR: parent span is not a superset "
-			"of domain->span\n");
+		printk(KERN_ERR "ERROR: parent span is not a superset of domain->span\n");
 	return 0;
 }
...
@@ -595,7 +587,7 @@ int group_balance_cpu(struct sched_group *sg)
  * are not.
  *
  * This leads to a few particularly weird cases where the sched_domain's are
- * not of the same number for each cpu. Consider:
+ * not of the same number for each CPU. Consider:
  *
  * NUMA-2	0-3		0-3
  *  groups:	{0-2},{1-3}	{1-3},{0-2}
...
@@ -780,7 +772,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
  *            ^ ^             ^ ^
  *            `-'             `-'
  *
- * The sched_domains are per-cpu and have a two way link (parent & child) and
+ * The sched_domains are per-CPU and have a two way link (parent & child) and
  * denote the ever growing mask of CPUs belonging to that level of topology.
 *
 * Each sched_domain has a circular (double) linked list of sched_group's, each
...
@@ -1021,6 +1013,7 @@ __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)
 	d->rd = alloc_rootdomain();
 	if (!d->rd)
 		return sa_sd;
+
 	return sa_rootdomain;
 }
...
@@ -1047,12 +1040,14 @@ static void claim_allocations(int cpu, struct sched_domain *sd)
 }

 #ifdef CONFIG_NUMA
-static int sched_domains_numa_levels;
 enum numa_topology_type sched_numa_topology_type;
-static int *sched_domains_numa_distance;
-int sched_max_numa_distance;
-static struct cpumask ***sched_domains_numa_masks;
-static int sched_domains_curr_level;
+static int			sched_domains_numa_levels;
+static int			sched_domains_curr_level;
+int				sched_max_numa_distance;
+static int			*sched_domains_numa_distance;
+static struct cpumask		***sched_domains_numa_masks;
 #endif

 /*
...
@@ -1074,11 +1069,11 @@ static int sched_domains_curr_level;
  * SD_ASYM_PACKING        - describes SMT quirks
  */
 #define TOPOLOGY_SD_FLAGS		\
 	(SD_SHARE_CPUCAPACITY	|	\
 	 SD_SHARE_PKG_RESOURCES |	\
 	 SD_NUMA		|	\
 	 SD_ASYM_PACKING	|	\
 	 SD_ASYM_CPUCAPACITY	|	\
 	 SD_SHARE_POWERDOMAIN)

 static struct sched_domain *
...
@@ -1628,7 +1623,7 @@ static struct sched_domain *build_sched_domain(struct sched_domain_topology_leve
 		pr_err("     the %s domain not a subset of the %s domain\n",
 				child->name, sd->name);
 #endif
-		/* Fixup, ensure @sd has at least @child cpus. */
+		/* Fixup, ensure @sd has at least @child CPUs. */
 		cpumask_or(sched_domain_span(sd),
 			   sched_domain_span(sd),
 			   sched_domain_span(child));
...
@@ -1720,6 +1715,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 	ret = 0;
 error:
 	__free_domain_allocs(&d, alloc_state, cpu_map);
+
 	return ret;
 }
...
@@ -1824,6 +1820,7 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
 		return 1;

 	tmp = SD_ATTR_INIT;
+
 	return !memcmp(cur ? (cur + idx_cur) : &tmp,
 			new ? (new + idx_new) : &tmp,
 			sizeof(struct sched_domain_attr));
...
@@ -1929,4 +1926,3 @@ void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[],

 	mutex_unlock(&sched_domains_mutex);
 }
-
kernel/sched/wait.c
...
@@ -3,14 +3,7 @@
  *
  * (C) 2004 Nadia Yvette Chambers, Oracle
  */
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/debug.h>
-#include <linux/mm.h>
-#include <linux/wait.h>
-#include <linux/hash.h>
-#include <linux/kthread.h>
+#include "sched.h"

 void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
 {
...
@@ -107,6 +100,7 @@ static int __wake_up_common(struct wait_queue_head *wq_head, unsigned int mode,
 			break;
 		}
 	}
+
 	return nr_exclusive;
 }
...
@@ -317,6 +311,7 @@ int do_wait_intr(wait_queue_head_t *wq, wait_queue_entry_t *wait)
 	spin_unlock(&wq->lock);
 	schedule();
 	spin_lock(&wq->lock);
+
 	return 0;
 }
 EXPORT_SYMBOL(do_wait_intr);
...
...
@@ -333,6 +328,7 @@ int do_wait_intr_irq(wait_queue_head_t *wq, wait_queue_entry_t *wait)
spin_unlock_irq
(
&
wq
->
lock
);
schedule
();
spin_lock_irq
(
&
wq
->
lock
);
return
0
;
}
EXPORT_SYMBOL
(
do_wait_intr_irq
);
...
...
@@ -378,6 +374,7 @@ int autoremove_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, i
if
(
ret
)
list_del_init
(
&
wq_entry
->
entry
);
return
ret
;
}
EXPORT_SYMBOL
(
autoremove_wake_function
);
...
...
kernel/sched/wait_bit.c
 /*
  * The implementation of the wait_bit*() and related waiting APIs:
  */
-#include <linux/wait_bit.h>
-#include <linux/sched/signal.h>
-#include <linux/sched/debug.h>
-#include <linux/hash.h>
+#include "sched.h"

 #define WAIT_TABLE_BITS 8
 #define WAIT_TABLE_SIZE (1 << WAIT_TABLE_BITS)
...
@@ -29,8 +26,8 @@ int wake_bit_function(struct wait_queue_entry *wq_entry, unsigned mode, int sync
 	    wait_bit->key.bit_nr != key->bit_nr ||
 	    test_bit(key->bit_nr, key->flags))
 		return 0;
-	else
-		return autoremove_wake_function(wq_entry, mode, sync, key);
+
+	return autoremove_wake_function(wq_entry, mode, sync, key);
 }
 EXPORT_SYMBOL(wake_bit_function);
...
@@ -50,7 +47,9 @@ __wait_on_bit(struct wait_queue_head *wq_head, struct wait_bit_queue_entry *wbq_
 		if (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags))
 			ret = (*action)(&wbq_entry->key, mode);
 	} while (test_bit(wbq_entry->key.bit_nr, wbq_entry->key.flags) && !ret);
+
 	finish_wait(wq_head, &wbq_entry->wq_entry);
+
 	return ret;
 }
 EXPORT_SYMBOL(__wait_on_bit);
...
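For reference, the caller-side pattern these helpers back. This is a sketch only: the struct, the flag bit and both functions are hypothetical, while wait_on_bit(), clear_bit_unlock() and wake_up_bit() are the existing APIs from <linux/wait_bit.h> and <linux/bitops.h>.

#include <linux/wait_bit.h>
#include <linux/bitops.h>
#include <linux/sched.h>

#define MYOBJ_BUSY	0

struct myobj {
        unsigned long flags;
};

static int myobj_wait_idle(struct myobj *obj)
{
        /* Sleeps (uninterruptibly) until MYOBJ_BUSY is clear. */
        return wait_on_bit(&obj->flags, MYOBJ_BUSY, TASK_UNINTERRUPTIBLE);
}

static void myobj_complete(struct myobj *obj)
{
        /* Clear the bit, then wake anyone sleeping in wait_on_bit(). */
        clear_bit_unlock(MYOBJ_BUSY, &obj->flags);
        smp_mb__after_atomic();
        wake_up_bit(&obj->flags, MYOBJ_BUSY);
}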
@@ -73,6 +72,7 @@ int __sched out_of_line_wait_on_bit_timeout(
 	DEFINE_WAIT_BIT(wq_entry, word, bit);

 	wq_entry.key.timeout = jiffies + timeout;
+
 	return __wait_on_bit(wq_head, &wq_entry, action, mode);
 }
 EXPORT_SYMBOL_GPL(out_of_line_wait_on_bit_timeout);
...
@@ -120,6 +120,7 @@ EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
 void __wake_up_bit(struct wait_queue_head *wq_head, void *word, int bit)
 {
 	struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
+
 	if (waitqueue_active(wq_head))
 		__wake_up(wq_head, TASK_NORMAL, 1, &key);
 }
...
@@ -148,6 +149,54 @@ void wake_up_bit(void *word, int bit)
 }
 EXPORT_SYMBOL(wake_up_bit);

+wait_queue_head_t *__var_waitqueue(void *p)
+{
+	if (BITS_PER_LONG == 64) {
+		unsigned long q = (unsigned long)p;
+
+		return bit_waitqueue((void *)(q & ~1), q & 1);
+	}
+	return bit_waitqueue(p, 0);
+}
+EXPORT_SYMBOL(__var_waitqueue);
+
+static int
+var_wake_function(struct wait_queue_entry *wq_entry, unsigned int mode,
+		  int sync, void *arg)
+{
+	struct wait_bit_key *key = arg;
+	struct wait_bit_queue_entry *wbq_entry =
+		container_of(wq_entry, struct wait_bit_queue_entry, wq_entry);
+
+	if (wbq_entry->key.flags != key->flags ||
+	    wbq_entry->key.bit_nr != key->bit_nr)
+		return 0;
+
+	return autoremove_wake_function(wq_entry, mode, sync, key);
+}
+
+void init_wait_var_entry(struct wait_bit_queue_entry *wbq_entry, void *var, int flags)
+{
+	*wbq_entry = (struct wait_bit_queue_entry){
+		.key = {
+			.flags = (var),
+			.bit_nr = -1,
+		},
+		.wq_entry = {
+			.private = current,
+			.func = var_wake_function,
+			.entry = LIST_HEAD_INIT(wbq_entry->wq_entry.entry),
+		},
+	};
+}
+EXPORT_SYMBOL(init_wait_var_entry);
+
+void wake_up_var(void *var)
+{
+	__wake_up_bit(__var_waitqueue(var), var, -1);
+}
+EXPORT_SYMBOL(wake_up_var);
+
 /*
  * Manipulate the atomic_t address to produce a better bit waitqueue table hash
  * index (we're keying off bit -1, but that would produce a horrible hash
...
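The helpers added above are the backing for the wait_var_event() family from <linux/wait_bit.h>, introduced in the same series. A usage sketch (the struct and both functions are hypothetical):

#include <linux/wait_bit.h>
#include <linux/atomic.h>

struct widget {
        atomic_t users;
};

static void widget_put(struct widget *w)
{
        if (atomic_dec_and_test(&w->users))
                wake_up_var(&w->users);         /* wake anyone in wait_var_event() */
}

static void widget_drain(struct widget *w)
{
        /* Sleep until every outstanding user has dropped its reference. */
        wait_var_event(&w->users, atomic_read(&w->users) == 0);
}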
@@ -157,6 +206,7 @@ static inline wait_queue_head_t *atomic_t_waitqueue(atomic_t *p)
 {
 	if (BITS_PER_LONG == 64) {
 		unsigned long q = (unsigned long)p;
+
 		return bit_waitqueue((void *)(q & ~1), q & 1);
 	}
 	return bit_waitqueue(p, 0);
...
@@ -173,6 +223,7 @@ static int wake_atomic_t_function(struct wait_queue_entry *wq_entry, unsigned mo
 	    wait_bit->key.bit_nr != key->bit_nr ||
 	    atomic_read(val) != 0)
 		return 0;
+
 	return autoremove_wake_function(wq_entry, mode, sync, key);
 }
...
@@ -196,6 +247,7 @@ int __wait_on_atomic_t(struct wait_queue_head *wq_head, struct wait_bit_queue_en
 			ret = (*action)(val, mode);
 	} while (!ret && atomic_read(val) != 0);
 	finish_wait(wq_head, &wbq_entry->wq_entry);
+
 	return ret;
 }
...
@@ -226,6 +278,7 @@ __sched int atomic_t_wait(atomic_t *counter, unsigned int mode)
 	schedule();
 	if (signal_pending_state(mode, current))
 		return -EINTR;
+
 	return 0;
 }
 EXPORT_SYMBOL(atomic_t_wait);
...
@@ -250,6 +303,7 @@ __sched int bit_wait(struct wait_bit_key *word, int mode)
 	schedule();
 	if (signal_pending_state(mode, current))
 		return -EINTR;
+
 	return 0;
 }
 EXPORT_SYMBOL(bit_wait);
...
@@ -259,6 +313,7 @@ __sched int bit_wait_io(struct wait_bit_key *word, int mode)
 	io_schedule();
 	if (signal_pending_state(mode, current))
 		return -EINTR;
+
 	return 0;
 }
 EXPORT_SYMBOL(bit_wait_io);
...
@@ -266,11 +321,13 @@ EXPORT_SYMBOL(bit_wait_io);
 __sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
 {
 	unsigned long now = READ_ONCE(jiffies);
+
 	if (time_after_eq(now, word->timeout))
 		return -EAGAIN;
 	schedule_timeout(word->timeout - now);
 	if (signal_pending_state(mode, current))
 		return -EINTR;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_timeout);
...
@@ -278,11 +335,13 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 __sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
 {
 	unsigned long now = READ_ONCE(jiffies);
+
 	if (time_after_eq(now, word->timeout))
 		return -EAGAIN;
 	io_schedule_timeout(word->timeout - now);
 	if (signal_pending_state(mode, current))
 		return -EINTR;
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_io_timeout);
...
kernel/time/tick-sched.c
...
@@ -481,11 +481,18 @@ static int __init setup_tick_nohz(char *str)
 __setup("nohz=", setup_tick_nohz);

-int tick_nohz_tick_stopped(void)
+bool tick_nohz_tick_stopped(void)
 {
 	return __this_cpu_read(tick_cpu_sched.tick_stopped);
 }

+bool tick_nohz_tick_stopped_cpu(int cpu)
+{
+	struct tick_sched *ts = per_cpu_ptr(&tick_cpu_sched, cpu);
+
+	return ts->tick_stopped;
+}
+
 /**
  * tick_nohz_update_jiffies - update jiffies when idle was interrupted
  *
...
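A sketch of what the new per-CPU query enables for callers of <linux/tick.h> (the helper below is hypothetical; it assumes CONFIG_NO_HZ_COMMON, under which tick_nohz_tick_stopped_cpu() is declared):

#include <linux/tick.h>

static bool cpu_ok_for_extra_work(int cpu)
{
        /* Tick stopped => the CPU is idle or running tickless; leave it alone. */
        return !tick_nohz_tick_stopped_cpu(cpu);
}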
@@ -741,12 +748,6 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		delta = KTIME_MAX;
 	}

-#ifdef CONFIG_NO_HZ_FULL
-	/* Limit the tick delta to the maximum scheduler deferment */
-	if (!ts->inidle)
-		delta = min(delta, scheduler_tick_max_deferment());
-#endif
-
 	/* Calculate the next expiry time */
 	if (delta < (KTIME_MAX - basemono))
 		expires = basemono + delta;
...
kernel/workqueue.c
...
@@ -5573,12 +5573,13 @@ static void __init wq_numa_init(void)
 int __init workqueue_init_early(void)
 {
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
+	int hk_flags = HK_FLAG_DOMAIN | HK_FLAG_WQ;
 	int i, cpu;

 	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));

 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
-	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN));
+	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(hk_flags));

 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
...
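The effect of the hunk above is that unbound workqueues default to CPUs that keep both the HK_FLAG_DOMAIN and HK_FLAG_WQ housekeeping duties, so isolated/nohz_full CPUs no longer run unbound work by default. A sketch of applying the same mask elsewhere (the function is hypothetical; housekeeping_cpumask() and the HK_FLAG_* values are the existing <linux/sched/isolation.h> API):

#include <linux/sched/isolation.h>
#include <linux/cpumask.h>

static void example_restrict_to_housekeeping(struct cpumask *mask)
{
        /* Keep only CPUs that are allowed to run this kind of background work. */
        cpumask_and(mask, cpu_possible_mask,
                    housekeeping_cpumask(HK_FLAG_DOMAIN | HK_FLAG_WQ));
}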