openeuler/Kernel · Commit 058fc47e
Authored Sep 05, 2017 by Tejun Heo
Merge branch 'for-4.13-fixes' into for-4.14
Parents: c5a94a61, 1ad0f0a7
Showing 6 changed files with 36 additions and 11 deletions (+36, -11):

    drivers/md/bitmap.c          +2   -1
    drivers/md/md.h              +3   -1
    drivers/md/raid5-ppl.c       +1   -1
    drivers/md/raid5.c           +1   -3
    include/linux/workqueue.h    +3   -1
    kernel/workqueue.c           +26  -4
drivers/md/bitmap.c

@@ -156,7 +156,8 @@ static int read_sb_page(struct mddev *mddev, loff_t offset,
 
 	rdev_for_each(rdev, mddev) {
 		if (! test_bit(In_sync, &rdev->flags)
-		    || test_bit(Faulty, &rdev->flags))
+		    || test_bit(Faulty, &rdev->flags)
+		    || test_bit(Bitmap_sync, &rdev->flags))
 			continue;
 
 		target = offset + index * (PAGE_SIZE/512);
drivers/md/md.h

@@ -134,7 +134,9 @@ enum flag_bits {
 	Faulty,			/* device is known to have a fault */
 	In_sync,		/* device is in_sync with rest of array */
 	Bitmap_sync,		/* ..actually, not quite In_sync.  Need a
-				 * bitmap-based recovery to get fully in sync
+				 * bitmap-based recovery to get fully in sync.
+				 * The bit is only meaningful before device
+				 * has been passed to pers->hot_add_disk.
 				 */
 	WriteMostly,		/* Avoid reading if at all possible */
 	AutoDetected,		/* added by auto-detect */
drivers/md/raid5-ppl.c

@@ -1150,7 +1150,7 @@ int ppl_init_log(struct r5conf *conf)
 		goto err;
 	}
 
-	ppl_conf->bs = bioset_create(conf->raid_disks, 0, 0);
+	ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS);
 	if (!ppl_conf->bs) {
 		ret = -ENOMEM;
 		goto err;
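The raid5-ppl hunk tracks the 4.13-era bioset API, where bioset_create() takes a flags argument and biosets whose bios carry their own bio_vecs must pass BIOSET_NEED_BVECS so the bvec mempool is set up. A minimal sketch of that pattern, assuming a hypothetical demo_bs (the pool size and usage below are illustrative, not from this commit):

	#include <linux/bio.h>

	/* Illustrative only: a bioset whose bios need inline bio_vecs,
	 * as ppl_conf->bs now requests via BIOSET_NEED_BVECS. */
	static struct bio_set *demo_bs;

	static int demo_init(void)
	{
		demo_bs = bioset_create(64 /* pool size */, 0 /* front_pad */,
					BIOSET_NEED_BVECS);
		if (!demo_bs)
			return -ENOMEM;
		return 0;
	}

	static struct bio *demo_alloc_bio(void)
	{
		/* Allocate a one-segment bio from the pool; without
		 * BIOSET_NEED_BVECS at creation time, no bio_vecs could
		 * be attached to bios from this set. */
		return bio_alloc_bioset(GFP_NOIO, 1, demo_bs);
	}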
drivers/md/raid5.c

@@ -7951,12 +7951,10 @@ static void end_reshape(struct r5conf *conf)
 {
 
 	if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
-		struct md_rdev *rdev;
 
 		spin_lock_irq(&conf->device_lock);
 		conf->previous_raid_disks = conf->raid_disks;
-		rdev_for_each(rdev, conf->mddev)
-			rdev->data_offset = rdev->new_data_offset;
+		md_finish_reshape(conf->mddev);
 		smp_wmb();
 		conf->reshape_progress = MaxSector;
 		conf->mddev->reshape_position = MaxSector;
include/linux/workqueue.h

@@ -323,6 +323,7 @@ enum {
 
 	__WQ_DRAINING		= 1 << 16, /* internal: workqueue is draining */
 	__WQ_ORDERED		= 1 << 17, /* internal: workqueue is ordered */
+	__WQ_ORDERED_EXPLICIT	= 1 << 19, /* internal: alloc_ordered_workqueue() */
 	__WQ_LEGACY		= 1 << 18, /* internal: create*_workqueue() */
 
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */

@@ -422,7 +423,8 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
  * Pointer to the allocated workqueue on success, %NULL on failure.
  */
 #define alloc_ordered_workqueue(fmt, flags, args...)			\
-	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
+	alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED |		\
+			__WQ_ORDERED_EXPLICIT | (flags), 1, ##args)
 
 #define create_workqueue(name)						\
 	alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
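Taken together, these two hunks make alloc_ordered_workqueue() the only path that sets __WQ_ORDERED_EXPLICIT, distinguishing workqueues whose ordering is part of the caller's contract from unbound queues that are merely ordered by default. A minimal sketch of the two allocation paths (the demo_* names are hypothetical, not from this commit):

	#include <linux/workqueue.h>

	static struct workqueue_struct *demo_explicit_wq;
	static struct workqueue_struct *demo_implicit_wq;

	static int demo_wq_init(void)
	{
		/* Expands to alloc_workqueue(..., WQ_UNBOUND | __WQ_ORDERED |
		 * __WQ_ORDERED_EXPLICIT, 1, ...): ordering is a hard
		 * guarantee the kernel must never relax. */
		demo_explicit_wq = alloc_ordered_workqueue("demo_explicit", 0);

		/* Unbound with max_active == 1: still treated as ordered for
		 * backward compatibility (see the __alloc_workqueue_key()
		 * hunk below), but only implicitly, so the __WQ_ORDERED flag
		 * may be dropped later. */
		demo_implicit_wq = alloc_workqueue("demo_implicit", WQ_UNBOUND, 1);

		if (!demo_explicit_wq || !demo_implicit_wq)
			return -ENOMEM;
		return 0;
	}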
kernel/workqueue.c

@@ -3577,6 +3577,13 @@ static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
 	/* yeap, return possible CPUs in @node that @attrs wants */
 	cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
 
+	if (cpumask_empty(cpumask)) {
+		pr_warn_once("WARNING: workqueue cpumask: online intersect > "
+				"possible intersect\n");
+		return false;
+	}
+
 	return !cpumask_equal(cpumask, attrs->cpumask);
 
 use_dfl:

@@ -3744,8 +3751,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
 		return -EINVAL;
 
 	/* creating multiple pwqs breaks ordering guarantee */
-	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
-		return -EINVAL;
+	if (!list_empty(&wq->pwqs)) {
+		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+			return -EINVAL;
+
+		wq->flags &= ~__WQ_ORDERED;
+	}
 
 	ctx = apply_wqattrs_prepare(wq, attrs);
 	if (!ctx)

@@ -3929,6 +3940,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	struct workqueue_struct *wq;
 	struct pool_workqueue *pwq;
 
+	/*
+	 * Unbound && max_active == 1 used to imply ordered, which is no
+	 * longer the case on NUMA machines due to per-node pools.  While
+	 * alloc_ordered_workqueue() is the right way to create an ordered
+	 * workqueue, keep the previous behavior to avoid subtle breakages
+	 * on NUMA.
+	 */
+	if ((flags & WQ_UNBOUND) && max_active == 1)
+		flags |= __WQ_ORDERED;
+
 	/* see the comment above the definition of WQ_POWER_EFFICIENT */
 	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
 		flags |= WQ_UNBOUND;

@@ -4119,13 +4140,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 	struct pool_workqueue *pwq;
 
 	/* disallow meddling with max_active for ordered workqueues */
-	if (WARN_ON(wq->flags & __WQ_ORDERED))
+	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
 		return;
 
 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
 	mutex_lock(&wq->mutex);
 
+	wq->flags &= ~__WQ_ORDERED;
 	wq->saved_max_active = max_active;
 
 	for_each_pwq(pwq, wq)

@@ -5253,7 +5275,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
 	 * attributes breaks ordering guarantee.  Disallow exposing ordered
 	 * workqueues.
 	 */
-	if (WARN_ON(wq->flags & __WQ_ORDERED))
+	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
 		return -EINVAL;
 
 	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
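The net effect of the workqueue.c hunks: operations that would break ordering (applying new attributes, changing max_active, exposing the queue via sysfs) now refuse only explicitly ordered workqueues, and quietly drop the implicit __WQ_ORDERED otherwise. A sketch of the resulting behavior, reusing the hypothetical demo_* workqueues from the earlier sketch:

	/* Implicitly ordered queue: this call is now permitted. It clears
	 * __WQ_ORDERED, and the queue becomes a plain unbound workqueue
	 * with max_active == 16. */
	workqueue_set_max_active(demo_implicit_wq, 16);

	/* Explicitly ordered queue: WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT)
	 * fires and the call returns without touching max_active, preserving
	 * the ordering guarantee. */
	workqueue_set_max_active(demo_explicit_wq, 16);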