PaddlePaddle / Paddle, commit ca7394cd (unverified)
Authored on Mar 23, 2023 by Infinity_lee; committed via GitHub on Mar 23, 2023.
[CodeStyle][C403] Unnecessary list comprehension (rewrite as a set comprehension) (#51968)
Parent: cf391b81
Showing 19 changed files with 39 additions and 53 deletions (+39 / -53).
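All 19 files get the same mechanical rewrite. C403 is a flake8-comprehensions rule: wrapping a list comprehension in set() first materializes a throwaway list, while a set comprehension builds the set directly. A minimal before/after sketch of the pattern (data and names are illustrative, not taken from the diff):

    # Illustrative data, not from the commit.
    endpoints = ["10.0.0.1:6170", "10.0.0.2:6171"]

    # Before, flagged by C403: the list comprehension materializes a full
    # list that set() then copies once and discards.
    hosts = set([ep.split(':')[0] for ep in endpoints])

    # After: the set comprehension inserts elements as it builds; same
    # result, no intermediate list.
    hosts = {ep.split(':')[0] for ep in endpoints}
    assert hosts == {'10.0.0.1', '10.0.0.2'}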
python/paddle/distributed/fleet/base/role_maker.py  (+4 -12)
python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py  (+1 -1)
python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py  (+5 -7)
python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py  (+5 -7)
python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py  (+1 -1)
python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py  (+1 -1)
python/paddle/distributed/launch/plugins/__init__.py  (+2 -2)
python/paddle/distributed/parallel.py  (+1 -1)
python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py  (+5 -7)
python/paddle/distributed/passes/auto_parallel_sharding.py  (+1 -1)
python/paddle/distributed/transpiler/collective.py  (+1 -1)
python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py  (+1 -1)
python/paddle/fluid/tests/unittests/test_backward.py  (+3 -3)
python/paddle/fluid/tests/unittests/test_target_assign_op.py  (+1 -1)
python/paddle/incubate/autograd/composite_rules.py  (+1 -1)
python/paddle/optimizer/optimizer.py  (+3 -3)
python/paddle/static/io.py  (+1 -1)
python/paddle/utils/cpp_extension/extension_utils.py  (+1 -1)
tools/diff_use_default_grad_op_maker.py  (+1 -1)
python/paddle/distributed/fleet/base/role_maker.py

@@ -1060,9 +1060,7 @@ class PaddleCloudRoleMaker(RoleMakerBase):
         self._trainers_num = trainers_num
         self._role = role
         self._current_id = current_id
-        self._nodes_num = len(
-            set([x.split(':')[0] for x in self._worker_endpoints])
-        )
+        self._nodes_num = len({x.split(':')[0] for x in self._worker_endpoints})

     def _collective_env(self):
         self._current_id = int(os.getenv("PADDLE_TRAINER_ID", "0"))
@@ -1078,9 +1076,7 @@ class PaddleCloudRoleMaker(RoleMakerBase):
             self._non_distributed = True
         self._worker_endpoints = self._worker_endpoints.split(",")
         self._trainers_num = len(self._worker_endpoints)
-        self._nodes_num = len(
-            set([x.split(':')[0] for x in self._worker_endpoints])
-        )
+        self._nodes_num = len({x.split(':')[0] for x in self._worker_endpoints})
         self._local_rank = os.getenv("PADDLE_RANK_IN_NODE")
         self._local_device_ids = os.getenv("PADDLE_LOCAL_DEVICE_IDS")
         self._world_device_ids = os.getenv("PADDLE_WORLD_DEVICE_IDS")
@@ -1206,18 +1202,14 @@ class UserDefinedRoleMaker(PaddleCloudRoleMaker):
             self._cur_endpoint = self._worker_endpoints[self._current_id]
         elif self._role == Role.SERVER:
             self._cur_endpoint = self._server_endpoints[self._current_id]
-        self._nodes_num = len(
-            set([x.split(':')[0] for x in self._worker_endpoints])
-        )
+        self._nodes_num = len({x.split(':')[0] for x in self._worker_endpoints})

     def _user_defined_collective_env(self):
         self._worker_endpoints = self._kwargs.get("worker_endpoints")
         self._current_id = self._kwargs.get("current_id")
         self._trainers_num = len(self._worker_endpoints)
         self._training_role = Role.WORKER
-        self._nodes_num = len(
-            set([x.split(':')[0] for x in self._worker_endpoints])
-        )
+        self._nodes_num = len({x.split(':')[0] for x in self._worker_endpoints})

     def _generate_role(self):
         """
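The expression this file repeats three times counts distinct machines: each entry of _worker_endpoints is a "host:port" string, so splitting on ':' and collecting the first field into a set leaves one element per host. A small illustration with invented endpoints:

    worker_endpoints = ["10.0.0.1:6170", "10.0.0.1:6171", "10.0.0.2:6170"]
    # Three workers, but only two distinct hosts.
    nodes_num = len({x.split(':')[0] for x in worker_endpoints})
    assert nodes_num == 2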
python/paddle/distributed/fleet/meta_optimizers/dygraph_optimizer/dygraph_sharding_optimizer.py

@@ -176,7 +176,7 @@ class DygraphShardingOptimizer:
         # NOTE in dygraph mode, the only different between step and minimize is that minimize
         # allow user to customize the parameters for updating on each step
-        input_param_names = set([param.name for param in parameters])
+        input_param_names = {param.name for param in parameters}
         parameters = list(
             filter(
                 lambda x: x.name in input_param_names,
python/paddle/distributed/fleet/meta_optimizers/sharding/fp16_helper.py

@@ -142,13 +142,11 @@ class FP16Utils:
         # the grad checking should take the all and only param in the current shard
         to_check_param = set(reversed_x_paramname)
         should_check_param = set(shard.global_params).intersection(
-            set(
-                [
-                    param
-                    for param, worker_idx in shard.global_param2device.items()
-                    if worker_idx == shard.worker_idx
-                ]
-            )
+            {
+                param
+                for param, worker_idx in shard.global_param2device.items()
+                if worker_idx == shard.worker_idx
+            }
         )
         assert (
             to_check_param == should_check_param
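Since set.intersection() accepts any iterable, the argument did not strictly need to become a set; the commit keeps the operand types unchanged, which is the safe choice for a pure style pass. For comparison, a sketch of the same check spelled with the & operator (an alternative, not what the commit does; the data below is invented):

    # Invented stand-ins for shard.global_params / shard.global_param2device.
    global_params = {'w0', 'w1', 'w2'}
    global_param2device = {'w0': 0, 'w1': 1, 'w2': 0}
    worker_idx = 0

    should_check_param = global_params & {
        param for param, idx in global_param2device.items() if idx == worker_idx
    }
    assert should_check_param == {'w0', 'w2'}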
python/paddle/distributed/fleet/meta_optimizers/sharding/gradient_clip_helper.py

@@ -134,13 +134,11 @@ class GradientClipHelper:
         # the grad sum here should take the all and only param in the current shard
         to_check_param = set(reversed_x_paramname)
         should_check_param = set(shard.global_params).intersection(
-            set(
-                [
-                    param
-                    for param, worker_idx in shard.global_param2device.items()
-                    if worker_idx == shard.worker_idx
-                ]
-            )
+            {
+                param
+                for param, worker_idx in shard.global_param2device.items()
+                if worker_idx == shard.worker_idx
+            }
        )
        assert (
            to_check_param == should_check_param
python/paddle/distributed/fleet/meta_optimizers/sharding/shard.py

@@ -35,7 +35,7 @@ class Shard:
     def setup(self, params_grads, worker_idx, worker_num):
         # param names of all devices
-        self.global_params = set([x[0].name for x in params_grads])
+        self.global_params = {x[0].name for x in params_grads}
         # _param(str) -> device_id(int)
         self.worker_idx = worker_idx
         self.worker_num = worker_num
python/paddle/distributed/fleet/meta_optimizers/sharding_optimizer.py

@@ -907,7 +907,7 @@ class ShardingOptimizer(MetaOptimizerBase):
     def _build_shard(self, params_grads, shard_rank, shard_size):
         # step 2: split params
-        self._params = set([x[0].name for x in params_grads])
+        self._params = {x[0].name for x in params_grads}
         self._shard.setup(params_grads, shard_rank, shard_size)

         # step 3: get broadcast vars
python/paddle/distributed/launch/plugins/__init__.py

@@ -45,7 +45,7 @@ def process_args(ctx):
 def collective_compatible(ctx):
     if 'PADDLE_TRAINER_ENDPOINTS' in ctx.envs:
         eps = ctx.envs['PADDLE_TRAINER_ENDPOINTS'].split(',')
-        hosts = set([h.split(':')[0] for h in eps])
+        hosts = {h.split(':')[0] for h in eps}
         ctx.args.master = eps[0] if ':' in eps[0] else '{}:6768'.format(eps[0])
         ctx.args.nnodes = len(hosts)
         ctx.logger.info(
@@ -54,7 +54,7 @@ def collective_compatible(ctx):
     if 'DISTRIBUTED_TRAINER_ENDPOINTS' in ctx.envs:
         eps = ctx.envs['DISTRIBUTED_TRAINER_ENDPOINTS'].split(',')
-        hosts = set([h.split(':')[0] for h in eps])
+        hosts = {h.split(':')[0] for h in eps}
         ctx.args.master = eps[0]
         ctx.args.nnodes = len(hosts)
         ctx.logger.info(
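A note on the unchanged context line in the first hunk: when the first endpoint carries no port, the plugin falls back to port 6768 to form the master address. A toy trace with invented values:

    eps = ['10.0.0.1', '10.0.0.2:6768']
    master = eps[0] if ':' in eps[0] else '{}:6768'.format(eps[0])
    assert master == '10.0.0.1:6768'
    nnodes = len({h.split(':')[0] for h in eps})  # 2 distinct hosts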
python/paddle/distributed/parallel.py

@@ -1116,7 +1116,7 @@ def init_parallel_env():
             paddle.distributed.barrier(group=group)
             return group

-    node_num = set([i.split(":")[0] for i in parallel_env.trainer_endpoints])
+    node_num = {i.split(":")[0] for i in parallel_env.trainer_endpoints}
     # 3: init gloo context (step 1: httpsever start)
     init_gloo = int(os.getenv("PADDLE_WITH_GLOO", "0"))
     if is_cpu_only or init_gloo or backend == "heter":
python/paddle/distributed/passes/auto_parallel_data_parallel_optimization.py

@@ -208,12 +208,10 @@ class DataParallelOptimizationPass(PassBase):
     def _all_dp_groups_same_degree(self):
         return (
             len(
-                set(
-                    [
-                        len(group.ranks)
-                        for group in self._group_to_grad_name_map.keys()
-                    ]
-                )
+                {
+                    len(group.ranks)
+                    for group in self._group_to_grad_name_map.keys()
+                }
             )
             == 1
         )
@@ -430,7 +428,7 @@ class DataParallelOptimizationPass(PassBase):
         def op_depend_on_group(op, group):
             vars_ = set(op.input_arg_names + op.output_arg_names)
-            grad_names = set([grad.name for grad in group.gradients])
+            grad_names = {grad.name for grad in group.gradients}
             return len(vars_.intersection(grad_names)) > 0

         for i, op in enumerate(ops):
python/paddle/distributed/passes/auto_parallel_sharding.py

@@ -969,7 +969,7 @@ class ShardingPass(PassBase):
         def op_depend_on_group(op, group):
             vars_ = set(op.input_arg_names + op.output_arg_names)
-            var_names = set([var.name for var in group.vars])
+            var_names = {var.name for var in group.vars}
             return len(vars_.intersection(var_names)) > 0

         # analyze groups
python/paddle/distributed/transpiler/collective.py

@@ -510,7 +510,7 @@ class SingleProcessMultiThread(GradAllReduce):
     def _transpile_startup_program(self):
         nodes_num = 0
         if len(self.endpoints) > 1:
-            nodes_num = len(set([x.split(':')[0] for x in self.endpoints]))
+            nodes_num = len({x.split(':')[0] for x in self.endpoints})
         # diffent ip num is multi node
         if nodes_num > 1:
             self.nranks = nodes_num
python/paddle/fluid/tests/unittests/dygraph_to_static/test_save_inference_model.py

@@ -98,7 +98,7 @@ class TestDyToStaticSaveInferenceModel(unittest.TestCase):
         self, model, inputs, gt_out, feed=None, fetch=None
     ):
-        expected_persistable_vars = set([p.name for p in model.parameters()])
+        expected_persistable_vars = {p.name for p in model.parameters()}
         infer_model_prefix = os.path.join(
             self.temp_dir.name, "test_dy2stat_inference/model"
python/paddle/fluid/tests/unittests/test_backward.py

@@ -103,9 +103,9 @@ class TestBackward(unittest.TestCase):
         params_grads = fluid.backward.append_backward(
             loss, parameter_list, no_grad_set
         )
-        params_names = set(
-            [param_var.name for (param_var, grad_var) in params_grads]
-        )
+        params_names = {
+            param_var.name for (param_var, grad_var) in params_grads
+        }
         self.assertSetEqual(params_names, self.net.params_names)

         return params_grads
python/paddle/fluid/tests/unittests/test_target_assign_op.py

@@ -34,7 +34,7 @@ def gen_match_and_neg_indices(num_prior, gt_lod, neg_lod):
         ids = random.sample([i for i in range(num_prior)], gt_num)
         match_indices[n, ids] = [i for i in range(gt_num)]
-        ret_ids = set([i for i in range(num_prior)]) - set(ids)
+        ret_ids = {i for i in range(num_prior)} - set(ids)
         l = neg_lod[n]
         neg_ids = random.sample(ret_ids, l)
         neg_indices[offset : offset + neg_lod[n], :] = (
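Two observations on the surrounding lines, beyond the scope of this commit: {i for i in range(num_prior)} could itself be written set(range(num_prior)) (flake8-comprehensions rule C416), and random.sample() receiving a set is deprecated since Python 3.9 and raises TypeError from 3.11 on. A hypothetical forward-compatible rewrite, with made-up sizes:

    import random

    num_prior, gt_num, l = 10, 3, 2
    ids = random.sample(range(num_prior), gt_num)
    # set(range(...)) avoids the comprehension entirely (rule C416); sorting
    # turns the set back into the sequence that random.sample() requires.
    ret_ids = set(range(num_prior)) - set(ids)
    neg_ids = random.sample(sorted(ret_ids), l)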
python/paddle/incubate/autograd/composite_rules.py

@@ -462,7 +462,7 @@ def squeeze2_composite(x, axis):
     if len(axis) == 0:
         dims = set(range(rank))
     else:
-        dims = set([ax % rank for ax in axis])
+        dims = {ax % rank for ax in axis}
     new_shape = []
     for d, s in enumerate(x.shape):
         if not (s == 1 and (d in dims)):
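The ax % rank step normalizes negative axis indices into the usual 0-based range before the membership test in the loop below it. For example:

    rank, axis = 4, [-1, 1]
    # -1 % 4 == 3, so a negative axis lands on its positive counterpart.
    dims = {ax % rank for ax in axis}
    assert dims == {3, 1}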
python/paddle/optimizer/optimizer.py

@@ -1321,9 +1321,9 @@ class Optimizer:
     def _get_no_grad_set(self, loss, no_grad_set=None):
         no_grad_set = _get_no_grad_set_name(no_grad_set)
         parameters = loss.block.program.global_block().all_parameters()
-        param_no_trainable = set(
-            [param.name for param in parameters if param.stop_gradient is True]
-        )
+        param_no_trainable = {
+            param.name for param in parameters if param.stop_gradient is True
+        }
         # If the parameter is no trainable, it should not have a gradient.
         no_grad_set.update(param_no_trainable)
python/paddle/static/io.py

@@ -1498,7 +1498,7 @@ def load(program, model_path, executor=None, var_list=None):
                 "var_list is required when loading model file saved with [ save_params, save_persistables, save_vars ]"
             )
         program_var_list = program.list_vars()
-        program_var_name_set = set([var.name for var in program_var_list])
+        program_var_name_set = {var.name for var in program_var_list}

         # check all the variable inlcuded in program
         for var in var_list:
python/paddle/utils/cpp_extension/extension_utils.py

@@ -1277,7 +1277,7 @@ def parse_op_name_from(sources):
        pattern = re.compile(r'PD_BUILD_OP\(([^,\)]+)\)')

        content = re.sub(r'\s|\t|\n', '', content)
        op_name = pattern.findall(content)
-       op_name = set([re.sub('_grad', '', name) for name in op_name])
+       op_name = {re.sub('_grad', '', name) for name in op_name}

        return op_name
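Combined with the PD_BUILD_OP pattern above it, the rewritten line also deduplicates: stripping _grad makes an operator and its gradient collapse into one set entry. A toy run over fabricated source text:

    import re

    content = "PD_BUILD_OP(custom_relu)PD_BUILD_OP(custom_relu_grad)"
    pattern = re.compile(r'PD_BUILD_OP\(([^,\)]+)\)')
    op_name = pattern.findall(content)  # ['custom_relu', 'custom_relu_grad']
    op_name = {re.sub('_grad', '', name) for name in op_name}
    assert op_name == {'custom_relu'}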
tools/diff_use_default_grad_op_maker.py

@@ -30,7 +30,7 @@ def generate_spec(filename):
 def read_spec(filename):
     with open(filename, 'r') as f:
-        return set([line.strip() for line in f.readlines()])
+        return {line.strip() for line in f.readlines()}


 def get_spec_diff(dev_filename, pr_filename):
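A last aside on the context here: f.readlines() loads every line into a list before the comprehension runs, while iterating the file object directly streams the lines with the same result. A hypothetical equivalent:

    def read_spec(filename):
        with open(filename, 'r') as f:
            # The file object yields lines lazily; readlines() is unnecessary
            # as input to a comprehension.
            return {line.strip() for line in f}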