BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit c9a7cadf (unverified)
Authored by Nyakku Shigure on Nov 07, 2022; committed via GitHub on Nov 07, 2022
[CodeStyle][E262][E265] make comments start with `# ` (#47687)
* [CodeStyle][E262][E265] make comments start with `# `
* flake8 config
Parent: 6074c50a
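For reference, E262 and E265 are pycodestyle checks: E262 requires inline comments to start with `# `, and E265 requires the same of block comments. Removing both codes from the flake8 ignore list (see the `.flake8` hunk below) is what forces the comment rewrites in the remaining 17 files. A minimal before/after sketch of the kind of edit this commit applies, using illustrative code rather than lines from the repo:

# Before: flagged once E262/E265 are enforced
#block comment without a space after the hash  (E265)
x = 1  ##inline comment with doubled hashes    (E262)

# After: both comments start with '# '
# block comment with a space after the hash
x = 1  # inline comment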
Showing 18 changed files with 48 additions and 50 deletions
.flake8 (+1 −1)
python/paddle/distributed/fleet/launch_utils.py (+1 −1)
python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py (+1 −1)
python/paddle/distributed/fleet/meta_optimizers/parameter_server_optimizer.py (+1 −1)
python/paddle/distributed/passes/auto_parallel_grad_clip.py (+1 −1)
python/paddle/distributed/passes/ps_trainer_pass.py (+5 −5)
python/paddle/fluid/tests/unittests/dist_fleet_ctr.py (+1 −1)
python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py (+2 −2)
python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py (+12 −12)
python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py (+1 −2)
python/paddle/fluid/tests/unittests/test_get_inputs_outputs_in_block.py (+2 −2)
python/paddle/fluid/tests/unittests/test_multi_dot_op.py (+1 −1)
python/paddle/fluid/tests/unittests/test_slice_op.py (+6 −6)
python/paddle/fluid/tests/unittests/test_strided_slice_op.py (+5 −5)
python/paddle/fluid/tests/unittests/white_list/check_op_sequence_instance_0_input_white_list.py (+0 −1)
python/paddle/framework/__init__.py (+3 −3)
tools/parallel_UT_rule.py (+1 −1)
tools/sampcd_processor.py (+4 −4)
.flake8
@@ -16,7 +16,7 @@ exclude =
     ./python/paddle/fluid/tests/unittests/mlu/**
 ignore =
     # E, see https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
-    E203,E262,E265,E266,
+    E203,E266,
     E401,E402,
     E501,
     E721,E722,E731,E741,
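With E262 and E265 gone from the ignore list, flake8 now reports comments that lack a space after the hash, while E266 (block comments with extra leading `#` characters) stays suppressed. A small sketch of what the updated config accepts and rejects, assuming a hypothetical file checked against this `.flake8`:

## Section banner: E266 (too many leading '#'), still ignored by this config
#no space after the hash: E265, now reported
total = 0  #running sum: E262, now reported
total += 1  # spaced inline comment: clean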
python/paddle/distributed/fleet/launch_utils.py
@@ -1412,7 +1412,7 @@ class ParameterServerLauncher(object):
         assert (
             args.heter_devices != ""
         ), "The setting of Parameter-Server heter mode must has heter_devices."
-        self.stage_device_map[1] = "cpu"  #for cpu trainer
+        self.stage_device_map[1] = "cpu"  # for cpu trainer
         heter_devices_list = args.heter_devices.split(";")
         for i in range(len(heter_devices_list)):
             self.stage_device_map[i + 2] = heter_devices_list[i]
python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py
@@ -17,7 +17,7 @@ from functools import reduce
 __all__ = []
 
-registerd_op = {  ##forwards
+registerd_op = {  # forwards
     "elementwise_add": "AddParser",
     "matmul": "MatMulParser",
     "mul": "MulParser",
python/paddle/distributed/fleet/meta_optimizers/parameter_server_optimizer.py
@@ -394,7 +394,7 @@ class ParameterServerOptimizer(MetaOptimizerBase):
             loss.block.program._heter_pipeline_opt = {
                 "trainer": "HeterPipelineTrainer",
                 "device_worker": "HeterSection",
-                "trainers": self.role_maker._get_stage_trainers(),  ##trainer num in each stage
+                "trainers": self.role_maker._get_stage_trainers(),  # trainer num in each stage
                 "trainer_id": int(self.role_maker._role_id()),
                 "pipeline_stage": int(self.role_maker._get_stage_id()) - 1,
                 "num_pipeline_stages": int(
python/paddle/distributed/passes/auto_parallel_grad_clip.py
@@ -269,7 +269,7 @@ class ClipGradByGloblNormPass(PassBase):
             if op.type in removed_op_out_type:
                 input_name = op.input("X")[0]
                 if input_name.find("@GRAD") != -1:
-                    #'clip_by_norm', 'squared_l2_norm', 'square'
+                    # 'clip_by_norm', 'squared_l2_norm', 'square'
                     param_name = input_name[: input_name.find("@GRAD")]
                     is_local = self.clip_helper._is_local_param(param_name)
                     is_calculate = self.clip_helper._is_calcuate_norm(
python/paddle/distributed/passes/ps_trainer_pass.py
@@ -1222,8 +1222,8 @@ class SplitTrainerOpsPass(PassBase):
             # runtime attribute
             "endpoint": get_trainer_endpoint(
                 role_maker
-            ),  ##get trainer endpoint
-            "fanin": 0,  ##get heter worker
+            ),  # get trainer endpoint
+            "fanin": 0,  # get heter worker
             "pserver_id": get_role_id(role_maker),
             "distributed_mode": attrs['ps_mode'],
             "rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)),
@@ -1296,7 +1296,7 @@ class SetHeterPipelineOptPass(PassBase):
         main_program._heter_pipeline_opt = {
             "trainer": "HeterPipelineTrainer",
             "device_worker": "HeterSection",
-            "trainers": role_maker._get_stage_trainers(),  ##trainer num in each stage
+            "trainers": role_maker._get_stage_trainers(),  # trainer num in each stage
             "trainer_id": int(role_maker._role_id()),
             "pipeline_stage": int(role_maker._get_stage_id()) - 1,
             "num_pipeline_stages": int(role_maker._get_num_stage()),
@@ -1524,7 +1524,7 @@ class SplitFlOpsPass(PassBase):
         attrs = {
             "message_to_block_id": [grad_to_block_id],
             "optimize_blocks": [second_block],
-            "endpoint": get_trainer_endpoint(self.role_maker),  ##
+            "endpoint": get_trainer_endpoint(self.role_maker),
             "fanin": 0,
             "pserver_id": get_role_id(self.role_maker),
             "distributed_mode": self.ps_mode,
@@ -1584,7 +1584,7 @@ class SplitFlOpsPass(PassBase):
         grad_to_block_id = block_input_flag + ":" + str(second_block.idx)
         attrs = {
             "message_to_block_id": [grad_to_block_id],
-            "optimize_blocks": [second_block],  ##what to do?
+            "optimize_blocks": [second_block],  # what to do?
             "endpoint": get_heter_worker_endpoint(self.role_maker),
             "fanin": len(get_previous_stage_trainers(self.role_maker)),
             "pserver_id": 1,  # TODO
python/paddle/fluid/tests/unittests/dist_fleet_ctr.py
@@ -331,7 +331,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
         dataset.set_pipe_command('python ctr_dataset_reader.py')
         dataset.load_into_memory()
-        dataset.global_shuffle(fleet, 12)  ##TODO: thread configure
+        dataset.global_shuffle(fleet, 12)  # TODO: thread configure
         shuffle_data_size = dataset.get_shuffle_data_size(fleet)
         local_data_size = dataset.get_shuffle_data_size()
         data_size_list = fleet.util.all_gather(local_data_size)
python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py
@@ -172,7 +172,7 @@ def get_user_defined_strategy(config):
         micro_num = 1
         strategy.pipeline_configs = {
             "accumulate_steps": micro_num
-        }  ##num_microbatches
+        }  # num_microbatches
     elif sync_mode == "geo":
         strategy = paddle.distributed.fleet.DistributedStrategy()
         strategy.a_sync = True
@@ -372,7 +372,7 @@ class DnnTrainer(object):
             print("entering run_minimize -- old")
             fleet_obj = fleet.distributed_optimizer(
                 inner_optimizer, user_defined_strategy
-            )  ## Fleet 对象
+            )  # Fleet object
             fleet_obj.minimize(loss)
             if fleet.is_server():
python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py
@@ -36,8 +36,8 @@ def cross_entropy_loss_1d(
     C = input_shape[1]
     out = np.zeros_like(label).astype(np.float64)
     total_weight = 0
-    ###1. compute softmax cross_entropy (with weight)
-    ###Note: only support hard labels.
+    # 1. compute softmax cross_entropy (with weight)
+    # Note: only support hard labels.
     for i in range(N):
         cur_target = label[i]
         if cur_target == ignore_index:
@@ -47,7 +47,7 @@ def cross_entropy_loss_1d(
             total_weight += cur_weight
         out[i] = -log_softmax_out[i][cur_target] * cur_weight
-    ###2. deal with reduction
+    # 2. deal with reduction
     if reduction == 'sum':
         return np.sum(out), np.array([total_weight]).astype('float64')
     elif reduction == 'mean':
@@ -179,7 +179,7 @@ class CrossEntropyLoss(unittest.TestCase):
             'float32' if fluid.core.is_compiled_with_rocm() else 'float64'
         )

-    ###test for deprecated softmax_with_cross_entropy
+    # test for deprecated softmax_with_cross_entropy
     def test_softmax_with_cross_entropy(self):
         self.numeric_stable_mode = False
         self.soft_label = True
@@ -240,8 +240,8 @@ class CrossEntropyLoss(unittest.TestCase):
         )
         np.testing.assert_allclose(paddle_loss_ce.numpy(), expected, rtol=1e-05)

-    ###soft_label test start
-    ###soft_label test 1
+    # soft_label test start
+    # soft_label test 1
     def test_cross_entropy_loss_soft_1d(self):
         self.numeric_stable_mode = False
         self.soft_label = True
@@ -329,7 +329,7 @@ class CrossEntropyLoss(unittest.TestCase):
         np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
         np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)

-    ###soft_label test 2
+    # soft_label test 2
     def test_cross_entropy_loss_soft_1d_weight(self):
         self.numeric_stable_mode = False
         self.soft_label = True
@@ -427,7 +427,7 @@ class CrossEntropyLoss(unittest.TestCase):
         np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
         np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)

-    ###soft_label test 3
+    # soft_label test 3
     def test_cross_entropy_loss_soft_1d_mean(self):
         self.numeric_stable_mode = False
         self.soft_label = True
@@ -511,7 +511,7 @@ class CrossEntropyLoss(unittest.TestCase):
         np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
         np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)

-    ###soft_label test 4
+    # soft_label test 4
     def test_cross_entropy_loss_soft_1d_weight_mean(self):
         self.numeric_stable_mode = False
         self.soft_label = True
@@ -599,7 +599,7 @@ class CrossEntropyLoss(unittest.TestCase):
         np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
         np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)

-    ###soft_label test 5
+    # soft_label test 5
     def test_cross_entropy_loss_soft_2d(self):
         def inner_cross_entropy_loss_soft_2d(soft_label):
             self.numeric_stable_mode = False
@@ -704,7 +704,7 @@ class CrossEntropyLoss(unittest.TestCase):
         inner_cross_entropy_loss_soft_2d(True)
         inner_cross_entropy_loss_soft_2d(False)

-    ###soft_label test 6
+    # soft_label test 6
     def test_cross_entropy_loss_soft_2d_weight_mean(self):
         self.numeric_stable_mode = False
         self.soft_label = True
@@ -801,7 +801,7 @@ class CrossEntropyLoss(unittest.TestCase):
         np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
         np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)

-    ###soft_label test end
+    # soft_label test end
     def test_cross_entropy_loss_1d_with_mean_ignore(self):
         input_np = np.random.random([2, 4]).astype(self.dtype)
python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py
@@ -253,7 +253,7 @@ class APITestStaticFusedFFN(unittest.TestCase):
             pre_layer_norm=False,
         )

-        ######base ffn######
+        # base ffn
         linear1_out = F.linear(x, linear1_weight, linear1_bias)
         act_out = F.relu(linear1_out)
         dropout1_out = F.dropout(x=act_out, p=0.0, training=False)
@@ -265,7 +265,6 @@ class APITestStaticFusedFFN(unittest.TestCase):
             weight=ln2_scale,
             bias=ln2_bias,
         )
-        ######base ffn######
         exe = paddle.static.Executor(paddle.CUDAPlace(0))
python/paddle/fluid/tests/unittests/test_get_inputs_outputs_in_block.py
@@ -68,9 +68,9 @@ class TestGetInputsOutputsInBlock(unittest.TestCase):
         inner_inputs, inner_outputs = utils.get_inputs_outputs_in_block(
             sub_block
         )
-        #'fill_constant_1.tmp_0', 'tmp_3' are names of a, c
+        # 'fill_constant_1.tmp_0', 'tmp_3' are names of a, c
         self.assertTrue(inner_inputs == {'fill_constant_1.tmp_0', 'tmp_3'})
-        #'_generated_var_1', is name of a + c
+        # '_generated_var_1', is name of a + c
         self.assertTrue(inner_outputs == {'_generated_var_1'})
python/paddle/fluid/tests/unittests/test_multi_dot_op.py
@@ -209,7 +209,7 @@ class TestMultiDotOp4MatFirstAndLast1D(TestMultiDotOp4Mat):
         self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])}


-#####python API test#######
+# python API test
 class TestMultiDotOpError(unittest.TestCase):
     def test_errors(self):
         with paddle.static.program_guard(
python/paddle/fluid/tests/unittests/test_slice_op.py
@@ -298,7 +298,7 @@ class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
         self.outputs = {'Out': self.out}
         self.attrs = {
             'axes': self.axes,
-            #'starts': self.starts,
+            # 'starts': self.starts,
             'ends': self.ends,
             'infer_flags': self.infer_flags,
             'decrease_axis': self.decrease_axis,
@@ -335,8 +335,8 @@ class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
         self.outputs = {'Out': self.out}
         self.attrs = {
             'axes': self.axes,
-            #'starts': self.starts,
-            #'ends': self.ends_infer,
+            # 'starts': self.starts,
+            # 'ends': self.ends_infer,
             'infer_flags': self.infer_flags,
         }
@@ -369,8 +369,8 @@ class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
         self.outputs = {'Out': self.out}
         self.attrs = {
             'axes': self.axes,
-            #'starts': self.starts,
-            #'ends': self.ends,
+            # 'starts': self.starts,
+            # 'ends': self.ends,
             'infer_flags': self.infer_flags,
             'decrease_axis': self.decrease_axis,
         }
@@ -412,7 +412,7 @@ class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
         self.outputs = {'Out': self.out}
         self.attrs = {
             'axes': self.axes,
-            #'starts': self.starts,
+            # 'starts': self.starts,
             'ends': self.ends_infer,
             'infer_flags': self.infer_flags,
         }
python/paddle/fluid/tests/unittests/test_strided_slice_op.py
@@ -406,7 +406,7 @@ class TestStridedSliceOp_starts_Tensor(OpTest):
         self.outputs = {'Out': self.output}
         self.attrs = {
             'axes': self.axes,
-            #'starts': self.starts,
+            # 'starts': self.starts,
             'ends': self.ends,
             'strides': self.strides,
             'infer_flags': self.infer_flags,
@@ -442,7 +442,7 @@ class TestStridedSliceOp_ends_Tensor(OpTest):
         self.attrs = {
             'axes': self.axes,
             'starts': self.starts,
-            #'ends': self.ends,
+            # 'ends': self.ends,
             'strides': self.strides,
             'infer_flags': self.infer_flags,
         }
@@ -483,8 +483,8 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest):
         self.outputs = {'Out': self.output}
         self.attrs = {
             'axes': self.axes,
-            #'starts': self.starts,
-            #'ends': self.ends,
+            # 'starts': self.starts,
+            # 'ends': self.ends,
             'strides': self.strides,
             'infer_flags': self.infer_flags,
         }
@@ -520,7 +520,7 @@ class TestStridedSliceOp_strides_Tensor(OpTest):
             'axes': self.axes,
             'starts': self.starts,
             'ends': self.ends,
-            #'strides': self.strides,
+            # 'strides': self.strides,
             'infer_flags': self.infer_flags,
         }
python/paddle/fluid/tests/unittests/white_list/check_op_sequence_instance_0_input_white_list.py
@@ -18,7 +18,6 @@
 # compiletime&runtime will be skipped. Ops in this whitelist need to declear
 # reasons for skipping compile_vs_runtime test or be fixed later.
-#!/usr/bin/env python
 import sys

 # For ops in this whitelist, the check of instance size is 0 input will be skipped.
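The deletion above works because pycodestyle exempts a `#!` shebang from E265 only on the first line of a file; this duplicated interpreter line sat mid-file, so enforcing E265 flags it, and the fix is simply to drop it. A hypothetical illustration:

#!/usr/bin/env python
# Allowed: the '#!' exemption applies to line 1 only.
import sys

#!/usr/bin/env python
# The duplicate shebang above is past line 1, so it now triggers E265.
print(sys.version)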
python/paddle/framework/__init__.py
@@ -47,9 +47,9 @@ from ..fluid.framework import set_flags  # noqa: F401
 from ..fluid.dygraph.base import enable_dygraph as disable_static  # noqa: F401
 from ..fluid.dygraph.base import disable_dygraph as enable_static  # noqa: F401
 from ..fluid.framework import _non_static_mode as in_dynamic_mode  # noqa: F401
-from ..fluid.framework import (
-    _non_static_mode,
-)  # noqa: F401; temporary used for hackson
+from ..fluid.framework import (  # noqa: F401
+    _non_static_mode,  # temporary used for hackson
+)
 from ..fluid.framework import (
     _current_expected_place,
     _get_paddle_place,
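A side effect of the cleanup above: the `# noqa: F401` directive now sits alone on the opening line of the parenthesized import, where flake8 attributes the unused-import warning, and the free-text note ("temporary used for hackson") moves next to the name it annotates instead of being glued onto the directive. A minimal sketch of the same pattern with a hypothetical stdlib import:

# Suppress the unused-import warning for the whole statement:
from os import (  # noqa: F401
    getcwd,  # kept for re-export; intentionally unused here
)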
tools/parallel_UT_rule.py
@@ -228,7 +228,7 @@ HIGH_PARALLEL_JOB_NEW = [
     'test_launch_coverage',
     'test_mkldnn_conv_activation_fuse_pass',
     'test_inference_model_io',
-    'test_fusion_repeated_fc_relu_op',  #'heter_listen_and_server_test',
+    'test_fusion_repeated_fc_relu_op',  # 'heter_listen_and_server_test',
     'cudnn_desc_test',
     'test_beam_search_op',
     'test_var_conv_2d',
tools/sampcd_processor.py
@@ -547,7 +547,7 @@ def get_full_api():
     """
     get all the apis
     """
-    global API_DIFF_SPEC_FN  ##readonly
+    global API_DIFF_SPEC_FN  # readonly
     from print_signatures import get_all_api_from_modulelist

     member_dict = get_all_api_from_modulelist()
@@ -559,7 +559,7 @@ def get_full_api_by_walk():
     """
     get all the apis
     """
-    global API_DIFF_SPEC_FN  ##readonly
+    global API_DIFF_SPEC_FN  # readonly
     from print_signatures import get_all_api

     apilist = get_all_api()
@@ -571,7 +571,7 @@ def get_full_api_from_pr_spec():
     """
     get all the apis
     """
-    global API_PR_SPEC_FN, API_DIFF_SPEC_FN  ##readonly
+    global API_PR_SPEC_FN, API_DIFF_SPEC_FN  # readonly
     pr_api = get_api_md5(API_PR_SPEC_FN)
     if len(pr_api):
         with open(API_DIFF_SPEC_FN, 'w') as f:
@@ -584,7 +584,7 @@ def get_incrementapi():
     '''
     this function will get the apis that difference between API_DEV.spec and API_PR.spec.
     '''
-    global API_DEV_SPEC_FN, API_PR_SPEC_FN, API_DIFF_SPEC_FN  ##readonly
+    global API_DEV_SPEC_FN, API_PR_SPEC_FN, API_DIFF_SPEC_FN  # readonly
     dev_api = get_api_md5(API_DEV_SPEC_FN)
     pr_api = get_api_md5(API_PR_SPEC_FN)
     with open(API_DIFF_SPEC_FN, 'w') as f: