Unverified commit c9a7cadf, authored by Nyakku Shigure and committed by GitHub

[CodeStyle][E262][E265] make comments start with `# ` (#47687)

* [CodeStyle][E262][E265] make comments start with `# `

* flake8 config
Parent 6074c50a
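
For reference, pycodestyle's E262 means "inline comment should start with '# '" and E265 means "block comment should start with '# '". A minimal sketch of the pattern this commit applies, using illustrative names rather than lines from the diff:

total = 0  ## accumulate partial sums    <- flagged as E262 (doubled hash, inline)
#initialize the accumulator              <- flagged as E265 (no space after hash, block)

total = 0  # accumulate partial sums     <- compliant
# initialize the accumulator             <- compliant

With E262 and E265 dropped from the flake8 ignore list (first hunk below), the two non-compliant forms above would fail CI.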
@@ -16,7 +16,7 @@ exclude =
./python/paddle/fluid/tests/unittests/mlu/**
ignore =
# E, see https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
-E203,E262,E265,E266,
+E203,E266,
E401,E402,
E501,
E721,E722,E731,E741,
......
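Re-enabling the two codes means flake8 now reports them repository-wide. A hedged sketch of a local reproduction (it assumes flake8 is installed; "python/" is an illustrative target directory, not taken from this diff):

# Hypothetical local check; a non-zero exit status means violations were found.
import subprocess

subprocess.run(["flake8", "--select=E262,E265", "python/"], check=False)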
@@ -1412,7 +1412,7 @@ class ParameterServerLauncher(object):
assert (
args.heter_devices != ""
), "The setting of Parameter-Server heter mode must has heter_devices."
-self.stage_device_map[1] = "cpu" ## for cpu trainer
+self.stage_device_map[1] = "cpu" # for cpu trainer
heter_devices_list = args.heter_devices.split(";")
for i in range(len(heter_devices_list)):
self.stage_device_map[i + 2] = heter_devices_list[i]
......
@@ -17,7 +17,7 @@ from functools import reduce
__all__ = []
-registerd_op = { ## forwards
+registerd_op = { # forwards
"elementwise_add": "AddParser",
"matmul": "MatMulParser",
"mul": "MulParser",
......
@@ -394,7 +394,7 @@ class ParameterServerOptimizer(MetaOptimizerBase):
loss.block.program._heter_pipeline_opt = {
"trainer": "HeterPipelineTrainer",
"device_worker": "HeterSection",
"trainers": self.role_maker._get_stage_trainers(), ## trainer num in each stage
"trainers": self.role_maker._get_stage_trainers(), # trainer num in each stage
"trainer_id": int(self.role_maker._role_id()),
"pipeline_stage": int(self.role_maker._get_stage_id()) - 1,
"num_pipeline_stages": int(
......
@@ -269,7 +269,7 @@ class ClipGradByGloblNormPass(PassBase):
if op.type in removed_op_out_type:
input_name = op.input("X")[0]
if input_name.find("@GRAD") != -1:
-#'clip_by_norm', 'squared_l2_norm', 'square'
+# 'clip_by_norm', 'squared_l2_norm', 'square'
param_name = input_name[: input_name.find("@GRAD")]
is_local = self.clip_helper._is_local_param(param_name)
is_calculate = self.clip_helper._is_calcuate_norm(
......
@@ -1222,8 +1222,8 @@ class SplitTrainerOpsPass(PassBase):
# runtime attribute
"endpoint": get_trainer_endpoint(
role_maker
-), ## get trainer endpoint
-"fanin": 0, ## get heter worker
+), # get trainer endpoint
+"fanin": 0, # get heter worker
"pserver_id": get_role_id(role_maker),
"distributed_mode": attrs['ps_mode'],
"rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)),
@@ -1296,7 +1296,7 @@ class SetHeterPipelineOptPass(PassBase):
main_program._heter_pipeline_opt = {
"trainer": "HeterPipelineTrainer",
"device_worker": "HeterSection",
"trainers": role_maker._get_stage_trainers(), ## trainer num in each stage
"trainers": role_maker._get_stage_trainers(), # trainer num in each stage
"trainer_id": int(role_maker._role_id()),
"pipeline_stage": int(role_maker._get_stage_id()) - 1,
"num_pipeline_stages": int(role_maker._get_num_stage()),
@@ -1524,7 +1524,7 @@ class SplitFlOpsPass(PassBase):
attrs = {
"message_to_block_id": [grad_to_block_id],
"optimize_blocks": [second_block],
"endpoint": get_trainer_endpoint(self.role_maker), ##
"endpoint": get_trainer_endpoint(self.role_maker),
"fanin": 0,
"pserver_id": get_role_id(self.role_maker),
"distributed_mode": self.ps_mode,
@@ -1584,7 +1584,7 @@ class SplitFlOpsPass(PassBase):
grad_to_block_id = block_input_flag + ":" + str(second_block.idx)
attrs = {
"message_to_block_id": [grad_to_block_id],
"optimize_blocks": [second_block], ## what to do?
"optimize_blocks": [second_block], # what to do?
"endpoint": get_heter_worker_endpoint(self.role_maker),
"fanin": len(get_previous_stage_trainers(self.role_maker)),
"pserver_id": 1, # TODO
......
@@ -331,7 +331,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
dataset.set_pipe_command('python ctr_dataset_reader.py')
dataset.load_into_memory()
-dataset.global_shuffle(fleet, 12) ##TODO: thread configure
+dataset.global_shuffle(fleet, 12) # TODO: thread configure
shuffle_data_size = dataset.get_shuffle_data_size(fleet)
local_data_size = dataset.get_shuffle_data_size()
data_size_list = fleet.util.all_gather(local_data_size)
......
@@ -172,7 +172,7 @@ def get_user_defined_strategy(config):
micro_num = 1
strategy.pipeline_configs = {
"accumulate_steps": micro_num
-} ## num_microbatches
+} # num_microbatches
elif sync_mode == "geo":
strategy = paddle.distributed.fleet.DistributedStrategy()
strategy.a_sync = True
@@ -372,7 +372,7 @@ class DnnTrainer(object):
print("entering run_minimize -- old")
fleet_obj = fleet.distributed_optimizer(
inner_optimizer, user_defined_strategy
-) ## Fleet 对象
+) # Fleet object
fleet_obj.minimize(loss)
if fleet.is_server():
......
@@ -36,8 +36,8 @@ def cross_entropy_loss_1d(
C = input_shape[1]
out = np.zeros_like(label).astype(np.float64)
total_weight = 0
-###1. compute softmax cross_entropy (with weight)
-### Note: only support hard labels.
+# 1. compute softmax cross_entropy (with weight)
+# Note: only support hard labels.
for i in range(N):
cur_target = label[i]
if cur_target == ignore_index:
@@ -47,7 +47,7 @@ def cross_entropy_loss_1d(
total_weight += cur_weight
out[i] = -log_softmax_out[i][cur_target] * cur_weight
-###2. deal with reduction
+# 2. deal with reduction
if reduction == 'sum':
return np.sum(out), np.array([total_weight]).astype('float64')
elif reduction == 'mean':
@@ -179,7 +179,7 @@ class CrossEntropyLoss(unittest.TestCase):
'float32' if fluid.core.is_compiled_with_rocm() else 'float64'
)
-###test for deprecated softmax_with_cross_entropy
+# test for deprecated softmax_with_cross_entropy
def test_softmax_with_cross_entropy(self):
self.numeric_stable_mode = False
self.soft_label = True
@@ -240,8 +240,8 @@ class CrossEntropyLoss(unittest.TestCase):
)
np.testing.assert_allclose(paddle_loss_ce.numpy(), expected, rtol=1e-05)
-###soft_label test start
-###soft_label test 1
+# soft_label test start
+# soft_label test 1
def test_cross_entropy_loss_soft_1d(self):
self.numeric_stable_mode = False
self.soft_label = True
@@ -329,7 +329,7 @@ class CrossEntropyLoss(unittest.TestCase):
np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
-###soft_label test 2
+# soft_label test 2
def test_cross_entropy_loss_soft_1d_weight(self):
self.numeric_stable_mode = False
self.soft_label = True
@@ -427,7 +427,7 @@ class CrossEntropyLoss(unittest.TestCase):
np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
-###soft_label test 3
+# soft_label test 3
def test_cross_entropy_loss_soft_1d_mean(self):
self.numeric_stable_mode = False
self.soft_label = True
@@ -511,7 +511,7 @@ class CrossEntropyLoss(unittest.TestCase):
np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
-###soft_label test 4
+# soft_label test 4
def test_cross_entropy_loss_soft_1d_weight_mean(self):
self.numeric_stable_mode = False
self.soft_label = True
@@ -599,7 +599,7 @@ class CrossEntropyLoss(unittest.TestCase):
np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
-###soft_label test 5
+# soft_label test 5
def test_cross_entropy_loss_soft_2d(self):
def inner_cross_entropy_loss_soft_2d(soft_label):
self.numeric_stable_mode = False
@@ -704,7 +704,7 @@ class CrossEntropyLoss(unittest.TestCase):
inner_cross_entropy_loss_soft_2d(True)
inner_cross_entropy_loss_soft_2d(False)
-###soft_label test 6
+# soft_label test 6
def test_cross_entropy_loss_soft_2d_weight_mean(self):
self.numeric_stable_mode = False
self.soft_label = True
@@ -801,7 +801,7 @@ class CrossEntropyLoss(unittest.TestCase):
np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
-###soft_label test end
+# soft_label test end
def test_cross_entropy_loss_1d_with_mean_ignore(self):
input_np = np.random.random([2, 4]).astype(self.dtype)
......
@@ -253,7 +253,7 @@ class APITestStaticFusedFFN(unittest.TestCase):
pre_layer_norm=False,
)
-######base ffn######
+# base ffn
linear1_out = F.linear(x, linear1_weight, linear1_bias)
act_out = F.relu(linear1_out)
dropout1_out = F.dropout(x=act_out, p=0.0, training=False)
@@ -265,7 +265,6 @@ class APITestStaticFusedFFN(unittest.TestCase):
weight=ln2_scale,
bias=ln2_bias,
)
-######base ffn######
exe = paddle.static.Executor(paddle.CUDAPlace(0))
......
@@ -68,9 +68,9 @@ class TestGetInputsOutputsInBlock(unittest.TestCase):
inner_inputs, inner_outputs = utils.get_inputs_outputs_in_block(
sub_block
)
-#'fill_constant_1.tmp_0', 'tmp_3' are names of a, c
+# 'fill_constant_1.tmp_0', 'tmp_3' are names of a, c
self.assertTrue(inner_inputs == {'fill_constant_1.tmp_0', 'tmp_3'})
-#'_generated_var_1', is name of a + c
+# '_generated_var_1', is name of a + c
self.assertTrue(inner_outputs == {'_generated_var_1'})
......
@@ -209,7 +209,7 @@ class TestMultiDotOp4MatFirstAndLast1D(TestMultiDotOp4Mat):
self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])}
-#####python API test#######
+# python API test
class TestMultiDotOpError(unittest.TestCase):
def test_errors(self):
with paddle.static.program_guard(
......
@@ -298,7 +298,7 @@ class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
self.outputs = {'Out': self.out}
self.attrs = {
'axes': self.axes,
-#'starts': self.starts,
+# 'starts': self.starts,
'ends': self.ends,
'infer_flags': self.infer_flags,
'decrease_axis': self.decrease_axis,
@@ -335,8 +335,8 @@ class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
self.outputs = {'Out': self.out}
self.attrs = {
'axes': self.axes,
-#'starts': self.starts,
-#'ends': self.ends_infer,
+# 'starts': self.starts,
+# 'ends': self.ends_infer,
'infer_flags': self.infer_flags,
}
@@ -369,8 +369,8 @@ class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
self.outputs = {'Out': self.out}
self.attrs = {
'axes': self.axes,
-#'starts': self.starts,
-#'ends': self.ends,
+# 'starts': self.starts,
+# 'ends': self.ends,
'infer_flags': self.infer_flags,
'decrease_axis': self.decrease_axis,
}
@@ -412,7 +412,7 @@ class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
self.outputs = {'Out': self.out}
self.attrs = {
'axes': self.axes,
-#'starts': self.starts,
+# 'starts': self.starts,
'ends': self.ends_infer,
'infer_flags': self.infer_flags,
}
......
@@ -406,7 +406,7 @@ class TestStridedSliceOp_starts_Tensor(OpTest):
self.outputs = {'Out': self.output}
self.attrs = {
'axes': self.axes,
-#'starts': self.starts,
+# 'starts': self.starts,
'ends': self.ends,
'strides': self.strides,
'infer_flags': self.infer_flags,
@@ -442,7 +442,7 @@ class TestStridedSliceOp_ends_Tensor(OpTest):
self.attrs = {
'axes': self.axes,
'starts': self.starts,
-#'ends': self.ends,
+# 'ends': self.ends,
'strides': self.strides,
'infer_flags': self.infer_flags,
}
@@ -483,8 +483,8 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest):
self.outputs = {'Out': self.output}
self.attrs = {
'axes': self.axes,
-#'starts': self.starts,
-#'ends': self.ends,
+# 'starts': self.starts,
+# 'ends': self.ends,
'strides': self.strides,
'infer_flags': self.infer_flags,
}
@@ -520,7 +520,7 @@ class TestStridedSliceOp_strides_Tensor(OpTest):
'axes': self.axes,
'starts': self.starts,
'ends': self.ends,
-#'strides': self.strides,
+# 'strides': self.strides,
'infer_flags': self.infer_flags,
}
......
@@ -18,7 +18,6 @@
# compiletime&runtime will be skipped. Ops in this whitelist need to declear
# reasons for skipping compile_vs_runtime test or be fixed later.
-#!/usr/bin/env python
import sys
# For ops in this whitelist, the check of instance size is 0 input will be skipped.
......
@@ -47,9 +47,9 @@ from ..fluid.framework import set_flags # noqa: F401
from ..fluid.dygraph.base import enable_dygraph as disable_static # noqa: F401
from ..fluid.dygraph.base import disable_dygraph as enable_static # noqa: F401
from ..fluid.framework import _non_static_mode as in_dynamic_mode # noqa: F401
-from ..fluid.framework import (
-_non_static_mode,
-) # noqa: F401; temporary used for hackson
+from ..fluid.framework import ( # noqa: F401
+_non_static_mode, # temporary used for hackson
+)
from ..fluid.framework import (
_current_expected_place,
_get_paddle_place,
......
@@ -228,7 +228,7 @@ HIGH_PARALLEL_JOB_NEW = [
'test_launch_coverage',
'test_mkldnn_conv_activation_fuse_pass',
'test_inference_model_io',
-'test_fusion_repeated_fc_relu_op', #'heter_listen_and_server_test',
+'test_fusion_repeated_fc_relu_op',
'cudnn_desc_test',
'test_beam_search_op',
'test_var_conv_2d',
......
@@ -547,7 +547,7 @@ def get_full_api():
"""
get all the apis
"""
-global API_DIFF_SPEC_FN ## readonly
+global API_DIFF_SPEC_FN # readonly
from print_signatures import get_all_api_from_modulelist
member_dict = get_all_api_from_modulelist()
@@ -559,7 +559,7 @@ def get_full_api_by_walk():
"""
get all the apis
"""
-global API_DIFF_SPEC_FN ## readonly
+global API_DIFF_SPEC_FN # readonly
from print_signatures import get_all_api
apilist = get_all_api()
@@ -571,7 +571,7 @@ def get_full_api_from_pr_spec():
"""
get all the apis
"""
-global API_PR_SPEC_FN, API_DIFF_SPEC_FN ## readonly
+global API_PR_SPEC_FN, API_DIFF_SPEC_FN # readonly
pr_api = get_api_md5(API_PR_SPEC_FN)
if len(pr_api):
with open(API_DIFF_SPEC_FN, 'w') as f:
@@ -584,7 +584,7 @@ def get_incrementapi():
'''
this function will get the apis that difference between API_DEV.spec and API_PR.spec.
'''
-global API_DEV_SPEC_FN, API_PR_SPEC_FN, API_DIFF_SPEC_FN ## readonly
+global API_DEV_SPEC_FN, API_PR_SPEC_FN, API_DIFF_SPEC_FN # readonly
dev_api = get_api_md5(API_DEV_SPEC_FN)
pr_api = get_api_md5(API_PR_SPEC_FN)
with open(API_DIFF_SPEC_FN, 'w') as f:
......