From c9a7cadfa5a4890d16442a858b952cc2458cb417 Mon Sep 17 00:00:00 2001
From: Nyakku Shigure
Date: Mon, 7 Nov 2022 10:52:21 +0800
Subject: [PATCH] [CodeStyle][E262][E265] make comments start with `# `
 (#47687)

* [CodeStyle][E262][E265] make comments start with `# `

* flake8 config
---
 .flake8                                       |  2 +-
 .../paddle/distributed/fleet/launch_utils.py  |  2 +-
 .../meta_optimizers/ascend/ascend_parser.py   |  2 +-
 .../parameter_server_optimizer.py             |  2 +-
 .../passes/auto_parallel_grad_clip.py         |  2 +-
 .../distributed/passes/ps_trainer_pass.py     | 10 ++++----
 .../fluid/tests/unittests/dist_fleet_ctr.py   |  2 +-
 .../tests/unittests/ps/ps_dnn_trainer.py      |  4 ++--
 .../unittests/test_cross_entropy_loss.py      | 24 +++++++++----------
 .../unittests/test_fused_feedforward_op.py    |  3 +--
 .../test_get_inputs_outputs_in_block.py       |  4 ++--
 .../tests/unittests/test_multi_dot_op.py      |  2 +-
 .../fluid/tests/unittests/test_slice_op.py    | 12 +++++-----
 .../tests/unittests/test_strided_slice_op.py  | 10 ++++----
 ...op_sequence_instance_0_input_white_list.py |  1 -
 python/paddle/framework/__init__.py           |  6 ++---
 tools/parallel_UT_rule.py                     |  2 +-
 tools/sampcd_processor.py                     |  8 +++----
 18 files changed, 48 insertions(+), 50 deletions(-)

diff --git a/.flake8 b/.flake8
index 7b8120753d..e85f0bcc67 100644
--- a/.flake8
+++ b/.flake8
@@ -16,7 +16,7 @@ exclude =
     ./python/paddle/fluid/tests/unittests/mlu/**
 ignore =
     # E, see https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
-    E203,E262,E265,E266,
+    E203,E266,
     E401,E402,
     E501,
     E721,E722,E731,E741,
diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py
index e471535c27..64795140cd 100755
--- a/python/paddle/distributed/fleet/launch_utils.py
+++ b/python/paddle/distributed/fleet/launch_utils.py
@@ -1412,7 +1412,7 @@ class ParameterServerLauncher(object):
         assert (
             args.heter_devices != ""
         ), "The setting of Parameter-Server heter mode must has heter_devices."
-        self.stage_device_map[1] = "cpu" # for cpu trainer
+        self.stage_device_map[1] = "cpu"  # for cpu trainer
         heter_devices_list = args.heter_devices.split(";")
         for i in range(len(heter_devices_list)):
             self.stage_device_map[i + 2] = heter_devices_list[i]
diff --git a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py
index 21a60824fa..b24e51896f 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py
@@ -17,7 +17,7 @@ from functools import reduce
 
 __all__ = []
 
-registerd_op = {  ## forwards
+registerd_op = {  # forwards
     "elementwise_add": "AddParser",
     "matmul": "MatMulParser",
     "mul": "MulParser",
diff --git a/python/paddle/distributed/fleet/meta_optimizers/parameter_server_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/parameter_server_optimizer.py
index 841db6da46..2ea83ada81 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/parameter_server_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/parameter_server_optimizer.py
@@ -394,7 +394,7 @@ class ParameterServerOptimizer(MetaOptimizerBase):
         loss.block.program._heter_pipeline_opt = {
             "trainer": "HeterPipelineTrainer",
             "device_worker": "HeterSection",
-            "trainers": self.role_maker._get_stage_trainers(),  ## trainer num in each stage
+            "trainers": self.role_maker._get_stage_trainers(),  # trainer num in each stage
             "trainer_id": int(self.role_maker._role_id()),
             "pipeline_stage": int(self.role_maker._get_stage_id()) - 1,
             "num_pipeline_stages": int(
diff --git a/python/paddle/distributed/passes/auto_parallel_grad_clip.py b/python/paddle/distributed/passes/auto_parallel_grad_clip.py
index b70296c0cc..d570bf9c3f 100644
--- a/python/paddle/distributed/passes/auto_parallel_grad_clip.py
+++ b/python/paddle/distributed/passes/auto_parallel_grad_clip.py
@@ -269,7 +269,7 @@ class ClipGradByGloblNormPass(PassBase):
             if op.type in removed_op_out_type:
                 input_name = op.input("X")[0]
                 if input_name.find("@GRAD") != -1:
-                    #'clip_by_norm', 'squared_l2_norm', 'square'
+                    # 'clip_by_norm', 'squared_l2_norm', 'square'
                     param_name = input_name[: input_name.find("@GRAD")]
                     is_local = self.clip_helper._is_local_param(param_name)
                     is_calculate = self.clip_helper._is_calcuate_norm(
diff --git a/python/paddle/distributed/passes/ps_trainer_pass.py b/python/paddle/distributed/passes/ps_trainer_pass.py
index 098eba6b7a..c31981c4c8 100755
--- a/python/paddle/distributed/passes/ps_trainer_pass.py
+++ b/python/paddle/distributed/passes/ps_trainer_pass.py
@@ -1222,8 +1222,8 @@ class SplitTrainerOpsPass(PassBase):
             # runtime attribute
             "endpoint": get_trainer_endpoint(
                 role_maker
-            ),  ## get trainer endpoint
-            "fanin": 0,  ## get heter worker
+            ),  # get trainer endpoint
+            "fanin": 0,  # get heter worker
             "pserver_id": get_role_id(role_maker),
             "distributed_mode": attrs['ps_mode'],
             "rpc_exec_thread_num": int(os.getenv("CPU_NUM", 32)),
@@ -1296,7 +1296,7 @@ class SetHeterPipelineOptPass(PassBase):
         main_program._heter_pipeline_opt = {
             "trainer": "HeterPipelineTrainer",
             "device_worker": "HeterSection",
-            "trainers": role_maker._get_stage_trainers(),  ## trainer num in each stage
+            "trainers": role_maker._get_stage_trainers(),  # trainer num in each stage
             "trainer_id": int(role_maker._role_id()),
             "pipeline_stage": int(role_maker._get_stage_id()) - 1,
             "num_pipeline_stages": int(role_maker._get_num_stage()),
@@ -1524,7 +1524,7 @@
         attrs = {
             "message_to_block_id": [grad_to_block_id],
             "optimize_blocks": [second_block],
-            "endpoint": get_trainer_endpoint(self.role_maker),  ##
+            "endpoint": get_trainer_endpoint(self.role_maker),
             "fanin": 0,
             "pserver_id": get_role_id(self.role_maker),
             "distributed_mode": self.ps_mode,
@@ -1584,7 +1584,7 @@ class SplitFlOpsPass(PassBase):
         grad_to_block_id = block_input_flag + ":" + str(second_block.idx)
         attrs = {
             "message_to_block_id": [grad_to_block_id],
-            "optimize_blocks": [second_block],  ## what to do?
+            "optimize_blocks": [second_block],  # what to do?
             "endpoint": get_heter_worker_endpoint(self.role_maker),
             "fanin": len(get_previous_stage_trainers(self.role_maker)),
             "pserver_id": 1,  # TODO
diff --git a/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py b/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py
index dd01938556..2146616eee 100644
--- a/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py
+++ b/python/paddle/fluid/tests/unittests/dist_fleet_ctr.py
@@ -331,7 +331,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
         dataset.set_pipe_command('python ctr_dataset_reader.py')
         dataset.load_into_memory()
 
-        dataset.global_shuffle(fleet, 12)  ##TODO: thread configure
+        dataset.global_shuffle(fleet, 12)  # TODO: thread configure
         shuffle_data_size = dataset.get_shuffle_data_size(fleet)
         local_data_size = dataset.get_shuffle_data_size()
         data_size_list = fleet.util.all_gather(local_data_size)
diff --git a/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py b/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py
index ccaed0b984..1fa54bccd2 100755
--- a/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py
+++ b/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py
@@ -172,7 +172,7 @@ def get_user_defined_strategy(config):
         micro_num = 1
         strategy.pipeline_configs = {
             "accumulate_steps": micro_num
-        }  ## num_microbatches
+        }  # num_microbatches
    elif sync_mode == "geo":
        strategy = paddle.distributed.fleet.DistributedStrategy()
        strategy.a_sync = True
@@ -372,7 +372,7 @@ class DnnTrainer(object):
            print("entering run_minimize -- old")
            fleet_obj = fleet.distributed_optimizer(
                inner_optimizer, user_defined_strategy
-            )  ## Fleet 对象
+            )  # Fleet object
            fleet_obj.minimize(loss)
 
        if fleet.is_server():
diff --git a/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py b/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py
index f3e66f9bb8..17c51dc741 100644
--- a/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py
+++ b/python/paddle/fluid/tests/unittests/test_cross_entropy_loss.py
@@ -36,8 +36,8 @@ def cross_entropy_loss_1d(
     C = input_shape[1]
     out = np.zeros_like(label).astype(np.float64)
     total_weight = 0
-    ###1. compute softmax cross_entropy (with weight)
-    ### Note: only support hard labels.
+    # 1. compute softmax cross_entropy (with weight)
+    # Note: only support hard labels.
     for i in range(N):
         cur_target = label[i]
         if cur_target == ignore_index:
@@ -47,7 +47,7 @@
         total_weight += cur_weight
         out[i] = -log_softmax_out[i][cur_target] * cur_weight
 
-    ###2. deal with reduction
+    # 2. deal with reduction
     if reduction == 'sum':
         return np.sum(out), np.array([total_weight]).astype('float64')
     elif reduction == 'mean':
@@ -179,7 +179,7 @@ class CrossEntropyLoss(unittest.TestCase):
             'float32' if fluid.core.is_compiled_with_rocm() else 'float64'
         )
 
-    ###test for deprecated softmax_with_cross_entropy
+    # test for deprecated softmax_with_cross_entropy
     def test_softmax_with_cross_entropy(self):
         self.numeric_stable_mode = False
         self.soft_label = True
@@ -240,8 +240,8 @@ class CrossEntropyLoss(unittest.TestCase):
         )
         np.testing.assert_allclose(paddle_loss_ce.numpy(), expected, rtol=1e-05)
 
-    ###soft_label test start
-    ###soft_label test 1
+    # soft_label test start
+    # soft_label test 1
     def test_cross_entropy_loss_soft_1d(self):
         self.numeric_stable_mode = False
         self.soft_label = True
@@ -329,7 +329,7 @@ class CrossEntropyLoss(unittest.TestCase):
         np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
         np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
 
-    ###soft_label test 2
+    # soft_label test 2
     def test_cross_entropy_loss_soft_1d_weight(self):
         self.numeric_stable_mode = False
         self.soft_label = True
@@ -427,7 +427,7 @@ class CrossEntropyLoss(unittest.TestCase):
         np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
         np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
 
-    ###soft_label test 3
+    # soft_label test 3
     def test_cross_entropy_loss_soft_1d_mean(self):
         self.numeric_stable_mode = False
         self.soft_label = True
@@ -511,7 +511,7 @@ class CrossEntropyLoss(unittest.TestCase):
         np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
         np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
 
-    ###soft_label test 4
+    # soft_label test 4
     def test_cross_entropy_loss_soft_1d_weight_mean(self):
         self.numeric_stable_mode = False
         self.soft_label = True
@@ -599,7 +599,7 @@ class CrossEntropyLoss(unittest.TestCase):
         np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
         np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
 
-    ###soft_label test 5
+    # soft_label test 5
     def test_cross_entropy_loss_soft_2d(self):
         def inner_cross_entropy_loss_soft_2d(soft_label):
             self.numeric_stable_mode = False
@@ -704,7 +704,7 @@ class CrossEntropyLoss(unittest.TestCase):
         inner_cross_entropy_loss_soft_2d(True)
         inner_cross_entropy_loss_soft_2d(False)
 
-    ###soft_label test 6
+    # soft_label test 6
     def test_cross_entropy_loss_soft_2d_weight_mean(self):
         self.numeric_stable_mode = False
         self.soft_label = True
@@ -801,7 +801,7 @@ class CrossEntropyLoss(unittest.TestCase):
         np.testing.assert_allclose(static_ret[0], expected, rtol=1e-05)
         np.testing.assert_allclose(dy_ret_value, expected, rtol=1e-05)
 
-    ###soft_label test end
+    # soft_label test end
 
     def test_cross_entropy_loss_1d_with_mean_ignore(self):
         input_np = np.random.random([2, 4]).astype(self.dtype)
diff --git a/python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py b/python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py
index 71bf2bad2c..a745ec57c1 100644
--- a/python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fused_feedforward_op.py
@@ -253,7 +253,7 @@ class APITestStaticFusedFFN(unittest.TestCase):
                 pre_layer_norm=False,
             )
 
-            ######base ffn######
+            # base ffn
             linear1_out = F.linear(x, linear1_weight, linear1_bias)
             act_out = F.relu(linear1_out)
             dropout1_out = F.dropout(x=act_out, p=0.0, training=False)
@@ -265,7 +265,6 @@
                 weight=ln2_scale,
                 bias=ln2_bias,
             )
-            ######base ffn######
 
             exe = paddle.static.Executor(paddle.CUDAPlace(0))
 
diff --git a/python/paddle/fluid/tests/unittests/test_get_inputs_outputs_in_block.py b/python/paddle/fluid/tests/unittests/test_get_inputs_outputs_in_block.py
index b419679403..49c05c420c 100644
--- a/python/paddle/fluid/tests/unittests/test_get_inputs_outputs_in_block.py
+++ b/python/paddle/fluid/tests/unittests/test_get_inputs_outputs_in_block.py
@@ -68,9 +68,9 @@ class TestGetInputsOutputsInBlock(unittest.TestCase):
         inner_inputs, inner_outputs = utils.get_inputs_outputs_in_block(
             sub_block
         )
-        #'fill_constant_1.tmp_0', 'tmp_3' are names of a, c
+        # 'fill_constant_1.tmp_0', 'tmp_3' are names of a, c
         self.assertTrue(inner_inputs == {'fill_constant_1.tmp_0', 'tmp_3'})
-        #'_generated_var_1', is name of a + c
+        # '_generated_var_1', is name of a + c
         self.assertTrue(inner_outputs == {'_generated_var_1'})
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_multi_dot_op.py b/python/paddle/fluid/tests/unittests/test_multi_dot_op.py
index 96d8c7572e..341b34c0d8 100644
--- a/python/paddle/fluid/tests/unittests/test_multi_dot_op.py
+++ b/python/paddle/fluid/tests/unittests/test_multi_dot_op.py
@@ -209,7 +209,7 @@ class TestMultiDotOp4MatFirstAndLast1D(TestMultiDotOp4Mat):
         self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])}
 
 
-#####python API test#######
+# python API test
 class TestMultiDotOpError(unittest.TestCase):
     def test_errors(self):
         with paddle.static.program_guard(
diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py
index 59cd41ae9d..b7e3be86d8 100644
--- a/python/paddle/fluid/tests/unittests/test_slice_op.py
+++ b/python/paddle/fluid/tests/unittests/test_slice_op.py
@@ -298,7 +298,7 @@ class TestSliceOp_decs_dim_starts_OneTensor(OpTest):
         self.outputs = {'Out': self.out}
         self.attrs = {
             'axes': self.axes,
-            #'starts': self.starts,
+            # 'starts': self.starts,
             'ends': self.ends,
             'infer_flags': self.infer_flags,
             'decrease_axis': self.decrease_axis,
@@ -335,8 +335,8 @@ class TestSliceOp_starts_OneTensor_ends_OneTensor(OpTest):
         self.outputs = {'Out': self.out}
         self.attrs = {
             'axes': self.axes,
-            #'starts': self.starts,
-            #'ends': self.ends_infer,
+            # 'starts': self.starts,
+            # 'ends': self.ends_infer,
             'infer_flags': self.infer_flags,
         }
 
@@ -369,8 +369,8 @@ class TestSliceOp_decs_dim_starts_and_ends_OneTensor(OpTest):
         self.outputs = {'Out': self.out}
         self.attrs = {
             'axes': self.axes,
-            #'starts': self.starts,
-            #'ends': self.ends,
+            # 'starts': self.starts,
+            # 'ends': self.ends,
             'infer_flags': self.infer_flags,
             'decrease_axis': self.decrease_axis,
         }
@@ -412,7 +412,7 @@ class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
         self.outputs = {'Out': self.out}
         self.attrs = {
             'axes': self.axes,
-            #'starts': self.starts,
+            # 'starts': self.starts,
             'ends': self.ends_infer,
             'infer_flags': self.infer_flags,
         }
diff --git a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py
index cb5c491e17..2be7bbe051 100644
--- a/python/paddle/fluid/tests/unittests/test_strided_slice_op.py
+++ b/python/paddle/fluid/tests/unittests/test_strided_slice_op.py
@@ -406,7 +406,7 @@ class TestStridedSliceOp_starts_Tensor(OpTest):
         self.outputs = {'Out': self.output}
         self.attrs = {
             'axes': self.axes,
-            #'starts': self.starts,
+            # 'starts': self.starts,
             'ends': self.ends,
             'strides': self.strides,
             'infer_flags': self.infer_flags,
@@ -442,7 +442,7 @@ class TestStridedSliceOp_ends_Tensor(OpTest):
         self.attrs = {
             'axes': self.axes,
             'starts': self.starts,
-            #'ends': self.ends,
+            # 'ends': self.ends,
             'strides': self.strides,
             'infer_flags': self.infer_flags,
         }
@@ -483,8 +483,8 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest):
         self.outputs = {'Out': self.output}
         self.attrs = {
             'axes': self.axes,
-            #'starts': self.starts,
-            #'ends': self.ends,
+            # 'starts': self.starts,
+            # 'ends': self.ends,
             'strides': self.strides,
             'infer_flags': self.infer_flags,
         }
@@ -520,7 +520,7 @@ class TestStridedSliceOp_strides_Tensor(OpTest):
             'axes': self.axes,
             'starts': self.starts,
             'ends': self.ends,
-            #'strides': self.strides,
+            # 'strides': self.strides,
             'infer_flags': self.infer_flags,
         }
 
diff --git a/python/paddle/fluid/tests/unittests/white_list/check_op_sequence_instance_0_input_white_list.py b/python/paddle/fluid/tests/unittests/white_list/check_op_sequence_instance_0_input_white_list.py
index 408ad9f614..5b222c56e8 100644
--- a/python/paddle/fluid/tests/unittests/white_list/check_op_sequence_instance_0_input_white_list.py
+++ b/python/paddle/fluid/tests/unittests/white_list/check_op_sequence_instance_0_input_white_list.py
@@ -18,7 +18,6 @@
 # compiletime&runtime will be skipped. Ops in this whitelist need to declear
 # reasons for skipping compile_vs_runtime test or be fixed later.
 
-#!/usr/bin/env python
 import sys
 
 # For ops in this whitelist, the check of instance size is 0 input will be skipped.
diff --git a/python/paddle/framework/__init__.py b/python/paddle/framework/__init__.py
index a6b79dd29e..07e9d34e9b 100644
--- a/python/paddle/framework/__init__.py
+++ b/python/paddle/framework/__init__.py
@@ -47,9 +47,9 @@ from ..fluid.framework import set_flags  # noqa: F401
 from ..fluid.dygraph.base import enable_dygraph as disable_static  # noqa: F401
 from ..fluid.dygraph.base import disable_dygraph as enable_static  # noqa: F401
 from ..fluid.framework import _non_static_mode as in_dynamic_mode  # noqa: F401
-from ..fluid.framework import (
-    _non_static_mode,
-)  # noqa: F401; temporary used for hackson
+from ..fluid.framework import (  # noqa: F401
+    _non_static_mode,  # temporary used for hackson
+)
 from ..fluid.framework import (
     _current_expected_place,
     _get_paddle_place,
diff --git a/tools/parallel_UT_rule.py b/tools/parallel_UT_rule.py
index 04c37999c8..30d91d9685 100755
--- a/tools/parallel_UT_rule.py
+++ b/tools/parallel_UT_rule.py
@@ -228,7 +228,7 @@ HIGH_PARALLEL_JOB_NEW = [
     'test_launch_coverage',
     'test_mkldnn_conv_activation_fuse_pass',
     'test_inference_model_io',
-    'test_fusion_repeated_fc_relu_op',  #'heter_listen_and_server_test',
+    'test_fusion_repeated_fc_relu_op',
     'cudnn_desc_test',
     'test_beam_search_op',
     'test_var_conv_2d',
diff --git a/tools/sampcd_processor.py b/tools/sampcd_processor.py
index 5afa47dc4f..3483b82530 100644
--- a/tools/sampcd_processor.py
+++ b/tools/sampcd_processor.py
@@ -547,7 +547,7 @@ def get_full_api():
     """
     get all the apis
     """
-    global API_DIFF_SPEC_FN  ## readonly
+    global API_DIFF_SPEC_FN  # readonly
     from print_signatures import get_all_api_from_modulelist
 
     member_dict = get_all_api_from_modulelist()
@@ -559,7 +559,7 @@ def get_full_api_by_walk():
     """
     get all the apis
     """
-    global API_DIFF_SPEC_FN  ## readonly
+    global API_DIFF_SPEC_FN  # readonly
     from print_signatures import get_all_api
 
     apilist = get_all_api()
@@ -571,7 +571,7 @@ def get_full_api_from_pr_spec():
     """
     get all the apis
     """
-    global API_PR_SPEC_FN, API_DIFF_SPEC_FN  ## readonly
+    global API_PR_SPEC_FN, API_DIFF_SPEC_FN  # readonly
     pr_api = get_api_md5(API_PR_SPEC_FN)
     if len(pr_api):
         with open(API_DIFF_SPEC_FN, 'w') as f:
@@ -584,7 +584,7 @@ def get_incrementapi():
     '''
     this function will get the apis that difference between API_DEV.spec and API_PR.spec.
     '''
-    global API_DEV_SPEC_FN, API_PR_SPEC_FN, API_DIFF_SPEC_FN  ## readonly
+    global API_DEV_SPEC_FN, API_PR_SPEC_FN, API_DIFF_SPEC_FN  # readonly
     dev_api = get_api_md5(API_DEV_SPEC_FN)
     pr_api = get_api_md5(API_PR_SPEC_FN)
     with open(API_DIFF_SPEC_FN, 'w') as f:
--
GitLab
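
For readers unfamiliar with the pycodestyle codes this patch enables, here is a
minimal illustrative sketch of each rule (my own example, not code taken from
the diff above):

    x = 1  ## E262: inline comment should start with '# '
    #E265: block comment should start with '# '
    ## E266: too many leading '#' for block comment
    x = 1  # compliant inline comment
    # compliant block comment

The .flake8 hunk removes only E262 and E265 from the ignore list, so flake8 now
enforces those two rules; E266 stays ignored. A shebang such as
`#!/usr/bin/env python` on any line other than the first is also reported as
E265, which is presumably why the stray mid-file shebang in
check_op_sequence_instance_0_input_white_list.py is deleted outright rather
than reformatted.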