From 2922aa679badf195696b49e4247632d9e021d783 Mon Sep 17 00:00:00 2001
From: Ainavo <57820731+Ainavo@users.noreply.github.com>
Date: Wed, 22 Mar 2023 10:13:01 +0800
Subject: [PATCH] [CodeStyle][B011] replace assert False with raise
 AssertionError (#51935)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* replace assert False with raise AssertionError

* clean up the redundant parts of the configuration file
---
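Note for reviewers: flake8-bugbear/ruff rule B011 flags "assert False"
because Python strips assert statements when run with -O, so the check
silently disappears under optimized bytecode, while raise AssertionError(...)
always fires. A minimal sketch of the rewrite this patch applies throughout
(the function and its literals are illustrative, not taken from this patch):

    # Before: the guard vanishes when Python runs with -O/-OO.
    def check_data_type_old(data_type):
        if data_type not in ('NGRAM', 'SEQ'):
            assert False, 'Unknown data type'

    # After: the error is raised regardless of the optimization level.
    def check_data_type_new(data_type):
        if data_type not in ('NGRAM', 'SEQ'):
            raise AssertionError('Unknown data type')
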
 .../generator/eager_gen.py                    |  6 ++--
 .../generator/python_c_gen.py                 |  4 ++-
 pyproject.toml                                |  6 ++++
 python/paddle/dataset/imikolov.py             |  2 +-
 .../auto_parallel/cost/base_cost.py           |  6 ++--
 .../distributed/auto_parallel/dist_context.py | 34 +++++++++----------
 .../auto_parallel/operators/common.py         |  4 ++-
 .../auto_parallel/process_group.py            |  6 ++--
 .../paddle/distributed/auto_parallel/utils.py |  4 ++-
 python/paddle/distributed/collective.py       |  2 +-
 python/paddle/distributed/fleet/fleet.py      |  4 ++-
 .../paddle/distributed/fleet/launch_utils.py  |  8 ++---
 .../ascend/ascend_optimizer.py                |  7 ++--
 .../meta_optimizers/ascend/ascend_parser.py   |  8 ++---
 .../fleet/meta_optimizers/dgc_optimizer.py    |  4 ++-
 .../tests/unittests/check_nan_inf_base.py     |  4 +--
 .../unittests/check_nan_inf_base_dygraph.py   |  4 +--
 .../unittests/ir/inference/auto_scan_test.py  |  4 +--
 .../unittests/test_matmul_op_with_head.py     |  2 +-
 .../fluid/tests/unittests/test_nan_inf.py     |  2 +-
 .../incubate/distributed/fleet/collective.py  |  4 ++-
 .../distributed/models/moe/moe_layer.py       | 10 +++---
 .../paddle/jit/dy2static/convert_operators.py |  2 +-
 python/paddle/nn/layer/transformer.py         | 12 +++----
 python/paddle/text/datasets/imikolov.py       |  2 +-
 25 files changed, 84 insertions(+), 67 deletions(-)

diff --git a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
index a973fb3a9ca..d800dc92988 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -829,9 +829,9 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase):
                     backward_input_pos,
                 ]
             else:
-                assert (
-                    False
-                ), f"Cannot find {backward_input_name} in forward position map"
+                raise AssertionError(
+                    f"Cannot find {backward_input_name} in forward position map"
+                )
 
         for backward_output in backward_returns_list:
             backward_output_name = backward_output[0]
diff --git a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
index c98a850c91b..612223327ff 100644
--- a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
+++ b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py
@@ -58,7 +58,9 @@ atype_to_parsing_function = {
 
 
 def FindParsingFunctionFromAttributeType(atype):
     if atype not in atype_to_parsing_function.keys():
-        assert False, f"Unable to find {atype} in atype_to_parsing_function."
+        raise AssertionError(
+            f"Unable to find {atype} in atype_to_parsing_function."
+        )
 
     return atype_to_parsing_function[atype]
diff --git a/pyproject.toml b/pyproject.toml
index f7be68ad95a..2851cb60636 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -32,6 +32,11 @@ select = [
     # Pyflakes
     "F401",
 
+    # Comprehensions
+    "C400",
+    "C401",
+    "C402",
+
     # Pyupgrade
     "UP001",
     "UP003",
@@ -62,6 +67,7 @@ select = [
     # Bugbear
     "B009",
     "B010",
+    "B011",
 ]
 unfixable = [
     "NPY001"
diff --git a/python/paddle/dataset/imikolov.py b/python/paddle/dataset/imikolov.py
index 0064475e41b..b4224c355e3 100644
--- a/python/paddle/dataset/imikolov.py
+++ b/python/paddle/dataset/imikolov.py
@@ -108,7 +108,7 @@ def reader_creator(filename, word_idx, n, data_type):
                     continue
                 yield src_seq, trg_seq
             else:
-                assert False, 'Unknown data type'
+                raise AssertionError('Unknown data type')
 
     return reader
 
diff --git a/python/paddle/distributed/auto_parallel/cost/base_cost.py b/python/paddle/distributed/auto_parallel/cost/base_cost.py
index 85c1f9881b0..32a51302d18 100644
--- a/python/paddle/distributed/auto_parallel/cost/base_cost.py
+++ b/python/paddle/distributed/auto_parallel/cost/base_cost.py
@@ -436,8 +436,10 @@ def build_dp_costs(
         elif var_name in dist_attr.outputs_dist_attrs:
             dims_mapping = dist_attr.get_output_dims_mapping(var_name)
         else:
-            assert False, "cannot find dims_mapping for {} in {}".format(
-                var_name, dist_attr
+            raise AssertionError(
+                "cannot find dims_mapping for {} in {}".format(
+                    var_name, dist_attr
+                )
             )
 
         # dims_mapping = (
diff --git a/python/paddle/distributed/auto_parallel/dist_context.py b/python/paddle/distributed/auto_parallel/dist_context.py
index d7f23e2c565..0db13323601 100644
--- a/python/paddle/distributed/auto_parallel/dist_context.py
+++ b/python/paddle/distributed/auto_parallel/dist_context.py
@@ -974,9 +974,9 @@ class DistributedContext:
 
     def validate_dist_attr_for_program(self):
         if not self._is_initialized:
-            assert (
-                False
-            ), "Program must be initialized before validating its distributed attributes"
+            raise AssertionError(
+                "Program must be initialized before validating its distributed attributes"
+            )
         for block in self.serial_main_program.blocks:
             for tensor in block.vars.values():
                 dist_tensor = self.get_dist_tensor_for_program(tensor)
@@ -988,13 +988,13 @@ class DistributedContext:
                 if (dist_tensor is not None) and (
                     not dist_tensor.validate_dist_attr()
                 ):
-                    assert (
-                        False
-                    ), "Tensor {} (id: {}, original_id: {}) has a wrong distributed attributes {}.".format(
-                        dist_tensor.serial_tensor.name,
-                        dist_tensor.serial_tensor.desc.id(),
-                        dist_tensor.serial_tensor.desc.original_id(),
-                        dist_tensor.dist_attr,
+                    raise AssertionError(
+                        "Tensor {} (id: {}, original_id: {}) has wrong distributed attributes {}.".format(
+                            dist_tensor.serial_tensor.name,
+                            dist_tensor.serial_tensor.desc.id(),
+                            dist_tensor.serial_tensor.desc.original_id(),
+                            dist_tensor.dist_attr,
+                        )
                     )
             for op in block.ops:
                 dist_op = self.get_dist_op_for_program(op)
@@ -1004,13 +1004,13 @@ class DistributedContext:
                     dist_op.serial_op.type
                 )
                 if (dist_op is not None) and (not dist_op.validate_dist_attr()):
-                    assert (
-                        False
-                    ), "Operator {} (id: {}, original_id: {}) has a wrong distributed attributes {} .".format(
-                        dist_op.serial_op.type,
-                        dist_op.serial_op.desc.id(),
-                        dist_op.serial_op.desc.original_id(),
-                        dist_op.dist_attr,
+                    raise AssertionError(
+                        "Operator {} (id: {}, original_id: {}) has wrong distributed attributes {}.".format(
+                            dist_op.serial_op.type,
+                            dist_op.serial_op.desc.id(),
+                            dist_op.serial_op.desc.original_id(),
+                            dist_op.dist_attr,
+                        )
                     )
         return True
 
diff --git a/python/paddle/distributed/auto_parallel/operators/common.py b/python/paddle/distributed/auto_parallel/operators/common.py
index ef9292e48be..e9d25600c24 100644
--- a/python/paddle/distributed/auto_parallel/operators/common.py
+++ b/python/paddle/distributed/auto_parallel/operators/common.py
@@ -186,7 +186,9 @@ def register_distributed_operator_impl(op_type, dist_impl):
         dist_impl.type = op_type
         dist_op_impl_container.register_impl(dist_impl)
     else:
-        assert False, "Must register distributed operator registry first."
+        raise AssertionError(
+            "Must register distributed operator registry first."
+        )
 
 
 def find_compatible_distributed_operator_impls(dist_op, fwd=True, partial=True):
diff --git a/python/paddle/distributed/auto_parallel/process_group.py b/python/paddle/distributed/auto_parallel/process_group.py
index 91990205504..0fb05bfd931 100644
--- a/python/paddle/distributed/auto_parallel/process_group.py
+++ b/python/paddle/distributed/auto_parallel/process_group.py
@@ -115,8 +115,8 @@ class ProcessGroup:
         if global_rank in self.ranks:
            return self.ranks.index(global_rank)
         else:
-            assert False, "Rank {} doesn't belong to this group".format(
-                global_rank
+            raise AssertionError(
+                "Rank {} doesn't belong to this group".format(global_rank)
             )
 
     def is_instantiate(self):
@@ -149,7 +149,7 @@ class ProcessGroup:
                 ring_id
             )
         else:
-            assert False, "No CUDA device found"
+            raise AssertionError('No CUDA device found')
 
         # TODO(shenliang03): This is a temporary solution to solve the problem of
         # hang caused by cross-creation of new_group
diff --git a/python/paddle/distributed/auto_parallel/utils.py b/python/paddle/distributed/auto_parallel/utils.py
index 070ea91bf75..27beb2963f5 100644
--- a/python/paddle/distributed/auto_parallel/utils.py
+++ b/python/paddle/distributed/auto_parallel/utils.py
@@ -1790,7 +1790,9 @@ def set_dist_op_desc_original_id(dist_op_desc, op_desc, dist_context):
         return
     # Third, print error infomation if we cannot find the original id
     else:
-        assert False, "Cannot find the original id in the distributed context"
+        raise AssertionError(
+            "Cannot find the original id in the distributed context"
+        )
 
 
 def to_list(value):
diff --git a/python/paddle/distributed/collective.py b/python/paddle/distributed/collective.py
index 586fe76c971..e202ca7074e 100644
--- a/python/paddle/distributed/collective.py
+++ b/python/paddle/distributed/collective.py
@@ -304,7 +304,7 @@ def new_group(ranks=None, backend=None, timeout=_default_timeout):
                 ring_id
             )
         else:
-            assert False, "no cuda device found"
+            raise AssertionError("No CUDA device found")
     else:
         return gp
 
diff --git a/python/paddle/distributed/fleet/fleet.py b/python/paddle/distributed/fleet/fleet.py
index 6ce0b8f9189..9b785a0ed1b 100755
--- a/python/paddle/distributed/fleet/fleet.py
+++ b/python/paddle/distributed/fleet/fleet.py
@@ -416,7 +416,9 @@ class Fleet:
         if not order:
             order = ['dp', 'pp', 'sharding', 'mp']
         if order[:].sort() != list(d_hybrid_degree.keys())[:].sort():
-            assert False, "The order of hybrid_config setting is incorrect."
+            raise AssertionError(
+                'The order of hybrid_config setting is incorrect.'
+            )
 
         hybrid_group_names = []
         dims = []
diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py
index 73821457d5b..7b50796a05b 100755
--- a/python/paddle/distributed/fleet/launch_utils.py
+++ b/python/paddle/distributed/fleet/launch_utils.py
@@ -953,10 +953,10 @@ def get_device_proc_info(args):
         else:
             devices_per_proc = [x for x in range(0, args.nproc_per_node)]
     else:
-        assert (
-            False
-        ), "Can't support device_mode:{}, support only cpu|gpu|xpu now.".format(
-            device_mode
+        raise AssertionError(
+            "Can't support device_mode:{}, support only cpu|gpu|xpu now.".format(
+                device_mode
+            )
         )
 
     return (device_mode, devices_per_proc)
diff --git a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py
index b7d22882c82..78bacc13e67 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_optimizer.py
@@ -116,10 +116,9 @@ class AscendIRParser:
                 )
                 op_parser.apply(op)
             else:
-                assert (
-                    False
-                ), "Op[%s] has not been registered, so we have to skip it" % (
-                    op.type
+                raise AssertionError(
+                    'Op[%s] has not been registered, so we have to skip it'
+                    % op.type
                 )
 
     def _parse_program(
diff --git a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py
index b9840b0333b..62dc123a122 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/ascend/ascend_parser.py
@@ -515,7 +515,7 @@ class SumParser(AscendParserBase):
     def _apply(self):
         len_list = len(self.op.input_arg_names)
         if len_list < 2:
-            assert False, "the size of input list must large or equal 2"
+            raise AssertionError("the size of input list must be at least 2")
         x = self._get_ge_input(self.op.input_arg_names[0])
         y = self._get_ge_input(self.op.input_arg_names[1])
         sum = (
@@ -643,7 +643,7 @@ class MatMulParser(AscendParserBase):
                 .set_attr_bool("transpose_x2", transpose_y)
             )
         else:
-            assert False, "not support"
+            raise AssertionError("not supported")
 
         return [matmul], [[0]]
 
@@ -681,7 +681,7 @@ class MulParser(AscendParserBase):
                     .set_input("x2", y, 0)
                 )
             else:
-                assert False, "not support"
+                raise AssertionError("not supported")
         else:
             if len(shape_x1) == 3 and len(shape_x2) == 2:
                 assert x_num_col_dims == 2, "only support 2"
@@ -729,7 +729,7 @@ class MulParser(AscendParserBase):
                     .set_attr_vec_int32("perm", [1, 2, 0])
                 )
             else:
-                assert False, "not support"
+                raise AssertionError("not supported")
 
         return [matmul], [[0]]
diff --git a/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py
index ce0bad4193d..d7df1e65d80 100644
--- a/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py
+++ b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py
@@ -107,7 +107,9 @@ class DGCMomentumOptimizer(Optimizer):
         elif isinstance(regularization, L2Decay):
             regular_type = 2
         else:
-            assert False, 'regularization must be None|L1Decay|L2Deacy'
+            raise AssertionError(
+                "regularization must be None|L1Decay|L2Decay"
+            )
         return regular_type, regular_coeff
 
     def _is_use_dgc(self, param_var, grad_var):
diff --git a/python/paddle/fluid/tests/unittests/check_nan_inf_base.py b/python/paddle/fluid/tests/unittests/check_nan_inf_base.py
index c62cc3340ee..71c346c9403 100644
--- a/python/paddle/fluid/tests/unittests/check_nan_inf_base.py
+++ b/python/paddle/fluid/tests/unittests/check_nan_inf_base.py
@@ -105,7 +105,7 @@ def check(use_cuda):
 if __name__ == '__main__':
     try:
         check(use_cuda=False)
-        assert False
+        raise AssertionError()
     except Exception as e:
         print(e)
         print(type(e))
@@ -114,7 +114,7 @@ if __name__ == '__main__':
     if core.is_compiled_with_cuda():
         try:
             check(use_cuda=True)
-            assert False
+            raise AssertionError()
         except Exception as e:
             print(e)
             print(type(e))
diff --git a/python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py b/python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py
index e46b861c713..ace66790967 100644
--- a/python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py
+++ b/python/paddle/fluid/tests/unittests/check_nan_inf_base_dygraph.py
@@ -96,7 +96,7 @@ def run_check():
     if paddle.is_compiled_with_cuda():
         try:
             check(use_cuda=True)
-            assert False
+            raise AssertionError()
         except Exception as e:
             print(e)
             print(type(e))
@@ -105,7 +105,7 @@ def run_check():
         assert type(e) == OSError or type(e) == RuntimeError
     try:
         check(use_cuda=False)
-        assert False
+        raise AssertionError()
     except Exception as e:
         print(e)
         print(type(e))
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py b/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py
index 6b9669dd192..bed9938949e 100755
--- a/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py
+++ b/python/paddle/fluid/tests/unittests/ir/inference/auto_scan_test.py
@@ -462,7 +462,7 @@ class PassAutoScanTest(AutoScanTest):
                     min_success_num, successful_ran_programs
                 )
             )
-            assert False
+            raise AssertionError()
         used_time = time.time() - start_time
         if max_duration > 0 and used_time > max_duration:
             logging.error(
@@ -470,7 +470,7 @@ class PassAutoScanTest(AutoScanTest):
                     max_duration
                 )
             )
-            assert False
+            raise AssertionError()
 
     def run_test(self, quant=False, prog_configs=None):
         status = True
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op_with_head.py b/python/paddle/fluid/tests/unittests/test_matmul_op_with_head.py
index c1cc6e1402f..213002ff7d6 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_op_with_head.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_op_with_head.py
@@ -180,7 +180,7 @@ def matmul_head2(X, Y, head_number=1):
             z.append(np.matmul(x[i], y[i]))
         Z = np.concatenate((z), axis=2)
     else:
-        assert False, "ERROR: Not supported dimension!"
+        raise AssertionError("ERROR: Not supported dimension!")
 
     return Z
 
diff --git a/python/paddle/fluid/tests/unittests/test_nan_inf.py b/python/paddle/fluid/tests/unittests/test_nan_inf.py
index 4f0e02fdf61..0176727cb99 100644
--- a/python/paddle/fluid/tests/unittests/test_nan_inf.py
+++ b/python/paddle/fluid/tests/unittests/test_nan_inf.py
@@ -99,7 +99,7 @@ class TestNanInfCheckResult(unittest.TestCase):
             out = paddle.log(x)
             sys.stdout.flush()
             if add_assert:
-                assert False
+                raise AssertionError()
         except Exception as e:
             # Cannot catch the log in CUDA kernel.
             err_str_list = (
diff --git a/python/paddle/incubate/distributed/fleet/collective.py b/python/paddle/incubate/distributed/fleet/collective.py
index 68c77e36f4a..498e554b507 100644
--- a/python/paddle/incubate/distributed/fleet/collective.py
+++ b/python/paddle/incubate/distributed/fleet/collective.py
@@ -301,7 +301,9 @@ class CollectiveOptimizer(DistributedOptimizer):
     def _check_condition(self, name, **kwargs):
         for k, v in kwargs.items():
             if v is True:
-                assert False, "you can't use %s and %s together" % (name, k)
+                raise AssertionError(
+                    "you can't use %s and %s together" % (name, k)
+                )
 
     def _check_collective_mode(self, main_program, optimizer, strategy):
         """
diff --git a/python/paddle/incubate/distributed/models/moe/moe_layer.py b/python/paddle/incubate/distributed/models/moe/moe_layer.py
index d99f0d0864d..0f08edf4130 100644
--- a/python/paddle/incubate/distributed/models/moe/moe_layer.py
+++ b/python/paddle/incubate/distributed/models/moe/moe_layer.py
@@ -384,12 +384,10 @@ class MoELayer(nn.Layer):
                     group=self.group,
                 )
             else:
-                assert (
-                    False
-                ), "We only support naive gate, \
-                    gshard gate and switch gate, \
-                    but you choose {} gate.".format(
-                    str(gate)
+                raise AssertionError(
+                    "We only support naive gate, gshard gate and switch gate, but you chose {} gate.".format(
+                        str(gate)
+                    )
                 )
         elif isinstance(gate, NaiveGate):
             self.top_k = gate.top_k
diff --git a/python/paddle/jit/dy2static/convert_operators.py b/python/paddle/jit/dy2static/convert_operators.py
index 2e9d9a13a73..6d85fa7f97d 100644
--- a/python/paddle/jit/dy2static/convert_operators.py
+++ b/python/paddle/jit/dy2static/convert_operators.py
@@ -87,7 +87,7 @@ def _unpack_by_structure_paddle(target, structure):
         if isinstance(ele, list):
             ret.append(unpack_by_structure(target[idx], ele))
             continue
-        assert False, "structure element must be 1 or list"
+        raise AssertionError("structure element must be 1 or list")
     return ret
 
diff --git a/python/paddle/nn/layer/transformer.py b/python/paddle/nn/layer/transformer.py
index 2bce11b296d..cab257f2e0e 100644
--- a/python/paddle/nn/layer/transformer.py
+++ b/python/paddle/nn/layer/transformer.py
@@ -1317,9 +1317,9 @@ class Transformer(Layer):
                 encoder_bias_attr = [bias_attr[0], bias_attr[-1]]
                 decoder_bias_attr = bias_attr
             else:
-                assert (
-                    False
-                ), "length of bias_attr should be 1 or 2 or 3 when it is a list/tuple"
+                raise AssertionError(
+                    "length of bias_attr should be 1 or 2 or 3 when it is a list/tuple"
+                )
         else:
             encoder_bias_attr = bias_attr
             decoder_bias_attr = bias_attr
@@ -1339,9 +1339,9 @@ class Transformer(Layer):
                 encoder_weight_attr = [weight_attr[0], weight_attr[-1]]
                 decoder_weight_attr = weight_attr
             else:
-                assert (
-                    False
-                ), "length of weight_attr should be 1 or 2 or 3 when it is a list/tuple"
+                raise AssertionError(
+                    "length of weight_attr should be 1 or 2 or 3 when it is a list/tuple"
+                )
         else:
             encoder_weight_attr = weight_attr
             decoder_weight_attr = weight_attr
diff --git a/python/paddle/text/datasets/imikolov.py b/python/paddle/text/datasets/imikolov.py
index d936bcb6678..b84d6255bf8 100644
--- a/python/paddle/text/datasets/imikolov.py
+++ b/python/paddle/text/datasets/imikolov.py
@@ -168,7 +168,7 @@ class Imikolov(Dataset):
                         continue
                     self.data.append((src_seq, trg_seq))
             else:
-                assert False, 'Unknow data type'
+                raise AssertionError('Unknown data type')
 
     def __getitem__(self, idx):
         return tuple([np.array(d) for d in self.data[idx]])
-- 
GitLab