diff --git a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py index dc38d3d46b293f9602ffa19bb96a4fb4178e843d..0ceff35360868ebeef9e08589e734e82f4622acc 100644 --- a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py +++ b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py @@ -532,7 +532,7 @@ class PythonCGenerator(GeneratorBase): ) status = f_generator.run() - if status == True: + if status: self.python_c_functions_str += ( f_generator.python_c_function_str + "\n" ) diff --git a/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py b/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py index 219ce72077cd594084d80d1c20e5c8822ddca183..a5ff3717199b01684e27e6d5a77c5dea2fed4b27 100644 --- a/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py +++ b/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py @@ -45,7 +45,7 @@ def resize_short(img, target_size): def crop_image(img, target_size, center): width, height = img.size size = target_size - if center == True: + if center: w_start = (width - size) // 2 h_start = (height - size) // 2 else: diff --git a/python/paddle/audio/backends/init_backend.py b/python/paddle/audio/backends/init_backend.py index 3ca77ba316f9813df32bfac077bce3b7a4761aa6..6bf972d435f8869da0d9b6de7fa57690f222950b 100644 --- a/python/paddle/audio/backends/init_backend.py +++ b/python/paddle/audio/backends/init_backend.py @@ -79,7 +79,7 @@ def list_available_backends() -> List[str]: if "paddleaudio" in sys.modules: version = paddleaudio.__version__ - if _check_version(version) == False: + if not _check_version(version): err_msg = ( "the version of paddleaudio installed is {},\n" "please ensure the paddleaudio >= 1.0.2." 
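The hunks above, and most of those that follow, make the same mechanical cleanup: an explicit comparison against a boolean literal ("== True" / "== False", the pattern flake8 reports as E712) is replaced by a plain truthiness test. A minimal, self-contained sketch of the equivalence; the body of _check_version below is a toy stand-in, not the real paddle.audio implementation:

    def _check_version(version):
        # toy stand-in: parse "major.minor.patch" and require >= 1.0.2
        return tuple(int(p) for p in version.split(".")) >= (1, 0, 2)

    version = "1.0.1"

    # old spelling, as removed throughout this patch
    if _check_version(version) == False:
        print("please ensure the paddleaudio >= 1.0.2.")

    # new spelling: rely on the truthiness of the bool return value
    if not _check_version(version):
        print("please ensure the paddleaudio >= 1.0.2.")

Both branches behave identically here because _check_version always returns a bool; the rewrite only changes the spelling, not the behavior.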
diff --git a/python/paddle/dataset/conll05.py b/python/paddle/dataset/conll05.py index 22038594f60c909b39140ae5ac30a8e8a684baba..08a383badae5de87171314a7a02a154aba703b67 100644 --- a/python/paddle/dataset/conll05.py +++ b/python/paddle/dataset/conll05.py @@ -109,9 +109,9 @@ def corpus_reader(data_path, words_name, props_name): lbl_seq = [] verb_word = '' for l in lbl: - if l == '*' and is_in_bracket == False: + if l == '*' and not is_in_bracket: lbl_seq.append('O') - elif l == '*' and is_in_bracket == True: + elif l == '*' and is_in_bracket: lbl_seq.append('I-' + cur_tag) elif l == '*)': lbl_seq.append('I-' + cur_tag) diff --git a/python/paddle/distributed/auto_parallel/process_group.py b/python/paddle/distributed/auto_parallel/process_group.py index 9883f116f4eea98216cd87737c9a1177e8e33688..10d2556f299ce4b5010d4a984ab04544741eb1ba 100644 --- a/python/paddle/distributed/auto_parallel/process_group.py +++ b/python/paddle/distributed/auto_parallel/process_group.py @@ -106,7 +106,7 @@ class ProcessGroup: return else: assert ( - self.is_instantiate() == False + not self.is_instantiate() ), "Cannot add new ranks after instantiating the process group" self._ranks.extend(new_ranks) self._ranks = sorted(list(set(self.ranks))) diff --git a/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py b/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py index 0ed03defbed8686fac753f593e573ca0ea001690..9f31766f19f2f37c268147412e81560d033b1568 100644 --- a/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py +++ b/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py @@ -268,7 +268,7 @@ class ParallelTuner: return for idx, dim in enumerate(dims_list): - if visited[idx] == False: + if not visited[idx]: dims_mapping[start] = dim visited[idx] = True self._generate_dims_mapping_candidates_helper( diff --git a/python/paddle/distributed/fleet/dataset/dataset.py b/python/paddle/distributed/fleet/dataset/dataset.py index b74f700391cf5c5a4aead52c4c1d433aecdd3ddc..0217e012579cd4d48ba1e30d8ca8136980611333 100755 --- a/python/paddle/distributed/fleet/dataset/dataset.py +++ b/python/paddle/distributed/fleet/dataset/dataset.py @@ -514,7 +514,7 @@ class InMemoryDataset(DatasetBase): self._set_fleet_send_batch_size(kwargs[key]) elif key == "fleet_send_sleep_seconds": self._set_fleet_send_sleep_seconds(kwargs[key]) - elif key == "fea_eval" and kwargs[key] == True: + elif key == "fea_eval" and kwargs[key]: candidate_size = kwargs.get("candidate_size", 10000) self._set_fea_eval(candidate_size, True) diff --git a/python/paddle/distributed/fleet/fleet.py b/python/paddle/distributed/fleet/fleet.py index db3aae28a201875c172699d197be7be4d3d47da5..2630fa8283eedbf7d78ea54db4bc67a27024dbe4 100644 --- a/python/paddle/distributed/fleet/fleet.py +++ b/python/paddle/distributed/fleet/fleet.py @@ -303,7 +303,7 @@ class Fleet(object): paddle.distributed.init_parallel_env() # hybrid parallel not support for npu/xpu - if self._user_defined_strategy.heter_ccl_mode == False: + if not self._user_defined_strategy.heter_ccl_mode: # init hybrid parallel environment in dygraph if tp._HYBRID_PARALLEL_GROUP is None: self._init_hybrid_parallel_env() diff --git a/python/paddle/distributed/fleet/launch.py b/python/paddle/distributed/fleet/launch.py index ca301c52a7f252a0e300845a03950433980935a3..998f64c3ec2938b2036d3c8adb3f3378341d60f5 100755 --- a/python/paddle/distributed/fleet/launch.py +++ b/python/paddle/distributed/fleet/launch.py @@ -369,7 +369,7 @@ def get_cluster_info(args): if 
os.environ.get('FLAGS_START_PORT') is not None: start_port = os.environ.get('FLAGS_START_PORT') # auto mapping between processes and devices for auto-parallel - if args.enable_auto_mapping == True: + if args.enable_auto_mapping: assert ( args.cluster_topo_path is not None ), "The cluster topology must be provied when enabling auto mapping." diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py index 4ec2aa07787cb470aedc282fe208e9c728846aa1..d4b6b86119fa38f8590d21bfe5db78f349544e47 100755 --- a/python/paddle/distributed/fleet/launch_utils.py +++ b/python/paddle/distributed/fleet/launch_utils.py @@ -1582,7 +1582,7 @@ class ParameterServerLauncher(object): x.strip().split(":")[0] for x in self.worker_endpoints.split(",") ] - if self.with_coordinator == True: + if self.with_coordinator: self.coordinator_endpoints_ips = [ x.strip().split(":")[0] for x in self.coordinator_endpoints.split(",") diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py index b1627d5a3b79c017e791649a30e7015b661fdbb4..04c4272ee0eec7ea7e8ea622e33a03acf40a1c5c 100644 --- a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py +++ b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py @@ -582,7 +582,7 @@ def _parallel_linear( # set is_distributed for splited bias # if a linear layer is splited by row, each rank would hold a complete bias and they should be the same in each rank. # if a linear layer is splited by col, the bias would also be split into each rank as its weight - if axis == 1 and linear._bias_attr != False: + if axis == 1 and linear._bias_attr is not False: _set_var_distributed(linear.bias) if not gather_out: diff --git a/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py index eb97122587f364202102abca88b7bfee11d305ac..9a2fb127999855bfe762ba61ecd8419d74da99e2 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py @@ -53,7 +53,7 @@ class GradientMergeOptimizer(MetaOptimizerBase): return False can_apply = ( - self.user_defined_strategy.gradient_merge == True + self.user_defined_strategy.gradient_merge ) and self.user_defined_strategy.gradient_merge_configs["k_steps"] > 1 return can_apply diff --git a/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py index dd2ccfc7ff7d26e2a47bf24bd2de9899cff98f2f..a1a33992d5946e5d7265fc21b59464b16e26d116 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py @@ -177,7 +177,7 @@ class GraphExecutionOptimizer(MetaOptimizerBase): gradient_scale_configs['scale_strategy'] ] - if self.user_defined_strategy.recompute == True: + if self.user_defined_strategy.recompute: logging.warn( "set enable_sequential_execution=True since you have enable the recompute strategy" ) diff --git a/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py index dfb8fe5b22438128af5489d135c10b6e643f8700..dfadeff3807e2fca7591ecb772211e63673e5e7c 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py +++ 
b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py @@ -66,7 +66,7 @@ class PipelineOptimizer(MetaOptimizerBase): if self.use_sharding: return False - if self.user_defined_strategy.pipeline == True: + if self.user_defined_strategy.pipeline: return True return False diff --git a/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py index 6ae89a9754ebdef406df51d3414918c71a0d6854..53972452d80fa73b3a737a24d4cec33bf302ccf8 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py @@ -65,7 +65,7 @@ class RawProgramOptimizer(MetaOptimizerBase): if not self.role_maker._is_collective: return False - if self.without_graph_optimization == True: + if self.without_graph_optimization: return True return False diff --git a/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py index 524c3a123abc08062b49d509750996193b9bd9e2..7a817b6fd04e026e8887ec6683b0001ca1b8f557 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py @@ -55,7 +55,7 @@ class RecomputeOptimizer(MetaOptimizerBase): if not self.role_maker._is_collective: return False - if self.user_defined_strategy.recompute == True: + if self.user_defined_strategy.recompute: if ( len(self.user_defined_strategy.recompute_configs["checkpoints"]) == 0 diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py index 1ec9457854dce7b43d974763050ae93046e98c4f..ea42130300f11208f0f75917e3615b9d9fb86dd7 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py @@ -38,7 +38,7 @@ def check_broadcast(block): broadcast_vars = {} for idx, op in enumerate(block.ops): if op.type == "c_broadcast": - if op.all_attrs()["use_calc_stream"] == False: + if not op.all_attrs()["use_calc_stream"]: var_name = op.desc.input_arg_names()[0] if "@BroadCast" in var_name: if var_name in broadcast_vars: @@ -72,7 +72,7 @@ def check_broadcast(block): last_sync_calc_op_idx = idx continue if op.type == "c_broadcast": - if op.all_attrs()["use_calc_stream"] == False: + if not op.all_attrs()["use_calc_stream"]: var_name = op.desc.input_arg_names()[0] if "@BroadCast" in var_name: if broadcast_vars[var_name]["fill_constant_pos"] != -1: @@ -117,7 +117,7 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): for idx, op in enumerate(block.ops): # sharding use both allreduce and reduce to sync grad if op.type == "c_allreduce_sum" or op.type == "c_reduce_sum": - if op.all_attrs()["use_calc_stream"] == False: + if not op.all_attrs()["use_calc_stream"]: ring_id = op.desc.attr("ring_id") var_name = op.desc.input_arg_names()[0] param = var_name.split("@")[0] @@ -153,7 +153,7 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): dp_grads_status[var_name] = 1 # check sharding allreduce and reduce but skip megatron allreduce elif op.type == "c_allreduce_sum" or op.type == "c_reduce_sum": - if op.all_attrs()["use_calc_stream"] == False: + if not op.all_attrs()["use_calc_stream"]: var_name = op.desc.input_arg_names()[0] ring_id = op.desc.attr("ring_id") if ring_id == sharding_ring_id: 
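Not every occurrence is rewritten as a bare truthiness test. Where the value is not guaranteed to be a plain bool, the patch keeps an identity comparison: mp_ops.py uses "linear._bias_attr is not False" because the attribute may be a ParamAttr object, None, or the literal False (bias disabled), and the auto_parallel AMP pass further below keeps "is True" / "is False" because its per-op lookup is effectively tri-state (fp16, fp32, or not yet decided). A minimal sketch of that distinction; the dict here is illustrative and does not reflect the real AMPState internals:

    _op_fp16_dict = {101: True, 102: False}   # op 103 is deliberately absent

    def _is_fp16_op(op_id):
        # returns True (fp16), False (fp32) or None (undecided)
        return _op_fp16_dict.get(op_id)

    for op_id in (101, 102, 103):
        if _is_fp16_op(op_id) is True:        # "if _is_fp16_op(op_id):" would be equivalent here,
            print(op_id, "insert cast to fp16")
        elif _is_fp16_op(op_id) is False:     # but "not _is_fp16_op(op_id)" would wrongly match None too
            print(op_id, "insert cast to fp32")
        else:
            print(op_id, "leave op unchanged")

For the same reason, "if linear._bias_attr:" would treat None differently from "linear._bias_attr is not False", so identity against the literal is the faithful rewrite in that spot.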
diff --git a/python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py index 0cd86ad08bde29aa3f05c590045d8a4e1dd719e2..8f2e113b52e1715874535ceba4575f9a8cb04c94 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py @@ -57,7 +57,7 @@ class TensorParallelOptimizer(MetaOptimizerBase): if not self.role_maker._is_collective: return False - if self.user_defined_strategy.tensor_parallel == True: + if self.user_defined_strategy.tensor_parallel: return True return False diff --git a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py index fec5005627b88a0027c6fc43ca7e5949fc75cd01..29cbe0d9dcac27552419a7ef4cdad990194f550c 100755 --- a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py +++ b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py @@ -720,7 +720,7 @@ class PipelineLayer(Layer): def _need_recompute(self, funcs, inputs): if not any( - input_.stop_gradient == False + not input_.stop_gradient for input_ in inputs if isinstance(input_, paddle.Tensor) ): diff --git a/python/paddle/distributed/fleet/model.py b/python/paddle/distributed/fleet/model.py index 21e6d07ad55e329ee6439cb0f04c650a924c0bf3..a132860aac1626f7a3adf7c45f4580d2d7d43778 100644 --- a/python/paddle/distributed/fleet/model.py +++ b/python/paddle/distributed/fleet/model.py @@ -90,7 +90,7 @@ def distributed_model(model): amp_enable = False strategy = fleet_env._user_defined_strategy - if strategy.amp == True: + if strategy.amp: amp_enable = True amp_level = "O2" if strategy.amp_configs['use_pure_fp16'] else "O1" if amp_level.upper() == "O2": @@ -122,7 +122,7 @@ def distributed_model(model): use_dynamic_loss_scaling=use_dynamic_loss_scaling, ) - if strategy.heter_ccl_mode == True: + if strategy.heter_ccl_mode: distributed_model = paddle.DataParallel( model, comm_buffer_size=strategy.fuse_grad_size_in_MB, diff --git a/python/paddle/distributed/fleet/optimizer.py b/python/paddle/distributed/fleet/optimizer.py index 37a3a896f6b05664f110668524535f2af583eca6..f67c108486a9be51958dbb58b47d554c85a23ed6 100644 --- a/python/paddle/distributed/fleet/optimizer.py +++ b/python/paddle/distributed/fleet/optimizer.py @@ -59,7 +59,7 @@ def _dygraph_distributed_optimizer(optimizer, strategy=None): fleet_env._context = {} if fleet_env.worker_num() > 1: - if fleet_env._user_defined_strategy.heter_ccl_mode == False: + if not fleet_env._user_defined_strategy.heter_ccl_mode: return HybridParallelOptimizer( optimizer, fleet_env._hcg, fleet_env._user_defined_strategy ) diff --git a/python/paddle/distributed/fleet/recompute/recompute.py b/python/paddle/distributed/fleet/recompute/recompute.py index 2657c60f02c9a7a8b8c9f189ef1230d355f76818..fd8cffdff00a8c4e7979f5a1d8f2f7c81cbc8252 100755 --- a/python/paddle/distributed/fleet/recompute/recompute.py +++ b/python/paddle/distributed/fleet/recompute/recompute.py @@ -41,7 +41,7 @@ def detach_variable(inputs): def check_recompute_necessary(inputs): if not any( - input_.stop_gradient == False + not input_.stop_gradient for input_ in inputs if isinstance(input_, (core.eager.Tensor, paddle.Tensor)) ): diff --git a/python/paddle/distributed/fleet/runtime/the_one_ps.py b/python/paddle/distributed/fleet/runtime/the_one_ps.py index 
5a0be9a1e018f6c317a2e5dcf2d7eb508427a8cd..7de34aa6e1c85891cbe7ab78f4c5c7ff58b89bd7 100644 --- a/python/paddle/distributed/fleet/runtime/the_one_ps.py +++ b/python/paddle/distributed/fleet/runtime/the_one_ps.py @@ -337,7 +337,7 @@ class CommonAccessor: self.table_num = size self.table_dim = single_dim - if oop.type != 'adam' and adam_d2sum == True: + if oop.type != 'adam' and adam_d2sum: print('optimization algorithm is not adam, set adam_d2sum False') adam_d2sum = False print("adam_d2sum:", adam_d2sum) diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py index 0c5bff02ed820d83957a8c89176afbb4cc554311..6dd100a6f9e70ade47bb69dc681bf21ec0ae4044 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py @@ -231,7 +231,7 @@ class HybridParallelInferenceHelper(object): ) else: if isinstance(role_maker, fleet.base.role_maker.RoleMakerBase): - assert role_maker._is_collective == True + assert role_maker._is_collective self.role_maker = role_maker # communication_group info diff --git a/python/paddle/distributed/fleet/utils/ps_util.py b/python/paddle/distributed/fleet/utils/ps_util.py index 9b079d64bb5307c155d787ac13832bccb64a0ddc..d283dbe1fe8b67f91ad195424f6539028575bbe9 100644 --- a/python/paddle/distributed/fleet/utils/ps_util.py +++ b/python/paddle/distributed/fleet/utils/ps_util.py @@ -210,7 +210,7 @@ class DistributedInfer: if found: break if found: - if output_indexes[j] == True: + if output_indexes[j]: warnings.warn( "unable to re-arrange dags order to combine distributed embedding ops" ) diff --git a/python/paddle/distributed/passes/auto_parallel_amp.py b/python/paddle/distributed/passes/auto_parallel_amp.py index 3305982f5055c564d35a0a7eb8d5ca4702e3f079..c8932069a794e99d7e4edeec23a0c72810329651 100644 --- a/python/paddle/distributed/passes/auto_parallel_amp.py +++ b/python/paddle/distributed/passes/auto_parallel_amp.py @@ -80,9 +80,9 @@ class AMPState(object): fwd_op_id = dist_op_context.grad_op_id_to_op_id[ op.desc.original_id() ] - if self._is_fp16_op(fwd_op_id) == True: + if self._is_fp16_op(fwd_op_id) is True: self._op_fp16_dict[op.desc.original_id()] = True - elif self._is_fp16_op(fwd_op_id) == False: + elif self._is_fp16_op(fwd_op_id) is False: self._op_fp16_dict[op.desc.original_id()] = False elif int(op.attr('op_role')) == int(OpRole.Optimize): break @@ -132,13 +132,13 @@ class AMPState(object): # if it's one of inputs if ( self._is_fp16_op(prev_op.desc.original_id()) - == False + is False or prev_op.type in amp_lists.black_list ): is_black_op = True elif ( self._is_fp16_op(prev_op.desc.original_id()) - == True + is True or prev_op.type in amp_lists.white_list ): is_white_op = True @@ -161,7 +161,7 @@ class AMPState(object): num_cast_ops = 0 if int(op.attr('op_role')) == int(OpRole.Backward): break - if self._is_fp16_op(op.desc.original_id()) == False: + if self._is_fp16_op(op.desc.original_id()) is False: num_cast_ops = self._insert_cast_op_forward( op, idx, @@ -169,7 +169,7 @@ class AMPState(object): core.VarDesc.VarType.FP32, dist_context, ) - elif self._is_fp16_op(op.desc.original_id()) == True: + elif self._is_fp16_op(op.desc.original_id()) is True: num_cast_ops = self._insert_cast_op_forward( op, idx, @@ -302,7 +302,7 @@ class AMPState(object): grad_op_orig_id = grad_op.desc.original_id() dist_op_context = dist_context.dist_op_context if grad_op_orig_id in 
dist_op_context.grad_op_id_to_op_id: - if self._is_fp16_op(grad_op_orig_id) == False: # fp32 + if self._is_fp16_op(grad_op_orig_id) is False: # fp32 num_cast_ops = self._insert_cast_op_backward( grad_op, idx, @@ -311,7 +311,7 @@ class AMPState(object): dist_context, appended_grad_times, ) - elif self._is_fp16_op(grad_op_orig_id) == True: # fp16 + elif self._is_fp16_op(grad_op_orig_id) is True: # fp16 num_cast_ops = self._insert_cast_op_backward( grad_op, idx, diff --git a/python/paddle/distributed/passes/auto_parallel_fp16.py b/python/paddle/distributed/passes/auto_parallel_fp16.py index cf1b2d45290bd4143e219e789cc3f24ee209b62a..8ad8b2a8fad41ca4aaf6451df53545501ab41c8c 100644 --- a/python/paddle/distributed/passes/auto_parallel_fp16.py +++ b/python/paddle/distributed/passes/auto_parallel_fp16.py @@ -235,10 +235,7 @@ class FP16State(object): for op in block.ops: if is_forward_op(op): # NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python - if ( - self._is_fp16_op(op.desc.original_id()) == True - or op.type == "cast" - ): + if self._is_fp16_op(op.desc.original_id()) or op.type == "cast": for in_name in op.input_names: if _keep_fp32_input(op, in_name): continue @@ -255,7 +252,7 @@ class FP16State(object): self.set_var_to_fp16(out_var_name, block) set_op_dtype_to_fp16(op) # NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python - elif self._is_fp16_op(op.desc.original_id()) == False: + elif not self._is_fp16_op(op.desc.original_id()): for out_var_name in op.output_arg_names: out_var = block.vars.get(out_var_name) if out_var is None or out_var.type not in _valid_types: @@ -263,7 +260,7 @@ class FP16State(object): if out_var.dtype == core.VarDesc.VarType.FP16: out_var.desc.set_dtype(core.VarDesc.VarType.FP32) elif is_backward_op(op): - if self._is_fp16_op(op.desc.original_id()) == True: + if self._is_fp16_op(op.desc.original_id()): for out_name in op.output_names: if _keep_fp32_output(op, out_name): continue @@ -271,7 +268,7 @@ class FP16State(object): self.set_var_to_fp16(out_var_name, block) set_op_dtype_to_fp16(op) # NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python - elif self._is_fp16_op(op.desc.original_id()) == False: + elif not self._is_fp16_op(op.desc.original_id()): for out_var_name in op.output_arg_names: out_var = block.vars.get(out_var_name) if out_var is None or out_var.type not in _valid_types: @@ -290,7 +287,7 @@ class FP16State(object): idx += 1 continue elif is_forward_op(op): - if self._is_fp16_op(op.desc.original_id()) == False: + if not self._is_fp16_op(op.desc.original_id()): num_cast_ops = self._insert_forward_cast_ops( op, idx, @@ -299,7 +296,7 @@ class FP16State(object): core.VarDesc.VarType.FP32, self.dist_context, ) - elif self._is_fp16_op(op.desc.original_id()) == True: + elif self._is_fp16_op(op.desc.original_id()): num_cast_ops = self._insert_forward_cast_ops( op, idx, @@ -310,7 +307,7 @@ class FP16State(object): ) elif is_backward_op(op): if op.desc.original_id() in dist_op_context.grad_op_id_to_op_id: - if self._is_fp16_op(op.desc.original_id()) == False: + if not self._is_fp16_op(op.desc.original_id()): num_cast_ops = self._insert_backward_cast_ops( op, idx, @@ -319,7 +316,7 @@ class FP16State(object): core.VarDesc.VarType.FP32, self.dist_context, ) - elif self._is_fp16_op(op.desc.original_id()) == True: + elif self._is_fp16_op(op.desc.original_id()): num_cast_ops = self._insert_backward_cast_ops( op, idx, diff --git a/python/paddle/distributed/passes/ps_server_pass.py 
b/python/paddle/distributed/passes/ps_server_pass.py index 37e5622ea8e7f6c71323476d187dbbdf5fba1013..c8f99895a83242045bb88251d5184c9f82727315 100755 --- a/python/paddle/distributed/passes/ps_server_pass.py +++ b/python/paddle/distributed/passes/ps_server_pass.py @@ -140,7 +140,7 @@ class AddLrDecayTablePass(PassBase): def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs - if hasattr(attrs['origin_main_program'], 'lr_sheduler') == False: + if not hasattr(attrs['origin_main_program'], 'lr_sheduler'): return assert isinstance( diff --git a/python/paddle/distributed/passes/ps_trainer_pass.py b/python/paddle/distributed/passes/ps_trainer_pass.py index f99d9f316d462a54198e0ecd389f1d66925804f8..56f73078d54dbe506fdfd516672e152a280342b8 100755 --- a/python/paddle/distributed/passes/ps_trainer_pass.py +++ b/python/paddle/distributed/passes/ps_trainer_pass.py @@ -304,7 +304,7 @@ class DistributedOpsPass(PassBase): if found: break if found: - if output_indexes[j] == True: + if output_indexes[j]: warnings.warn( "unable to re-arrange dags order to combine distributed embedding ops" ) diff --git a/python/paddle/distributed/ps/the_one_ps.py b/python/paddle/distributed/ps/the_one_ps.py index 86766d60ae8c0e32301746a5a6f80d347c40930c..d341a95b24be74992acb000aa8bbcbc4e473e567 100755 --- a/python/paddle/distributed/ps/the_one_ps.py +++ b/python/paddle/distributed/ps/the_one_ps.py @@ -443,7 +443,7 @@ class CommonAccessor(Accessor): self.table_num = size self.table_dim = single_dim - if oop.type != 'adam' and adam_d2sum == True: + if oop.type != 'adam' and adam_d2sum: print('optimization algorithm is not adam, set adam_d2sum False') adam_d2sum = False print("adam_d2sum:", adam_d2sum) @@ -703,7 +703,7 @@ class SparseTable(Table): if ( ctx.is_tensor_table() or len(ctx.origin_varnames()) < 1 - or (ctx.is_sparse() == False) + or (not ctx.is_sparse()) ): return table_proto.table_id = ctx.table_id() @@ -810,7 +810,7 @@ class GeoSparseTable(SparseTable): if ( ctx.is_tensor_table() or len(ctx.origin_varnames()) < 1 - or (ctx.is_sparse() == False) + or (not ctx.is_sparse()) ): return table_proto.table_id = ctx.table_id() @@ -845,7 +845,7 @@ class DenseTable(Table): if ( ctx.is_tensor_table() or len(ctx.origin_varnames()) < 1 - or (ctx.is_sparse() == True) + or (ctx.is_sparse()) ): return @@ -1281,7 +1281,7 @@ class TheOnePSRuntime(RuntimeBase): if not is_test: if ( self.context['ps_mode'] == DistributedMode.GEO - or self.is_heter_ps_mode == True + or self.is_heter_ps_mode ): self._communicator.init_params(dense_map) else: @@ -1298,7 +1298,7 @@ class TheOnePSRuntime(RuntimeBase): if ( self.context['ps_mode'] == DistributedMode.GEO - or self.is_heter_ps_mode == True + or self.is_heter_ps_mode ): if not self._communicator.is_running(): self._communicator.start() diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py index 75182a497213cfe6409e6b795d212ac81a326877..53628ad7e508451a6a7a1dd74e9561a64b8b025f 100755 --- a/python/paddle/distributed/ps/utils/public.py +++ b/python/paddle/distributed/ps/utils/public.py @@ -1744,7 +1744,7 @@ def create_backward_block( ): is_skip = True break - if is_skip == True: + if is_skip: continue block_append_op(program, origin_program, heter_block, op) diff --git a/python/paddle/fluid/tests/book/test_recognize_digits.py b/python/paddle/fluid/tests/book/test_recognize_digits.py index fd99eb04d99fc5b46ce344c0afb4b69698120172..9204bc7f1235d57a28128bc5c5be5e15ebcb9c55 100644 --- 
a/python/paddle/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/fluid/tests/book/test_recognize_digits.py @@ -237,7 +237,7 @@ def main(use_cuda, parallel, nn_type, combine): if not use_cuda and not parallel: save_dirname = "recognize_digits_" + nn_type + ".inference.model" save_full_dirname = "recognize_digits_" + nn_type + ".train.model" - if combine == True: + if combine: model_filename = "__model_combined__" params_filename = "__params_combined__" diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py index 3c8b71e71394e7a459ccb4da14bc4f224e03f095..637f1ba844be43469d1739cfe2e3fbb8f87334bc 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py @@ -144,7 +144,7 @@ class TestToStatic(unittest.TestCase): # inputs = InputSpec([batch_size, hidden_size], 'float32', 'x') # labels = InputSpec([batch_size], 'int64', 'label') - assert _non_static_mode() == True + assert _non_static_mode() engine = auto.Engine( model=mlp, loss=loss, @@ -155,7 +155,7 @@ class TestToStatic(unittest.TestCase): engine.fit(dataset, batch_size=batch_size) engine.evaluate(dataset, batch_size=batch_size) engine.predict(dataset, batch_size=batch_size) - assert _non_static_mode() == False + assert not _non_static_mode() class TestLazyInit(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_dynamic.py b/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_dynamic.py index 81f1bdede6f41abf8b2e5cd9aacb7894c16b871b..37af28458078ea7b9b7a6deebbec55684549a5dc 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_dynamic.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_dynamic.py @@ -593,7 +593,7 @@ class TestHessianNoBatch(unittest.TestCase): numerical_hessian = utils._np_concat_matrix_sequence(numerical_hessian) self.x.stop_gradient = False hessian = paddle.incubate.autograd.Hessian(func, self.x) - assert hessian[:].stop_gradient == False + assert not hessian[:].stop_gradient np.testing.assert_allclose( hessian[:].numpy(), numerical_hessian, self.rtol, self.atol ) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py index 57a199c133395fb66a38d4f97df2487d34db0472..bc725e9d13801cf4d891a73ecc389e71c62bfebb 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py @@ -116,9 +116,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): self.optimizer(avg_cost, strategy, train_prog, startup_prog) ops = [op.type for op in avg_cost.block.ops] vars = [x.name for x in train_prog.list_vars()] - parameters = [ - x.name for x in train_prog.list_vars() if x.persistable == True - ] + parameters = [x.name for x in train_prog.list_vars() if x.persistable] self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('cast', ops) self.assertIn('check_finite_and_unscale', ops) @@ -227,9 +225,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): ops = [op.type for op in avg_cost.block.ops] vars = [x.name for x in train_prog.list_vars()] - parameters = [ - x.name for x in train_prog.list_vars() if x.persistable == True - 
] + parameters = [x.name for x in train_prog.list_vars() if x.persistable] self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('subprog', ''.join(vars)) @@ -316,9 +312,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): ops = [op.type for op in avg_cost.block.ops] vars = [x.name for x in train_prog.list_vars()] - parameters = [ - x.name for x in train_prog.list_vars() if x.persistable == True - ] + parameters = [x.name for x in train_prog.list_vars() if x.persistable] self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('subprog', ''.join(vars)) @@ -445,9 +439,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): ops = [op.type for op in avg_cost.block.ops] vars = [x.name for x in train_prog.list_vars()] - parameters = [ - x.name for x in train_prog.list_vars() if x.persistable == True - ] + parameters = [x.name for x in train_prog.list_vars() if x.persistable] self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('cast', ops) @@ -564,9 +556,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): startup_prog, regularization=regularization, ) - parameters = [ - x.name for x in train_prog.list_vars() if x.persistable == True - ] + parameters = [x.name for x in train_prog.list_vars() if x.persistable] ops = [op.type for op in avg_cost.block.ops] vars = [x.name for x in train_prog.list_vars()] self.assertIn('@BroadCast', ''.join(vars)) @@ -653,9 +643,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): self.optimizer( avg_cost, strategy, train_prog, startup_prog, grad_clip=clip ) - parameters = [ - x.name for x in train_prog.list_vars() if x.persistable == True - ] + parameters = [x.name for x in train_prog.list_vars() if x.persistable] ops = [op.type for op in avg_cost.block.ops] vars = [x.name for x in train_prog.list_vars()] self.assertIn('@BroadCast', ''.join(vars)) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py index 916a21359a4f8e32ad12ceb876354ee99f1791c8..514009577cd11ac4ae5c14c1f6832bea23cb7c48 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py @@ -420,13 +420,13 @@ class TestAmpScaler(unittest.TestCase): decr_every_n_nan_or_inf=2, use_dynamic_loss_scaling=True, ) - self.assertEqual(scaler.is_enable() == True, True) + self.assertEqual(scaler.is_enable(), True) self.assertEqual(scaler.get_init_loss_scaling() == 1024, True) self.assertEqual(scaler.get_incr_ratio() == 2.0, True) self.assertEqual(scaler.get_decr_ratio() == 0.5, True) self.assertEqual(scaler.get_incr_every_n_steps() == 1000, True) self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 2, True) - self.assertEqual(scaler.is_use_dynamic_loss_scaling() == True, True) + self.assertEqual(scaler.is_use_dynamic_loss_scaling(), True) scaler.set_decr_every_n_nan_or_inf(4) self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 4, True) scaler.set_decr_ratio(0.1) @@ -460,7 +460,7 @@ class TestAmpScaler(unittest.TestCase): scaler3 = paddle.amp.GradScaler(enable=False) scaler3.load_state_dict(scaler_state) - self.assertEqual(scaler3.is_enable() == False, True) + self.assertFalse(scaler3.is_enable()) def test_state_dict_and_load_state_dict_error(self): def test_error(): diff --git 
a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py index 83c9462a89e9d20d827d0046d026ead3cd507cd1..1eec439f792d930adc307c67502a1e1d011b2e24 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py @@ -419,13 +419,13 @@ class TestAmpScaler(unittest.TestCase): decr_every_n_nan_or_inf=2, use_dynamic_loss_scaling=True, ) - self.assertEqual(scaler.is_enable() == True, True) + self.assertEqual(scaler.is_enable(), True) self.assertEqual(scaler.get_init_loss_scaling() == 1024, True) self.assertEqual(scaler.get_incr_ratio() == 2.0, True) self.assertEqual(scaler.get_decr_ratio() == 0.5, True) self.assertEqual(scaler.get_incr_every_n_steps() == 1000, True) self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 2, True) - self.assertEqual(scaler.is_use_dynamic_loss_scaling() == True, True) + self.assertEqual(scaler.is_use_dynamic_loss_scaling(), True) scaler.set_decr_every_n_nan_or_inf(4) self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 4, True) scaler.set_decr_ratio(0.1) @@ -459,7 +459,7 @@ class TestAmpScaler(unittest.TestCase): scaler3 = paddle.amp.GradScaler(enable=False) scaler3.load_state_dict(scaler_state) - self.assertEqual(scaler3.is_enable() == False, True) + self.assertFalse(scaler3.is_enable()) def test_state_dict_and_load_state_dict_error(self): def test_error(): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py index 03db89350795c339470eeb5415080680066b2996..b484a88b7df31957f0c2edb736dd1b5a9f78f5aa 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py @@ -356,7 +356,7 @@ class conv2d(fluid.dygraph.Layer): ): super(conv2d, self).__init__() - if use_bias == False: + if not use_bias: con_bias_attr = False else: con_bias_attr = fluid.ParamAttr( @@ -426,7 +426,7 @@ class DeConv2D(fluid.dygraph.Layer): ): super(DeConv2D, self).__init__() - if use_bias == False: + if not use_bias: de_bias_attr = False else: de_bias_attr = fluid.ParamAttr( diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py index ecda3427e7ea203b84c28f2798d91c77a30a150d..67ea0a28bc0dd9207582d30b2e02116e94befab1 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py @@ -93,7 +93,7 @@ class TestTensorSize(unittest.TestCase): prog_trans = paddle.jit.ProgramTranslator() prog_trans.enable(to_static) x = paddle.ones([1, 2, 3]) - if to_static == False: + if not to_static: return tensor_size(x) return tensor_size(x).numpy() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py index e16fd8b10c2f8afa54f74d9a79518c171b685de5..09171d64a28f3b73d03a9b6b586cd09e78667db4 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py @@ -134,7 +134,7 @@ 
class TestConvBnFusePass(PassAutoScanTest): data_layout=data_format, is_test=True, ) - if has_bias == True: + if has_bias: conv2d_op.inputs["Bias"] = ["conv2d_bias"] ops = [conv2d_op, bn_op] @@ -156,7 +156,7 @@ class TestConvBnFusePass(PassAutoScanTest): }, outputs=["batch_norm_Y"], ) - if has_bias == True: + if has_bias: program_config.weights["conv2d_bias"] = TensorConfig( data_gen=partial(generate_conv2d_Bias) ) @@ -202,7 +202,7 @@ class TestConvBnFusePass(PassAutoScanTest): def teller2(program_config, predictor_config): return ( predictor_config.mkldnn_enabled() - and program_config.ops[0].attrs['has_bias'] == True + and program_config.ops[0].attrs['has_bias'] ) self.add_ignore_check_case( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py index 3cbd48dea6d4e6e6b559bab7d6632b54a6fd506e..883b4a75bc2243ddae2d60aae2bca44405b326a8 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py @@ -43,11 +43,11 @@ class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest): def is_program_valid(self, program_config: ProgramConfig) -> bool: # is_sparse is only support False - if program_config.ops[0].attrs['is_sparse'] == True: + if program_config.ops[0].attrs['is_sparse']: return False # is_distributed only support False - if program_config.ops[0].attrs['is_distributed'] == True: + if program_config.ops[0].attrs['is_distributed']: return False # axis only support -1 and the last dim. diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py index ba6179a1ff41fa7a9251699afcba4cf166058c97..92881bd8d8200e2862d5e26546a48900deb87fce 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py @@ -100,7 +100,7 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): outputs={"Out": ["affine_channel_ouput"]}, data_layout=data_format, ) - if has_bias == True: + if has_bias: conv2d_op.inputs["Bias"] = ["conv2d_bias"] ops = [conv2d_op, ac_op] @@ -123,7 +123,7 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): }, outputs=["affine_channel_ouput"], ) - if has_bias == True: + if has_bias: program_config.weights["conv2d_bias"] = TensorConfig( data_gen=partial(generate_bias) ) @@ -145,7 +145,7 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): def teller2(program_config, predictor_config): return ( predictor_config.mkldnn_enabled() - and program_config.ops[0].attrs['has_bias'] == True + and program_config.ops[0].attrs['has_bias'] ) self.add_ignore_check_case( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py index 46a7b82ef4d7809c85b044bd1f4e76ada5e4f9dc..6c3c47687751a584e4e09e33f7ee800c715671c7 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py @@ -304,7 +304,7 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest): self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(attrs, dynamic_shape): 
- if dynamic_shape == True: + if dynamic_shape: return 1, 4 else: if attrs[0]['axis'] != 0: diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py index 91b8380d7d6127347e2d8c98c48f02ea0a67af67..ee8f900f5126b1d2b7576f4a005ed796d5b10fd7 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py @@ -123,7 +123,7 @@ class TrtConvertDropoutTest(TrtLayerAutoScanTest): def generate_trt_nodes_num(attrs, dynamic_shape): if attrs[0]['dropout_implementation'] == "upscale_in_train": return 0, 2 - elif self.dims == 1 and dynamic_shape == False: + elif self.dims == 1 and not dynamic_shape: return 0, 3 else: return 1, 2 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py index ca2984fa1877712f5ac162065d0342f89d0798c7..fab7428579a559602505ffd19e248845248abe8d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py @@ -85,7 +85,7 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest): "index_data": TensorConfig( data_gen=partial( generate_input2 - if index_type_int32 == True + if index_type_int32 else generate_input4, index, ) @@ -180,7 +180,7 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest): if self.input_num == 3: return 0, 5 else: - if dynamic_shape and self.index_type_int32 == True: + if dynamic_shape and self.index_type_int32: return 1, 3 else: return 0, 4 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py index d6d2f876361e51a0ff80d3dfc7eebba64755177e..29962386a48dcb04b7293cfd479c9ebc784f7e0c 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py @@ -107,7 +107,7 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest): if compile_version >= valid_version: return 1, 2 else: - if attrs[0]['approximate'] == True: + if attrs[0]['approximate']: return 0, 3 else: return 1, 2 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py index 1df42024992cc5ca60c3f344311865d2c0b1e331..8f39add1493ce24b985482ca9644dfc3f8e158d5 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py @@ -137,7 +137,7 @@ class TrtConvertNearestInterpTest(TrtLayerAutoScanTest): and self.dynamic_shape.min_input_shape ): return True - if program_config.ops[0].attrs['align_corners'] == True: + if program_config.ops[0].attrs['align_corners']: return True return False diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py index b4eacbb136f06d90ef6dcf288ebd25c02ae6a502..7bdaab0ee841c1571bbbc60028a81cfa4d9aeb17 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py @@ -29,7 +29,7 @@ class 
TrtConvertPool2dTest(TrtLayerAutoScanTest): ksize = program_config.ops[0].attrs['ksize'] pooling_type = program_config.ops[0].attrs['pooling_type'] global_pooling = program_config.ops[0].attrs['global_pooling'] - if global_pooling == False: + if not global_pooling: if pooling_type == 'avg': for index in range(len(ksize)): if ksize[index] <= paddings[index]: @@ -174,10 +174,10 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): def teller(program_config, predictor_config): if ( program_config.ops[0].attrs['pooling_type'] == 'avg' - and program_config.ops[0].attrs['global_pooling'] == False - and program_config.ops[0].attrs['exclusive'] == True - and program_config.ops[0].attrs['adaptive'] == False - and program_config.ops[0].attrs['ceil_mode'] == True + and not program_config.ops[0].attrs['global_pooling'] + and program_config.ops[0].attrs['exclusive'] + and not program_config.ops[0].attrs['adaptive'] + and program_config.ops[0].attrs['ceil_mode'] ): return True return False diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roi_align.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roi_align.py index f89527359d4d19f650f3d1b96668b3df75323b05..0dc286722d1ae0e95bf66757ec15147f52947981 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roi_align.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roi_align.py @@ -159,10 +159,10 @@ class TrtConvertRoiAlignTest(TrtLayerAutoScanTest): def generate_trt_nodes_num(attrs, dynamic_shape): if self.num_input == 0: - if dynamic_shape == True: + if dynamic_shape: return 0, 5 elif self.num_input == 1: - if dynamic_shape == True: + if dynamic_shape: return 1, 3 else: return 0, 4 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shuffle_channel.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shuffle_channel.py index 04c1e3259fc12da2b0d603134bcb600d7e2daa65..9c4c9071e37c91b789becc330539289ceac0ea6d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shuffle_channel.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shuffle_channel.py @@ -77,7 +77,7 @@ class TrtConvertShuffleChannelTest(TrtLayerAutoScanTest): ver = paddle_infer.get_trt_compile_version() if ( ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 8000 - and dynamic_shape == True + and dynamic_shape ): return 0, 3 else: diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py index 18ea2abe6bc35f8cadc429e5e8ec0ad3630da5ae..5e2e984fc7bd95e3ca2f8328a109d6e4caff47bb 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py @@ -192,7 +192,7 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(attrs, dynamic_shape): - if dynamic_shape == True: + if dynamic_shape: return 1, 3 else: return 0, 4 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py index 91e5e499b19b4f2f5dd101ee63a0d57d5917a068..c891f236f2fceeb48deb541f321c8b7b4f90d370 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py +++ 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py @@ -181,7 +181,7 @@ class TrtConvertStackTest(TrtLayerAutoScanTest): self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(attrs, dynamic_shape): - if dynamic_shape == True: + if dynamic_shape: return 1, 4 else: return 0, 5 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_tile.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_tile.py index d65d7e3c29f9694ba73cea58bd2bdd29771de912..adcb5c5e4b90b4cf8b4a4c76d8ae13e228791b65 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_tile.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_tile.py @@ -81,7 +81,7 @@ class TrtConvertTileTest(TrtLayerAutoScanTest): def generate_trt_nodes_num(attrs, dynamic_shape): ver = paddle_infer.get_trt_compile_version() if ver[0] * 1000 + ver[1] * 100 + ver[0] * 10 >= 7000: - if dynamic_shape == True: + if dynamic_shape: return 0, 3 else: return 1, 2 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k_v2.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k_v2.py index 477ea649effd366496b670250c0b8ce7883fc828..a5575eae55490cab076e3c7e2228b2169ad51780 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k_v2.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k_v2.py @@ -126,7 +126,7 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): def generate_trt_nodes_num(attrs, dynamic_shape): if self.dims == 1: return 0, 4 - if self.sort == False: + if not self.sort: return 0, 4 return 1, 3 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_transpose.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_transpose.py index 9ebcd87399230753d1b01addcaac795c2bb490cd..0a987ca1fb69c8fbbf9192b15621dc92e0a2c2df 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_transpose.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_transpose.py @@ -123,7 +123,7 @@ class TrtConvertTransposeTest(TrtLayerAutoScanTest): self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(attrs, dynamic_shape): - if dynamic_shape == True: + if dynamic_shape: return 1, 2 else: if attrs[0]['axis'][0] == 0: diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py index a0d089c69c934464f54beb33fefb76b6c8dfa2d8..7e595a48c9d72df4a3b1bde5306ba7bd8f0d98a4 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py @@ -28,7 +28,7 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest): def sample_program_configs(self): def generate_input1(attrs: List[Dict[str, Any]], batch, channel): - if attrs[0]['iou_aware'] == True: + if attrs[0]['iou_aware']: return np.ones([batch, 3 * (channel + 6), 13, 13]).astype( np.float32 ) @@ -108,7 +108,7 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest): self, program_config ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): - if attrs[0]['iou_aware'] == True: + if attrs[0]['iou_aware']: channel = 3 * (attrs[0]['class_num'] + 6) self.dynamic_shape.min_input_shape = { "yolo_box_input": [1, channel, 12, 12], diff --git 
a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py index 4001e2ba76ba7ec6fd071848de97f61f2022acfc..d71e6446c60482b31ff28f8d3879372f1bc19577 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py @@ -128,7 +128,7 @@ class TestInt8(TestElementwiseAddOp): def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.init_scales() - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) def test_check_grad_normal(self): pass @@ -165,9 +165,7 @@ class TestInt8Scales(TestInt8): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.init_scales() int_atol = 1 # different quantization techniques - self.check_output( - check_dygraph=(self.use_mkldnn == False), atol=int_atol - ) + self.check_output(check_dygraph=(not self.use_mkldnn), atol=int_atol) class TestUint8Scales(TestInt8Scales): diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py index 3ca09093b814d76fb4dcc49faa5a43dc1da8e3e3..4881d1c3763e435abbf4797dd3f56494f2b855f2 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py @@ -101,7 +101,7 @@ class TestInt8(ElementwiseMulOp): def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.init_scales() - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) def test_check_grad_normal(self): pass @@ -138,9 +138,7 @@ class TestInt8Scales(TestInt8): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.init_scales() int_atol = 1 # different quantization techniques - self.check_output( - check_dygraph=(self.use_mkldnn == False), atol=int_atol - ) + self.check_output(check_dygraph=(not self.use_mkldnn), atol=int_atol) class TestUint8Scales(TestInt8Scales): diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_onnx_format_quantization_mobilenetv1.py b/python/paddle/fluid/tests/unittests/mkldnn/test_onnx_format_quantization_mobilenetv1.py index 1b27a39f2e95665f8da70da1b95b5f3db36aaf1a..218900b35f4f3ee23bb170336eedeb44a1db0be6 100755 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_onnx_format_quantization_mobilenetv1.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_onnx_format_quantization_mobilenetv1.py @@ -49,7 +49,7 @@ def resize_short(img, target_size): def crop_image(img, target_size, center): width, height = img.size size = target_size - if center == True: + if center: w_start = (width - size) / 2 h_start = (height - size) / 2 else: diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index 6147c88dc56801a2d8f7f8afcc85d12533efd6a7..baa4f26feb81c974115f21ed0912b9491b83a751 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -371,25 +371,22 @@ class OpTest(unittest.TestCase): return True def is_xpu_op_test(): - return hasattr(cls, "use_xpu") and cls.use_xpu == True + return hasattr(cls, "use_xpu") and cls.use_xpu def is_mkldnn_op_test(): - return hasattr(cls, "use_mkldnn") and cls.use_mkldnn == True + return hasattr(cls, 
"use_mkldnn") and cls.use_mkldnn def is_rocm_op_test(): return core.is_compiled_with_rocm() def is_npu_op_test(): - return hasattr(cls, "use_npu") and cls.use_npu == True + return hasattr(cls, "use_npu") and cls.use_npu def is_mlu_op_test(): - return hasattr(cls, "use_mlu") and cls.use_mlu == True + return hasattr(cls, "use_mlu") and cls.use_mlu def is_custom_device_op_test(): - return ( - hasattr(cls, "use_custom_device") - and cls.use_custom_device == True - ) + return hasattr(cls, "use_custom_device") and cls.use_custom_device if not hasattr(cls, "op_type"): raise AssertionError( @@ -465,17 +462,17 @@ class OpTest(unittest.TestCase): ) def is_mkldnn_op(self): - return (hasattr(self, "use_mkldnn") and self.use_mkldnn == True) or ( + return (hasattr(self, "use_mkldnn") and self.use_mkldnn) or ( hasattr(self, "attrs") and "use_mkldnn" in self.attrs - and self.attrs["use_mkldnn"] == True + and self.attrs["use_mkldnn"] ) def is_xpu_op(self): - return (hasattr(self, "use_xpu") and self.use_xpu == True) or ( + return (hasattr(self, "use_xpu") and self.use_xpu) or ( hasattr(self, "attrs") and "use_xpu" in self.attrs - and self.attrs["use_xpu"] == True + and self.attrs["use_xpu"] ) # set the self.output_dtype . @@ -1542,7 +1539,7 @@ class OpTest(unittest.TestCase): ): # disable legacy dygraph check when check_eager is True - if check_eager == True: + if check_eager: check_dygraph = False def find_imperative_actual(target_name, dygraph_outs, place): @@ -1912,7 +1909,7 @@ class OpTest(unittest.TestCase): ) if check_eager: - assert check_dygraph == False + assert not check_dygraph return outs, eager_dygraph_outs, fetch_list elif check_dygraph: return outs, dygraph_outs, fetch_list @@ -2002,7 +1999,7 @@ class OpTest(unittest.TestCase): ): # disable legacy dygraph check when check_eager is True - if check_eager == True: + if check_eager: check_dygraph = False self.__class__.op_type = self.op_type @@ -2024,7 +2021,7 @@ class OpTest(unittest.TestCase): check_eager=check_eager, ) if check_eager: - assert check_dygraph == False + assert not check_dygraph outs, eager_dygraph_outs, fetch_list = res elif check_dygraph: outs, dygraph_outs, fetch_list = res @@ -2143,7 +2140,7 @@ class OpTest(unittest.TestCase): ): # disable legacy dygraph check when check_eager is True - if check_eager == True: + if check_eager: check_dygraph = False self._check_grad_helper() @@ -2180,7 +2177,7 @@ class OpTest(unittest.TestCase): ): # disable legacy dygraph check when check_eager is True - if check_eager == True: + if check_eager: check_dygraph = False self.scope = core.Scope() @@ -2207,7 +2204,7 @@ class OpTest(unittest.TestCase): # oneDNN numeric gradient should use CPU kernel use_onednn = False - if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"] == True: + if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"]: op_attrs["use_mkldnn"] = False use_onednn = True diff --git a/python/paddle/fluid/tests/unittests/op_test_xpu.py b/python/paddle/fluid/tests/unittests/op_test_xpu.py index 220bd09f2cafa06abe532b2ee49630d106084d82..295f6a67d8d62a3436c407a508e67d10ba9edfdc 100644 --- a/python/paddle/fluid/tests/unittests/op_test_xpu.py +++ b/python/paddle/fluid/tests/unittests/op_test_xpu.py @@ -51,7 +51,7 @@ class XPUOpTest(OpTest): if cls.dtype == np.float16: place = paddle.XPUPlace(0) - if core.is_float16_supported(place) == False: + if not core.is_float16_supported(place): return if cls.dtype == np.float64: @@ -98,7 +98,7 @@ class XPUOpTest(OpTest): return if self.dtype == np.float16: - if core.is_float16_supported(place) == 
False: + if not core.is_float16_supported(place): return if self.dtype == np.float16: @@ -172,7 +172,7 @@ class XPUOpTest(OpTest): return if self.dtype == np.float16: - if core.is_float16_supported(place) == False: + if not core.is_float16_supported(place): return if self.dtype == np.float16: @@ -254,7 +254,7 @@ class XPUOpTest(OpTest): # oneDNN numeric gradient should use CPU kernel use_onednn = False - if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"] == True: + if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"]: op_attrs["use_mkldnn"] = False use_onednn = True diff --git a/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py b/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py index 08055ae170393a46694fa567599af6229dc949e3..ccaed0b984fede346919041fe543ae176ca7aa34 100755 --- a/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py +++ b/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py @@ -167,7 +167,7 @@ def get_user_defined_strategy(config): strategy.is_fl_ps_mode = ( True if config.get("runner.is_fl_ps_mode") == 1 else False ) - if strategy.is_fl_ps_mode == True: + if strategy.is_fl_ps_mode: strategy.pipeline = False micro_num = 1 strategy.pipeline_configs = { diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py index 937d30cd74aac6a6b14375cdf9cbd7f65fdf6ddb..9904ee0d100a3df5a8065cd7d6eb5c86de74e56c 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_adam_op.py @@ -1126,11 +1126,11 @@ class TestMultiTensorAdam(unittest.TestCase): ) for idx in range(2): - if place == 'gpu' and use_amp == True: + if place == 'gpu' and use_amp: model = paddle.amp.decorate(models=model, level='O2') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - if place == 'gpu' and use_amp == True: + if place == 'gpu' and use_amp: with paddle.amp.auto_cast(level='O2'): output = model(input) loss = paddle.mean(output) diff --git a/python/paddle/fluid/tests/unittests/test_adamw_op.py b/python/paddle/fluid/tests/unittests/test_adamw_op.py index 6e4a7b43f20bf7ce252a5d01637fd5fec6536ffe..15c8bf69bc01b4d9f2f8a8cb93c7b089344e8915 100644 --- a/python/paddle/fluid/tests/unittests/test_adamw_op.py +++ b/python/paddle/fluid/tests/unittests/test_adamw_op.py @@ -302,11 +302,11 @@ class TestAdamWOpMultiPrecison(unittest.TestCase): ) for idx in range(2): - if place == 'gpu' and use_amp == True: + if place == 'gpu' and use_amp: model = paddle.amp.decorate(models=model, level='O2') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - if place == 'gpu' and use_amp == True: + if place == 'gpu' and use_amp: with paddle.amp.auto_cast(level='O2'): output = model(input) loss = paddle.mean(output) diff --git a/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py index c56235cd0d12925bba33e7e25b00fb514987941e..41fc17187093ce6f63d61981173e9a9636f8d721 100644 --- a/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py @@ -177,8 +177,8 @@ def train(use_cuda, thread_num, cpu_num): fetch_list=[array, acc, prediction, avg_loss.name] ) - assert numpy.allclose(array_v[0], prediction_v) == True - assert numpy.allclose(array_v[1], acc_v) == True + assert numpy.allclose(array_v[0], prediction_v) + assert numpy.allclose(array_v[1], acc_v) loss_val = numpy.mean(loss_val) if step % 10 == 0: diff 
--git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index 381640621cb1cf43c03bb87150bf6e7ed0174ae3..ccd7de2c3171fe3919f5fb3b9b67f184a796edf3 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -313,7 +313,7 @@ class TestBatchNormOpInference(unittest.TestCase): # dims will be in NCHW order as it is MKL-DNN way # of memory descripting. So we need to convert NCHW # dims into NHWC. - if data_layout == "NHWC" and self.use_mkldnn == True: + if data_layout == "NHWC" and self.use_mkldnn: # Create executor to have MKL-DNN cache # cleared after NHWC unit test place = core.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py index 778489eb668dce30edafd6934cff425313a6548e..f5db751169a1f802f533346804ce017d6f9851d6 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py @@ -391,7 +391,7 @@ class TestBatchNormUseGlobalStats(unittest.TestCase): ) net2.weight = net1.weight net2.bias = net1.bias - if self.trainable_statistics == True: + if self.trainable_statistics: net1.training = False net2.training = False y1 = net1(x) diff --git a/python/paddle/fluid/tests/unittests/test_box_coder_op.py b/python/paddle/fluid/tests/unittests/test_box_coder_op.py index 233a686c058068e7b2a6396247359c92ffc1411a..fd3106f9c6f8493f78b8939c738248be430724ff 100644 --- a/python/paddle/fluid/tests/unittests/test_box_coder_op.py +++ b/python/paddle/fluid/tests/unittests/test_box_coder_op.py @@ -20,8 +20,8 @@ import paddle.fluid.core as core def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0): - pb_w = p_box[:, 2] - p_box[:, 0] + (norm == False) - pb_h = p_box[:, 3] - p_box[:, 1] + (norm == False) + pb_w = p_box[:, 2] - p_box[:, 0] + (not norm) + pb_h = p_box[:, 3] - p_box[:, 1] + (not norm) pb_x = pb_w * 0.5 + p_box[:, 0] pb_y = pb_h * 0.5 + p_box[:, 1] shape = (1, p_box.shape[0]) if axis == 0 else (p_box.shape[0], 1) @@ -55,8 +55,8 @@ def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0): def box_encoder(t_box, p_box, pb_v, output_box, norm): - pb_w = p_box[:, 2] - p_box[:, 0] + (norm == False) - pb_h = p_box[:, 3] - p_box[:, 1] + (norm == False) + pb_w = p_box[:, 2] - p_box[:, 0] + (not norm) + pb_h = p_box[:, 3] - p_box[:, 1] + (not norm) pb_x = pb_w * 0.5 + p_box[:, 0] pb_y = pb_h * 0.5 + p_box[:, 1] shape = (1, p_box.shape[0]) diff --git a/python/paddle/fluid/tests/unittests/test_center_loss.py b/python/paddle/fluid/tests/unittests/test_center_loss.py index b7eda71c0217b03e95ce9d057806366a4cf506e3..7bf68100e029d0b04aac24638a9df35cfd9a775d 100644 --- a/python/paddle/fluid/tests/unittests/test_center_loss.py +++ b/python/paddle/fluid/tests/unittests/test_center_loss.py @@ -58,7 +58,7 @@ class TestCenterLossOp(OpTest): 'CenterUpdateRate': rate, } - if self.need_update == True: + if self.need_update: self.outputs = { 'SampleCenterDiff': output, 'Loss': loss, diff --git a/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py b/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py index bc88cba96eb6e659e6494a297eb5de5c40902a65..fb8a7057fd7d4f9bece4d12bcd9e48abc0153114 100644 --- a/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py @@ -115,7 +115,7 @@ class 
TestEqualReduceAPI(unittest.TestCase): x = paddle.ones(shape=[10, 10], dtype="int32") y = paddle.ones(shape=[10, 10], dtype="int32") out = paddle.equal_all(x, y) - assert out.numpy()[0] == True + assert out.numpy()[0] is np.True_ paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index 34a34f062efb8b7684a2e6c5da87729e43038b1d..0c22f7ff7b2778c7b8609c66fbba294c84aeca6d 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -477,13 +477,12 @@ class TestConv2DOp(OpTest): place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-5, check_dygraph=(not self.use_mkldnn) ) def test_check_grad(self): if self.dtype == np.float16 or ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True + hasattr(self, "no_need_check_grad") and self.no_need_check_grad ): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() @@ -493,13 +492,12 @@ class TestConv2DOp(OpTest): {'Input', 'Filter'}, 'Output', max_relative_error=0.02, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_no_filter(self): if self.dtype == np.float16 or ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True + hasattr(self, "no_need_check_grad") and self.no_need_check_grad ): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() @@ -510,13 +508,12 @@ class TestConv2DOp(OpTest): 'Output', max_relative_error=0.02, no_grad_set=set(['Filter']), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_no_input(self): if self.dtype == np.float16 or ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True + hasattr(self, "no_need_check_grad") and self.no_need_check_grad ): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() @@ -526,7 +523,7 @@ class TestConv2DOp(OpTest): ['Filter'], 'Output', no_grad_set=set(['Input']), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def init_test_case(self): @@ -804,7 +801,7 @@ class TestConv2DOp_v2(OpTest): # TODO(wangzhongpu): support mkldnn op in dygraph mode place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-5, check_dygraph=(not self.use_mkldnn) ) def test_check_grad(self): @@ -817,7 +814,7 @@ class TestConv2DOp_v2(OpTest): {'Input', 'Filter'}, 'Output', max_relative_error=0.02, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_no_filter(self): @@ -831,7 +828,7 @@ class TestConv2DOp_v2(OpTest): 'Output', max_relative_error=0.02, no_grad_set=set(['Filter']), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_no_input(self): @@ -844,7 +841,7 @@ class TestConv2DOp_v2(OpTest): ['Filter'], 'Output', no_grad_set=set(['Input']), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def init_test_case(self): diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py index 
29ffbd80d3623f2e26fca6d50dca110f05251535..482da8164b245b9c66092d145e5c334871da8553 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py @@ -183,10 +183,10 @@ class TestConv2DTransposeOp(OpTest): if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-5, check_dygraph=(not self.use_mkldnn) ) else: - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) def test_check_grad_no_input(self): if self.need_check_grad: @@ -724,10 +724,10 @@ class TestCUDNN_FP16(TestConv2DTransposeOp): if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=0.02, check_dygraph=(self.use_mkldnn == False) + place, atol=0.02, check_dygraph=(not self.use_mkldnn) ) else: - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) @unittest.skipIf( diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_op.py index eaa6ba04c64e6dc2d1388c04c3c04acc87a543d9..54a3621e0ba72e1611558b30339b9df279b1b0e3 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_op.py @@ -327,7 +327,7 @@ class TestConv3DOp(OpTest): # TODO(wangzhongpu): support mkldnn op in dygraph mode place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-5, check_dygraph=(not self.use_mkldnn) ) def test_check_grad(self): @@ -340,7 +340,7 @@ class TestConv3DOp(OpTest): {'Input', 'Filter'}, 'Output', max_relative_error=0.03, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_no_filter(self): @@ -354,7 +354,7 @@ class TestConv3DOp(OpTest): 'Output', max_relative_error=0.03, no_grad_set=set(['Filter']), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_no_input(self): @@ -368,7 +368,7 @@ class TestConv3DOp(OpTest): 'Output', max_relative_error=0.03, no_grad_set=set(['Input']), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def init_test_case(self): diff --git a/python/paddle/fluid/tests/unittests/test_dataset_download.py b/python/paddle/fluid/tests/unittests/test_dataset_download.py index f1fba215b931f092406a82a988e9e20265a978c9..b009a2fe58dca3a47d7fd11bde2953c1bad5820d 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset_download.py +++ b/python/paddle/fluid/tests/unittests/test_dataset_download.py @@ -34,7 +34,7 @@ class TestDataSetDownload(unittest.TestCase): except Exception as e: catch_exp = True - self.assertTrue(catch_exp == False) + self.assertTrue(not catch_exp) file_path = DATA_HOME + "/flowers/imagelabels.mat" diff --git a/python/paddle/fluid/tests/unittests/test_dist_base.py b/python/paddle/fluid/tests/unittests/test_dist_base.py index 4c109feaef2358659ec9ab7de37147db376d6dee..6212de9ebcfa489c48b29eead31970a56e6ebd45 100755 --- a/python/paddle/fluid/tests/unittests/test_dist_base.py +++ b/python/paddle/fluid/tests/unittests/test_dist_base.py @@ -1330,8 +1330,8 @@ class TestDistBase(unittest.TestCase): tr_cmd += " --diff_batch" self.__use_cuda = False self.__use_xpu = False - assert self.__use_cuda == False, "gloo not 
support use cuda" - assert self.__use_xpu == False, "gloo not support use xpu" + assert not self.__use_cuda, "gloo not support use cuda" + assert not self.__use_xpu, "gloo not support use xpu" tr_cmd += " --use_cpu" env.update( { @@ -1345,7 +1345,7 @@ class TestDistBase(unittest.TestCase): } ) - assert self._use_dgc == False, "gloo not support use dgc" + assert not self._use_dgc, "gloo not support use dgc" if self._accumulate_gradient: tr_cmd += " --accumulate_gradient" @@ -1353,7 +1353,7 @@ class TestDistBase(unittest.TestCase): if self._find_unused_parameters: tr_cmd += " --find_unused_parameters" - assert self._pipeline_mode == False, "gloo not support use pipeline" + assert not self._pipeline_mode, "gloo not support use pipeline" if self._enable_backward_deps: # build strategy, save it tr_cmd += " --enable_backward_deps" @@ -1361,8 +1361,8 @@ class TestDistBase(unittest.TestCase): if self._fuse_all_reduce is not None: tr_cmd += " --fuse_all_reduce {}".format(self._fuse_all_reduce) - assert self._use_fleet_api == False, "gloo not support use fleet api" - assert self._use_fleet_api_20 == False, "gloo not support use fleet api" + assert not self._use_fleet_api, "gloo not support use fleet api" + assert not self._use_fleet_api_20, "gloo not support use fleet api" return tr_cmd, env def _get_nccl2_trainer_cmd( diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py index ae516bc44cad3e645964193809ee3ca7d490d412..6bfd14dc8415224d9e2320807eac50b788af5497 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py @@ -46,12 +46,12 @@ class TestElementwiseAddOp(OpTest): self.outputs = {'Out': self.out} def check_eager(self): - return self.use_mkldnn == False and self.axis == -1 + return not self.use_mkldnn and self.axis == -1 def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.check_output( - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), check_eager=self.check_eager(), ) @@ -62,7 +62,7 @@ class TestElementwiseAddOp(OpTest): self.check_grad( ['X', 'Y'], 'Out', - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), check_eager=self.check_eager(), ) @@ -74,7 +74,7 @@ class TestElementwiseAddOp(OpTest): ['Y'], 'Out', no_grad_set=set("X"), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), check_eager=self.check_eager(), ) @@ -86,7 +86,7 @@ class TestElementwiseAddOp(OpTest): ['X'], 'Out', no_grad_set=set('Y'), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), check_eager=self.check_eager(), ) @@ -115,7 +115,7 @@ class TestFP16ElementwiseAddOp(TestElementwiseAddOp): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place( - place, atol=1e-3, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-3, check_dygraph=(not self.use_mkldnn) ) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py index cc3cd9be8236c144df5d501e933a38450c8f99d1..987a17ff1f5ea1fbdad50996dc2c58cfe6c88afd 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py @@ -49,13 +49,11 @@ class ElementwiseMulOp(OpTest): def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph 
mode - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) def test_check_grad_normal(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_grad( - ['X', 'Y'], 'Out', check_dygraph=(self.use_mkldnn == False) - ) + self.check_grad(['X', 'Y'], 'Out', check_dygraph=(not self.use_mkldnn)) def test_check_grad_ingore_x(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode @@ -63,7 +61,7 @@ class ElementwiseMulOp(OpTest): ['Y'], 'Out', no_grad_set=set("X"), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_ingore_y(self): @@ -72,7 +70,7 @@ class ElementwiseMulOp(OpTest): ['X'], 'Out', no_grad_set=set('Y'), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def init_input_output(self): diff --git a/python/paddle/fluid/tests/unittests/test_empty_like_op.py b/python/paddle/fluid/tests/unittests/test_empty_like_op.py index 4ce4ab6a6d52700d043c8243d94c6e0aed8584b7..82ad72e11e5f2b2bf7ee8a8aa0316823a5703c31 100644 --- a/python/paddle/fluid/tests/unittests/test_empty_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_empty_like_op.py @@ -47,8 +47,8 @@ class TestEmptyLikeAPICommon(unittest.TestCase): ) elif data_type in ['bool']: total_num = out.size - true_num = np.sum(out == True) - false_num = np.sum(out == False) + true_num = np.sum(out) + false_num = np.sum(~out) self.assertTrue( total_num == true_num + false_num, 'The value should always be True or False.', diff --git a/python/paddle/fluid/tests/unittests/test_empty_op.py b/python/paddle/fluid/tests/unittests/test_empty_op.py index 11b66325c1f5b8d2e8875482dbc5ee95c0452706..7b488aa0c6dda17a9e00fd152cd178b4562c982b 100644 --- a/python/paddle/fluid/tests/unittests/test_empty_op.py +++ b/python/paddle/fluid/tests/unittests/test_empty_op.py @@ -43,8 +43,8 @@ class TestEmptyOp(OpTest): ) elif data_type in ['bool']: total_num = outs[0].size - true_num = np.sum(outs[0] == True) - false_num = np.sum(outs[0] == False) + true_num = np.sum(outs[0]) + false_num = np.sum(~outs[0]) self.assertTrue( total_num == true_num + false_num, 'The value should always be True or False.', @@ -132,8 +132,8 @@ class TestEmptyOp_ShapeTensor(OpTest): ) elif data_type in ['bool']: total_num = outs[0].size - true_num = np.sum(outs[0] == True) - false_num = np.sum(outs[0] == False) + true_num = np.sum(outs[0]) + false_num = np.sum(~outs[0]) self.assertTrue( total_num == true_num + false_num, 'The value should always be True or False.', @@ -182,8 +182,8 @@ class TestEmptyOp_ShapeTensorList(OpTest): ) elif data_type in ['bool']: total_num = outs[0].size - true_num = np.sum(outs[0] == True) - false_num = np.sum(outs[0] == False) + true_num = np.sum(outs[0]) + false_num = np.sum(~outs[0]) self.assertTrue( total_num == true_num + false_num, 'The value should always be True or False.', diff --git a/python/paddle/fluid/tests/unittests/test_imperative_layer_trainable.py b/python/paddle/fluid/tests/unittests/test_imperative_layer_trainable.py index b0dcfd653fb750d7a1cdb4a9851d424bc92f6331..dc4ad0cea15daea4edf1360481d9cea818cb8faf 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_layer_trainable.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_layer_trainable.py @@ -29,16 +29,16 @@ class TestImperativeLayerTrainable(unittest.TestCase): linear = dygraph.Linear(10, 10) y = linear(label) - self.assertTrue(y.stop_gradient == False) + self.assertFalse(y.stop_gradient) 
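The boolean-array hunks above (test_empty_like_op.py and test_empty_op.py) rely on a NumPy identity: for a bool ndarray, summing the array counts its True entries and summing its elementwise negation counts its False entries, so no comparison against True or False is needed. A minimal standalone sketch of that equivalence (illustrative only, not taken from the patched files):

    import numpy as np

    out = np.array([True, False, True, True])
    # Summing a bool array counts True entries, matching the old `== True` mask.
    assert np.sum(out) == np.sum(out == True) == 3
    # `~out` flips every element, so its sum counts False entries,
    # matching the old `== False` mask.
    assert np.sum(~out) == np.sum(out == False) == 1
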
linear.weight.trainable = False linear.bias.trainable = False - self.assertTrue(linear.weight.trainable == False) - self.assertTrue(linear.weight.stop_gradient == True) + self.assertFalse(linear.weight.trainable) + self.assertTrue(linear.weight.stop_gradient) y = linear(label) - self.assertTrue(y.stop_gradient == True) + self.assertTrue(y.stop_gradient) with self.assertRaises(ValueError): linear.weight.trainable = "1" diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py index 68e88c9ba2a8132d7938ea80e46fd31ee99c5511..0c52d7596c129e27864d9630ccb19983c82974e8 100644 --- a/python/paddle/fluid/tests/unittests/test_mean_op.py +++ b/python/paddle/fluid/tests/unittests/test_mean_op.py @@ -28,13 +28,13 @@ np.random.seed(10) def mean_wrapper(x, axis=None, keepdim=False, reduce_all=False): - if reduce_all == True: + if reduce_all: return paddle.mean(x, range(len(x.shape)), keepdim) return paddle.mean(x, axis, keepdim) def reduce_mean_wrapper(x, axis=0, keepdim=False, reduce_all=False): - if reduce_all == True: + if reduce_all: return paddle.mean(x, range(len(x.shape)), keepdim) return paddle.mean(x, axis, keepdim) diff --git a/python/paddle/fluid/tests/unittests/test_momentum_op.py b/python/paddle/fluid/tests/unittests/test_momentum_op.py index 017b001e259d634679e180465d1c73d5abf0621a..fd9b8b88016bd749815fc4de252e1820a7e8126f 100644 --- a/python/paddle/fluid/tests/unittests/test_momentum_op.py +++ b/python/paddle/fluid/tests/unittests/test_momentum_op.py @@ -910,10 +910,10 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): multi_precision=use_amp, ) for idx in range(5): - if place == 'gpu' and use_amp == True: + if place == 'gpu' and use_amp: model = paddle.amp.decorate(models=model, level='O2') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - if place == 'gpu' and use_amp == True: + if place == 'gpu' and use_amp: with paddle.amp.auto_cast(level='O2'): output = model(input) loss = paddle.mean(output) diff --git a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py index cde6c8daf96be0d3d3bbebe2c64fadc77b0f6b2c..6cc6fdd4311eb57816932bed44a55b710be07257 100644 --- a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py @@ -146,12 +146,8 @@ def iou(box_a, box_b, norm): xmax_b = max(box_b[0], box_b[2]) ymax_b = max(box_b[1], box_b[3]) - area_a = (ymax_a - ymin_a + (norm == False)) * ( - xmax_a - xmin_a + (norm == False) - ) - area_b = (ymax_b - ymin_b + (norm == False)) * ( - xmax_b - xmin_b + (norm == False) - ) + area_a = (ymax_a - ymin_a + (not norm)) * (xmax_a - xmin_a + (not norm)) + area_b = (ymax_b - ymin_b + (not norm)) * (xmax_b - xmin_b + (not norm)) if area_a <= 0 and area_b <= 0: return 0.0 @@ -160,9 +156,7 @@ def iou(box_a, box_b, norm): xb = min(xmax_a, xmax_b) yb = min(ymax_a, ymax_b) - inter_area = max(xb - xa + (norm == False), 0.0) * max( - yb - ya + (norm == False), 0.0 - ) + inter_area = max(xb - xa + (not norm), 0.0) * max(yb - ya + (not norm), 0.0) iou_ratio = inter_area / (area_a + area_b - inter_area) diff --git a/python/paddle/fluid/tests/unittests/test_ops_nms.py b/python/paddle/fluid/tests/unittests/test_ops_nms.py index 573231a8a725a9db4d0a83a16d36b814dbf6c652..be4d5f4921324a2626e8e22b4358a605725e9c4c 100644 --- a/python/paddle/fluid/tests/unittests/test_ops_nms.py +++ b/python/paddle/fluid/tests/unittests/test_ops_nms.py @@ -55,7 +55,7 @@ def 
multiclass_nms(boxes, scores, category_idxs, iou_threshold, top_k): mask[cur_category_boxes_idxs[cur_category_keep_boxes_sub_idxs]] = True - keep_boxes_idxs = _find(mask == True) + keep_boxes_idxs = _find(mask) topK_sub_indices = np.argsort(-scores[keep_boxes_idxs])[:top_k] return keep_boxes_idxs[topK_sub_indices] diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py index 17ad33f67ab9980d76327231c6045258e6224b0d..ab5d5ac46daf9333f0176286c7ea2ef30313e7aa 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer.py @@ -784,7 +784,7 @@ class TestRecomputeOptimizer(unittest.TestCase): type="mean", inputs={"X": b2_out}, outputs={"Out": mean_out} ) - if return_input == True: + if return_input: return mul_x, mul_out, b1_out, b2_out, mean_out return mul_out, b1_out, b2_out, mean_out diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py index 9c8d342993739d4c07ec3bc033aa685f280575de..e0e545448b5b3bff83679b0c2927aa894162e10f 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py @@ -58,8 +58,8 @@ class TestParallelExecutorDropExeScope(unittest.TestCase): train_exe.run(feed={"X": x}, fetch_list=[loss.name]) test_exe.run(feed={"X": x}, fetch_list=[loss.name]) - assert train_exe._need_create_local_exe_scopes() == False - assert test_exe._need_create_local_exe_scopes() == False + assert not train_exe._need_create_local_exe_scopes() + assert not test_exe._need_create_local_exe_scopes() # drop the local execution scope immediately train_exe.drop_local_exe_scopes() diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index 7c44827262d6c4c1b4e7fba46f0f9d9b0ee784f0..b2ae6318cc5deb28d39f79e78c06f016c4243ef8 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -181,7 +181,7 @@ def pool2D_forward_naive( if padding_algorithm == "VALID": paddings = [0, 0, 0, 0] - if ceil_mode != False: + if ceil_mode is not False: raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" " must be False. 
" @@ -346,10 +346,10 @@ class TestPool2D_Op_Mixin(object): if self.has_cudnn(): place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-5, check_dygraph=(not self.use_mkldnn) ) else: - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) def test_check_grad(self): if self.dtype == np.float16: @@ -362,14 +362,14 @@ class TestPool2D_Op_Mixin(object): set(['X']), 'Out', max_relative_error=0.07, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) elif self.pool_type != "max": self.check_grad( set(['X']), 'Out', max_relative_error=0.07, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def init_data_format(self): @@ -512,7 +512,7 @@ def create_test_cudnn_fp16_class(parent, check_grad=True): self.check_output_with_place( place, atol=1e-3, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad(self): @@ -528,7 +528,7 @@ def create_test_cudnn_fp16_class(parent, check_grad=True): set(['X']), 'Out', max_relative_error=0.07, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16Op") @@ -553,7 +553,7 @@ def create_test_fp16_class(parent, check_grad=True): self.check_output_with_place( place, atol=1e-3, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad(self): @@ -569,7 +569,7 @@ def create_test_fp16_class(parent, check_grad=True): set(['X']), 'Out', max_relative_error=0.07, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op") diff --git a/python/paddle/fluid/tests/unittests/test_pool3d_op.py b/python/paddle/fluid/tests/unittests/test_pool3d_op.py index a5bf5066956483ed7c0991de7ed1d7b57ac2938c..09222e99c3622c4f705b21384b927b68704a907d 100644 --- a/python/paddle/fluid/tests/unittests/test_pool3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool3d_op.py @@ -68,7 +68,7 @@ def pool3D_forward_naive( if padding_algorithm == "VALID": paddings = [0, 0, 0, 0, 0, 0] - if ceil_mode != False: + if ceil_mode is not False: raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" " must be False. 
" diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op.py b/python/paddle/fluid/tests/unittests/test_sgd_op.py index 34420ce5a9c56262fe8f79451625f47249cd81c9..255a4799984968300fcd13fd573bc0a99ee9ce69 100644 --- a/python/paddle/fluid/tests/unittests/test_sgd_op.py +++ b/python/paddle/fluid/tests/unittests/test_sgd_op.py @@ -321,12 +321,12 @@ class TestSGDMultiPrecision2_0(unittest.TestCase): optimizer = paddle.optimizer.SGD( parameters=model.parameters(), multi_precision=mp ) - if mp == True: + if mp: model = paddle.amp.decorate(models=model, level='O2') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) for idx in range(5): - if mp == True: + if mp: with paddle.amp.auto_cast(level='O2'): output = model(input) loss = paddle.mean(output) @@ -429,12 +429,12 @@ class TestSGDMultiPrecision1_0(unittest.TestCase): parameter_list=model.parameters(), multi_precision=mp, ) - if mp == True: + if mp: model = paddle.amp.decorate(models=model, level='O2') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) for idx in range(5): - if mp == True: + if mp: with paddle.amp.auto_cast(level='O2'): output = model(input) loss = paddle.mean(output) diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py index c83f569cb11a1f2423fb806ac682e3b07507669c..18a5737225fa939f053d6e3d7607cf4d5dfbe855 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py @@ -78,10 +78,10 @@ class TestSoftmaxOp(OpTest): if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-5, check_dygraph=(not self.use_mkldnn) ) else: - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) def test_check_grad(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode @@ -93,14 +93,14 @@ class TestSoftmaxOp(OpTest): ["X"], "Out", max_relative_error=0.01, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) else: self.check_grad( ["X"], "Out", max_relative_error=0.01, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) @@ -389,9 +389,7 @@ class TestSoftmaxBF16Op(OpTest): def test_check_output(self): place = core.CUDAPlace(0) - self.check_output_with_place( - place, check_dygraph=(self.use_mkldnn == False) - ) + self.check_output_with_place(place, check_dygraph=(not self.use_mkldnn)) def test_check_grad(self): place = core.CUDAPlace(0) @@ -400,7 +398,7 @@ class TestSoftmaxBF16Op(OpTest): ["X"], "Out", numeric_grad_delta=0.05, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py index fb0e46b6740b8f84e0e3a5b2aa5c80c2a0e84fd0..a623a311ccf1c5827d6951809bb07c27825dd8d8 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py @@ -131,7 +131,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): softmax, labels, self.soft_label, self.axis, self.ignore_index ) - if self.use_softmax == False: + if not self.use_softmax: self.inputs = {"Logits": softmax, "Label": labels} else: self.inputs = {"Logits": logits, "Label": labels} diff --git 
a/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py b/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py index 3b29d335da42b794d09c461e02b1889a20ea9f1a..92e2d0200c80d4f9ce866757c62e9617cd9fdcf3 100644 --- a/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py @@ -221,7 +221,7 @@ class TestSparseAttentionOp(OpTest): self.key_padding_mask = key_padding_mask.astype(self.dtype) self.attn_mask = attn_mask.astype(self.dtype) - if self.use_mask == True: + if self.use_mask: result, result_sdd, result_softmax = ref_batch_sparse_attention( self.q, self.k, @@ -236,7 +236,7 @@ class TestSparseAttentionOp(OpTest): self.q, self.k, self.v, self.offset, self.columns ) - if self.use_mask == True: + if self.use_mask: self.inputs = { 'Q': self.q, 'K': self.k, @@ -326,7 +326,7 @@ class TestSparseAttentionAPI(unittest.TestCase): ) key_padding_mask_shape = (self.shape[0], self.shape[2]) attn_mask_shape = (self.shape[2], self.shape[2]) - if self.use_mask == True: + if self.use_mask: key_padding_mask = paddle.static.data( name="KeyPaddingMask", shape=key_padding_mask_shape, @@ -367,7 +367,7 @@ class TestSparseAttentionAPI(unittest.TestCase): attn_mask_np = attn_mask_np.astype(self.dtype) exe = fluid.Executor(self.place) - if self.use_mask == True: + if self.use_mask: fetches_result = exe.run( feed={ "Q": Q_np, @@ -436,7 +436,7 @@ class TestSparseAttentionAPI(unittest.TestCase): paddle_kp_mask = paddle.to_tensor(key_padding_mask, place=self.place) paddle_attn_mask = paddle.to_tensor(attn_mask, place=self.place) - if self.use_mask == True: + if self.use_mask: paddle_result = F.sparse_attention( paddle_query, paddle_key, diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py index d1b820bd74c1d9e407eef080d06664ff41bb7a8a..38e65744a811075931a6eb536533650d28cbff1d 100644 --- a/python/paddle/fluid/tests/unittests/test_var_base.py +++ b/python/paddle/fluid/tests/unittests/test_var_base.py @@ -1147,10 +1147,10 @@ class TestVarBase(unittest.TestCase): if var2: var2_bool = True - assert var1_bool == False, "if var1 should be false" - assert var2_bool == True, "if var2 should be true" - assert bool(var1) == False, "bool(var1) is False" - assert bool(var2) == True, "bool(var2) is True" + assert not var1_bool, "if var1 should be false" + assert var2_bool, "if var2 should be true" + assert not bool(var1), "bool(var1) is False" + assert bool(var2), "bool(var2) is True" def test_if(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_where_op.py b/python/paddle/fluid/tests/unittests/test_where_op.py index 9ae7d9a48331b2c404361a3b5ac960ad69993425..7420753d2d359c5d450f5e4c4d908b61ba127538 100644 --- a/python/paddle/fluid/tests/unittests/test_where_op.py +++ b/python/paddle/fluid/tests/unittests/test_where_op.py @@ -68,10 +68,10 @@ class TestWhereAPI(unittest.TestCase): self.out = np.where(self.cond, self.x, self.y) def ref_x_backward(self, dout): - return np.where((self.cond == True), dout, 0) + return np.where(self.cond, dout, 0) def ref_y_backward(self, dout): - return np.where((self.cond == False), dout, 0) + return np.where(~self.cond, dout, 0) def test_api(self, use_cuda=False): for x_stop_gradient in [False, True]: diff --git a/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py index 
7d818cc02c868aefe71b3203582e8c108ca84ef6..c6c7d9f34d8a0ddc683281b22f56168f66554808 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py @@ -377,7 +377,7 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): ) net2.weight = net1.weight net2.bias = net1.bias - if self.trainable_statistics == True: + if self.trainable_statistics: net1.training = False net2.training = False y1 = net1(x) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py index 973e2908c4eccdb76ac2d6f51954bff19b519874..a7036f521817a699346fa6b2fa360fb3b2060aff 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py @@ -261,10 +261,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper): self.check_output_with_place(self.place) def test_check_grad(self): - if ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True - ): + if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): paddle.enable_static() @@ -273,10 +270,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper): ) def test_check_grad_no_filter(self): - if ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True - ): + if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): paddle.enable_static() @@ -285,10 +279,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper): ) def test_check_grad_no_input(self): - if ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True - ): + if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): paddle.enable_static() @@ -433,10 +424,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): def test_check_grad(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - if ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True - ): + if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): paddle.enable_static() @@ -446,10 +434,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): def test_check_grad_no_filter(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - if ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True - ): + if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): paddle.enable_static() @@ -459,10 +444,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): def test_check_grad_no_input(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - if ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True - ): + if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py index 794ff490d7ec908206e78335ccdc3ce9f24517e8..36434ce2020258f4253a1d3d03f422f256cc075c 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py @@ -52,7 +52,7 @@ class XPUTestDropoutOp(XPUOpTestWrapper): } out = self.inputs['X'] * (1.0 - self.dropout_prob) - if self.is_test == False: + if not self.is_test: mask = None if self.dropout_prob == 0.0: mask = 
np.ones(self.shape).astype(self.dtype) @@ -78,7 +78,7 @@ class XPUTestDropoutOp(XPUOpTestWrapper): def test_check_grad_normal(self): if ( hasattr(self.__class__, "no_need_check_grad") - and self.__class__.no_need_check_grad == True + and self.__class__.no_need_check_grad ): return diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py index 22ee95c07d4cee2cc59e8e9512a7148b9d45e708..1d9c8c80f5ae5b8be89a56f6e0115ebca34d304e 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py @@ -61,7 +61,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): place, ['X', 'Y'], 'Out', - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_ingore_x(self): @@ -72,7 +72,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): ['Y'], 'Out', no_grad_set=set("X"), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_ingore_y(self): @@ -83,7 +83,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): ['X'], 'Out', no_grad_set=set('Y'), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def init_input_output(self): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_empty_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_empty_op_xpu.py index cb56e9b51f42dbddad1a8570c81fc4e2e29d073c..f11740d74d482d9b00cd8cb48e29cf8be8ea7f6a 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_empty_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_empty_op_xpu.py @@ -72,8 +72,8 @@ class XPUTestEmptyOp(XPUOpTestWrapper): ) elif data_type in ['bool']: total_num = outs[0].size - true_num = np.sum(outs[0] == True) - false_num = np.sum(outs[0] == False) + true_num = np.sum(outs[0]) + false_num = np.sum(~outs[0]) self.assertTrue( total_num == true_num + false_num, 'The value should always be True or False.', diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_op_xpu.py index c37d1bff5dd96edb929cc327640e3093ff6c8e48..7c2a5ed2f0923277f862d8b782e526f291d7590f 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_op_xpu.py @@ -106,14 +106,14 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): - 0.5, } - if self.trans_x == True: + if self.trans_x: numpy_input_x = ( self.inputs['X'].reshape((self.x_shape[0], -1)).T ) else: numpy_input_x = self.inputs['X'].reshape((-1, self.x_shape[-1])) - if self.trans_y == True: + if self.trans_y: numpy_input_y = self.inputs['Y'].T else: numpy_input_y = self.inputs['Y'] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py index c4aab23a95201c4b7266fcc170505ad15e2b277f..21e46e31783a44a41b3aefa59feaa5ec34013f6c 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py @@ -106,7 +106,7 @@ def generate_compatible_shapes( shape_Y = [BATCH_SIZE] + shape_Y if dim_Y == 3 and dim_X == 2: - if transpose_X == False: + if not transpose_X: shape_X[1] = shape_X[1] * BATCH_SIZE else: shape_X[0] = shape_X[0] * BATCH_SIZE @@ -326,7 +326,7 @@ class TestMatmulBaseGenerator(XPUOpTest): def 
test_check_grad_normal(self): if ( hasattr(self.__class__, "no_need_check_grad") - and self.__class__.no_need_check_grad == True + and self.__class__.no_need_check_grad ): return @@ -338,7 +338,7 @@ class TestMatmulBaseGenerator(XPUOpTest): def test_check_grad_ignore_x(self): if ( hasattr(self.__class__, "no_need_check_grad") - and self.__class__.no_need_check_grad == True + and self.__class__.no_need_check_grad ): return @@ -350,7 +350,7 @@ class TestMatmulBaseGenerator(XPUOpTest): def test_check_grad_ignore_y(self): if ( hasattr(self.__class__, "no_need_check_grad") - and self.__class__.no_need_check_grad == True + and self.__class__.no_need_check_grad ): return diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py index 63354ac7607ee8f118ae877943c5d1af66d03e58..3e873a965f6d96416c2f5ca3427148c6a67e1f91 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py @@ -101,7 +101,7 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper): def test_check_grad(self): if ( hasattr(self.__class__, "no_need_check_grad") - and self.__class__.no_need_check_grad == True + and self.__class__.no_need_check_grad ): return place = paddle.XPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py index 45c9f518cbdddd9d30b1c360339d4bcfaf00b6c4..36cb5dfaefd8bd55865b33e3b6312555881eb15e 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py @@ -178,7 +178,7 @@ def pool2D_forward_naive( if padding_algorithm == "VALID": paddings = [0, 0, 0, 0] - if ceil_mode != False: + if ceil_mode is not False: raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" " must be False. 
" diff --git a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py index 18af22f3c6465606c74cf882e93f83fcbdb07e6a..bd6accf59d1c02b6ef487059f72ec0fb1e080971 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py @@ -91,10 +91,10 @@ class TestXPUWhereAPI(unittest.TestCase): self.out = np.where(self.cond, self.x, self.y) def ref_x_backward(self, dout): - return np.where(self.cond == True, dout, 0) + return np.where(self.cond, dout, 0) def ref_y_backward(self, dout): - return np.where(self.cond == False, dout, 0) + return np.where(~self.cond, dout, 0) def test_api(self): for x_stop_gradient in [False, True]: diff --git a/python/paddle/hapi/model_summary.py b/python/paddle/hapi/model_summary.py index d90c64b76217b2d61bfd90dbd2382d6a1eea9ccf..14cb186fce95fd64f3f4b7224794cbec56ff49b9 100644 --- a/python/paddle/hapi/model_summary.py +++ b/python/paddle/hapi/model_summary.py @@ -450,7 +450,7 @@ def summary_string(model, input_size=None, dtypes=None, input=None): total_output += np.sum(np.prod(output_shape, axis=-1)) if "trainable" in summary[layer]: - if summary[layer]["trainable"] == True: + if summary[layer]["trainable"]: trainable_params += summary[layer]["trainable_params"] summary_str += line_new + "\n" diff --git a/python/paddle/incubate/autograd/primrules.py b/python/paddle/incubate/autograd/primrules.py index badd8476463deb39938c02c003973cca59972630..0532ade86c65f7dc2274add9f4748d1311b3944c 100644 --- a/python/paddle/incubate/autograd/primrules.py +++ b/python/paddle/incubate/autograd/primrules.py @@ -515,7 +515,7 @@ def dropout_orig2prim(op, seed_t, x): ), 'Can not lower dropout into prim ops with seedtensor.' 
mask = bernoulli(shape=x.shape, dtype=x.dtype, p=op.attr('dropout_prob')) if op.attr('dropout_implementation') == 'upscale_in_train': - if op.attr('is_test') == False: + if not op.attr('is_test'): out = div( mul(x, mask), fill_const(1.0 - op.attr('dropout_prob'), x.shape, x.dtype), @@ -524,7 +524,7 @@ def dropout_orig2prim(op, seed_t, x): else: return primops.cast(mask, dtype=paddle.uint8), x elif op.attr('dropout_implementation') == 'downgrade_in_infer': - if op.attr('is_test') == False: + if not op.attr('is_test'): return primops.cast(mask, dtype=paddle.uint8), mul(x, mask) else: return primops.cast(mask, dtype=paddle.uint8), mul( diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index b6936c5a90c9b1ea6009f7d3996df620818ef76a..a61d05761303d597d93aa9b071234ee6340ee40a 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -2109,7 +2109,7 @@ def class_center_sample(label, num_classes, num_samples, group=None): #Tensor(shape=[7], dtype=int64, place=CUDAPlace(1), stop_gradient=True, # [0, 1, 2, 3, 5, 7, 8]) """ - if not (group == False or group is None or hasattr(group, 'is_member')): + if not (group is False or group is None or hasattr(group, 'is_member')): raise ValueError( 'Expected group is False, None or instance of paddle.distributed.collective.Group \ (got group: {})'.format( @@ -2124,7 +2124,7 @@ def class_center_sample(label, num_classes, num_samples, group=None): ring_id = 0 rank = 0 nranks = 1 - if group != False: + if group is not False: if core.is_compiled_with_dist(): parallel_env = paddle.distributed.ParallelEnv() global_rank = parallel_env.rank diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 48cda9d0b4f95347c0165953365743be89695d30..b7e1045b6ee354c1faf270f40aa2aa289d200f96 100755 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -2033,7 +2033,7 @@ def margin_cross_entropy( """ assert reduction in ['mean', 'sum', 'none', None] - if not (group == False or group is None or hasattr(group, 'is_member')): + if not (group is False or group is None or hasattr(group, 'is_member')): raise ValueError( 'Expected group is False, None or instance of paddle.distributed.collective.Group \ (got group: {})'.format( @@ -2048,7 +2048,7 @@ def margin_cross_entropy( ring_id = 0 rank = 0 nranks = 1 - if group != False: + if group is not False: ring_id = 0 if group is None else group.id if core.is_compiled_with_dist(): parallel_env = paddle.distributed.ParallelEnv() @@ -2537,7 +2537,7 @@ def cross_entropy( "should be 'sum', 'mean' or 'none', but received %s, which is not allowed." % reduction ) - if ignore_index > 0 and soft_label == True: + if ignore_index > 0 and soft_label: raise ValueError( "When soft_label == True, the value of 'ignore_index' in softmax_cross_entropy" "should be '-100', but received %s, which is not allowed." @@ -2560,12 +2560,12 @@ def cross_entropy( label = paddle.unsqueeze(label, axis=axis) if in_dygraph_mode(): - if soft_label == False: + if not soft_label: valid_label = ( paddle.cast(label != ignore_index, dtype=label.dtype) * label ) if core.is_compiled_with_npu() or core.is_compiled_with_mlu(): - if soft_label == False: + if not soft_label: _, _, out = _legacy_C_ops.softmax_with_cross_entropy( input, valid_label, @@ -2603,7 +2603,7 @@ def cross_entropy( if weight is not None: # trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases. 
- if soft_label == True: + if soft_label: # chajchaj: # weight's shape is C, where C is class num. # for 1d case: label's shape is [N,C], weight_gather's shape is N. @@ -2710,7 +2710,7 @@ def cross_entropy( return out elif _in_legacy_dygraph(): - if soft_label == False: + if not soft_label: valid_label = ( paddle.cast(label != ignore_index, dtype=label.dtype) * label ) @@ -2725,7 +2725,7 @@ def cross_entropy( "Target {} is out of upper bound.".format(label_max.item()) ) if core.is_compiled_with_npu() or core.is_compiled_with_mlu(): - if soft_label == False: + if not soft_label: _, _, out = _legacy_C_ops.softmax_with_cross_entropy( input, valid_label, @@ -2774,7 +2774,7 @@ def cross_entropy( if weight is not None: # trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases. - if soft_label == True: + if soft_label: # chajchaj: # weight's shape is C, where C is class num. # for 1d case: label's shape is [N,C], weight_gather's shape is N. @@ -2921,7 +2921,7 @@ def cross_entropy( weight, 'weight', ['float32', 'float64'], 'softmax_cross_entropy' ) weight_name = name if reduction == 'none' else None - if soft_label == True: + if soft_label: # chajchaj: # trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases. # weight's shape is C, where C is class num. diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py index f9ece56dc7ef51647436dde25dbfcc18c7c0a688..d81987fa9eedbac07e1e71d799975074db5a71e9 100755 --- a/python/paddle/nn/functional/pooling.py +++ b/python/paddle/nn/functional/pooling.py @@ -110,7 +110,7 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False): ) ) if padding == "VALID": - if ceil_mode != False: + if ceil_mode is not False: raise ValueError( "When Attr(padding) is \"VALID\", Attr(ceil_mode) must be False. " "Received ceil_mode: True." 
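The functional-API hunks above (class_center_sample in common.py, margin_cross_entropy in loss.py, and the pooling ceil_mode check) switch to identity tests (`is False` / `is not False`) rather than plain truthiness, and the layer hunks that follow (distance.py, norm.py, rnn.py) do the same for `keepdim`, `weight_attr`, `bias_attr`, and related flags: these arguments may legitimately be None or an object rather than a bool, and only a literal False should select the disabled branch. A small sketch of why equality or truthiness would conflate those cases (illustrative only; the names are placeholders, not Paddle APIs):

    def disabled(flag):
        # `0 == False` is True and `not None` is True, so equality and truthiness
        # would both treat 0 or None like an explicit False.
        by_equality = flag == False    # True for False and for 0
        by_truthiness = not flag       # True for False, 0, None, '', []
        by_identity = flag is False    # True only for the literal False
        return by_equality, by_truthiness, by_identity

    for value in (False, 0, None, "param_attr"):
        print(value, disabled(value))
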
diff --git a/python/paddle/nn/layer/distance.py b/python/paddle/nn/layer/distance.py
index 344c1d482d4217003d9a47c2311609213abbd0f5..ef09a1cd5e2b5a5ef08d56ff4194927f3ba200d4 100644
--- a/python/paddle/nn/layer/distance.py
+++ b/python/paddle/nn/layer/distance.py
@@ -76,7 +76,7 @@ class PairwiseDistance(Layer):
         main_str = 'p={p}'
         if self.epsilon != 1e-6:
             main_str += ', epsilon={epsilon}'
-        if self.keepdim != False:
+        if self.keepdim is not False:
             main_str += ', keepdim={keepdim}'
         if self.name != None:
             main_str += ', name={name}'
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index 1b5784fbedff1709e91bc739c96e82968cdab1f4..5f4a4d8d1d8c79f4ce3a127636edccee1ea2ec25 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -71,7 +71,7 @@ class _InstanceNormBase(Layer):
     ):
         super(_InstanceNormBase, self).__init__()

-        if weight_attr == False or bias_attr == False:
+        if weight_attr is False or bias_attr is False:
             assert (
                 weight_attr == bias_attr
             ), "weight_attr and bias_attr must be set to False at the same time in InstanceNorm"
@@ -80,7 +80,7 @@ class _InstanceNormBase(Layer):
         self._bias_attr = bias_attr
         self._num_features = num_features

-        if weight_attr != False and bias_attr != False:
+        if weight_attr is not False and bias_attr is not False:
             self.scale = self.create_parameter(
                 attr=self._weight_attr,
                 shape=[num_features],
@@ -382,7 +382,7 @@ class GroupNorm(Layer):

         param_shape = [self._num_channels]

-        if weight_attr == False:
+        if weight_attr is False:
             self.weight = self.create_parameter(
                 attr=None, shape=param_shape, default_initializer=Constant(1.0)
             )
@@ -398,7 +398,7 @@ class GroupNorm(Layer):
                 and self._weight_attr.learning_rate == 0.0
             )

-        if bias_attr == False:
+        if bias_attr is False:
             self.bias = self.create_parameter(
                 attr=None,
                 shape=param_shape,
@@ -619,7 +619,7 @@ class _BatchNormBase(Layer):
         param_shape = [num_features]

         # create parameter
-        if weight_attr == False:
+        if weight_attr is False:
             self.weight = self.create_parameter(
                 attr=None,
                 shape=param_shape,
@@ -639,7 +639,7 @@ class _BatchNormBase(Layer):
                 and self._weight_attr.learning_rate == 0.0
             )

-        if bias_attr == False:
+        if bias_attr is False:
             self.bias = self.create_parameter(
                 attr=None,
                 shape=param_shape,
@@ -1315,7 +1315,10 @@ class SyncBatchNorm(_BatchNormBase):
                 layer._name,
             )

-            if layer._weight_attr != False and layer._bias_attr != False:
+            if (
+                layer._weight_attr is not False
+                and layer._bias_attr is not False
+            ):
                 with no_grad():
                     layer_output.weight = layer.weight
                     layer_output.bias = layer.bias
diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py
index 7e0fa6d7d703d356c08850c27bc5d129b89506f5..bfebde2b5eee740e4e9fa073ba8ccc00079b9c35 100644
--- a/python/paddle/nn/layer/rnn.py
+++ b/python/paddle/nn/layer/rnn.py
@@ -964,9 +964,9 @@ class RNNBase(LayerList):
             for direction in range(self.num_directions):
                 suffix = '_reverse' if direction == 1 else ''
                 param_names.extend(['weight_ih_l{}{}', 'weight_hh_l{}{}'])
-                if bias_ih_attr != False:
+                if bias_ih_attr is not False:
                     param_names.append('bias_ih_l{}{}')
-                if bias_hh_attr != False:
+                if bias_hh_attr is not False:
                     param_names.append('bias_hh_l{}{}')
                 param_names = [x.format(layer, suffix) for x in param_names]
                 for name, param in zip(param_names, self.parameters()):
@@ -1187,7 +1187,7 @@ class RNNBase(LayerList):
         main_str = '{input_size}, {hidden_size}'
         if self.num_layers != 1:
             main_str += ', num_layers={num_layers}'
-        if self.time_major != False:
+        if self.time_major is not False:
             main_str += ', time_major={time_major}'
         if self.dropout != 0:
             main_str += ', dropout={dropout}'
diff --git a/python/paddle/nn/quant/quant_layers.py b/python/paddle/nn/quant/quant_layers.py
index 6eeaed7f86fecf91f77c586637a7da9ed1599765..7033df7fb37563c926889db742b188510d95aa73 100644
--- a/python/paddle/nn/quant/quant_layers.py
+++ b/python/paddle/nn/quant/quant_layers.py
@@ -298,7 +298,7 @@ class FakeQuantChannelWiseAbsMax(Layer):
         reduce_type=None,
     ):
         assert (
-            quant_on_weight == True
+            quant_on_weight
         ), "Channel_wise only can be used on weight quantization."
         super(FakeQuantChannelWiseAbsMax, self).__init__()
         self._quant_bits = quant_bits
diff --git a/python/paddle/profiler/profiler_statistic.py b/python/paddle/profiler/profiler_statistic.py
index c383323d51f98a9442f5739333ffea71f7384c62..8db866be45f2dac3886c86d89417f409e0944179 100755
--- a/python/paddle/profiler/profiler_statistic.py
+++ b/python/paddle/profiler/profiler_statistic.py
@@ -1237,7 +1237,7 @@ def _build_table(
     if statistic_data.event_summary.items:
         all_row_values = []
         name_column_width = 52
-        if thread_sep == True:
+        if thread_sep:
             thread_items = statistic_data.event_summary.thread_items
         else:
             thread_items = {
@@ -1721,7 +1721,7 @@ def _build_table(
                 'ProfileStep'
             ].general_gpu_time
         )
-        if thread_sep == True:
+        if thread_sep:
             userdefined_thread_items = (
                 statistic_data.event_summary.userdefined_thread_items
             )
diff --git a/python/paddle/profiler/utils.py b/python/paddle/profiler/utils.py
index efe3975f1445246678aeed42c2cf7414e4dc188a..4d7b36554b5900ef51e29f2317139e5ff15db0e0 100644
--- a/python/paddle/profiler/utils.py
+++ b/python/paddle/profiler/utils.py
@@ -164,7 +164,7 @@ def load_profiler_result(filename: str):


 def in_profiler_mode():
-    return _is_profiler_used == True
+    return _is_profiler_used


 def wrap_optimizers():
@@ -182,7 +182,7 @@ def wrap_optimizers():
         return warpper

     global _has_optimizer_wrapped
-    if _has_optimizer_wrapped == True:
+    if _has_optimizer_wrapped:
         return
     import paddle.optimizer as optimizer
diff --git a/python/paddle/sparse/nn/layer/norm.py b/python/paddle/sparse/nn/layer/norm.py
index 936e43a18faf909bdfd3d82eb716178913d7217a..117fbf01a1d6a49b58ec20bce2832ca263496650 100644
--- a/python/paddle/sparse/nn/layer/norm.py
+++ b/python/paddle/sparse/nn/layer/norm.py
@@ -398,7 +398,10 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm):
                 layer._name,
             )

-            if layer._weight_attr != False and layer._bias_attr != False:
+            if (
+                layer._weight_attr is not False
+                and layer._bias_attr is not False
+            ):
                 with no_grad():
                     layer_output.weight = layer.weight
                     layer_output.bias = layer.bias
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 3ed56a35dfa9cdc720872a8fd489907d8285d5fb..5348681ad04a7c052cd02d77692ee3631da9c7b4 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -466,9 +466,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
         if in_dygraph_mode():
             out = _C_ops.abs(input)
             reduce_all = (
-                True
-                if axis == None or axis == [] or asvector == True
-                else False
+                True if axis == None or axis == [] or asvector else False
             )
             axis = axis if axis != None and axis != [] else [0]
             if reduce_all:
@@ -487,9 +485,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
                 dtype=helper.input_dtype()
             )

-            reduce_all = (
-                True if axis == None or axis == [] or asvector == True else False
-            )
+            reduce_all = True if axis == None or axis == [] or asvector else False
             axis = axis if axis != None and axis != [] else [0]

             reduce_type = (
@@ -1322,7 +1318,7 @@ def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None):
         avg = nx.sum(axis=1) / w_sum
         nx_w = nx

-    if w is not None and aweights is not None and ddof == True:
+    if w is not None and aweights is not None and ddof:
         norm_factor = w_sum - (w * aweights).sum() / w_sum
     else:
         norm_factor = w_sum - ddof
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 7c629a556b097d768f40fbba3c028272886ed52b..3379a60a3bc5e4f9bdc3b02b41b8133db60d4ed5 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -3206,7 +3206,7 @@ def tile(x, repeat_times, name=None):
     check_variable_and_dtype(
         x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile'
     )
-    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
+    if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
         raise ValueError(
             "When the date type is bool for the input 'x' of tile op, you "
             "must set its stop_gradient to be True by "
@@ -3288,7 +3288,7 @@ def expand_as(x, y, name=None):
     )
     check_type(y, 'y', Variable, 'expand_as')

-    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
+    if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
         raise ValueError(
             "When the data type of input 'x' for expand_as is bool, "
             "you must set its stop_gradient to be False by "
@@ -3359,7 +3359,7 @@ def broadcast_to(x, shape, name=None):
         x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'broadcast_to'
     )
     check_type(shape, 'shape', (list, tuple, Variable), 'broadcast_to')
-    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
+    if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
         raise ValueError(
             "When the data type of input 'x' for broadcast_to is bool, "
             "you must set its stop_gradient to be False by "
@@ -3457,7 +3457,7 @@ def expand(x, shape, name=None):
         'expand',
     )
     check_type(shape, 'shape', (list, tuple, Variable), 'expand')
-    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
+    if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
         raise ValueError(
             "When the data type of input 'x' for expand is bool, "
             "you must set its stop_gradient to be False by "
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index 8791ebb7af268da8796069dc327ffde89e2bd9e2..f5f448cf4ef8207e6bc97dce5a8f01a68622c699 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -188,7 +188,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
     """

     assert (
-        core.is_compiled_with_rocm() == False
+        not core.is_compiled_with_rocm()
     ), "multinomial op is not supported on ROCM yet."

     if in_dygraph_mode():
diff --git a/python/paddle/text/datasets/conll05.py b/python/paddle/text/datasets/conll05.py
index 807c8c3fbebc9017b5cdbbc24597c473a0c91055..69f23cdaab932eb78622ddd3660fd6432290576a 100644
--- a/python/paddle/text/datasets/conll05.py
+++ b/python/paddle/text/datasets/conll05.py
@@ -228,9 +228,9 @@ class Conll05st(Dataset):
                 lbl_seq = []
                 verb_word = ''
                 for l in lbl:
-                    if l == '*' and is_in_bracket == False:
+                    if l == '*' and not is_in_bracket:
                         lbl_seq.append('O')
-                    elif l == '*' and is_in_bracket == True:
+                    elif l == '*' and is_in_bracket:
                         lbl_seq.append('I-' + cur_tag)
                     elif l == '*)':
                         lbl_seq.append('I-' + cur_tag)
diff --git a/tools/analysisPyXml.py b/tools/analysisPyXml.py
index ee110d6ce7f070f7d345f229f11b684ad0812bb1..b184ef76fcc54a46e49f2327c9a0ceaebea103b9 100644
--- a/tools/analysisPyXml.py
+++ b/tools/analysisPyXml.py
@@ -46,28 +46,25 @@ def analysisPyXml(rootPath, ut):
                 command = 'sed -n %sp %s' % (line_number, clazz_filename)
                 _code, output = commands.getstatusoutput(command)
                 if _code == 0:
-                    if (
-                        output.strip().startswith(
-                            (
-                                'from',
-                                'import',
-                                '__all__',
-                                'def',
-                                'class',
-                                '"""',
-                                '@',
-                                '\'\'\'',
-                                'logger',
-                                '_logger',
-                                'logging',
-                                'r"""',
-                                'pass',
-                                'try',
-                                'except',
-                                'if __name__ == "__main__"',
-                            )
+                    if not output.strip().startswith(
+                        (
+                            'from',
+                            'import',
+                            '__all__',
+                            'def',
+                            'class',
+                            '"""',
+                            '@',
+                            '\'\'\'',
+                            'logger',
+                            '_logger',
+                            'logging',
+                            'r"""',
+                            'pass',
+                            'try',
+                            'except',
+                            'if __name__ == "__main__"',
                         )
-                        == False
                     ):
                         pattern = r"""(.*) = ('*')|(.*) = ("*")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()""" # a='b'/a="b"/a=0
                         if re.match(pattern, output.strip()) == None:
diff --git a/tools/check_op_benchmark_result.py b/tools/check_op_benchmark_result.py
index 8fce508102282d165230192426702926606389f0..6c4e0fc06b6a5fcbf5392e01754dfb63a594d456 100644
--- a/tools/check_op_benchmark_result.py
+++ b/tools/check_op_benchmark_result.py
@@ -40,7 +40,7 @@ def parse_log_file(log_file):
         for line in f.read().strip().split('\n')[::-1]:
             try:
                 result = json.loads(line)
-                if result.get("disabled", False) == True:
+                if result.get("disabled", False):
                     return None
                 return result
             except ValueError:
diff --git a/tools/get_pr_ut.py b/tools/get_pr_ut.py
index 0788da6e116ec040d935380ac51056df1057ef16..fcfa68bb4da48e72d6fedc889eeaeca8084c999b 100644
--- a/tools/get_pr_ut.py
+++ b/tools/get_pr_ut.py
@@ -349,7 +349,7 @@ class PRChecker(object):
                     file_list.append(filename)
                 else:
                     isWhiteFile = self.get_is_white_file(filename)
-                    if isWhiteFile == False:
+                    if not isWhiteFile:
                         file_list.append(filename)
                     else:
                         filterFiles.append(filename)
@@ -417,7 +417,7 @@ class PRChecker(object):
                             == tempfilename.split(".")[0]
                         ):
                             f_judge_in_added_ut = True
-                if f_judge_in_added_ut == True:
+                if f_judge_in_added_ut:
                     print(
                         "Adding new unit tests not hit mapFiles: %s"
                         % f_judge
diff --git a/tools/get_single_test_cov.py b/tools/get_single_test_cov.py
index 266872feaf4e36d5a99ea24c41d536bf619f3156..ee5f2d9fd5055ff379260cbbdc118f95833637a5 100644
--- a/tools/get_single_test_cov.py
+++ b/tools/get_single_test_cov.py
@@ -91,7 +91,7 @@ def analysisFNDAFile(rootPath, test):
                     if matchObj == None:
                         OP_REGIST = False
                         break
-            if OP_REGIST == False:
+            if not OP_REGIST:
                 related_file_list.append(clazz_filename)
                 os.system(
                     'echo %s >> %s' % (clazz_filename, related_ut_map_file)
                 )
diff --git a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
index 313d844d6f6ae7fe54072e39b7f6ad7256c8bb73..7ece773aa7855cad85bba4ca74f37b32292ec72c 100644
--- a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
+++ b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
@@ -122,14 +122,14 @@ def generate_all_ops_inputs_outputs_map(op_descs):
         outpus = list()
         for input_ in op_proto[INPUTS]:
             if (
-                op_proto[INPUTS][input_][EXTRA] != True
-                and op_proto[INPUTS][input_][INTERMEDIATE] != True
+                not op_proto[INPUTS][input_][EXTRA]
+                and not op_proto[INPUTS][input_][INTERMEDIATE]
             ):
                 inputs.append(input_)
         for output_ in op_proto[OUTPUTS]:
             if (
-                op_proto[OUTPUTS][output_][EXTRA] != True
-                and op_proto[OUTPUTS][output_][INTERMEDIATE] != True
+                not op_proto[OUTPUTS][output_][EXTRA]
+                and not op_proto[OUTPUTS][output_][INTERMEDIATE]
             ):
                 outpus.append(output_)
         ops_inputs_map[op_type] = inputs
@@ -214,9 +214,9 @@ def get_constraint(op_type, op_proto):
     optional_input_num_ = 0
     for input_ in op_proto[INPUTS]:
         if (
-            op_proto[INPUTS][input_][EXTRA] != True
-            and op_proto[INPUTS][input_][INTERMEDIATE] != True
-            and op_proto[INPUTS][input_][DISPENSABLE] == True
+            not op_proto[INPUTS][input_][EXTRA]
+            and not op_proto[INPUTS][input_][INTERMEDIATE]
+            and op_proto[INPUTS][input_][DISPENSABLE]
         ):
             optional_input_num_ += 1
     if optional_input_num_ > 1:
@@ -306,11 +306,11 @@ def convert_op_proto_into_mlir(op_descs):
         # 2.3.1 inputs
         for input_ in op_proto[INPUTS]:
             if (
-                op_proto[INPUTS][input_][EXTRA] != True
-                and op_proto[INPUTS][input_][INTERMEDIATE] != True
+                not op_proto[INPUTS][input_][EXTRA]
+                and not op_proto[INPUTS][input_][INTERMEDIATE]
             ):
-                if op_proto[INPUTS][input_][DISPENSABLE] != True:
-                    if op_proto[INPUTS][input_][DUPLICABLE] != True:
+                if not op_proto[INPUTS][input_][DISPENSABLE]:
+                    if not op_proto[INPUTS][input_][DUPLICABLE]:
                         ARGUMENTS = (
                             ARGUMENTS + " PD_Tensor:$" + input_ + ","
                         )
@@ -319,7 +319,7 @@ def convert_op_proto_into_mlir(op_descs):
                             ARGUMENTS + " PD_Tensor_Array:$" + input_ + ","
                         )
                 else:
-                    if op_proto[INPUTS][input_][DUPLICABLE] != True:
+                    if not op_proto[INPUTS][input_][DUPLICABLE]:
                         ARGUMENTS = (
                             ARGUMENTS
                             + " Optional:$"
@@ -350,7 +350,7 @@ def convert_op_proto_into_mlir(op_descs):

         # 2.3.2 attributes
         for attr in op_proto[ATTRS]:
-            if (op_proto[ATTRS][attr][EXTRA] == True) or (
+            if (op_proto[ATTRS][attr][EXTRA]) or (
                 attr in skipped_attr_list
             ):
                 continue
@@ -434,10 +434,10 @@ def convert_op_proto_into_mlir(op_descs):
         outputs = ""
         for output_ in op_proto[OUTPUTS]:
             if (
-                op_proto[OUTPUTS][output_][EXTRA] != True
-                and op_proto[OUTPUTS][output_][INTERMEDIATE] != True
+                not op_proto[OUTPUTS][output_][EXTRA]
+                and not op_proto[OUTPUTS][output_][INTERMEDIATE]
             ):
-                if op_proto[OUTPUTS][output_][DUPLICABLE] != True:
+                if not op_proto[OUTPUTS][output_][DUPLICABLE]:
                     outputs = outputs + "PD_Tensor:${},".format(output_)
                 else:
                     outputs = outputs + "PD_Tensor_Array:${},".format(
diff --git a/tools/sampcd_processor.py b/tools/sampcd_processor.py
index 24af09893e355e0ec27168c3390803dc20dd461e..5afa47dc4fa92b9aae2bc269a58ac4a11a659e75 100644
--- a/tools/sampcd_processor.py
+++ b/tools/sampcd_processor.py
@@ -376,7 +376,7 @@ Please use '.. code-block:: python' to format the sample code."""
         # None - no sample code found;
         # False - it need other special equipment or environment.
         # so, the following conditional statements are intentionally arranged.
-        if matched == True:
+        if matched:
             tfname = os.path.join(
                 SAMPLECODE_TEMPDIR,
                 '{}_example{}'.format(
@@ -395,7 +395,7 @@ Please use '.. code-block:: python' to format the sample code."""
                 )
             )
             SUMMARY_INFO['skiptest'].append("{}-{}".format(name, cb['id']))
-        elif matched == False:
+        elif not matched:
             logger.info(
                 '{}\' code block (name:{}, id:{}) required({}) not match capacity({}).'.format(
                     name,
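
Note on the pattern applied throughout the hunks above (explanatory footnote, not part of the patch): `x == True` / `x == False` compare by value, a bare `if x:` tests truthiness, and `x is False` tests identity with the `False` singleton. The hunks use truthiness where the variable is a plain bool (e.g. `thread_sep`, `matched`), and identity where the argument may legitimately be `None`, a `ParamAttr`, or `False` and only the literal `False` should change behaviour (e.g. `weight_attr is not False`); rewriting that case as `if weight_attr:` would wrongly treat `None` the same as `False`. A minimal plain-Python sketch of the three checks (the variable name is illustrative only):

    # Illustrative sketch, not part of the diff:
    # value equality (==) vs. truthiness (if x:) vs. identity (is False).
    for value in (True, False, None, 0, 1, object()):
        print(
            repr(value),
            value == False,   # value equality: 0 == False is True, None == False is False
            bool(value),      # truthiness: what a bare "if value:" evaluates
            value is False,   # identity: only the literal False matches
        )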