From 5a2ab683e28359145a5f938fabb78b3f80c53a68 Mon Sep 17 00:00:00 2001 From: Nyakku Shigure Date: Tue, 1 Nov 2022 17:12:28 +0800 Subject: [PATCH] [CodeStyle][E712] use `if cond`/`if cond is True` for comparison with `True` (#47464) * [CodeStyle][E712] use `if cond`/`if cond is True` for comparison with `True` * revert changes in fluid * revert unrelated file * revert changes in norm * revert changes in auto_parallel_amp * fix norm and auto_parallel_amp * revert a typo fix due to fixed at #47477 --- .../generator/python_c_gen.py | 2 +- .../api/full_ILSVRC2012_val_preprocess.py | 2 +- python/paddle/audio/backends/init_backend.py | 2 +- python/paddle/dataset/conll05.py | 4 +- .../auto_parallel/process_group.py | 2 +- .../auto_parallel/tuner/parallel_tuner.py | 2 +- .../distributed/fleet/dataset/dataset.py | 2 +- python/paddle/distributed/fleet/fleet.py | 2 +- python/paddle/distributed/fleet/launch.py | 2 +- .../paddle/distributed/fleet/launch_utils.py | 2 +- .../distributed/fleet/layers/mpu/mp_ops.py | 2 +- .../gradient_merge_optimizer.py | 2 +- .../graph_execution_optimizer.py | 2 +- .../meta_optimizers/pipeline_optimizer.py | 2 +- .../meta_optimizers/raw_program_optimizer.py | 2 +- .../meta_optimizers/recompute_optimizer.py | 2 +- .../fleet/meta_optimizers/sharding/utils.py | 8 ++-- .../tensor_parallel_optimizer.py | 2 +- .../parallel_layers/pp_layers.py | 2 +- python/paddle/distributed/fleet/model.py | 4 +- python/paddle/distributed/fleet/optimizer.py | 2 +- .../distributed/fleet/recompute/recompute.py | 2 +- .../distributed/fleet/runtime/the_one_ps.py | 2 +- .../fleet/utils/hybrid_parallel_inference.py | 2 +- .../paddle/distributed/fleet/utils/ps_util.py | 2 +- .../distributed/passes/auto_parallel_amp.py | 16 ++++---- .../distributed/passes/auto_parallel_fp16.py | 19 ++++----- .../distributed/passes/ps_server_pass.py | 2 +- .../distributed/passes/ps_trainer_pass.py | 2 +- python/paddle/distributed/ps/the_one_ps.py | 12 +++--- python/paddle/distributed/ps/utils/public.py | 2 +- .../fluid/tests/book/test_recognize_digits.py | 2 +- .../unittests/auto_parallel/test_to_static.py | 4 +- .../test_autograd_functional_dynamic.py | 2 +- .../test_fleet_sharding_meta_optimizer.py | 24 +++--------- .../test_imperative_auto_mixed_precision.py | 6 +-- ...perative_auto_mixed_precision_for_eager.py | 6 +-- .../dygraph_to_static/test_cycle_gan.py | 4 +- .../dygraph_to_static/test_tensor_methods.py | 2 +- .../ir/inference/test_conv_bn_fuse_pass.py | 6 +-- .../test_emb_eltwise_layernorm_fuse_pass.py | 4 +- ...st_mkldnn_conv_affine_channel_fuse_pass.py | 6 +-- .../ir/inference/test_trt_convert_concat.py | 2 +- .../ir/inference/test_trt_convert_dropout.py | 2 +- .../ir/inference/test_trt_convert_gather.py | 4 +- .../ir/inference/test_trt_convert_gelu.py | 2 +- .../test_trt_convert_nearest_interp.py | 2 +- .../ir/inference/test_trt_convert_pool2d.py | 10 ++--- .../inference/test_trt_convert_roi_align.py | 4 +- .../test_trt_convert_shuffle_channel.py | 2 +- .../test_trt_convert_skip_layernorm.py | 2 +- .../ir/inference/test_trt_convert_stack.py | 2 +- .../ir/inference/test_trt_convert_tile.py | 2 +- .../ir/inference/test_trt_convert_top_k_v2.py | 2 +- .../inference/test_trt_convert_transpose.py | 2 +- .../ir/inference/test_trt_convert_yolo_box.py | 4 +- .../mkldnn/test_elementwise_add_mkldnn_op.py | 6 +-- .../mkldnn/test_elementwise_mul_mkldnn_op.py | 6 +-- ...st_onnx_format_quantization_mobilenetv1.py | 2 +- .../paddle/fluid/tests/unittests/op_test.py | 35 ++++++++--------- 
.../fluid/tests/unittests/op_test_xpu.py | 8 ++-- .../tests/unittests/ps/ps_dnn_trainer.py | 2 +- .../fluid/tests/unittests/test_adam_op.py | 4 +- .../fluid/tests/unittests/test_adamw_op.py | 4 +- .../test_async_ssa_graph_executor_mnist.py | 4 +- .../tests/unittests/test_batch_norm_op.py | 2 +- .../tests/unittests/test_batch_norm_op_v2.py | 2 +- .../tests/unittests/test_box_coder_op.py | 8 ++-- .../fluid/tests/unittests/test_center_loss.py | 2 +- .../tests/unittests/test_compare_reduce_op.py | 2 +- .../fluid/tests/unittests/test_conv2d_op.py | 25 ++++++------ .../unittests/test_conv2d_transpose_op.py | 8 ++-- .../fluid/tests/unittests/test_conv3d_op.py | 8 ++-- .../tests/unittests/test_dataset_download.py | 2 +- .../fluid/tests/unittests/test_dist_base.py | 12 +++--- .../unittests/test_elementwise_add_op.py | 12 +++--- .../unittests/test_elementwise_mul_op.py | 10 ++--- .../tests/unittests/test_empty_like_op.py | 4 +- .../fluid/tests/unittests/test_empty_op.py | 12 +++--- .../test_imperative_layer_trainable.py | 8 ++-- .../fluid/tests/unittests/test_mean_op.py | 4 +- .../fluid/tests/unittests/test_momentum_op.py | 4 +- .../tests/unittests/test_multiclass_nms_op.py | 12 ++---- .../fluid/tests/unittests/test_ops_nms.py | 2 +- .../fluid/tests/unittests/test_optimizer.py | 2 +- .../test_parallel_executor_drop_scope.py | 4 +- .../fluid/tests/unittests/test_pool2d_op.py | 18 ++++----- .../fluid/tests/unittests/test_pool3d_op.py | 2 +- .../fluid/tests/unittests/test_sgd_op.py | 8 ++-- .../fluid/tests/unittests/test_softmax_op.py | 14 +++---- .../test_softmax_with_cross_entropy_op.py | 2 +- .../unittests/test_sparse_attention_op.py | 10 ++--- .../fluid/tests/unittests/test_var_base.py | 8 ++-- .../fluid/tests/unittests/test_where_op.py | 4 +- .../unittests/xpu/test_batch_norm_op_xpu.py | 2 +- .../tests/unittests/xpu/test_conv2d_op_xpu.py | 30 +++----------- .../unittests/xpu/test_dropout_op_xpu.py | 4 +- .../xpu/test_elementwise_mul_op_xpu.py | 6 +-- .../tests/unittests/xpu/test_empty_op_xpu.py | 4 +- .../xpu/test_fused_gemm_epilogue_op_xpu.py | 4 +- .../tests/unittests/xpu/test_matmul_op_xpu.py | 8 ++-- .../unittests/xpu/test_matmul_v2_op_xpu.py | 2 +- .../tests/unittests/xpu/test_pool2d_op_xpu.py | 2 +- .../tests/unittests/xpu/test_where_op_xpu.py | 4 +- python/paddle/hapi/model_summary.py | 2 +- python/paddle/incubate/autograd/primrules.py | 4 +- python/paddle/nn/functional/common.py | 4 +- python/paddle/nn/functional/loss.py | 20 +++++----- python/paddle/nn/functional/pooling.py | 2 +- python/paddle/nn/layer/distance.py | 2 +- python/paddle/nn/layer/norm.py | 17 ++++---- python/paddle/nn/layer/rnn.py | 6 +-- python/paddle/nn/quant/quant_layers.py | 2 +- python/paddle/profiler/profiler_statistic.py | 4 +- python/paddle/profiler/utils.py | 4 +- python/paddle/sparse/nn/layer/norm.py | 5 ++- python/paddle/tensor/linalg.py | 10 ++--- python/paddle/tensor/manipulation.py | 8 ++-- python/paddle/tensor/random.py | 2 +- python/paddle/text/datasets/conll05.py | 4 +- tools/analysisPyXml.py | 39 +++++++++---------- tools/check_op_benchmark_result.py | 2 +- tools/get_pr_ut.py | 4 +- tools/get_single_test_cov.py | 2 +- ...rate_pd_op_dialect_from_paddle_op_maker.py | 32 +++++++-------- tools/sampcd_processor.py | 4 +- 126 files changed, 347 insertions(+), 401 deletions(-) diff --git a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py index dc38d3d46b2..0ceff353608 100644 --- 
a/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py +++ b/paddle/fluid/eager/auto_code_generator/generator/python_c_gen.py @@ -532,7 +532,7 @@ class PythonCGenerator(GeneratorBase): ) status = f_generator.run() - if status == True: + if status: self.python_c_functions_str += ( f_generator.python_c_function_str + "\n" ) diff --git a/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py b/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py index 219ce72077c..a5ff3717199 100644 --- a/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py +++ b/paddle/fluid/inference/tests/api/full_ILSVRC2012_val_preprocess.py @@ -45,7 +45,7 @@ def resize_short(img, target_size): def crop_image(img, target_size, center): width, height = img.size size = target_size - if center == True: + if center: w_start = (width - size) // 2 h_start = (height - size) // 2 else: diff --git a/python/paddle/audio/backends/init_backend.py b/python/paddle/audio/backends/init_backend.py index 3ca77ba316f..6bf972d435f 100644 --- a/python/paddle/audio/backends/init_backend.py +++ b/python/paddle/audio/backends/init_backend.py @@ -79,7 +79,7 @@ def list_available_backends() -> List[str]: if "paddleaudio" in sys.modules: version = paddleaudio.__version__ - if _check_version(version) == False: + if not _check_version(version): err_msg = ( "the version of paddleaudio installed is {},\n" "please ensure the paddleaudio >= 1.0.2." diff --git a/python/paddle/dataset/conll05.py b/python/paddle/dataset/conll05.py index 22038594f60..08a383badae 100644 --- a/python/paddle/dataset/conll05.py +++ b/python/paddle/dataset/conll05.py @@ -109,9 +109,9 @@ def corpus_reader(data_path, words_name, props_name): lbl_seq = [] verb_word = '' for l in lbl: - if l == '*' and is_in_bracket == False: + if l == '*' and not is_in_bracket: lbl_seq.append('O') - elif l == '*' and is_in_bracket == True: + elif l == '*' and is_in_bracket: lbl_seq.append('I-' + cur_tag) elif l == '*)': lbl_seq.append('I-' + cur_tag) diff --git a/python/paddle/distributed/auto_parallel/process_group.py b/python/paddle/distributed/auto_parallel/process_group.py index 9883f116f4e..10d2556f299 100644 --- a/python/paddle/distributed/auto_parallel/process_group.py +++ b/python/paddle/distributed/auto_parallel/process_group.py @@ -106,7 +106,7 @@ class ProcessGroup: return else: assert ( - self.is_instantiate() == False + not self.is_instantiate() ), "Cannot add new ranks after instantiating the process group" self._ranks.extend(new_ranks) self._ranks = sorted(list(set(self.ranks))) diff --git a/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py b/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py index 0ed03defbed..9f31766f19f 100644 --- a/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py +++ b/python/paddle/distributed/auto_parallel/tuner/parallel_tuner.py @@ -268,7 +268,7 @@ class ParallelTuner: return for idx, dim in enumerate(dims_list): - if visited[idx] == False: + if not visited[idx]: dims_mapping[start] = dim visited[idx] = True self._generate_dims_mapping_candidates_helper( diff --git a/python/paddle/distributed/fleet/dataset/dataset.py b/python/paddle/distributed/fleet/dataset/dataset.py index b74f700391c..0217e012579 100755 --- a/python/paddle/distributed/fleet/dataset/dataset.py +++ b/python/paddle/distributed/fleet/dataset/dataset.py @@ -514,7 +514,7 @@ class InMemoryDataset(DatasetBase): self._set_fleet_send_batch_size(kwargs[key]) elif key == "fleet_send_sleep_seconds": 
self._set_fleet_send_sleep_seconds(kwargs[key]) - elif key == "fea_eval" and kwargs[key] == True: + elif key == "fea_eval" and kwargs[key]: candidate_size = kwargs.get("candidate_size", 10000) self._set_fea_eval(candidate_size, True) diff --git a/python/paddle/distributed/fleet/fleet.py b/python/paddle/distributed/fleet/fleet.py index db3aae28a20..2630fa8283e 100644 --- a/python/paddle/distributed/fleet/fleet.py +++ b/python/paddle/distributed/fleet/fleet.py @@ -303,7 +303,7 @@ class Fleet(object): paddle.distributed.init_parallel_env() # hybrid parallel not support for npu/xpu - if self._user_defined_strategy.heter_ccl_mode == False: + if not self._user_defined_strategy.heter_ccl_mode: # init hybrid parallel environment in dygraph if tp._HYBRID_PARALLEL_GROUP is None: self._init_hybrid_parallel_env() diff --git a/python/paddle/distributed/fleet/launch.py b/python/paddle/distributed/fleet/launch.py index ca301c52a7f..998f64c3ec2 100755 --- a/python/paddle/distributed/fleet/launch.py +++ b/python/paddle/distributed/fleet/launch.py @@ -369,7 +369,7 @@ def get_cluster_info(args): if os.environ.get('FLAGS_START_PORT') is not None: start_port = os.environ.get('FLAGS_START_PORT') # auto mapping between processes and devices for auto-parallel - if args.enable_auto_mapping == True: + if args.enable_auto_mapping: assert ( args.cluster_topo_path is not None ), "The cluster topology must be provied when enabling auto mapping." diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py index 4ec2aa07787..d4b6b86119f 100755 --- a/python/paddle/distributed/fleet/launch_utils.py +++ b/python/paddle/distributed/fleet/launch_utils.py @@ -1582,7 +1582,7 @@ class ParameterServerLauncher(object): x.strip().split(":")[0] for x in self.worker_endpoints.split(",") ] - if self.with_coordinator == True: + if self.with_coordinator: self.coordinator_endpoints_ips = [ x.strip().split(":")[0] for x in self.coordinator_endpoints.split(",") diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py index b1627d5a3b7..04c4272ee0e 100644 --- a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py +++ b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py @@ -582,7 +582,7 @@ def _parallel_linear( # set is_distributed for splited bias # if a linear layer is splited by row, each rank would hold a complete bias and they should be the same in each rank. 
# if a linear layer is splited by col, the bias would also be split into each rank as its weight - if axis == 1 and linear._bias_attr != False: + if axis == 1 and linear._bias_attr is not False: _set_var_distributed(linear.bias) if not gather_out: diff --git a/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py index eb97122587f..9a2fb127999 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/gradient_merge_optimizer.py @@ -53,7 +53,7 @@ class GradientMergeOptimizer(MetaOptimizerBase): return False can_apply = ( - self.user_defined_strategy.gradient_merge == True + self.user_defined_strategy.gradient_merge ) and self.user_defined_strategy.gradient_merge_configs["k_steps"] > 1 return can_apply diff --git a/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py index dd2ccfc7ff7..a1a33992d59 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py @@ -177,7 +177,7 @@ class GraphExecutionOptimizer(MetaOptimizerBase): gradient_scale_configs['scale_strategy'] ] - if self.user_defined_strategy.recompute == True: + if self.user_defined_strategy.recompute: logging.warn( "set enable_sequential_execution=True since you have enable the recompute strategy" ) diff --git a/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py index dfb8fe5b224..dfadeff3807 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/pipeline_optimizer.py @@ -66,7 +66,7 @@ class PipelineOptimizer(MetaOptimizerBase): if self.use_sharding: return False - if self.user_defined_strategy.pipeline == True: + if self.user_defined_strategy.pipeline: return True return False diff --git a/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py index 6ae89a9754e..53972452d80 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/raw_program_optimizer.py @@ -65,7 +65,7 @@ class RawProgramOptimizer(MetaOptimizerBase): if not self.role_maker._is_collective: return False - if self.without_graph_optimization == True: + if self.without_graph_optimization: return True return False diff --git a/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py index 524c3a123ab..7a817b6fd04 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/recompute_optimizer.py @@ -55,7 +55,7 @@ class RecomputeOptimizer(MetaOptimizerBase): if not self.role_maker._is_collective: return False - if self.user_defined_strategy.recompute == True: + if self.user_defined_strategy.recompute: if ( len(self.user_defined_strategy.recompute_configs["checkpoints"]) == 0 diff --git a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py index 1ec9457854d..ea42130300f 100755 --- 
a/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py +++ b/python/paddle/distributed/fleet/meta_optimizers/sharding/utils.py @@ -38,7 +38,7 @@ def check_broadcast(block): broadcast_vars = {} for idx, op in enumerate(block.ops): if op.type == "c_broadcast": - if op.all_attrs()["use_calc_stream"] == False: + if not op.all_attrs()["use_calc_stream"]: var_name = op.desc.input_arg_names()[0] if "@BroadCast" in var_name: if var_name in broadcast_vars: @@ -72,7 +72,7 @@ def check_broadcast(block): last_sync_calc_op_idx = idx continue if op.type == "c_broadcast": - if op.all_attrs()["use_calc_stream"] == False: + if not op.all_attrs()["use_calc_stream"]: var_name = op.desc.input_arg_names()[0] if "@BroadCast" in var_name: if broadcast_vars[var_name]["fill_constant_pos"] != -1: @@ -117,7 +117,7 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): for idx, op in enumerate(block.ops): # sharding use both allreduce and reduce to sync grad if op.type == "c_allreduce_sum" or op.type == "c_reduce_sum": - if op.all_attrs()["use_calc_stream"] == False: + if not op.all_attrs()["use_calc_stream"]: ring_id = op.desc.attr("ring_id") var_name = op.desc.input_arg_names()[0] param = var_name.split("@")[0] @@ -153,7 +153,7 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): dp_grads_status[var_name] = 1 # check sharding allreduce and reduce but skip megatron allreduce elif op.type == "c_allreduce_sum" or op.type == "c_reduce_sum": - if op.all_attrs()["use_calc_stream"] == False: + if not op.all_attrs()["use_calc_stream"]: var_name = op.desc.input_arg_names()[0] ring_id = op.desc.attr("ring_id") if ring_id == sharding_ring_id: diff --git a/python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py index 0cd86ad08bd..8f2e113b52e 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/tensor_parallel_optimizer.py @@ -57,7 +57,7 @@ class TensorParallelOptimizer(MetaOptimizerBase): if not self.role_maker._is_collective: return False - if self.user_defined_strategy.tensor_parallel == True: + if self.user_defined_strategy.tensor_parallel: return True return False diff --git a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py index fec5005627b..29cbe0d9dca 100755 --- a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py +++ b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py @@ -720,7 +720,7 @@ class PipelineLayer(Layer): def _need_recompute(self, funcs, inputs): if not any( - input_.stop_gradient == False + not input_.stop_gradient for input_ in inputs if isinstance(input_, paddle.Tensor) ): diff --git a/python/paddle/distributed/fleet/model.py b/python/paddle/distributed/fleet/model.py index 21e6d07ad55..a132860aac1 100644 --- a/python/paddle/distributed/fleet/model.py +++ b/python/paddle/distributed/fleet/model.py @@ -90,7 +90,7 @@ def distributed_model(model): amp_enable = False strategy = fleet_env._user_defined_strategy - if strategy.amp == True: + if strategy.amp: amp_enable = True amp_level = "O2" if strategy.amp_configs['use_pure_fp16'] else "O1" if amp_level.upper() == "O2": @@ -122,7 +122,7 @@ def distributed_model(model): use_dynamic_loss_scaling=use_dynamic_loss_scaling, ) - if strategy.heter_ccl_mode == True: + if 
strategy.heter_ccl_mode: distributed_model = paddle.DataParallel( model, comm_buffer_size=strategy.fuse_grad_size_in_MB, diff --git a/python/paddle/distributed/fleet/optimizer.py b/python/paddle/distributed/fleet/optimizer.py index 37a3a896f6b..f67c108486a 100644 --- a/python/paddle/distributed/fleet/optimizer.py +++ b/python/paddle/distributed/fleet/optimizer.py @@ -59,7 +59,7 @@ def _dygraph_distributed_optimizer(optimizer, strategy=None): fleet_env._context = {} if fleet_env.worker_num() > 1: - if fleet_env._user_defined_strategy.heter_ccl_mode == False: + if not fleet_env._user_defined_strategy.heter_ccl_mode: return HybridParallelOptimizer( optimizer, fleet_env._hcg, fleet_env._user_defined_strategy ) diff --git a/python/paddle/distributed/fleet/recompute/recompute.py b/python/paddle/distributed/fleet/recompute/recompute.py index 2657c60f02c..fd8cffdff00 100755 --- a/python/paddle/distributed/fleet/recompute/recompute.py +++ b/python/paddle/distributed/fleet/recompute/recompute.py @@ -41,7 +41,7 @@ def detach_variable(inputs): def check_recompute_necessary(inputs): if not any( - input_.stop_gradient == False + not input_.stop_gradient for input_ in inputs if isinstance(input_, (core.eager.Tensor, paddle.Tensor)) ): diff --git a/python/paddle/distributed/fleet/runtime/the_one_ps.py b/python/paddle/distributed/fleet/runtime/the_one_ps.py index 5a0be9a1e01..7de34aa6e1c 100644 --- a/python/paddle/distributed/fleet/runtime/the_one_ps.py +++ b/python/paddle/distributed/fleet/runtime/the_one_ps.py @@ -337,7 +337,7 @@ class CommonAccessor: self.table_num = size self.table_dim = single_dim - if oop.type != 'adam' and adam_d2sum == True: + if oop.type != 'adam' and adam_d2sum: print('optimization algorithm is not adam, set adam_d2sum False') adam_d2sum = False print("adam_d2sum:", adam_d2sum) diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py index 0c5bff02ed8..6dd100a6f9e 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py @@ -231,7 +231,7 @@ class HybridParallelInferenceHelper(object): ) else: if isinstance(role_maker, fleet.base.role_maker.RoleMakerBase): - assert role_maker._is_collective == True + assert role_maker._is_collective self.role_maker = role_maker # communication_group info diff --git a/python/paddle/distributed/fleet/utils/ps_util.py b/python/paddle/distributed/fleet/utils/ps_util.py index 9b079d64bb5..d283dbe1fe8 100644 --- a/python/paddle/distributed/fleet/utils/ps_util.py +++ b/python/paddle/distributed/fleet/utils/ps_util.py @@ -210,7 +210,7 @@ class DistributedInfer: if found: break if found: - if output_indexes[j] == True: + if output_indexes[j]: warnings.warn( "unable to re-arrange dags order to combine distributed embedding ops" ) diff --git a/python/paddle/distributed/passes/auto_parallel_amp.py b/python/paddle/distributed/passes/auto_parallel_amp.py index 3305982f505..c8932069a79 100644 --- a/python/paddle/distributed/passes/auto_parallel_amp.py +++ b/python/paddle/distributed/passes/auto_parallel_amp.py @@ -80,9 +80,9 @@ class AMPState(object): fwd_op_id = dist_op_context.grad_op_id_to_op_id[ op.desc.original_id() ] - if self._is_fp16_op(fwd_op_id) == True: + if self._is_fp16_op(fwd_op_id) is True: self._op_fp16_dict[op.desc.original_id()] = True - elif self._is_fp16_op(fwd_op_id) == False: + elif self._is_fp16_op(fwd_op_id) is False: 
self._op_fp16_dict[op.desc.original_id()] = False elif int(op.attr('op_role')) == int(OpRole.Optimize): break @@ -132,13 +132,13 @@ class AMPState(object): # if it's one of inputs if ( self._is_fp16_op(prev_op.desc.original_id()) - == False + is False or prev_op.type in amp_lists.black_list ): is_black_op = True elif ( self._is_fp16_op(prev_op.desc.original_id()) - == True + is True or prev_op.type in amp_lists.white_list ): is_white_op = True @@ -161,7 +161,7 @@ class AMPState(object): num_cast_ops = 0 if int(op.attr('op_role')) == int(OpRole.Backward): break - if self._is_fp16_op(op.desc.original_id()) == False: + if self._is_fp16_op(op.desc.original_id()) is False: num_cast_ops = self._insert_cast_op_forward( op, idx, @@ -169,7 +169,7 @@ class AMPState(object): core.VarDesc.VarType.FP32, dist_context, ) - elif self._is_fp16_op(op.desc.original_id()) == True: + elif self._is_fp16_op(op.desc.original_id()) is True: num_cast_ops = self._insert_cast_op_forward( op, idx, @@ -302,7 +302,7 @@ class AMPState(object): grad_op_orig_id = grad_op.desc.original_id() dist_op_context = dist_context.dist_op_context if grad_op_orig_id in dist_op_context.grad_op_id_to_op_id: - if self._is_fp16_op(grad_op_orig_id) == False: # fp32 + if self._is_fp16_op(grad_op_orig_id) is False: # fp32 num_cast_ops = self._insert_cast_op_backward( grad_op, idx, @@ -311,7 +311,7 @@ class AMPState(object): dist_context, appended_grad_times, ) - elif self._is_fp16_op(grad_op_orig_id) == True: # fp16 + elif self._is_fp16_op(grad_op_orig_id) is True: # fp16 num_cast_ops = self._insert_cast_op_backward( grad_op, idx, diff --git a/python/paddle/distributed/passes/auto_parallel_fp16.py b/python/paddle/distributed/passes/auto_parallel_fp16.py index cf1b2d45290..8ad8b2a8fad 100644 --- a/python/paddle/distributed/passes/auto_parallel_fp16.py +++ b/python/paddle/distributed/passes/auto_parallel_fp16.py @@ -235,10 +235,7 @@ class FP16State(object): for op in block.ops: if is_forward_op(op): # NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python - if ( - self._is_fp16_op(op.desc.original_id()) == True - or op.type == "cast" - ): + if self._is_fp16_op(op.desc.original_id()) or op.type == "cast": for in_name in op.input_names: if _keep_fp32_input(op, in_name): continue @@ -255,7 +252,7 @@ class FP16State(object): self.set_var_to_fp16(out_var_name, block) set_op_dtype_to_fp16(op) # NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python - elif self._is_fp16_op(op.desc.original_id()) == False: + elif not self._is_fp16_op(op.desc.original_id()): for out_var_name in op.output_arg_names: out_var = block.vars.get(out_var_name) if out_var is None or out_var.type not in _valid_types: @@ -263,7 +260,7 @@ class FP16State(object): if out_var.dtype == core.VarDesc.VarType.FP16: out_var.desc.set_dtype(core.VarDesc.VarType.FP32) elif is_backward_op(op): - if self._is_fp16_op(op.desc.original_id()) == True: + if self._is_fp16_op(op.desc.original_id()): for out_name in op.output_names: if _keep_fp32_output(op, out_name): continue @@ -271,7 +268,7 @@ class FP16State(object): self.set_var_to_fp16(out_var_name, block) set_op_dtype_to_fp16(op) # NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python - elif self._is_fp16_op(op.desc.original_id()) == False: + elif not self._is_fp16_op(op.desc.original_id()): for out_var_name in op.output_arg_names: out_var = block.vars.get(out_var_name) if out_var is None or out_var.type not in _valid_types: @@ -290,7 +287,7 @@ class FP16State(object): idx += 1 
continue elif is_forward_op(op): - if self._is_fp16_op(op.desc.original_id()) == False: + if not self._is_fp16_op(op.desc.original_id()): num_cast_ops = self._insert_forward_cast_ops( op, idx, @@ -299,7 +296,7 @@ class FP16State(object): core.VarDesc.VarType.FP32, self.dist_context, ) - elif self._is_fp16_op(op.desc.original_id()) == True: + elif self._is_fp16_op(op.desc.original_id()): num_cast_ops = self._insert_forward_cast_ops( op, idx, @@ -310,7 +307,7 @@ class FP16State(object): ) elif is_backward_op(op): if op.desc.original_id() in dist_op_context.grad_op_id_to_op_id: - if self._is_fp16_op(op.desc.original_id()) == False: + if not self._is_fp16_op(op.desc.original_id()): num_cast_ops = self._insert_backward_cast_ops( op, idx, @@ -319,7 +316,7 @@ class FP16State(object): core.VarDesc.VarType.FP32, self.dist_context, ) - elif self._is_fp16_op(op.desc.original_id()) == True: + elif self._is_fp16_op(op.desc.original_id()): num_cast_ops = self._insert_backward_cast_ops( op, idx, diff --git a/python/paddle/distributed/passes/ps_server_pass.py b/python/paddle/distributed/passes/ps_server_pass.py index 37e5622ea8e..c8f99895a83 100755 --- a/python/paddle/distributed/passes/ps_server_pass.py +++ b/python/paddle/distributed/passes/ps_server_pass.py @@ -140,7 +140,7 @@ class AddLrDecayTablePass(PassBase): def _apply_single_impl(self, main_program, startup_program, pass_ctx): attrs = pass_ctx._attrs - if hasattr(attrs['origin_main_program'], 'lr_sheduler') == False: + if not hasattr(attrs['origin_main_program'], 'lr_sheduler'): return assert isinstance( diff --git a/python/paddle/distributed/passes/ps_trainer_pass.py b/python/paddle/distributed/passes/ps_trainer_pass.py index f99d9f316d4..56f73078d54 100755 --- a/python/paddle/distributed/passes/ps_trainer_pass.py +++ b/python/paddle/distributed/passes/ps_trainer_pass.py @@ -304,7 +304,7 @@ class DistributedOpsPass(PassBase): if found: break if found: - if output_indexes[j] == True: + if output_indexes[j]: warnings.warn( "unable to re-arrange dags order to combine distributed embedding ops" ) diff --git a/python/paddle/distributed/ps/the_one_ps.py b/python/paddle/distributed/ps/the_one_ps.py index 86766d60ae8..d341a95b24b 100755 --- a/python/paddle/distributed/ps/the_one_ps.py +++ b/python/paddle/distributed/ps/the_one_ps.py @@ -443,7 +443,7 @@ class CommonAccessor(Accessor): self.table_num = size self.table_dim = single_dim - if oop.type != 'adam' and adam_d2sum == True: + if oop.type != 'adam' and adam_d2sum: print('optimization algorithm is not adam, set adam_d2sum False') adam_d2sum = False print("adam_d2sum:", adam_d2sum) @@ -703,7 +703,7 @@ class SparseTable(Table): if ( ctx.is_tensor_table() or len(ctx.origin_varnames()) < 1 - or (ctx.is_sparse() == False) + or (not ctx.is_sparse()) ): return table_proto.table_id = ctx.table_id() @@ -810,7 +810,7 @@ class GeoSparseTable(SparseTable): if ( ctx.is_tensor_table() or len(ctx.origin_varnames()) < 1 - or (ctx.is_sparse() == False) + or (not ctx.is_sparse()) ): return table_proto.table_id = ctx.table_id() @@ -845,7 +845,7 @@ class DenseTable(Table): if ( ctx.is_tensor_table() or len(ctx.origin_varnames()) < 1 - or (ctx.is_sparse() == True) + or (ctx.is_sparse()) ): return @@ -1281,7 +1281,7 @@ class TheOnePSRuntime(RuntimeBase): if not is_test: if ( self.context['ps_mode'] == DistributedMode.GEO - or self.is_heter_ps_mode == True + or self.is_heter_ps_mode ): self._communicator.init_params(dense_map) else: @@ -1298,7 +1298,7 @@ class TheOnePSRuntime(RuntimeBase): if ( self.context['ps_mode'] == 
DistributedMode.GEO - or self.is_heter_ps_mode == True + or self.is_heter_ps_mode ): if not self._communicator.is_running(): self._communicator.start() diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py index 75182a49721..53628ad7e50 100755 --- a/python/paddle/distributed/ps/utils/public.py +++ b/python/paddle/distributed/ps/utils/public.py @@ -1744,7 +1744,7 @@ def create_backward_block( ): is_skip = True break - if is_skip == True: + if is_skip: continue block_append_op(program, origin_program, heter_block, op) diff --git a/python/paddle/fluid/tests/book/test_recognize_digits.py b/python/paddle/fluid/tests/book/test_recognize_digits.py index fd99eb04d99..9204bc7f123 100644 --- a/python/paddle/fluid/tests/book/test_recognize_digits.py +++ b/python/paddle/fluid/tests/book/test_recognize_digits.py @@ -237,7 +237,7 @@ def main(use_cuda, parallel, nn_type, combine): if not use_cuda and not parallel: save_dirname = "recognize_digits_" + nn_type + ".inference.model" save_full_dirname = "recognize_digits_" + nn_type + ".train.model" - if combine == True: + if combine: model_filename = "__model_combined__" params_filename = "__params_combined__" diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py index 3c8b71e7139..637f1ba844b 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_to_static.py @@ -144,7 +144,7 @@ class TestToStatic(unittest.TestCase): # inputs = InputSpec([batch_size, hidden_size], 'float32', 'x') # labels = InputSpec([batch_size], 'int64', 'label') - assert _non_static_mode() == True + assert _non_static_mode() engine = auto.Engine( model=mlp, loss=loss, @@ -155,7 +155,7 @@ class TestToStatic(unittest.TestCase): engine.fit(dataset, batch_size=batch_size) engine.evaluate(dataset, batch_size=batch_size) engine.predict(dataset, batch_size=batch_size) - assert _non_static_mode() == False + assert not _non_static_mode() class TestLazyInit(unittest.TestCase): diff --git a/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_dynamic.py b/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_dynamic.py index 81f1bdede6f..37af2845807 100644 --- a/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_dynamic.py +++ b/python/paddle/fluid/tests/unittests/autograd/test_autograd_functional_dynamic.py @@ -593,7 +593,7 @@ class TestHessianNoBatch(unittest.TestCase): numerical_hessian = utils._np_concat_matrix_sequence(numerical_hessian) self.x.stop_gradient = False hessian = paddle.incubate.autograd.Hessian(func, self.x) - assert hessian[:].stop_gradient == False + assert not hessian[:].stop_gradient np.testing.assert_allclose( hessian[:].numpy(), numerical_hessian, self.rtol, self.atol ) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py index 57a199c1333..bc725e9d138 100755 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_sharding_meta_optimizer.py @@ -116,9 +116,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): self.optimizer(avg_cost, strategy, train_prog, startup_prog) ops = [op.type for op in avg_cost.block.ops] vars = [x.name 
for x in train_prog.list_vars()] - parameters = [ - x.name for x in train_prog.list_vars() if x.persistable == True - ] + parameters = [x.name for x in train_prog.list_vars() if x.persistable] self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('cast', ops) self.assertIn('check_finite_and_unscale', ops) @@ -227,9 +225,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): ops = [op.type for op in avg_cost.block.ops] vars = [x.name for x in train_prog.list_vars()] - parameters = [ - x.name for x in train_prog.list_vars() if x.persistable == True - ] + parameters = [x.name for x in train_prog.list_vars() if x.persistable] self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('subprog', ''.join(vars)) @@ -316,9 +312,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): ops = [op.type for op in avg_cost.block.ops] vars = [x.name for x in train_prog.list_vars()] - parameters = [ - x.name for x in train_prog.list_vars() if x.persistable == True - ] + parameters = [x.name for x in train_prog.list_vars() if x.persistable] self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('subprog', ''.join(vars)) @@ -445,9 +439,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): ops = [op.type for op in avg_cost.block.ops] vars = [x.name for x in train_prog.list_vars()] - parameters = [ - x.name for x in train_prog.list_vars() if x.persistable == True - ] + parameters = [x.name for x in train_prog.list_vars() if x.persistable] self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('cast', ops) @@ -564,9 +556,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): startup_prog, regularization=regularization, ) - parameters = [ - x.name for x in train_prog.list_vars() if x.persistable == True - ] + parameters = [x.name for x in train_prog.list_vars() if x.persistable] ops = [op.type for op in avg_cost.block.ops] vars = [x.name for x in train_prog.list_vars()] self.assertIn('@BroadCast', ''.join(vars)) @@ -653,9 +643,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): self.optimizer( avg_cost, strategy, train_prog, startup_prog, grad_clip=clip ) - parameters = [ - x.name for x in train_prog.list_vars() if x.persistable == True - ] + parameters = [x.name for x in train_prog.list_vars() if x.persistable] ops = [op.type for op in avg_cost.block.ops] vars = [x.name for x in train_prog.list_vars()] self.assertIn('@BroadCast', ''.join(vars)) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py index 916a21359a4..514009577cd 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision.py @@ -420,13 +420,13 @@ class TestAmpScaler(unittest.TestCase): decr_every_n_nan_or_inf=2, use_dynamic_loss_scaling=True, ) - self.assertEqual(scaler.is_enable() == True, True) + self.assertEqual(scaler.is_enable(), True) self.assertEqual(scaler.get_init_loss_scaling() == 1024, True) self.assertEqual(scaler.get_incr_ratio() == 2.0, True) self.assertEqual(scaler.get_decr_ratio() == 0.5, True) self.assertEqual(scaler.get_incr_every_n_steps() == 1000, True) self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 2, True) - self.assertEqual(scaler.is_use_dynamic_loss_scaling() == True, True) + self.assertEqual(scaler.is_use_dynamic_loss_scaling(), True) 
scaler.set_decr_every_n_nan_or_inf(4) self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 4, True) scaler.set_decr_ratio(0.1) @@ -460,7 +460,7 @@ class TestAmpScaler(unittest.TestCase): scaler3 = paddle.amp.GradScaler(enable=False) scaler3.load_state_dict(scaler_state) - self.assertEqual(scaler3.is_enable() == False, True) + self.assertFalse(scaler3.is_enable()) def test_state_dict_and_load_state_dict_error(self): def test_error(): diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py index 83c9462a89e..1eec439f792 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_imperative_auto_mixed_precision_for_eager.py @@ -419,13 +419,13 @@ class TestAmpScaler(unittest.TestCase): decr_every_n_nan_or_inf=2, use_dynamic_loss_scaling=True, ) - self.assertEqual(scaler.is_enable() == True, True) + self.assertEqual(scaler.is_enable(), True) self.assertEqual(scaler.get_init_loss_scaling() == 1024, True) self.assertEqual(scaler.get_incr_ratio() == 2.0, True) self.assertEqual(scaler.get_decr_ratio() == 0.5, True) self.assertEqual(scaler.get_incr_every_n_steps() == 1000, True) self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 2, True) - self.assertEqual(scaler.is_use_dynamic_loss_scaling() == True, True) + self.assertEqual(scaler.is_use_dynamic_loss_scaling(), True) scaler.set_decr_every_n_nan_or_inf(4) self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 4, True) scaler.set_decr_ratio(0.1) @@ -459,7 +459,7 @@ class TestAmpScaler(unittest.TestCase): scaler3 = paddle.amp.GradScaler(enable=False) scaler3.load_state_dict(scaler_state) - self.assertEqual(scaler3.is_enable() == False, True) + self.assertFalse(scaler3.is_enable()) def test_state_dict_and_load_state_dict_error(self): def test_error(): diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py index 03db8935079..b484a88b7df 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_cycle_gan.py @@ -356,7 +356,7 @@ class conv2d(fluid.dygraph.Layer): ): super(conv2d, self).__init__() - if use_bias == False: + if not use_bias: con_bias_attr = False else: con_bias_attr = fluid.ParamAttr( @@ -426,7 +426,7 @@ class DeConv2D(fluid.dygraph.Layer): ): super(DeConv2D, self).__init__() - if use_bias == False: + if not use_bias: de_bias_attr = False else: de_bias_attr = fluid.ParamAttr( diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py index ecda3427e7e..67ea0a28bc0 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_tensor_methods.py @@ -93,7 +93,7 @@ class TestTensorSize(unittest.TestCase): prog_trans = paddle.jit.ProgramTranslator() prog_trans.enable(to_static) x = paddle.ones([1, 2, 3]) - if to_static == False: + if not to_static: return tensor_size(x) return tensor_size(x).numpy() diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py 
index e16fd8b10c2..09171d64a28 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_conv_bn_fuse_pass.py @@ -134,7 +134,7 @@ class TestConvBnFusePass(PassAutoScanTest): data_layout=data_format, is_test=True, ) - if has_bias == True: + if has_bias: conv2d_op.inputs["Bias"] = ["conv2d_bias"] ops = [conv2d_op, bn_op] @@ -156,7 +156,7 @@ class TestConvBnFusePass(PassAutoScanTest): }, outputs=["batch_norm_Y"], ) - if has_bias == True: + if has_bias: program_config.weights["conv2d_bias"] = TensorConfig( data_gen=partial(generate_conv2d_Bias) ) @@ -202,7 +202,7 @@ class TestConvBnFusePass(PassAutoScanTest): def teller2(program_config, predictor_config): return ( predictor_config.mkldnn_enabled() - and program_config.ops[0].attrs['has_bias'] == True + and program_config.ops[0].attrs['has_bias'] ) self.add_ignore_check_case( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py index 3cbd48dea6d..883b4a75bc2 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_emb_eltwise_layernorm_fuse_pass.py @@ -43,11 +43,11 @@ class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest): def is_program_valid(self, program_config: ProgramConfig) -> bool: # is_sparse is only support False - if program_config.ops[0].attrs['is_sparse'] == True: + if program_config.ops[0].attrs['is_sparse']: return False # is_distributed only support False - if program_config.ops[0].attrs['is_distributed'] == True: + if program_config.ops[0].attrs['is_distributed']: return False # axis only support -1 and the last dim. 
diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py index ba6179a1ff4..92881bd8d82 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_conv_affine_channel_fuse_pass.py @@ -100,7 +100,7 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): outputs={"Out": ["affine_channel_ouput"]}, data_layout=data_format, ) - if has_bias == True: + if has_bias: conv2d_op.inputs["Bias"] = ["conv2d_bias"] ops = [conv2d_op, ac_op] @@ -123,7 +123,7 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): }, outputs=["affine_channel_ouput"], ) - if has_bias == True: + if has_bias: program_config.weights["conv2d_bias"] = TensorConfig( data_gen=partial(generate_bias) ) @@ -145,7 +145,7 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): def teller2(program_config, predictor_config): return ( predictor_config.mkldnn_enabled() - and program_config.ops[0].attrs['has_bias'] == True + and program_config.ops[0].attrs['has_bias'] ) self.add_ignore_check_case( diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py index 46a7b82ef4d..6c3c4768775 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_concat.py @@ -304,7 +304,7 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest): self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(attrs, dynamic_shape): - if dynamic_shape == True: + if dynamic_shape: return 1, 4 else: if attrs[0]['axis'] != 0: diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py index 91b8380d7d6..ee8f900f512 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_dropout.py @@ -123,7 +123,7 @@ class TrtConvertDropoutTest(TrtLayerAutoScanTest): def generate_trt_nodes_num(attrs, dynamic_shape): if attrs[0]['dropout_implementation'] == "upscale_in_train": return 0, 2 - elif self.dims == 1 and dynamic_shape == False: + elif self.dims == 1 and not dynamic_shape: return 0, 3 else: return 1, 2 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py index ca2984fa187..fab7428579a 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gather.py @@ -85,7 +85,7 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest): "index_data": TensorConfig( data_gen=partial( generate_input2 - if index_type_int32 == True + if index_type_int32 else generate_input4, index, ) @@ -180,7 +180,7 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest): if self.input_num == 3: return 0, 5 else: - if dynamic_shape and self.index_type_int32 == True: + if dynamic_shape and self.index_type_int32: return 1, 3 else: return 0, 4 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py index 
d6d2f876361..29962386a48 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_gelu.py @@ -107,7 +107,7 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest): if compile_version >= valid_version: return 1, 2 else: - if attrs[0]['approximate'] == True: + if attrs[0]['approximate']: return 0, 3 else: return 1, 2 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py index 1df42024992..8f39add1493 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_nearest_interp.py @@ -137,7 +137,7 @@ class TrtConvertNearestInterpTest(TrtLayerAutoScanTest): and self.dynamic_shape.min_input_shape ): return True - if program_config.ops[0].attrs['align_corners'] == True: + if program_config.ops[0].attrs['align_corners']: return True return False diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py index b4eacbb136f..7bdaab0ee84 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_pool2d.py @@ -29,7 +29,7 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): ksize = program_config.ops[0].attrs['ksize'] pooling_type = program_config.ops[0].attrs['pooling_type'] global_pooling = program_config.ops[0].attrs['global_pooling'] - if global_pooling == False: + if not global_pooling: if pooling_type == 'avg': for index in range(len(ksize)): if ksize[index] <= paddings[index]: @@ -174,10 +174,10 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): def teller(program_config, predictor_config): if ( program_config.ops[0].attrs['pooling_type'] == 'avg' - and program_config.ops[0].attrs['global_pooling'] == False - and program_config.ops[0].attrs['exclusive'] == True - and program_config.ops[0].attrs['adaptive'] == False - and program_config.ops[0].attrs['ceil_mode'] == True + and not program_config.ops[0].attrs['global_pooling'] + and program_config.ops[0].attrs['exclusive'] + and not program_config.ops[0].attrs['adaptive'] + and program_config.ops[0].attrs['ceil_mode'] ): return True return False diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roi_align.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roi_align.py index f89527359d4..0dc286722d1 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roi_align.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_roi_align.py @@ -159,10 +159,10 @@ class TrtConvertRoiAlignTest(TrtLayerAutoScanTest): def generate_trt_nodes_num(attrs, dynamic_shape): if self.num_input == 0: - if dynamic_shape == True: + if dynamic_shape: return 0, 5 elif self.num_input == 1: - if dynamic_shape == True: + if dynamic_shape: return 1, 3 else: return 0, 4 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shuffle_channel.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shuffle_channel.py index 04c1e3259fc..9c4c9071e37 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shuffle_channel.py +++ 
b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_shuffle_channel.py @@ -77,7 +77,7 @@ class TrtConvertShuffleChannelTest(TrtLayerAutoScanTest): ver = paddle_infer.get_trt_compile_version() if ( ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 8000 - and dynamic_shape == True + and dynamic_shape ): return 0, 3 else: diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py index 18ea2abe6bc..5e2e984fc7b 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_skip_layernorm.py @@ -192,7 +192,7 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(attrs, dynamic_shape): - if dynamic_shape == True: + if dynamic_shape: return 1, 3 else: return 0, 4 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py index 91e5e499b19..c891f236f2f 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_stack.py @@ -181,7 +181,7 @@ class TrtConvertStackTest(TrtLayerAutoScanTest): self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(attrs, dynamic_shape): - if dynamic_shape == True: + if dynamic_shape: return 1, 4 else: return 0, 5 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_tile.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_tile.py index d65d7e3c29f..adcb5c5e4b9 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_tile.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_tile.py @@ -81,7 +81,7 @@ class TrtConvertTileTest(TrtLayerAutoScanTest): def generate_trt_nodes_num(attrs, dynamic_shape): ver = paddle_infer.get_trt_compile_version() if ver[0] * 1000 + ver[1] * 100 + ver[0] * 10 >= 7000: - if dynamic_shape == True: + if dynamic_shape: return 0, 3 else: return 1, 2 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k_v2.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k_v2.py index 477ea649eff..a5575eae554 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k_v2.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_top_k_v2.py @@ -126,7 +126,7 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): def generate_trt_nodes_num(attrs, dynamic_shape): if self.dims == 1: return 0, 4 - if self.sort == False: + if not self.sort: return 0, 4 return 1, 3 diff --git a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_transpose.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_transpose.py index 9ebcd873992..0a987ca1fb6 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_transpose.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_transpose.py @@ -123,7 +123,7 @@ class TrtConvertTransposeTest(TrtLayerAutoScanTest): self.dynamic_shape.opt_input_shape = {} def generate_trt_nodes_num(attrs, dynamic_shape): - if dynamic_shape == True: + if dynamic_shape: return 1, 2 else: if attrs[0]['axis'][0] == 0: diff --git 
a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py index a0d089c69c9..7e595a48c9d 100644 --- a/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py +++ b/python/paddle/fluid/tests/unittests/ir/inference/test_trt_convert_yolo_box.py @@ -28,7 +28,7 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest): def sample_program_configs(self): def generate_input1(attrs: List[Dict[str, Any]], batch, channel): - if attrs[0]['iou_aware'] == True: + if attrs[0]['iou_aware']: return np.ones([batch, 3 * (channel + 6), 13, 13]).astype( np.float32 ) @@ -108,7 +108,7 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest): self, program_config ) -> (paddle_infer.Config, List[int], float): def generate_dynamic_shape(attrs): - if attrs[0]['iou_aware'] == True: + if attrs[0]['iou_aware']: channel = 3 * (attrs[0]['class_num'] + 6) self.dynamic_shape.min_input_shape = { "yolo_box_input": [1, channel, 12, 12], diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py index 4001e2ba76b..d71e6446c60 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_add_mkldnn_op.py @@ -128,7 +128,7 @@ class TestInt8(TestElementwiseAddOp): def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.init_scales() - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) def test_check_grad_normal(self): pass @@ -165,9 +165,7 @@ class TestInt8Scales(TestInt8): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.init_scales() int_atol = 1 # different quantization techniques - self.check_output( - check_dygraph=(self.use_mkldnn == False), atol=int_atol - ) + self.check_output(check_dygraph=(not self.use_mkldnn), atol=int_atol) class TestUint8Scales(TestInt8Scales): diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py index 3ca09093b81..4881d1c3763 100644 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py +++ b/python/paddle/fluid/tests/unittests/mkldnn/test_elementwise_mul_mkldnn_op.py @@ -101,7 +101,7 @@ class TestInt8(ElementwiseMulOp): def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.init_scales() - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) def test_check_grad_normal(self): pass @@ -138,9 +138,7 @@ class TestInt8Scales(TestInt8): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.init_scales() int_atol = 1 # different quantization techniques - self.check_output( - check_dygraph=(self.use_mkldnn == False), atol=int_atol - ) + self.check_output(check_dygraph=(not self.use_mkldnn), atol=int_atol) class TestUint8Scales(TestInt8Scales): diff --git a/python/paddle/fluid/tests/unittests/mkldnn/test_onnx_format_quantization_mobilenetv1.py b/python/paddle/fluid/tests/unittests/mkldnn/test_onnx_format_quantization_mobilenetv1.py index 1b27a39f2e9..218900b35f4 100755 --- a/python/paddle/fluid/tests/unittests/mkldnn/test_onnx_format_quantization_mobilenetv1.py +++ 
b/python/paddle/fluid/tests/unittests/mkldnn/test_onnx_format_quantization_mobilenetv1.py @@ -49,7 +49,7 @@ def resize_short(img, target_size): def crop_image(img, target_size, center): width, height = img.size size = target_size - if center == True: + if center: w_start = (width - size) / 2 h_start = (height - size) / 2 else: diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py index 6147c88dc56..baa4f26feb8 100644 --- a/python/paddle/fluid/tests/unittests/op_test.py +++ b/python/paddle/fluid/tests/unittests/op_test.py @@ -371,25 +371,22 @@ class OpTest(unittest.TestCase): return True def is_xpu_op_test(): - return hasattr(cls, "use_xpu") and cls.use_xpu == True + return hasattr(cls, "use_xpu") and cls.use_xpu def is_mkldnn_op_test(): - return hasattr(cls, "use_mkldnn") and cls.use_mkldnn == True + return hasattr(cls, "use_mkldnn") and cls.use_mkldnn def is_rocm_op_test(): return core.is_compiled_with_rocm() def is_npu_op_test(): - return hasattr(cls, "use_npu") and cls.use_npu == True + return hasattr(cls, "use_npu") and cls.use_npu def is_mlu_op_test(): - return hasattr(cls, "use_mlu") and cls.use_mlu == True + return hasattr(cls, "use_mlu") and cls.use_mlu def is_custom_device_op_test(): - return ( - hasattr(cls, "use_custom_device") - and cls.use_custom_device == True - ) + return hasattr(cls, "use_custom_device") and cls.use_custom_device if not hasattr(cls, "op_type"): raise AssertionError( @@ -465,17 +462,17 @@ class OpTest(unittest.TestCase): ) def is_mkldnn_op(self): - return (hasattr(self, "use_mkldnn") and self.use_mkldnn == True) or ( + return (hasattr(self, "use_mkldnn") and self.use_mkldnn) or ( hasattr(self, "attrs") and "use_mkldnn" in self.attrs - and self.attrs["use_mkldnn"] == True + and self.attrs["use_mkldnn"] ) def is_xpu_op(self): - return (hasattr(self, "use_xpu") and self.use_xpu == True) or ( + return (hasattr(self, "use_xpu") and self.use_xpu) or ( hasattr(self, "attrs") and "use_xpu" in self.attrs - and self.attrs["use_xpu"] == True + and self.attrs["use_xpu"] ) # set the self.output_dtype . 
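As an aside on the op_test.py hunks above: they all apply the same E712 rewrite, dropping equality comparisons against bool literals in favour of truthiness. A minimal standalone sketch of that pattern, reusing the `use_mkldnn` attribute name from the patch but otherwise hypothetical:

class _Dummy:
    use_mkldnn = True


def is_mkldnn_op_test_before(cls):
    # Flagged by flake8 E712: equality comparison against a bool literal.
    return hasattr(cls, "use_mkldnn") and cls.use_mkldnn == True  # noqa: E712


def is_mkldnn_op_test_after(cls):
    # Truthiness is enough for a plain bool flag; `not flag` likewise
    # replaces `flag == False` elsewhere in this patch.
    return hasattr(cls, "use_mkldnn") and cls.use_mkldnn


assert is_mkldnn_op_test_before(_Dummy) == is_mkldnn_op_test_after(_Dummy)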
@@ -1542,7 +1539,7 @@ class OpTest(unittest.TestCase): ): # disable legacy dygraph check when check_eager is True - if check_eager == True: + if check_eager: check_dygraph = False def find_imperative_actual(target_name, dygraph_outs, place): @@ -1912,7 +1909,7 @@ class OpTest(unittest.TestCase): ) if check_eager: - assert check_dygraph == False + assert not check_dygraph return outs, eager_dygraph_outs, fetch_list elif check_dygraph: return outs, dygraph_outs, fetch_list @@ -2002,7 +1999,7 @@ class OpTest(unittest.TestCase): ): # disable legacy dygraph check when check_eager is True - if check_eager == True: + if check_eager: check_dygraph = False self.__class__.op_type = self.op_type @@ -2024,7 +2021,7 @@ class OpTest(unittest.TestCase): check_eager=check_eager, ) if check_eager: - assert check_dygraph == False + assert not check_dygraph outs, eager_dygraph_outs, fetch_list = res elif check_dygraph: outs, dygraph_outs, fetch_list = res @@ -2143,7 +2140,7 @@ class OpTest(unittest.TestCase): ): # disable legacy dygraph check when check_eager is True - if check_eager == True: + if check_eager: check_dygraph = False self._check_grad_helper() @@ -2180,7 +2177,7 @@ class OpTest(unittest.TestCase): ): # disable legacy dygraph check when check_eager is True - if check_eager == True: + if check_eager: check_dygraph = False self.scope = core.Scope() @@ -2207,7 +2204,7 @@ class OpTest(unittest.TestCase): # oneDNN numeric gradient should use CPU kernel use_onednn = False - if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"] == True: + if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"]: op_attrs["use_mkldnn"] = False use_onednn = True diff --git a/python/paddle/fluid/tests/unittests/op_test_xpu.py b/python/paddle/fluid/tests/unittests/op_test_xpu.py index 220bd09f2ca..295f6a67d8d 100644 --- a/python/paddle/fluid/tests/unittests/op_test_xpu.py +++ b/python/paddle/fluid/tests/unittests/op_test_xpu.py @@ -51,7 +51,7 @@ class XPUOpTest(OpTest): if cls.dtype == np.float16: place = paddle.XPUPlace(0) - if core.is_float16_supported(place) == False: + if not core.is_float16_supported(place): return if cls.dtype == np.float64: @@ -98,7 +98,7 @@ class XPUOpTest(OpTest): return if self.dtype == np.float16: - if core.is_float16_supported(place) == False: + if not core.is_float16_supported(place): return if self.dtype == np.float16: @@ -172,7 +172,7 @@ class XPUOpTest(OpTest): return if self.dtype == np.float16: - if core.is_float16_supported(place) == False: + if not core.is_float16_supported(place): return if self.dtype == np.float16: @@ -254,7 +254,7 @@ class XPUOpTest(OpTest): # oneDNN numeric gradient should use CPU kernel use_onednn = False - if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"] == True: + if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"]: op_attrs["use_mkldnn"] = False use_onednn = True diff --git a/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py b/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py index 08055ae1703..ccaed0b984f 100755 --- a/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py +++ b/python/paddle/fluid/tests/unittests/ps/ps_dnn_trainer.py @@ -167,7 +167,7 @@ def get_user_defined_strategy(config): strategy.is_fl_ps_mode = ( True if config.get("runner.is_fl_ps_mode") == 1 else False ) - if strategy.is_fl_ps_mode == True: + if strategy.is_fl_ps_mode: strategy.pipeline = False micro_num = 1 strategy.pipeline_configs = { diff --git a/python/paddle/fluid/tests/unittests/test_adam_op.py b/python/paddle/fluid/tests/unittests/test_adam_op.py index 
937d30cd74a..9904ee0d100 100644 --- a/python/paddle/fluid/tests/unittests/test_adam_op.py +++ b/python/paddle/fluid/tests/unittests/test_adam_op.py @@ -1126,11 +1126,11 @@ class TestMultiTensorAdam(unittest.TestCase): ) for idx in range(2): - if place == 'gpu' and use_amp == True: + if place == 'gpu' and use_amp: model = paddle.amp.decorate(models=model, level='O2') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - if place == 'gpu' and use_amp == True: + if place == 'gpu' and use_amp: with paddle.amp.auto_cast(level='O2'): output = model(input) loss = paddle.mean(output) diff --git a/python/paddle/fluid/tests/unittests/test_adamw_op.py b/python/paddle/fluid/tests/unittests/test_adamw_op.py index 6e4a7b43f20..15c8bf69bc0 100644 --- a/python/paddle/fluid/tests/unittests/test_adamw_op.py +++ b/python/paddle/fluid/tests/unittests/test_adamw_op.py @@ -302,11 +302,11 @@ class TestAdamWOpMultiPrecison(unittest.TestCase): ) for idx in range(2): - if place == 'gpu' and use_amp == True: + if place == 'gpu' and use_amp: model = paddle.amp.decorate(models=model, level='O2') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - if place == 'gpu' and use_amp == True: + if place == 'gpu' and use_amp: with paddle.amp.auto_cast(level='O2'): output = model(input) loss = paddle.mean(output) diff --git a/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py b/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py index c56235cd0d1..41fc1718709 100644 --- a/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py +++ b/python/paddle/fluid/tests/unittests/test_async_ssa_graph_executor_mnist.py @@ -177,8 +177,8 @@ def train(use_cuda, thread_num, cpu_num): fetch_list=[array, acc, prediction, avg_loss.name] ) - assert numpy.allclose(array_v[0], prediction_v) == True - assert numpy.allclose(array_v[1], acc_v) == True + assert numpy.allclose(array_v[0], prediction_v) + assert numpy.allclose(array_v[1], acc_v) loss_val = numpy.mean(loss_val) if step % 10 == 0: diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py index 381640621cb..ccd7de2c317 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op.py @@ -313,7 +313,7 @@ class TestBatchNormOpInference(unittest.TestCase): # dims will be in NCHW order as it is MKL-DNN way # of memory descripting. So we need to convert NCHW # dims into NHWC. 
- if data_layout == "NHWC" and self.use_mkldnn == True: + if data_layout == "NHWC" and self.use_mkldnn: # Create executor to have MKL-DNN cache # cleared after NHWC unit test place = core.CPUPlace() diff --git a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py index 778489eb668..f5db751169a 100644 --- a/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py +++ b/python/paddle/fluid/tests/unittests/test_batch_norm_op_v2.py @@ -391,7 +391,7 @@ class TestBatchNormUseGlobalStats(unittest.TestCase): ) net2.weight = net1.weight net2.bias = net1.bias - if self.trainable_statistics == True: + if self.trainable_statistics: net1.training = False net2.training = False y1 = net1(x) diff --git a/python/paddle/fluid/tests/unittests/test_box_coder_op.py b/python/paddle/fluid/tests/unittests/test_box_coder_op.py index 233a686c058..fd3106f9c6f 100644 --- a/python/paddle/fluid/tests/unittests/test_box_coder_op.py +++ b/python/paddle/fluid/tests/unittests/test_box_coder_op.py @@ -20,8 +20,8 @@ import paddle.fluid.core as core def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0): - pb_w = p_box[:, 2] - p_box[:, 0] + (norm == False) - pb_h = p_box[:, 3] - p_box[:, 1] + (norm == False) + pb_w = p_box[:, 2] - p_box[:, 0] + (not norm) + pb_h = p_box[:, 3] - p_box[:, 1] + (not norm) pb_x = pb_w * 0.5 + p_box[:, 0] pb_y = pb_h * 0.5 + p_box[:, 1] shape = (1, p_box.shape[0]) if axis == 0 else (p_box.shape[0], 1) @@ -55,8 +55,8 @@ def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0): def box_encoder(t_box, p_box, pb_v, output_box, norm): - pb_w = p_box[:, 2] - p_box[:, 0] + (norm == False) - pb_h = p_box[:, 3] - p_box[:, 1] + (norm == False) + pb_w = p_box[:, 2] - p_box[:, 0] + (not norm) + pb_h = p_box[:, 3] - p_box[:, 1] + (not norm) pb_x = pb_w * 0.5 + p_box[:, 0] pb_y = pb_h * 0.5 + p_box[:, 1] shape = (1, p_box.shape[0]) diff --git a/python/paddle/fluid/tests/unittests/test_center_loss.py b/python/paddle/fluid/tests/unittests/test_center_loss.py index b7eda71c021..7bf68100e02 100644 --- a/python/paddle/fluid/tests/unittests/test_center_loss.py +++ b/python/paddle/fluid/tests/unittests/test_center_loss.py @@ -58,7 +58,7 @@ class TestCenterLossOp(OpTest): 'CenterUpdateRate': rate, } - if self.need_update == True: + if self.need_update: self.outputs = { 'SampleCenterDiff': output, 'Loss': loss, diff --git a/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py b/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py index bc88cba96eb..fb8a7057fd7 100644 --- a/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py +++ b/python/paddle/fluid/tests/unittests/test_compare_reduce_op.py @@ -115,7 +115,7 @@ class TestEqualReduceAPI(unittest.TestCase): x = paddle.ones(shape=[10, 10], dtype="int32") y = paddle.ones(shape=[10, 10], dtype="int32") out = paddle.equal_all(x, y) - assert out.numpy()[0] == True + assert out.numpy()[0] is np.True_ paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_op.py index 34a34f062ef..0c22f7ff7b2 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_op.py @@ -477,13 +477,12 @@ class TestConv2DOp(OpTest): place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() # TODO(wangzhongpu): support mkldnn op in dygraph mode self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + place, 
atol=1e-5, check_dygraph=(not self.use_mkldnn) ) def test_check_grad(self): if self.dtype == np.float16 or ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True + hasattr(self, "no_need_check_grad") and self.no_need_check_grad ): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() @@ -493,13 +492,12 @@ class TestConv2DOp(OpTest): {'Input', 'Filter'}, 'Output', max_relative_error=0.02, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_no_filter(self): if self.dtype == np.float16 or ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True + hasattr(self, "no_need_check_grad") and self.no_need_check_grad ): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() @@ -510,13 +508,12 @@ class TestConv2DOp(OpTest): 'Output', max_relative_error=0.02, no_grad_set=set(['Filter']), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_no_input(self): if self.dtype == np.float16 or ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True + hasattr(self, "no_need_check_grad") and self.no_need_check_grad ): return place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() @@ -526,7 +523,7 @@ class TestConv2DOp(OpTest): ['Filter'], 'Output', no_grad_set=set(['Input']), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def init_test_case(self): @@ -804,7 +801,7 @@ class TestConv2DOp_v2(OpTest): # TODO(wangzhongpu): support mkldnn op in dygraph mode place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-5, check_dygraph=(not self.use_mkldnn) ) def test_check_grad(self): @@ -817,7 +814,7 @@ class TestConv2DOp_v2(OpTest): {'Input', 'Filter'}, 'Output', max_relative_error=0.02, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_no_filter(self): @@ -831,7 +828,7 @@ class TestConv2DOp_v2(OpTest): 'Output', max_relative_error=0.02, no_grad_set=set(['Filter']), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_no_input(self): @@ -844,7 +841,7 @@ class TestConv2DOp_v2(OpTest): ['Filter'], 'Output', no_grad_set=set(['Input']), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def init_test_case(self): diff --git a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py index 29ffbd80d36..482da8164b2 100644 --- a/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv2d_transpose_op.py @@ -183,10 +183,10 @@ class TestConv2DTransposeOp(OpTest): if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-5, check_dygraph=(not self.use_mkldnn) ) else: - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) def test_check_grad_no_input(self): if self.need_check_grad: @@ -724,10 +724,10 @@ class TestCUDNN_FP16(TestConv2DTransposeOp): if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=0.02, check_dygraph=(self.use_mkldnn == False) + place, atol=0.02, check_dygraph=(not self.use_mkldnn) ) else: - 
self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) @unittest.skipIf( diff --git a/python/paddle/fluid/tests/unittests/test_conv3d_op.py b/python/paddle/fluid/tests/unittests/test_conv3d_op.py index eaa6ba04c64..54a3621e0ba 100644 --- a/python/paddle/fluid/tests/unittests/test_conv3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_conv3d_op.py @@ -327,7 +327,7 @@ class TestConv3DOp(OpTest): # TODO(wangzhongpu): support mkldnn op in dygraph mode place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-5, check_dygraph=(not self.use_mkldnn) ) def test_check_grad(self): @@ -340,7 +340,7 @@ class TestConv3DOp(OpTest): {'Input', 'Filter'}, 'Output', max_relative_error=0.03, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_no_filter(self): @@ -354,7 +354,7 @@ class TestConv3DOp(OpTest): 'Output', max_relative_error=0.03, no_grad_set=set(['Filter']), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_no_input(self): @@ -368,7 +368,7 @@ class TestConv3DOp(OpTest): 'Output', max_relative_error=0.03, no_grad_set=set(['Input']), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def init_test_case(self): diff --git a/python/paddle/fluid/tests/unittests/test_dataset_download.py b/python/paddle/fluid/tests/unittests/test_dataset_download.py index f1fba215b93..b009a2fe58d 100644 --- a/python/paddle/fluid/tests/unittests/test_dataset_download.py +++ b/python/paddle/fluid/tests/unittests/test_dataset_download.py @@ -34,7 +34,7 @@ class TestDataSetDownload(unittest.TestCase): except Exception as e: catch_exp = True - self.assertTrue(catch_exp == False) + self.assertTrue(not catch_exp) file_path = DATA_HOME + "/flowers/imagelabels.mat" diff --git a/python/paddle/fluid/tests/unittests/test_dist_base.py b/python/paddle/fluid/tests/unittests/test_dist_base.py index 4c109feaef2..6212de9ebcf 100755 --- a/python/paddle/fluid/tests/unittests/test_dist_base.py +++ b/python/paddle/fluid/tests/unittests/test_dist_base.py @@ -1330,8 +1330,8 @@ class TestDistBase(unittest.TestCase): tr_cmd += " --diff_batch" self.__use_cuda = False self.__use_xpu = False - assert self.__use_cuda == False, "gloo not support use cuda" - assert self.__use_xpu == False, "gloo not support use xpu" + assert not self.__use_cuda, "gloo not support use cuda" + assert not self.__use_xpu, "gloo not support use xpu" tr_cmd += " --use_cpu" env.update( { @@ -1345,7 +1345,7 @@ class TestDistBase(unittest.TestCase): } ) - assert self._use_dgc == False, "gloo not support use dgc" + assert not self._use_dgc, "gloo not support use dgc" if self._accumulate_gradient: tr_cmd += " --accumulate_gradient" @@ -1353,7 +1353,7 @@ class TestDistBase(unittest.TestCase): if self._find_unused_parameters: tr_cmd += " --find_unused_parameters" - assert self._pipeline_mode == False, "gloo not support use pipeline" + assert not self._pipeline_mode, "gloo not support use pipeline" if self._enable_backward_deps: # build strategy, save it tr_cmd += " --enable_backward_deps" @@ -1361,8 +1361,8 @@ class TestDistBase(unittest.TestCase): if self._fuse_all_reduce is not None: tr_cmd += " --fuse_all_reduce {}".format(self._fuse_all_reduce) - assert self._use_fleet_api == False, "gloo not support use fleet api" - assert self._use_fleet_api_20 == False, 
"gloo not support use fleet api" + assert not self._use_fleet_api, "gloo not support use fleet api" + assert not self._use_fleet_api_20, "gloo not support use fleet api" return tr_cmd, env def _get_nccl2_trainer_cmd( diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py index ae516bc44ca..6bfd14dc841 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_add_op.py @@ -46,12 +46,12 @@ class TestElementwiseAddOp(OpTest): self.outputs = {'Out': self.out} def check_eager(self): - return self.use_mkldnn == False and self.axis == -1 + return not self.use_mkldnn and self.axis == -1 def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode self.check_output( - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), check_eager=self.check_eager(), ) @@ -62,7 +62,7 @@ class TestElementwiseAddOp(OpTest): self.check_grad( ['X', 'Y'], 'Out', - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), check_eager=self.check_eager(), ) @@ -74,7 +74,7 @@ class TestElementwiseAddOp(OpTest): ['Y'], 'Out', no_grad_set=set("X"), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), check_eager=self.check_eager(), ) @@ -86,7 +86,7 @@ class TestElementwiseAddOp(OpTest): ['X'], 'Out', no_grad_set=set('Y'), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), check_eager=self.check_eager(), ) @@ -115,7 +115,7 @@ class TestFP16ElementwiseAddOp(TestElementwiseAddOp): place = core.CUDAPlace(0) if core.is_float16_supported(place): self.check_output_with_place( - place, atol=1e-3, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-3, check_dygraph=(not self.use_mkldnn) ) diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py index cc3cd9be823..987a17ff1f5 100644 --- a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py +++ b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py @@ -49,13 +49,11 @@ class ElementwiseMulOp(OpTest): def test_check_output(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) def test_check_grad_normal(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - self.check_grad( - ['X', 'Y'], 'Out', check_dygraph=(self.use_mkldnn == False) - ) + self.check_grad(['X', 'Y'], 'Out', check_dygraph=(not self.use_mkldnn)) def test_check_grad_ingore_x(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode @@ -63,7 +61,7 @@ class ElementwiseMulOp(OpTest): ['Y'], 'Out', no_grad_set=set("X"), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_ingore_y(self): @@ -72,7 +70,7 @@ class ElementwiseMulOp(OpTest): ['X'], 'Out', no_grad_set=set('Y'), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def init_input_output(self): diff --git a/python/paddle/fluid/tests/unittests/test_empty_like_op.py b/python/paddle/fluid/tests/unittests/test_empty_like_op.py index 4ce4ab6a6d5..82ad72e11e5 100644 --- a/python/paddle/fluid/tests/unittests/test_empty_like_op.py +++ b/python/paddle/fluid/tests/unittests/test_empty_like_op.py @@ -47,8 +47,8 @@ class 
TestEmptyLikeAPICommon(unittest.TestCase): ) elif data_type in ['bool']: total_num = out.size - true_num = np.sum(out == True) - false_num = np.sum(out == False) + true_num = np.sum(out) + false_num = np.sum(~out) self.assertTrue( total_num == true_num + false_num, 'The value should always be True or False.', diff --git a/python/paddle/fluid/tests/unittests/test_empty_op.py b/python/paddle/fluid/tests/unittests/test_empty_op.py index 11b66325c1f..7b488aa0c6d 100644 --- a/python/paddle/fluid/tests/unittests/test_empty_op.py +++ b/python/paddle/fluid/tests/unittests/test_empty_op.py @@ -43,8 +43,8 @@ class TestEmptyOp(OpTest): ) elif data_type in ['bool']: total_num = outs[0].size - true_num = np.sum(outs[0] == True) - false_num = np.sum(outs[0] == False) + true_num = np.sum(outs[0]) + false_num = np.sum(~outs[0]) self.assertTrue( total_num == true_num + false_num, 'The value should always be True or False.', @@ -132,8 +132,8 @@ class TestEmptyOp_ShapeTensor(OpTest): ) elif data_type in ['bool']: total_num = outs[0].size - true_num = np.sum(outs[0] == True) - false_num = np.sum(outs[0] == False) + true_num = np.sum(outs[0]) + false_num = np.sum(~outs[0]) self.assertTrue( total_num == true_num + false_num, 'The value should always be True or False.', @@ -182,8 +182,8 @@ class TestEmptyOp_ShapeTensorList(OpTest): ) elif data_type in ['bool']: total_num = outs[0].size - true_num = np.sum(outs[0] == True) - false_num = np.sum(outs[0] == False) + true_num = np.sum(outs[0]) + false_num = np.sum(~outs[0]) self.assertTrue( total_num == true_num + false_num, 'The value should always be True or False.', diff --git a/python/paddle/fluid/tests/unittests/test_imperative_layer_trainable.py b/python/paddle/fluid/tests/unittests/test_imperative_layer_trainable.py index b0dcfd653fb..dc4ad0cea15 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_layer_trainable.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_layer_trainable.py @@ -29,16 +29,16 @@ class TestImperativeLayerTrainable(unittest.TestCase): linear = dygraph.Linear(10, 10) y = linear(label) - self.assertTrue(y.stop_gradient == False) + self.assertFalse(y.stop_gradient) linear.weight.trainable = False linear.bias.trainable = False - self.assertTrue(linear.weight.trainable == False) - self.assertTrue(linear.weight.stop_gradient == True) + self.assertFalse(linear.weight.trainable) + self.assertTrue(linear.weight.stop_gradient) y = linear(label) - self.assertTrue(y.stop_gradient == True) + self.assertTrue(y.stop_gradient) with self.assertRaises(ValueError): linear.weight.trainable = "1" diff --git a/python/paddle/fluid/tests/unittests/test_mean_op.py b/python/paddle/fluid/tests/unittests/test_mean_op.py index 68e88c9ba2a..0c52d7596c1 100644 --- a/python/paddle/fluid/tests/unittests/test_mean_op.py +++ b/python/paddle/fluid/tests/unittests/test_mean_op.py @@ -28,13 +28,13 @@ np.random.seed(10) def mean_wrapper(x, axis=None, keepdim=False, reduce_all=False): - if reduce_all == True: + if reduce_all: return paddle.mean(x, range(len(x.shape)), keepdim) return paddle.mean(x, axis, keepdim) def reduce_mean_wrapper(x, axis=0, keepdim=False, reduce_all=False): - if reduce_all == True: + if reduce_all: return paddle.mean(x, range(len(x.shape)), keepdim) return paddle.mean(x, axis, keepdim) diff --git a/python/paddle/fluid/tests/unittests/test_momentum_op.py b/python/paddle/fluid/tests/unittests/test_momentum_op.py index 017b001e259..fd9b8b88016 100644 --- a/python/paddle/fluid/tests/unittests/test_momentum_op.py +++ 
b/python/paddle/fluid/tests/unittests/test_momentum_op.py @@ -910,10 +910,10 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): multi_precision=use_amp, ) for idx in range(5): - if place == 'gpu' and use_amp == True: + if place == 'gpu' and use_amp: model = paddle.amp.decorate(models=model, level='O2') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) - if place == 'gpu' and use_amp == True: + if place == 'gpu' and use_amp: with paddle.amp.auto_cast(level='O2'): output = model(input) loss = paddle.mean(output) diff --git a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py index cde6c8daf96..6cc6fdd4311 100644 --- a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py @@ -146,12 +146,8 @@ def iou(box_a, box_b, norm): xmax_b = max(box_b[0], box_b[2]) ymax_b = max(box_b[1], box_b[3]) - area_a = (ymax_a - ymin_a + (norm == False)) * ( - xmax_a - xmin_a + (norm == False) - ) - area_b = (ymax_b - ymin_b + (norm == False)) * ( - xmax_b - xmin_b + (norm == False) - ) + area_a = (ymax_a - ymin_a + (not norm)) * (xmax_a - xmin_a + (not norm)) + area_b = (ymax_b - ymin_b + (not norm)) * (xmax_b - xmin_b + (not norm)) if area_a <= 0 and area_b <= 0: return 0.0 @@ -160,9 +156,7 @@ def iou(box_a, box_b, norm): xb = min(xmax_a, xmax_b) yb = min(ymax_a, ymax_b) - inter_area = max(xb - xa + (norm == False), 0.0) * max( - yb - ya + (norm == False), 0.0 - ) + inter_area = max(xb - xa + (not norm), 0.0) * max(yb - ya + (not norm), 0.0) iou_ratio = inter_area / (area_a + area_b - inter_area) diff --git a/python/paddle/fluid/tests/unittests/test_ops_nms.py b/python/paddle/fluid/tests/unittests/test_ops_nms.py index 573231a8a72..be4d5f49213 100644 --- a/python/paddle/fluid/tests/unittests/test_ops_nms.py +++ b/python/paddle/fluid/tests/unittests/test_ops_nms.py @@ -55,7 +55,7 @@ def multiclass_nms(boxes, scores, category_idxs, iou_threshold, top_k): mask[cur_category_boxes_idxs[cur_category_keep_boxes_sub_idxs]] = True - keep_boxes_idxs = _find(mask == True) + keep_boxes_idxs = _find(mask) topK_sub_indices = np.argsort(-scores[keep_boxes_idxs])[:top_k] return keep_boxes_idxs[topK_sub_indices] diff --git a/python/paddle/fluid/tests/unittests/test_optimizer.py b/python/paddle/fluid/tests/unittests/test_optimizer.py index 17ad33f67ab..ab5d5ac46da 100644 --- a/python/paddle/fluid/tests/unittests/test_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_optimizer.py @@ -784,7 +784,7 @@ class TestRecomputeOptimizer(unittest.TestCase): type="mean", inputs={"X": b2_out}, outputs={"Out": mean_out} ) - if return_input == True: + if return_input: return mul_x, mul_out, b1_out, b2_out, mean_out return mul_out, b1_out, b2_out, mean_out diff --git a/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py b/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py index 9c8d3429937..e0e545448b5 100644 --- a/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py +++ b/python/paddle/fluid/tests/unittests/test_parallel_executor_drop_scope.py @@ -58,8 +58,8 @@ class TestParallelExecutorDropExeScope(unittest.TestCase): train_exe.run(feed={"X": x}, fetch_list=[loss.name]) test_exe.run(feed={"X": x}, fetch_list=[loss.name]) - assert train_exe._need_create_local_exe_scopes() == False - assert test_exe._need_create_local_exe_scopes() == False + assert not train_exe._need_create_local_exe_scopes() + assert not 
test_exe._need_create_local_exe_scopes() # drop the local execution scope immediately train_exe.drop_local_exe_scopes() diff --git a/python/paddle/fluid/tests/unittests/test_pool2d_op.py b/python/paddle/fluid/tests/unittests/test_pool2d_op.py index 7c44827262d..b2ae6318cc5 100644 --- a/python/paddle/fluid/tests/unittests/test_pool2d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool2d_op.py @@ -181,7 +181,7 @@ def pool2D_forward_naive( if padding_algorithm == "VALID": paddings = [0, 0, 0, 0] - if ceil_mode != False: + if ceil_mode is not False: raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" " must be False. " @@ -346,10 +346,10 @@ class TestPool2D_Op_Mixin(object): if self.has_cudnn(): place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-5, check_dygraph=(not self.use_mkldnn) ) else: - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) def test_check_grad(self): if self.dtype == np.float16: @@ -362,14 +362,14 @@ class TestPool2D_Op_Mixin(object): set(['X']), 'Out', max_relative_error=0.07, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) elif self.pool_type != "max": self.check_grad( set(['X']), 'Out', max_relative_error=0.07, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def init_data_format(self): @@ -512,7 +512,7 @@ def create_test_cudnn_fp16_class(parent, check_grad=True): self.check_output_with_place( place, atol=1e-3, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad(self): @@ -528,7 +528,7 @@ def create_test_cudnn_fp16_class(parent, check_grad=True): set(['X']), 'Out', max_relative_error=0.07, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16Op") @@ -553,7 +553,7 @@ def create_test_fp16_class(parent, check_grad=True): self.check_output_with_place( place, atol=1e-3, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad(self): @@ -569,7 +569,7 @@ def create_test_fp16_class(parent, check_grad=True): set(['X']), 'Out', max_relative_error=0.07, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op") diff --git a/python/paddle/fluid/tests/unittests/test_pool3d_op.py b/python/paddle/fluid/tests/unittests/test_pool3d_op.py index a5bf5066956..09222e99c36 100644 --- a/python/paddle/fluid/tests/unittests/test_pool3d_op.py +++ b/python/paddle/fluid/tests/unittests/test_pool3d_op.py @@ -68,7 +68,7 @@ def pool3D_forward_naive( if padding_algorithm == "VALID": paddings = [0, 0, 0, 0, 0, 0] - if ceil_mode != False: + if ceil_mode is not False: raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" " must be False. 
" diff --git a/python/paddle/fluid/tests/unittests/test_sgd_op.py b/python/paddle/fluid/tests/unittests/test_sgd_op.py index 34420ce5a9c..255a4799984 100644 --- a/python/paddle/fluid/tests/unittests/test_sgd_op.py +++ b/python/paddle/fluid/tests/unittests/test_sgd_op.py @@ -321,12 +321,12 @@ class TestSGDMultiPrecision2_0(unittest.TestCase): optimizer = paddle.optimizer.SGD( parameters=model.parameters(), multi_precision=mp ) - if mp == True: + if mp: model = paddle.amp.decorate(models=model, level='O2') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) for idx in range(5): - if mp == True: + if mp: with paddle.amp.auto_cast(level='O2'): output = model(input) loss = paddle.mean(output) @@ -429,12 +429,12 @@ class TestSGDMultiPrecision1_0(unittest.TestCase): parameter_list=model.parameters(), multi_precision=mp, ) - if mp == True: + if mp: model = paddle.amp.decorate(models=model, level='O2') scaler = paddle.amp.GradScaler(init_loss_scaling=1024) for idx in range(5): - if mp == True: + if mp: with paddle.amp.auto_cast(level='O2'): output = model(input) loss = paddle.mean(output) diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py index c83f569cb11..18a5737225f 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py @@ -78,10 +78,10 @@ class TestSoftmaxOp(OpTest): if self.use_cudnn: place = core.CUDAPlace(0) self.check_output_with_place( - place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) + place, atol=1e-5, check_dygraph=(not self.use_mkldnn) ) else: - self.check_output(check_dygraph=(self.use_mkldnn == False)) + self.check_output(check_dygraph=(not self.use_mkldnn)) def test_check_grad(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode @@ -93,14 +93,14 @@ class TestSoftmaxOp(OpTest): ["X"], "Out", max_relative_error=0.01, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) else: self.check_grad( ["X"], "Out", max_relative_error=0.01, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) @@ -389,9 +389,7 @@ class TestSoftmaxBF16Op(OpTest): def test_check_output(self): place = core.CUDAPlace(0) - self.check_output_with_place( - place, check_dygraph=(self.use_mkldnn == False) - ) + self.check_output_with_place(place, check_dygraph=(not self.use_mkldnn)) def test_check_grad(self): place = core.CUDAPlace(0) @@ -400,7 +398,7 @@ class TestSoftmaxBF16Op(OpTest): ["X"], "Out", numeric_grad_delta=0.05, - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) diff --git a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py index fb0e46b6740..a623a311ccf 100644 --- a/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py +++ b/python/paddle/fluid/tests/unittests/test_softmax_with_cross_entropy_op.py @@ -131,7 +131,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): softmax, labels, self.soft_label, self.axis, self.ignore_index ) - if self.use_softmax == False: + if not self.use_softmax: self.inputs = {"Logits": softmax, "Label": labels} else: self.inputs = {"Logits": logits, "Label": labels} diff --git a/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py b/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py index 3b29d335da4..92e2d0200c8 100644 --- 
a/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py +++ b/python/paddle/fluid/tests/unittests/test_sparse_attention_op.py @@ -221,7 +221,7 @@ class TestSparseAttentionOp(OpTest): self.key_padding_mask = key_padding_mask.astype(self.dtype) self.attn_mask = attn_mask.astype(self.dtype) - if self.use_mask == True: + if self.use_mask: result, result_sdd, result_softmax = ref_batch_sparse_attention( self.q, self.k, @@ -236,7 +236,7 @@ class TestSparseAttentionOp(OpTest): self.q, self.k, self.v, self.offset, self.columns ) - if self.use_mask == True: + if self.use_mask: self.inputs = { 'Q': self.q, 'K': self.k, @@ -326,7 +326,7 @@ class TestSparseAttentionAPI(unittest.TestCase): ) key_padding_mask_shape = (self.shape[0], self.shape[2]) attn_mask_shape = (self.shape[2], self.shape[2]) - if self.use_mask == True: + if self.use_mask: key_padding_mask = paddle.static.data( name="KeyPaddingMask", shape=key_padding_mask_shape, @@ -367,7 +367,7 @@ class TestSparseAttentionAPI(unittest.TestCase): attn_mask_np = attn_mask_np.astype(self.dtype) exe = fluid.Executor(self.place) - if self.use_mask == True: + if self.use_mask: fetches_result = exe.run( feed={ "Q": Q_np, @@ -436,7 +436,7 @@ class TestSparseAttentionAPI(unittest.TestCase): paddle_kp_mask = paddle.to_tensor(key_padding_mask, place=self.place) paddle_attn_mask = paddle.to_tensor(attn_mask, place=self.place) - if self.use_mask == True: + if self.use_mask: paddle_result = F.sparse_attention( paddle_query, paddle_key, diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py index d1b820bd74c..38e65744a81 100644 --- a/python/paddle/fluid/tests/unittests/test_var_base.py +++ b/python/paddle/fluid/tests/unittests/test_var_base.py @@ -1147,10 +1147,10 @@ class TestVarBase(unittest.TestCase): if var2: var2_bool = True - assert var1_bool == False, "if var1 should be false" - assert var2_bool == True, "if var2 should be true" - assert bool(var1) == False, "bool(var1) is False" - assert bool(var2) == True, "bool(var2) is True" + assert not var1_bool, "if var1 should be false" + assert var2_bool, "if var2 should be true" + assert not bool(var1), "bool(var1) is False" + assert bool(var2), "bool(var2) is True" def test_if(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_where_op.py b/python/paddle/fluid/tests/unittests/test_where_op.py index 9ae7d9a4833..7420753d2d3 100644 --- a/python/paddle/fluid/tests/unittests/test_where_op.py +++ b/python/paddle/fluid/tests/unittests/test_where_op.py @@ -68,10 +68,10 @@ class TestWhereAPI(unittest.TestCase): self.out = np.where(self.cond, self.x, self.y) def ref_x_backward(self, dout): - return np.where((self.cond == True), dout, 0) + return np.where(self.cond, dout, 0) def ref_y_backward(self, dout): - return np.where((self.cond == False), dout, 0) + return np.where(~self.cond, dout, 0) def test_api(self, use_cuda=False): for x_stop_gradient in [False, True]: diff --git a/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py index 7d818cc02c8..c6c7d9f34d8 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_batch_norm_op_xpu.py @@ -377,7 +377,7 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): ) net2.weight = net1.weight net2.bias = net1.bias - if self.trainable_statistics == True: + if self.trainable_statistics: net1.training = False net2.training = 
False y1 = net1(x) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py index 973e2908c4e..a7036f52181 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_conv2d_op_xpu.py @@ -261,10 +261,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper): self.check_output_with_place(self.place) def test_check_grad(self): - if ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True - ): + if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): paddle.enable_static() @@ -273,10 +270,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper): ) def test_check_grad_no_filter(self): - if ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True - ): + if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): paddle.enable_static() @@ -285,10 +279,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper): ) def test_check_grad_no_input(self): - if ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True - ): + if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): paddle.enable_static() @@ -433,10 +424,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): def test_check_grad(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - if ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True - ): + if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): paddle.enable_static() @@ -446,10 +434,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): def test_check_grad_no_filter(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - if ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True - ): + if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): paddle.enable_static() @@ -459,10 +444,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): def test_check_grad_no_input(self): # TODO(wangzhongpu): support mkldnn op in dygraph mode - if ( - hasattr(self, "no_need_check_grad") - and self.no_need_check_grad == True - ): + if hasattr(self, "no_need_check_grad") and self.no_need_check_grad: return if core.is_compiled_with_xpu(): paddle.enable_static() diff --git a/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py index 794ff490d7e..36434ce2020 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_dropout_op_xpu.py @@ -52,7 +52,7 @@ class XPUTestDropoutOp(XPUOpTestWrapper): } out = self.inputs['X'] * (1.0 - self.dropout_prob) - if self.is_test == False: + if not self.is_test: mask = None if self.dropout_prob == 0.0: mask = np.ones(self.shape).astype(self.dtype) @@ -78,7 +78,7 @@ class XPUTestDropoutOp(XPUOpTestWrapper): def test_check_grad_normal(self): if ( hasattr(self.__class__, "no_need_check_grad") - and self.__class__.no_need_check_grad == True + and self.__class__.no_need_check_grad ): return diff --git a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py index 22ee95c07d4..1d9c8c80f5a 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py +++ 
b/python/paddle/fluid/tests/unittests/xpu/test_elementwise_mul_op_xpu.py @@ -61,7 +61,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): place, ['X', 'Y'], 'Out', - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_ingore_x(self): @@ -72,7 +72,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): ['Y'], 'Out', no_grad_set=set("X"), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def test_check_grad_ingore_y(self): @@ -83,7 +83,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): ['X'], 'Out', no_grad_set=set('Y'), - check_dygraph=(self.use_mkldnn == False), + check_dygraph=(not self.use_mkldnn), ) def init_input_output(self): diff --git a/python/paddle/fluid/tests/unittests/xpu/test_empty_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_empty_op_xpu.py index cb56e9b51f4..f11740d74d4 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_empty_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_empty_op_xpu.py @@ -72,8 +72,8 @@ class XPUTestEmptyOp(XPUOpTestWrapper): ) elif data_type in ['bool']: total_num = outs[0].size - true_num = np.sum(outs[0] == True) - false_num = np.sum(outs[0] == False) + true_num = np.sum(outs[0]) + false_num = np.sum(~outs[0]) self.assertTrue( total_num == true_num + false_num, 'The value should always be True or False.', diff --git a/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_op_xpu.py index c37d1bff5dd..7c2a5ed2f09 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_fused_gemm_epilogue_op_xpu.py @@ -106,14 +106,14 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): - 0.5, } - if self.trans_x == True: + if self.trans_x: numpy_input_x = ( self.inputs['X'].reshape((self.x_shape[0], -1)).T ) else: numpy_input_x = self.inputs['X'].reshape((-1, self.x_shape[-1])) - if self.trans_y == True: + if self.trans_y: numpy_input_y = self.inputs['Y'].T else: numpy_input_y = self.inputs['Y'] diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py index c4aab23a952..21e46e31783 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py @@ -106,7 +106,7 @@ def generate_compatible_shapes( shape_Y = [BATCH_SIZE] + shape_Y if dim_Y == 3 and dim_X == 2: - if transpose_X == False: + if not transpose_X: shape_X[1] = shape_X[1] * BATCH_SIZE else: shape_X[0] = shape_X[0] * BATCH_SIZE @@ -326,7 +326,7 @@ class TestMatmulBaseGenerator(XPUOpTest): def test_check_grad_normal(self): if ( hasattr(self.__class__, "no_need_check_grad") - and self.__class__.no_need_check_grad == True + and self.__class__.no_need_check_grad ): return @@ -338,7 +338,7 @@ class TestMatmulBaseGenerator(XPUOpTest): def test_check_grad_ignore_x(self): if ( hasattr(self.__class__, "no_need_check_grad") - and self.__class__.no_need_check_grad == True + and self.__class__.no_need_check_grad ): return @@ -350,7 +350,7 @@ class TestMatmulBaseGenerator(XPUOpTest): def test_check_grad_ignore_y(self): if ( hasattr(self.__class__, "no_need_check_grad") - and self.__class__.no_need_check_grad == True + and self.__class__.no_need_check_grad ): return diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py 
b/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py index 63354ac7607..3e873a965f6 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_v2_op_xpu.py @@ -101,7 +101,7 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper): def test_check_grad(self): if ( hasattr(self.__class__, "no_need_check_grad") - and self.__class__.no_need_check_grad == True + and self.__class__.no_need_check_grad ): return place = paddle.XPUPlace(0) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py index 45c9f518cbd..36cb5dfaefd 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_pool2d_op_xpu.py @@ -178,7 +178,7 @@ def pool2D_forward_naive( if padding_algorithm == "VALID": paddings = [0, 0, 0, 0] - if ceil_mode != False: + if ceil_mode is not False: raise ValueError( "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" " must be False. " diff --git a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py index 18af22f3c64..bd6accf59d1 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_where_op_xpu.py @@ -91,10 +91,10 @@ class TestXPUWhereAPI(unittest.TestCase): self.out = np.where(self.cond, self.x, self.y) def ref_x_backward(self, dout): - return np.where(self.cond == True, dout, 0) + return np.where(self.cond, dout, 0) def ref_y_backward(self, dout): - return np.where(self.cond == False, dout, 0) + return np.where(~self.cond, dout, 0) def test_api(self): for x_stop_gradient in [False, True]: diff --git a/python/paddle/hapi/model_summary.py b/python/paddle/hapi/model_summary.py index d90c64b7621..14cb186fce9 100644 --- a/python/paddle/hapi/model_summary.py +++ b/python/paddle/hapi/model_summary.py @@ -450,7 +450,7 @@ def summary_string(model, input_size=None, dtypes=None, input=None): total_output += np.sum(np.prod(output_shape, axis=-1)) if "trainable" in summary[layer]: - if summary[layer]["trainable"] == True: + if summary[layer]["trainable"]: trainable_params += summary[layer]["trainable_params"] summary_str += line_new + "\n" diff --git a/python/paddle/incubate/autograd/primrules.py b/python/paddle/incubate/autograd/primrules.py index badd8476463..0532ade86c6 100644 --- a/python/paddle/incubate/autograd/primrules.py +++ b/python/paddle/incubate/autograd/primrules.py @@ -515,7 +515,7 @@ def dropout_orig2prim(op, seed_t, x): ), 'Can not lower dropout into prim ops with seedtensor.' 
mask = bernoulli(shape=x.shape, dtype=x.dtype, p=op.attr('dropout_prob')) if op.attr('dropout_implementation') == 'upscale_in_train': - if op.attr('is_test') == False: + if not op.attr('is_test'): out = div( mul(x, mask), fill_const(1.0 - op.attr('dropout_prob'), x.shape, x.dtype), @@ -524,7 +524,7 @@ def dropout_orig2prim(op, seed_t, x): else: return primops.cast(mask, dtype=paddle.uint8), x elif op.attr('dropout_implementation') == 'downgrade_in_infer': - if op.attr('is_test') == False: + if not op.attr('is_test'): return primops.cast(mask, dtype=paddle.uint8), mul(x, mask) else: return primops.cast(mask, dtype=paddle.uint8), mul( diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index b6936c5a90c..a61d0576130 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -2109,7 +2109,7 @@ def class_center_sample(label, num_classes, num_samples, group=None): #Tensor(shape=[7], dtype=int64, place=CUDAPlace(1), stop_gradient=True, # [0, 1, 2, 3, 5, 7, 8]) """ - if not (group == False or group is None or hasattr(group, 'is_member')): + if not (group is False or group is None or hasattr(group, 'is_member')): raise ValueError( 'Expected group is False, None or instance of paddle.distributed.collective.Group \ (got group: {})'.format( @@ -2124,7 +2124,7 @@ def class_center_sample(label, num_classes, num_samples, group=None): ring_id = 0 rank = 0 nranks = 1 - if group != False: + if group is not False: if core.is_compiled_with_dist(): parallel_env = paddle.distributed.ParallelEnv() global_rank = parallel_env.rank diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 48cda9d0b4f..b7e1045b6ee 100755 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -2033,7 +2033,7 @@ def margin_cross_entropy( """ assert reduction in ['mean', 'sum', 'none', None] - if not (group == False or group is None or hasattr(group, 'is_member')): + if not (group is False or group is None or hasattr(group, 'is_member')): raise ValueError( 'Expected group is False, None or instance of paddle.distributed.collective.Group \ (got group: {})'.format( @@ -2048,7 +2048,7 @@ def margin_cross_entropy( ring_id = 0 rank = 0 nranks = 1 - if group != False: + if group is not False: ring_id = 0 if group is None else group.id if core.is_compiled_with_dist(): parallel_env = paddle.distributed.ParallelEnv() @@ -2537,7 +2537,7 @@ def cross_entropy( "should be 'sum', 'mean' or 'none', but received %s, which is not allowed." % reduction ) - if ignore_index > 0 and soft_label == True: + if ignore_index > 0 and soft_label: raise ValueError( "When soft_label == True, the value of 'ignore_index' in softmax_cross_entropy" "should be '-100', but received %s, which is not allowed." @@ -2560,12 +2560,12 @@ def cross_entropy( label = paddle.unsqueeze(label, axis=axis) if in_dygraph_mode(): - if soft_label == False: + if not soft_label: valid_label = ( paddle.cast(label != ignore_index, dtype=label.dtype) * label ) if core.is_compiled_with_npu() or core.is_compiled_with_mlu(): - if soft_label == False: + if not soft_label: _, _, out = _legacy_C_ops.softmax_with_cross_entropy( input, valid_label, @@ -2603,7 +2603,7 @@ def cross_entropy( if weight is not None: # trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases. - if soft_label == True: + if soft_label: # chajchaj: # weight's shape is C, where C is class num. # for 1d case: label's shape is [N,C], weight_gather's shape is N. 
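A nuance worth noting around the loss.py and norm.py hunks: plain bool flags such as `soft_label` drop the comparison entirely, while arguments that may legitimately be None or a ParamAttr object (for example `weight_attr`, `bias_attr`, `group`) keep an explicit identity check (`is False` / `is not False`), because truthiness would conflate None with the literal False. A small sketch under that assumption, with hypothetical values rather than code from the patch:

def make_scale(weight_attr):
    # Only the literal False means "do not create this parameter";
    # None (use defaults) and ParamAttr-like objects must not be
    # treated the same way, so an identity check is kept instead of
    # `if not weight_attr`.
    if weight_attr is False:
        return None
    return "create_parameter(attr={!r})".format(weight_attr)  # placeholder


assert make_scale(False) is None
assert make_scale(None) is not None   # `if not weight_attr` would wrongly skip this
assert make_scale("ParamAttr()") is not None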
@@ -2710,7 +2710,7 @@ def cross_entropy(
             return out
     elif _in_legacy_dygraph():
-        if soft_label == False:
+        if not soft_label:
             valid_label = (
                 paddle.cast(label != ignore_index, dtype=label.dtype)
                 * label
             )
@@ -2725,7 +2725,7 @@ def cross_entropy(
                     "Target {} is out of upper bound.".format(label_max.item())
                 )
         if core.is_compiled_with_npu() or core.is_compiled_with_mlu():
-            if soft_label == False:
+            if not soft_label:
                 _, _, out = _legacy_C_ops.softmax_with_cross_entropy(
                     input,
                     valid_label,
@@ -2774,7 +2774,7 @@ def cross_entropy(

         if weight is not None:
             # trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases.
-            if soft_label == True:
+            if soft_label:
                 # chajchaj:
                 # weight's shape is C, where C is class num.
                 # for 1d case: label's shape is [N,C], weight_gather's shape is N.
@@ -2921,7 +2921,7 @@ def cross_entropy(
             weight, 'weight', ['float32', 'float64'], 'softmax_cross_entropy'
         )
         weight_name = name if reduction == 'none' else None
-        if soft_label == True:
+        if soft_label:
             # chajchaj:
             # trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases.
             # weight's shape is C, where C is class num.
diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py
index f9ece56dc7e..d81987fa9ee 100755
--- a/python/paddle/nn/functional/pooling.py
+++ b/python/paddle/nn/functional/pooling.py
@@ -110,7 +110,7 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False):
                 )
             )
         if padding == "VALID":
-            if ceil_mode != False:
+            if ceil_mode is not False:
                 raise ValueError(
                     "When Attr(padding) is \"VALID\", Attr(ceil_mode) must be False. "
                     "Received ceil_mode: True."
diff --git a/python/paddle/nn/layer/distance.py b/python/paddle/nn/layer/distance.py
index 344c1d482d4..ef09a1cd5e2 100644
--- a/python/paddle/nn/layer/distance.py
+++ b/python/paddle/nn/layer/distance.py
@@ -76,7 +76,7 @@ class PairwiseDistance(Layer):
         main_str = 'p={p}'
         if self.epsilon != 1e-6:
             main_str += ', epsilon={epsilon}'
-        if self.keepdim != False:
+        if self.keepdim is not False:
             main_str += ', keepdim={keepdim}'
         if self.name != None:
             main_str += ', name={name}'
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index 1b5784fbedf..5f4a4d8d1d8 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -71,7 +71,7 @@ class _InstanceNormBase(Layer):
     ):
         super(_InstanceNormBase, self).__init__()

-        if weight_attr == False or bias_attr == False:
+        if weight_attr is False or bias_attr is False:
             assert (
                 weight_attr == bias_attr
             ), "weight_attr and bias_attr must be set to False at the same time in InstanceNorm"
@@ -80,7 +80,7 @@ class _InstanceNormBase(Layer):
         self._bias_attr = bias_attr
         self._num_features = num_features

-        if weight_attr != False and bias_attr != False:
+        if weight_attr is not False and bias_attr is not False:
             self.scale = self.create_parameter(
                 attr=self._weight_attr,
                 shape=[num_features],
@@ -382,7 +382,7 @@ class GroupNorm(Layer):

         param_shape = [self._num_channels]

-        if weight_attr == False:
+        if weight_attr is False:
             self.weight = self.create_parameter(
                 attr=None, shape=param_shape, default_initializer=Constant(1.0)
             )
@@ -398,7 +398,7 @@ class GroupNorm(Layer):
                 and self._weight_attr.learning_rate == 0.0
             )

-        if bias_attr == False:
+        if bias_attr is False:
             self.bias = self.create_parameter(
                 attr=None,
                 shape=param_shape,
@@ -619,7 +619,7 @@ class _BatchNormBase(Layer):
         param_shape = [num_features]

         # create parameter
-        if weight_attr == False:
+        if weight_attr is False:
             self.weight = self.create_parameter(
                 attr=None,
                 shape=param_shape,
@@ -639,7 +639,7 @@ class _BatchNormBase(Layer):
                 and self._weight_attr.learning_rate == 0.0
             )

-        if bias_attr == False:
+        if bias_attr is False:
             self.bias = self.create_parameter(
                 attr=None,
                 shape=param_shape,
@@ -1315,7 +1315,10 @@ class SyncBatchNorm(_BatchNormBase):
                 layer._name,
             )

-            if layer._weight_attr != False and layer._bias_attr != False:
+            if (
+                layer._weight_attr is not False
+                and layer._bias_attr is not False
+            ):
                 with no_grad():
                     layer_output.weight = layer.weight
                     layer_output.bias = layer.bias
diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py
index 7e0fa6d7d70..bfebde2b5ee 100644
--- a/python/paddle/nn/layer/rnn.py
+++ b/python/paddle/nn/layer/rnn.py
@@ -964,9 +964,9 @@ class RNNBase(LayerList):
         for direction in range(self.num_directions):
             suffix = '_reverse' if direction == 1 else ''
             param_names.extend(['weight_ih_l{}{}', 'weight_hh_l{}{}'])
-            if bias_ih_attr != False:
+            if bias_ih_attr is not False:
                 param_names.append('bias_ih_l{}{}')
-            if bias_hh_attr != False:
+            if bias_hh_attr is not False:
                 param_names.append('bias_hh_l{}{}')
             param_names = [x.format(layer, suffix) for x in param_names]
             for name, param in zip(param_names, self.parameters()):
@@ -1187,7 +1187,7 @@ class RNNBase(LayerList):
         main_str = '{input_size}, {hidden_size}'
         if self.num_layers != 1:
             main_str += ', num_layers={num_layers}'
-        if self.time_major != False:
+        if self.time_major is not False:
             main_str += ', time_major={time_major}'
         if self.dropout != 0:
             main_str += ', dropout={dropout}'
diff --git a/python/paddle/nn/quant/quant_layers.py b/python/paddle/nn/quant/quant_layers.py
index 6eeaed7f86f..7033df7fb37 100644
--- a/python/paddle/nn/quant/quant_layers.py
+++ b/python/paddle/nn/quant/quant_layers.py
@@ -298,7 +298,7 @@ class FakeQuantChannelWiseAbsMax(Layer):
         reduce_type=None,
     ):
         assert (
-            quant_on_weight == True
+            quant_on_weight
         ), "Channel_wise only can be used on weight quantization."
         super(FakeQuantChannelWiseAbsMax, self).__init__()
         self._quant_bits = quant_bits
diff --git a/python/paddle/profiler/profiler_statistic.py b/python/paddle/profiler/profiler_statistic.py
index c383323d51f..8db866be45f 100755
--- a/python/paddle/profiler/profiler_statistic.py
+++ b/python/paddle/profiler/profiler_statistic.py
@@ -1237,7 +1237,7 @@ def _build_table(
     if statistic_data.event_summary.items:
         all_row_values = []
         name_column_width = 52
-        if thread_sep == True:
+        if thread_sep:
             thread_items = statistic_data.event_summary.thread_items
         else:
             thread_items = {
@@ -1721,7 +1721,7 @@ def _build_table(
                 'ProfileStep'
             ].general_gpu_time
         )
-        if thread_sep == True:
+        if thread_sep:
             userdefined_thread_items = (
                 statistic_data.event_summary.userdefined_thread_items
             )
diff --git a/python/paddle/profiler/utils.py b/python/paddle/profiler/utils.py
index efe3975f144..4d7b36554b5 100644
--- a/python/paddle/profiler/utils.py
+++ b/python/paddle/profiler/utils.py
@@ -164,7 +164,7 @@ def load_profiler_result(filename: str):


 def in_profiler_mode():
-    return _is_profiler_used == True
+    return _is_profiler_used


 def wrap_optimizers():
@@ -182,7 +182,7 @@ def wrap_optimizers():
         return warpper

     global _has_optimizer_wrapped
-    if _has_optimizer_wrapped == True:
+    if _has_optimizer_wrapped:
         return

     import paddle.optimizer as optimizer
diff --git a/python/paddle/sparse/nn/layer/norm.py b/python/paddle/sparse/nn/layer/norm.py
index 936e43a18fa..117fbf01a1d 100644
--- a/python/paddle/sparse/nn/layer/norm.py
+++ b/python/paddle/sparse/nn/layer/norm.py
@@ -398,7 +398,10 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm):
                 layer._name,
             )

-            if layer._weight_attr != False and layer._bias_attr != False:
+            if (
+                layer._weight_attr is not False
+                and layer._bias_attr is not False
+            ):
                 with no_grad():
                     layer_output.weight = layer.weight
                     layer_output.bias = layer.bias
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 3ed56a35dfa..5348681ad04 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -466,9 +466,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
         if in_dygraph_mode():
             out = _C_ops.abs(input)
             reduce_all = (
-                True
-                if axis == None or axis == [] or asvector == True
-                else False
+                True if axis == None or axis == [] or asvector else False
             )
             axis = axis if axis != None and axis != [] else [0]
             if reduce_all:
@@ -487,9 +485,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
             dtype=helper.input_dtype()
         )

-        reduce_all = (
-            True if axis == None or axis == [] or asvector == True else False
-        )
+        reduce_all = True if axis == None or axis == [] or asvector else False
         axis = axis if axis != None and axis != [] else [0]

         reduce_type = (
@@ -1322,7 +1318,7 @@ def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None):
         avg = nx.sum(axis=1) / w_sum
         nx_w = nx

-    if w is not None and aweights is not None and ddof == True:
+    if w is not None and aweights is not None and ddof:
         norm_factor = w_sum - (w * aweights).sum() / w_sum
     else:
         norm_factor = w_sum - ddof
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index 7c629a556b0..3379a60a3bc 100644
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -3206,7 +3206,7 @@ def tile(x, repeat_times, name=None):
     check_variable_and_dtype(
         x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile'
     )
-    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
+    if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
         raise ValueError(
             "When the date type is bool for the input 'x' of tile op, you "
             "must set its stop_gradient to be True by "
@@ -3288,7 +3288,7 @@ def expand_as(x, y, name=None):
     )
     check_type(y, 'y', Variable, 'expand_as')

-    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
+    if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
         raise ValueError(
             "When the data type of input 'x' for expand_as is bool, "
             "you must set its stop_gradient to be False by "
@@ -3359,7 +3359,7 @@ def broadcast_to(x, shape, name=None):
         x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'broadcast_to'
     )
     check_type(shape, 'shape', (list, tuple, Variable), 'broadcast_to')
-    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
+    if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
         raise ValueError(
             "When the data type of input 'x' for broadcast_to is bool, "
             "you must set its stop_gradient to be False by "
@@ -3457,7 +3457,7 @@ def expand(x, shape, name=None):
         'expand',
     )
     check_type(shape, 'shape', (list, tuple, Variable), 'expand')
-    if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False:
+    if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
         raise ValueError(
             "When the data type of input 'x' for expand is bool, "
             "you must set its stop_gradient to be False by "
diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py
index 8791ebb7af2..f5f448cf4ef 100644
--- a/python/paddle/tensor/random.py
+++ b/python/paddle/tensor/random.py
@@ -188,7 +188,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
     """

     assert (
-        core.is_compiled_with_rocm() == False
+        not core.is_compiled_with_rocm()
     ), "multinomial op is not supported on ROCM yet."

     if in_dygraph_mode():
diff --git a/python/paddle/text/datasets/conll05.py b/python/paddle/text/datasets/conll05.py
index 807c8c3fbeb..69f23cdaab9 100644
--- a/python/paddle/text/datasets/conll05.py
+++ b/python/paddle/text/datasets/conll05.py
@@ -228,9 +228,9 @@ class Conll05st(Dataset):
             lbl_seq = []
             verb_word = ''
             for l in lbl:
-                if l == '*' and is_in_bracket == False:
+                if l == '*' and not is_in_bracket:
                     lbl_seq.append('O')
-                elif l == '*' and is_in_bracket == True:
+                elif l == '*' and is_in_bracket:
                     lbl_seq.append('I-' + cur_tag)
                 elif l == '*)':
                     lbl_seq.append('I-' + cur_tag)
diff --git a/tools/analysisPyXml.py b/tools/analysisPyXml.py
index ee110d6ce7f..b184ef76fcc 100644
--- a/tools/analysisPyXml.py
+++ b/tools/analysisPyXml.py
@@ -46,28 +46,25 @@ def analysisPyXml(rootPath, ut):
                 command = 'sed -n %sp %s' % (line_number, clazz_filename)
                 _code, output = commands.getstatusoutput(command)
                 if _code == 0:
-                    if (
-                        output.strip().startswith(
-                            (
-                                'from',
-                                'import',
-                                '__all__',
-                                'def',
-                                'class',
-                                '"""',
-                                '@',
-                                '\'\'\'',
-                                'logger',
-                                '_logger',
-                                'logging',
-                                'r"""',
-                                'pass',
-                                'try',
-                                'except',
-                                'if __name__ == "__main__"',
-                            )
+                    if not output.strip().startswith(
+                        (
+                            'from',
+                            'import',
+                            '__all__',
+                            'def',
+                            'class',
+                            '"""',
+                            '@',
+                            '\'\'\'',
+                            'logger',
+                            '_logger',
+                            'logging',
+                            'r"""',
+                            'pass',
+                            'try',
+                            'except',
+                            'if __name__ == "__main__"',
                         )
-                        == False
                     ):
                         pattern = r"""(.*) = ('*')|(.*) = ("*")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()"""  # a='b'/a="b"/a=0
                         if re.match(pattern, output.strip()) == None:
diff --git a/tools/check_op_benchmark_result.py b/tools/check_op_benchmark_result.py
index 8fce5081022..6c4e0fc06b6 100644
--- a/tools/check_op_benchmark_result.py
+++ b/tools/check_op_benchmark_result.py
@@ -40,7 +40,7 @@ def parse_log_file(log_file):
         for line in f.read().strip().split('\n')[::-1]:
             try:
                 result = json.loads(line)
-                if result.get("disabled", False) == True:
+                if result.get("disabled", False):
                     return None
                 return result
             except ValueError:
diff --git a/tools/get_pr_ut.py b/tools/get_pr_ut.py
index 0788da6e116..fcfa68bb4da 100644
--- a/tools/get_pr_ut.py
+++ b/tools/get_pr_ut.py
@@ -349,7 +349,7 @@ class PRChecker(object):
                     file_list.append(filename)
                 else:
                     isWhiteFile = self.get_is_white_file(filename)
-                    if isWhiteFile == False:
+                    if not isWhiteFile:
                         file_list.append(filename)
                     else:
                         filterFiles.append(filename)
@@ -417,7 +417,7 @@ class PRChecker(object):
                         == tempfilename.split(".")[0]
                     ):
                         f_judge_in_added_ut = True
-                if f_judge_in_added_ut == True:
+                if f_judge_in_added_ut:
                     print(
                         "Adding new unit tests not hit mapFiles: %s"
                         % f_judge
diff --git a/tools/get_single_test_cov.py b/tools/get_single_test_cov.py
index 266872feaf4..ee5f2d9fd50 100644
--- a/tools/get_single_test_cov.py
+++ b/tools/get_single_test_cov.py
@@ -91,7 +91,7 @@ def analysisFNDAFile(rootPath, test):
                    if matchObj == None:
                        OP_REGIST = False
                        break
-            if OP_REGIST == False:
+            if not OP_REGIST:
                 related_file_list.append(clazz_filename)
                 os.system(
                     'echo %s >> %s' % (clazz_filename, related_ut_map_file)
diff --git a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
index 313d844d6f6..7ece773aa78 100644
--- a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
+++ b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py
@@ -122,14 +122,14 @@ def generate_all_ops_inputs_outputs_map(op_descs):
         outpus = list()
         for input_ in op_proto[INPUTS]:
             if (
-                op_proto[INPUTS][input_][EXTRA] != True
-                and op_proto[INPUTS][input_][INTERMEDIATE] != True
+                not op_proto[INPUTS][input_][EXTRA]
+                and not op_proto[INPUTS][input_][INTERMEDIATE]
             ):
                 inputs.append(input_)
         for output_ in op_proto[OUTPUTS]:
             if (
-                op_proto[OUTPUTS][output_][EXTRA] != True
-                and op_proto[OUTPUTS][output_][INTERMEDIATE] != True
+                not op_proto[OUTPUTS][output_][EXTRA]
+                and not op_proto[OUTPUTS][output_][INTERMEDIATE]
             ):
                 outpus.append(output_)
         ops_inputs_map[op_type] = inputs
@@ -214,9 +214,9 @@ def get_constraint(op_type, op_proto):
     optional_input_num_ = 0
     for input_ in op_proto[INPUTS]:
         if (
-            op_proto[INPUTS][input_][EXTRA] != True
-            and op_proto[INPUTS][input_][INTERMEDIATE] != True
-            and op_proto[INPUTS][input_][DISPENSABLE] == True
+            not op_proto[INPUTS][input_][EXTRA]
+            and not op_proto[INPUTS][input_][INTERMEDIATE]
+            and op_proto[INPUTS][input_][DISPENSABLE]
         ):
             optional_input_num_ += 1
     if optional_input_num_ > 1:
@@ -306,11 +306,11 @@ def convert_op_proto_into_mlir(op_descs):
         # 2.3.1 inputs
         for input_ in op_proto[INPUTS]:
             if (
-                op_proto[INPUTS][input_][EXTRA] != True
-                and op_proto[INPUTS][input_][INTERMEDIATE] != True
+                not op_proto[INPUTS][input_][EXTRA]
+                and not op_proto[INPUTS][input_][INTERMEDIATE]
             ):
-                if op_proto[INPUTS][input_][DISPENSABLE] != True:
-                    if op_proto[INPUTS][input_][DUPLICABLE] != True:
+                if not op_proto[INPUTS][input_][DISPENSABLE]:
+                    if not op_proto[INPUTS][input_][DUPLICABLE]:
                         ARGUMENTS = (
                             ARGUMENTS + " PD_Tensor:$" + input_ + ","
                         )
@@ -319,7 +319,7 @@
                             ARGUMENTS + " PD_Tensor_Array:$" + input_ + ","
                         )
                 else:
-                    if op_proto[INPUTS][input_][DUPLICABLE] != True:
+                    if not op_proto[INPUTS][input_][DUPLICABLE]:
                         ARGUMENTS = (
                             ARGUMENTS
                             + " Optional:$"
@@ -350,7 +350,7 @@ def convert_op_proto_into_mlir(op_descs):

         # 2.3.2 attributes
         for attr in op_proto[ATTRS]:
-            if (op_proto[ATTRS][attr][EXTRA] == True) or (
+            if (op_proto[ATTRS][attr][EXTRA]) or (
                 attr in skipped_attr_list
             ):
                 continue
@@ -434,10 +434,10 @@ def convert_op_proto_into_mlir(op_descs):
         outputs = ""
         for output_ in op_proto[OUTPUTS]:
             if (
-                op_proto[OUTPUTS][output_][EXTRA] != True
-                and op_proto[OUTPUTS][output_][INTERMEDIATE] != True
+                not op_proto[OUTPUTS][output_][EXTRA]
+                and not op_proto[OUTPUTS][output_][INTERMEDIATE]
             ):
-                if op_proto[OUTPUTS][output_][DUPLICABLE] != True:
+                if not op_proto[OUTPUTS][output_][DUPLICABLE]:
                     outputs = outputs + "PD_Tensor:${},".format(output_)
                 else:
                     outputs = outputs + "PD_Tensor_Array:${},".format(
diff --git a/tools/sampcd_processor.py b/tools/sampcd_processor.py
index 24af09893e3..5afa47dc4fa 100644
--- a/tools/sampcd_processor.py
+++ b/tools/sampcd_processor.py
@@ -376,7 +376,7 @@ Please use '.. code-block:: python' to format the sample code."""
     # None - no sample code found;
     # False - it need other special equipment or environment.
     # so, the following conditional statements are intentionally arranged.
-    if matched == True:
+    if matched:
         tfname = os.path.join(
             SAMPLECODE_TEMPDIR,
             '{}_example{}'.format(
@@ -395,7 +395,7 @@
             )
         )
         SUMMARY_INFO['skiptest'].append("{}-{}".format(name, cb['id']))
-    elif matched == False:
+    elif not matched:
         logger.info(
             '{}\' code block (name:{}, id:{}) required({}) not match capacity({}).'.format(
                 name,
-- 
GitLab