From a35a4a53221e190c4ba566245eb72900c75b7165 Mon Sep 17 00:00:00 2001
From: Nyakku Shigure
Date: Tue, 1 Nov 2022 22:14:52 +0800
Subject: [PATCH] [CodeStyle][E711] use `is`/`is not` for comparison with `None` (#47452)

* [CodeStyle][E711] use `is`/`is not` for comparison with `None`
* `self.assertTrue($A is None)` -> `self.assertIsNone($A)`
* `self.assertTrue($A is not None)` -> `self.assertIsNotNone($A)`
* `self.assertFalse($A is None)` -> `self.assertIsNotNone($A)`
* `self.assertEqual($A, None)` -> `self.assertIsNone($A)`
* `self.assertNotEqual($A, None)` -> `self.assertIsNotNone($A)`
---
 paddle/scripts/conda_build.py | 8 ++---
 python/paddle/cost_model/cost_model.py | 2 +-
 python/paddle/dataset/imdb.py | 2 +-
 python/paddle/dataset/tests/imdb_test.py | 4 +--
 .../auto_parallel/cost/base_cost.py | 2 +-
 .../distributed/auto_parallel/partitioner.py | 2 +-
 .../auto_parallel/process_group.py | 2 +-
 .../auto_parallel/tuner/optimization_tuner.py | 2 +-
 python/paddle/distributed/elastic.py | 2 +-
 .../distributed/fleet/base/role_maker.py | 36 +++++++++----------
 .../fleet/base/strategy_compiler.py | 6 ++--
 .../distributed/fleet/base/util_factory.py | 4 ++-
 .../fleet/data_generator/data_generator.py | 4 +--
 python/paddle/distributed/fleet/fleet.py | 2 +-
 python/paddle/distributed/fleet/launch.py | 2 +-
 .../paddle/distributed/fleet/launch_utils.py | 6 ++--
 .../graph_execution_optimizer.py | 2 +-
 .../fleet/meta_optimizers/ps_optimizer.py | 2 +-
 .../fleet/runtime/parameter_server_runtime.py | 2 +-
 .../distributed/fleet/runtime/the_one_ps.py | 6 ++--
 python/paddle/distributed/fleet/utils/fs.py | 2 +-
 .../fleet/utils/hybrid_parallel_inference.py | 2 +-
 python/paddle/distributed/ps/coordinator.py | 2 +-
 python/paddle/distributed/ps/the_one_ps.py | 6 ++--
 python/paddle/distributed/ps/utils/public.py | 2 +-
 python/paddle/distributed/rpc/rpc.py | 2 +-
 .../paddle/distributed/utils/launch_utils.py | 2 +-
 python/paddle/fluid/backward.py | 2 +-
 python/paddle/fluid/communicator.py | 20 +++++------
 python/paddle/fluid/contrib/layers/nn.py | 2 +-
 .../paddle/fluid/contrib/layers/rnn_impl.py | 4 +--
 .../post_training_quantization.py | 2 +-
 .../slim/quantization/quantization_pass.py | 16 ++++-----
 .../quantization/quantize_transpiler_v2.py | 2 +-
 .../fluid/contrib/slim/quantization/utils.py | 2 +-
 .../tests/test_user_defined_quantization.py | 2 +-
 .../test_weight_quantization_mobilenetv1.py | 2 +-
 python/paddle/fluid/device_worker.py | 8 ++---
 python/paddle/fluid/distributed/helper.py | 6 ++--
 .../dygraph_to_static/return_transformer.py | 2 +-
 python/paddle/fluid/dygraph/layers.py | 2 +-
 python/paddle/fluid/executor.py | 4 +--
 python/paddle/fluid/framework.py | 3 +-
 .../incubate/checkpoint/auto_checkpoint.py | 6 ++--
 .../fluid/incubate/data_generator/__init__.py | 4 +--
 .../fluid/incubate/fleet/base/role_maker.py | 2 +-
 .../incubate/fleet/collective/__init__.py | 4 +--
 .../fleet/parameter_server/ir/trainer_pass.py | 2 +-
 .../pslib/optimizer_factory.py | 2 +-
 .../paddle/fluid/incubate/fleet/utils/hdfs.py | 2 +-
 .../fluid/incubate/fleet/utils/utils.py | 2 +-
 python/paddle/fluid/io.py | 6 ++--
 python/paddle/fluid/layers/control_flow.py | 2 +-
 python/paddle/fluid/layers/nn.py | 34 +++++++++---------
 python/paddle/fluid/metrics.py | 2 +-
 python/paddle/fluid/net_drawer.py | 4 +--
 python/paddle/fluid/optimizer.py | 20 +++++------
 .../custom_op/test_custom_raw_op_kernel_op.py | 2 +-
 .../asp/test_asp_optimize_dynamic.py | 4 +--
 .../unittests/auto_parallel/test_strategy.py | 6 ++--
 .../auto_parallel/test_while_op_partition.py | 2 +-
 .../collective/fleet/test_auto_checkpoint.py | 16 ++++-----
 .../fleet/test_auto_checkpoint_dist_basic.py | 2 +-
 .../fleet/test_auto_checkpoint_multiple.py | 2 +-
 .../fleet/test_fleet_rolemaker_new.py | 4 +--
 .../unittests/collective/fleet/test_hdfs1.py | 2 +-
 .../collective/init_process_group.py | 4 +--
 .../unittests/dist_text_classification.py | 2 +-
 .../distributed_passes/dist_pass_test_base.py | 2 +-
 .../seq2seq_dygraph_model.py | 12 +++----
 .../test_decorator_transform.py | 2 +-
 .../dygraph_to_static/test_function_spec.py | 2 +-
 .../unittests/dygraph_to_static/test_place.py | 2 +-
 .../unittests/ir_memory_optimize_net_base.py | 2 +-
 .../unittests/test_adaptive_avg_pool2d.py | 6 ++--
 .../unittests/test_adaptive_avg_pool3d.py | 8 ++---
 .../unittests/test_adaptive_max_pool2d.py | 6 ++--
 .../unittests/test_adaptive_max_pool3d.py | 8 ++---
 .../unittests/test_auto_parallel_mapper.py | 6 ++--
 .../test_auto_parallel_partitioner_gpt.py | 14 ++++----
 .../fluid/tests/unittests/test_base_layer.py | 2 +-
 .../test_decoupled_py_reader_data_check.py | 4 +--
 .../unittests/test_dygraph_spectral_norm.py | 2 +-
 .../unittests/test_dygraph_weight_norm.py | 2 +-
 .../test_eager_deletion_delete_vars.py | 4 +--
 .../test_eager_deletion_padding_rnn.py | 6 ++--
 .../fluid/tests/unittests/test_fleet_base.py | 2 +-
 .../fluid/tests/unittests/test_fleet_util.py | 4 +--
 .../tests/unittests/test_fused_matmul_bias.py | 2 +-
 .../test_global_var_getter_setter.py | 4 +--
 .../unittests/test_imperative_auto_prune.py | 34 +++++++++---------
 .../tests/unittests/test_imperative_basic.py | 20 +++++------
 .../unittests/test_imperative_double_grad.py | 4 +--
 .../unittests/test_imperative_optimizer.py | 6 ++--
 .../unittests/test_imperative_optimizer_v2.py | 6 ++--
 .../unittests/test_imperative_save_load.py | 6 ++--
 .../test_imperative_selected_rows.py | 20 +++++------
 .../unittests/test_inference_model_io.py | 2 +-
 .../fluid/tests/unittests/test_input_spec.py | 4 +--
 .../fluid/tests/unittests/test_lambv2_op.py | 4 +--
 .../fluid/tests/unittests/test_layers.py | 12 +++----
 .../test_paddle_imperative_double_grad.py | 2 +-
 .../fluid/tests/unittests/test_profiler.py | 4 +--
 .../fluid/tests/unittests/test_pylayer_op.py | 14 ++++----
 .../fluid/tests/unittests/test_regularizer.py | 4 +--
 .../unittests/test_tensor_register_hook.py | 2 +-
 .../fluid/tests/unittests/test_var_base.py | 8 ++---
 .../fluid/tests/unittests/test_var_info.py | 2 +-
 .../paddle/fluid/tests/unittests/testsuite.py | 2 +-
 .../tests/unittests/xpu/test_matmul_op_xpu.py | 2 +-
 python/paddle/fluid/trainer_desc.py | 10 +++---
 python/paddle/fluid/trainer_factory.py | 4 +--
 .../fluid/transpiler/distribute_transpiler.py | 2 +-
 .../fluid/transpiler/geo_sgd_transpiler.py | 2 +-
 python/paddle/nn/functional/common.py | 2 +-
 python/paddle/nn/functional/norm.py | 2 +-
 python/paddle/nn/functional/pooling.py | 10 +++---
 python/paddle/nn/layer/distance.py | 2 +-
 python/paddle/nn/layer/norm.py | 18 +++++-----
 python/paddle/optimizer/optimizer.py | 2 +-
 python/paddle/profiler/profiler.py | 2 +-
 python/paddle/profiler/utils.py | 2 +-
 python/paddle/sparse/nn/layer/norm.py | 10 +++---
 python/paddle/static/io.py | 2 +-
 python/paddle/tensor/einsum.py | 2 +-
 python/paddle/tensor/linalg.py | 12 +++----
 python/paddle/tensor/manipulation.py | 4 +--
 python/paddle/tensor/math.py | 10 +++---
 python/paddle/tensor/search.py | 2 +-
 python/paddle/tests/test_utils_lazyimport.py | 2 +-
 python/paddle/text/datasets/imdb.py | 2 +-
 python/paddle/vision/ops.py | 2 +-
 tools/analysisPyXml.py | 2 +-
tools/check_op_desc.py | 2 +- tools/get_single_test_cov.py | 2 +- ...rate_pd_op_dialect_from_paddle_op_maker.py | 2 +- tools/test_runner.py | 2 +- 137 files changed, 357 insertions(+), 352 deletions(-) diff --git a/paddle/scripts/conda_build.py b/paddle/scripts/conda_build.py index 98692ff3df3..ed3f1709884 100644 --- a/paddle/scripts/conda_build.py +++ b/paddle/scripts/conda_build.py @@ -171,7 +171,7 @@ package: def meta_build_linux( var, python_str, paddle_version, build_var, build_name_str, cuda_str=None ): - if cuda_str == None: + if cuda_str is None: package_str = ( """ package: @@ -192,7 +192,7 @@ package: ) meta_build = var.build + build_name_str meta_str = package_str + meta_build + requirement - if not (cuda_str == None): + if not (cuda_str is None): meta_str = meta_str + cuda_str meta_str = meta_str + var.test + var.about @@ -209,7 +209,7 @@ package: def meta_build_windows( var, python_str, paddle_version, blt_var, build_name_str, cuda_str=None ): - if cuda_str == None: + if cuda_str is None: package_str = ( """ package: @@ -235,7 +235,7 @@ package: meta_build = var.build + build_name_str meta_str = package_str + meta_build + requirement - if not (cuda_str == None): + if not (cuda_str is None): meta_str = meta_str + cuda_str blt_str = var.blt_const + blt_var diff --git a/python/paddle/cost_model/cost_model.py b/python/paddle/cost_model/cost_model.py index 8797868287b..b3178d2e05a 100644 --- a/python/paddle/cost_model/cost_model.py +++ b/python/paddle/cost_model/cost_model.py @@ -74,7 +74,7 @@ class CostModel: def get_static_op_time(self, op_name, forward=True, dtype="float32"): # if forward is True, return op forward time, otherwise return op backward time. - if op_name == None: + if op_name is None: raise ValueError( 'op_name should not be empty when you want to get static op time' ) diff --git a/python/paddle/dataset/imdb.py b/python/paddle/dataset/imdb.py index e95a9e6df00..622d33aa187 100644 --- a/python/paddle/dataset/imdb.py +++ b/python/paddle/dataset/imdb.py @@ -45,7 +45,7 @@ def tokenize(pattern): # tarfile.extractfile, which does random access and might # destroy hard disks. tf = tarf.next() - while tf != None: + while tf is not None: if bool(pattern.match(tf.name)): # newline and punctuations removal and ad-hoc tokenization. 
yield tarf.extractfile(tf).read().rstrip(b'\n\r').translate( diff --git a/python/paddle/dataset/tests/imdb_test.py b/python/paddle/dataset/tests/imdb_test.py index c2a787cecd8..32dbc75b874 100644 --- a/python/paddle/dataset/tests/imdb_test.py +++ b/python/paddle/dataset/tests/imdb_test.py @@ -31,13 +31,13 @@ class TestIMDB(unittest.TestCase): word_idx = None def test_build_dict(self): - if self.word_idx == None: + if self.word_idx is None: self.word_idx = paddle.dataset.imdb.build_dict(TRAIN_PATTERN, 150) self.assertEqual(len(self.word_idx), 7036) def check_dataset(self, dataset, expected_size): - if self.word_idx == None: + if self.word_idx is None: self.word_idx = paddle.dataset.imdb.build_dict(TRAIN_PATTERN, 150) sum = 0 diff --git a/python/paddle/distributed/auto_parallel/cost/base_cost.py b/python/paddle/distributed/auto_parallel/cost/base_cost.py index 2ce98a0a051..35353de9b66 100644 --- a/python/paddle/distributed/auto_parallel/cost/base_cost.py +++ b/python/paddle/distributed/auto_parallel/cost/base_cost.py @@ -587,7 +587,7 @@ class CommContext: if forward_order_beta > backward_order_beta else backward_order_beta ) - if max_beta == None: + if max_beta is None: max_beta = beta else: if beta > max_beta: diff --git a/python/paddle/distributed/auto_parallel/partitioner.py b/python/paddle/distributed/auto_parallel/partitioner.py index 2ba33981324..2a7b7f3e67d 100644 --- a/python/paddle/distributed/auto_parallel/partitioner.py +++ b/python/paddle/distributed/auto_parallel/partitioner.py @@ -84,7 +84,7 @@ class Partitioner(object): dist_op_context.rank_id = self._rank_id # partition startup program - if serial_startup_program == None: + if serial_startup_program is None: partitioned_startup_prog = None else: partitioned_startup_prog = self.partition_startup_program( diff --git a/python/paddle/distributed/auto_parallel/process_group.py b/python/paddle/distributed/auto_parallel/process_group.py index 10d2556f299..ebe478f1dff 100644 --- a/python/paddle/distributed/auto_parallel/process_group.py +++ b/python/paddle/distributed/auto_parallel/process_group.py @@ -61,7 +61,7 @@ def new_process_group(ranks, group_id=None): num_groups = len(_g_process_group_map) # Note: our process group may interfere with the original implementation # so the created group id should start from the original _new_ring_id() - if group_id == None: + if group_id is None: group_id = _new_ring_id() + num_groups + 1 new_pg = ProcessGroup(group_id, ranks) diff --git a/python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py b/python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py index a5cdbc7f95a..3cd58f2c004 100644 --- a/python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py +++ b/python/paddle/distributed/auto_parallel/tuner/optimization_tuner.py @@ -530,7 +530,7 @@ class OptimizationTuner: self._finished_trials.append(trial) cur_mertic = get_metric(results) - if self._best_metric == None or cur_mertic > self._best_metric: + if self._best_metric is None or cur_mertic > self._best_metric: self._best_metric = cur_mertic self._best_iter = i diff --git a/python/paddle/distributed/elastic.py b/python/paddle/distributed/elastic.py index d1fd9a790f2..55b73ab315b 100644 --- a/python/paddle/distributed/elastic.py +++ b/python/paddle/distributed/elastic.py @@ -31,7 +31,7 @@ class Command(object): self.etcd.put(self.np_path, '{}'.format(np).encode('latin-1')) def scale_np(self, np): - if self.etcd.get(self.np_path)[0] != None: + if self.etcd.get(self.np_path)[0] is not None: self.set_np(np) 
return True return False diff --git a/python/paddle/distributed/fleet/base/role_maker.py b/python/paddle/distributed/fleet/base/role_maker.py index 28dace611a0..3a9e7e7aa47 100755 --- a/python/paddle/distributed/fleet/base/role_maker.py +++ b/python/paddle/distributed/fleet/base/role_maker.py @@ -293,7 +293,7 @@ class Gloo(object): if "Gateway" in item and "Iface" in item: gateway_idx = item.index("Gateway") iface_idx = item.index("Iface") - elif gateway_idx != None and iface_idx != None: + elif gateway_idx is not None and iface_idx is not None: gateway = None if len(item) > gateway_idx: gateway = item[gateway_idx] @@ -845,7 +845,7 @@ class PaddleCloudRoleMaker(RoleMakerBase): self._server_endpoints = self._server_endpoints.split(",") self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS", None) - if self._worker_endpoints != None: + if self._worker_endpoints is not None: self._worker_endpoints = self._worker_endpoints.split(",") else: self._worker_endpoints = [] @@ -860,14 +860,14 @@ class PaddleCloudRoleMaker(RoleMakerBase): self._coordinator_endpoints = self._coordinator_endpoints.split(",") trainers_num = os.getenv("PADDLE_TRAINERS_NUM", None) - if trainers_num == None: + if trainers_num is None: raise ValueError( "Can not find PADDLE_TRAINERS_NUM, please check your environment." ) trainers_num = int(trainers_num) training_role = os.getenv("TRAINING_ROLE", None) - if training_role == None: + if training_role is None: raise ValueError( "Can not find TRAINING_ROLE, please check your environment." ) @@ -937,20 +937,20 @@ class PaddleCloudRoleMaker(RoleMakerBase): if training_role == "TRAINER": role = Role.WORKER current_id = os.getenv("PADDLE_TRAINER_ID", None) - if current_id == None: + if current_id is None: raise ValueError( "Can not find PADDLE_TRAINER_ID, please check your environment." ) current_id = int(current_id) if self._is_heter_parameter_server_mode: self._stage_id = os.getenv("STAGE_ID", None) - if self._stage_id == None: + if self._stage_id is None: raise ValueError( "Can not find STAGE_ID, please check your environment." ) self._stage_id = int(self._stage_id) self._stage_num = os.getenv("STAGE_NUM", None) - if self._stage_num == None: + if self._stage_num is None: raise ValueError( "Can not find STAGE_NUM, please check your environment." ) @@ -958,18 +958,18 @@ class PaddleCloudRoleMaker(RoleMakerBase): self._stage_trainers = os.getenv( "PADDLE_STAGE_TRAINERS_NUM", None ) - if self._stage_trainers == None: + if self._stage_trainers is None: raise ValueError( "Can not find PADDLE_STAGE_TRAINERS_NUM, please check your environment." ) self._stage_trainers = eval(self._stage_trainers) cur_port = os.getenv("PADDLE_PORT", None) - if cur_port == None: + if cur_port is None: raise ValueError( "Can not find PADDLE_PORT, please check your environment." ) cur_ip = os.getenv("POD_IP", None) - if cur_ip == None: + if cur_ip is None: raise ValueError( "Can not find POD_IP, please check your environment." ) @@ -982,12 +982,12 @@ class PaddleCloudRoleMaker(RoleMakerBase): elif training_role == "PSERVER": role = Role.SERVER cur_port = os.getenv("PADDLE_PORT", None) - if cur_port == None: + if cur_port is None: raise ValueError( "Can not find PADDLE_PORT, please check your environment." ) cur_ip = os.getenv("POD_IP", None) - if cur_ip == None: + if cur_ip is None: raise ValueError( "Can not find POD_IP, please check your environment." 
) @@ -997,20 +997,20 @@ class PaddleCloudRoleMaker(RoleMakerBase): elif training_role == "HETER_TRAINER": role = Role.HETER_WORKER self._stage_id = os.getenv("STAGE_ID", None) - if self._stage_id == None: + if self._stage_id is None: raise ValueError( "Can not find STAGE_ID, please check your environment." ) self._stage_id = int(self._stage_id) self._stage_num = os.getenv("STAGE_NUM", None) - if self._stage_num == None: + if self._stage_num is None: raise ValueError( "Can not find STAGE_NUM, please check your environment." ) self._stage_num = int(self._stage_num) self._stage_trainers = os.getenv("PADDLE_STAGE_TRAINERS_NUM", None) - if self._stage_trainers == None: + if self._stage_trainers is None: raise ValueError( "Can not find PADDLE_STAGE_TRAINERS_NUM, please check your environment." ) @@ -1019,7 +1019,7 @@ class PaddleCloudRoleMaker(RoleMakerBase): self._heter_trainer_device_type = os.getenv( "HETER_DEVICE_TYPE", None ) - if self._heter_trainer_device_type == None: + if self._heter_trainer_device_type is None: raise ValueError( "Can not find HETER_DEVICE_TYPE, please check your environment." ) @@ -1040,12 +1040,12 @@ class PaddleCloudRoleMaker(RoleMakerBase): ) cur_port = os.getenv("PADDLE_PORT", None) - if cur_port == None: + if cur_port is None: raise ValueError( "Can not find PADDLE_PORT, please check your environment." ) cur_ip = os.getenv("POD_IP", None) - if cur_ip == None: + if cur_ip is None: raise ValueError( "Can not find POD_IP, please check your environment." ) diff --git a/python/paddle/distributed/fleet/base/strategy_compiler.py b/python/paddle/distributed/fleet/base/strategy_compiler.py index 14db852f770..d161eb68079 100644 --- a/python/paddle/distributed/fleet/base/strategy_compiler.py +++ b/python/paddle/distributed/fleet/base/strategy_compiler.py @@ -204,13 +204,13 @@ class StrategyCompiler(StrategyCompilerBase): ) return_meta = ( - None if meta_optimizers == None else meta_optimizers[0] + None if meta_optimizers is None else meta_optimizers[0] ) return_graph = ( - None if graph_optimizers == None else graph_optimizers[0] + None if graph_optimizers is None else graph_optimizers[0] ) - if meta_optimizers == None or graph_optimizers == None: + if meta_optimizers is None or graph_optimizers is None: return return_meta, return_graph # do heuristic filter here, if any meta optimizer in graph optimizers is in diff --git a/python/paddle/distributed/fleet/base/util_factory.py b/python/paddle/distributed/fleet/base/util_factory.py index 5af1acb9413..dcaa256a26d 100755 --- a/python/paddle/distributed/fleet/base/util_factory.py +++ b/python/paddle/distributed/fleet/base/util_factory.py @@ -509,7 +509,9 @@ class UtilBase(object): } for each_var in saved_params: var_temp = fluid.global_scope().find_var(each_var.name) - assert var_temp != None, "can't not find var: " + each_var.name + assert var_temp is not None, ( + "can't not find var: " + each_var.name + ) new_shape = (np.array(var_temp.get_tensor())).shape assert each_var.name in orig_para_shape, ( each_var.name + "MUST in var list" diff --git a/python/paddle/distributed/fleet/data_generator/data_generator.py b/python/paddle/distributed/fleet/data_generator/data_generator.py index 9e9ec7b61bc..ec6114dd21f 100644 --- a/python/paddle/distributed/fleet/data_generator/data_generator.py +++ b/python/paddle/distributed/fleet/data_generator/data_generator.py @@ -79,7 +79,7 @@ class DataGenerator(object): batch_samples = [] line_iter = self.generate_sample(None) for user_parsed_line in line_iter(): - if user_parsed_line == None: + if 
user_parsed_line is None: continue batch_samples.append(user_parsed_line) if len(batch_samples) == self.batch_size_: @@ -121,7 +121,7 @@ class DataGenerator(object): for line in sys.stdin: line_iter = self.generate_sample(line) for user_parsed_line in line_iter(): - if user_parsed_line == None: + if user_parsed_line is None: continue batch_samples.append(user_parsed_line) if len(batch_samples) == self.batch_size_: diff --git a/python/paddle/distributed/fleet/fleet.py b/python/paddle/distributed/fleet/fleet.py index 2630fa8283e..695f03fe1f2 100644 --- a/python/paddle/distributed/fleet/fleet.py +++ b/python/paddle/distributed/fleet/fleet.py @@ -1285,7 +1285,7 @@ class Fleet(object): context["origin_main_program"] = self.origin_main_program context["origin_main_programs"] = [self.origin_main_program] context["loss"] = loss - if startup_program == None: + if startup_program is None: self.origin_startup_program = ( paddle.static.default_startup_program().clone(for_test=False) ) diff --git a/python/paddle/distributed/fleet/launch.py b/python/paddle/distributed/fleet/launch.py index 998f64c3ec2..ecf6436b94f 100755 --- a/python/paddle/distributed/fleet/launch.py +++ b/python/paddle/distributed/fleet/launch.py @@ -796,7 +796,7 @@ def launch(): ) # which_distributed_mode must modify args.backend else: assert ( - args.run_mode == 'collective' or args.run_mode == None + args.run_mode == 'collective' or args.run_mode is None ), "When backend is not 'auto', run mode must be collective" check_backend(args.backend) distribute_mode = DistributeMode.COLLECTIVE diff --git a/python/paddle/distributed/fleet/launch_utils.py b/python/paddle/distributed/fleet/launch_utils.py index d4b6b86119f..e471535c274 100755 --- a/python/paddle/distributed/fleet/launch_utils.py +++ b/python/paddle/distributed/fleet/launch_utils.py @@ -120,7 +120,7 @@ class Cluster(object): for pod in self.pods: ep = "{}:{}".format(pod.addr, pod.port) assert ( - pod.port != None and pod.addr != None + pod.port is not None and pod.addr is not None ), "{} not a valid endpoint".format(ep) r.append(ep) return r @@ -979,7 +979,7 @@ def get_custom_endpoints(origin_endpoints, offset=0): origin_endpoint: ip:port user_define_endpoint: ip:(port+offset) """ - assert origin_endpoints != None + assert origin_endpoints is not None paddle_user_define_endpoints_list = [] for ip_port in origin_endpoints.split(","): ip = ip_port.split(":")[0] @@ -1625,7 +1625,7 @@ class ParameterServerLauncher(object): else: self.is_local = False pod_ip = os.getenv("POD_IP", None) - if pod_ip == None: + if pod_ip is None: _, self.current_node_ip = get_host_name_ip() else: self.current_node_ip = pod_ip diff --git a/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py index a1a33992d59..9b077f26a9f 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/graph_execution_optimizer.py @@ -269,7 +269,7 @@ class GraphExecutionOptimizer(MetaOptimizerBase): def minimize( self, loss, startup_program=None, parameter_list=None, no_grad_set=None ): - if startup_program == None: + if startup_program is None: startup_program = paddle.static.default_startup_program() compiled_program = self._try_to_compile( startup_program, loss.block.program, loss diff --git a/python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py index 
000ef98bcae..5dfa2cb7e68 100755 --- a/python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/ps_optimizer.py @@ -133,7 +133,7 @@ class ParameterServerOptimizer(MetaOptimizerBase): self.inner_opt.minimize( loss, startup_program, parameter_list, no_grad_set ) - if startup_program == None: + if startup_program is None: startup_program = paddle.static.default_startup_program() # print("program after inner optimizer minimize:", diff --git a/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py b/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py index e2c5e5da29e..b746a757f51 100644 --- a/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py +++ b/python/paddle/distributed/fleet/runtime/parameter_server_runtime.py @@ -82,7 +82,7 @@ class ParameterServerRuntime(RuntimeBase): def _load_sparse_params( self, executor, dirname, varnames, main_program=None ): - assert vars != None + assert vars is not None check_vars = [] load_prog = Program() load_block = load_prog.global_block() diff --git a/python/paddle/distributed/fleet/runtime/the_one_ps.py b/python/paddle/distributed/fleet/runtime/the_one_ps.py index 7de34aa6e1c..f5cdd7f8dd7 100644 --- a/python/paddle/distributed/fleet/runtime/the_one_ps.py +++ b/python/paddle/distributed/fleet/runtime/the_one_ps.py @@ -997,7 +997,7 @@ class TheOnePSRuntime(RuntimeBase): tensor_table_dict = self.compiled_strategy.get_tensor_table_dict() program_idx = 0 for table_name in tensor_table_dict: - if tensor_table_dict[table_name]["startup_program"] != None: + if tensor_table_dict[table_name]["startup_program"] is not None: tensor_table_dict[table_name][ "startup_program_id" ] = program_idx @@ -1005,7 +1005,7 @@ class TheOnePSRuntime(RuntimeBase): tensor_table_dict[table_name]["startup_program"].desc ) program_idx += 1 - if tensor_table_dict[table_name]["main_program"] != None: + if tensor_table_dict[table_name]["main_program"] is not None: tensor_table_dict[table_name][ "main_program_id" ] = program_idx @@ -1241,7 +1241,7 @@ class TheOnePSRuntime(RuntimeBase): self._communicator.stop() if self.role_maker._is_heter_parameter_server_mode: assert ( - self._heter_client != None + self._heter_client is not None ), "heter client should not be None in heterps mode" self._heter_client.stop() # executor = self._get_executor() diff --git a/python/paddle/distributed/fleet/utils/fs.py b/python/paddle/distributed/fleet/utils/fs.py index 046a5aee69d..8a67301e174 100644 --- a/python/paddle/distributed/fleet/utils/fs.py +++ b/python/paddle/distributed/fleet/utils/fs.py @@ -574,7 +574,7 @@ class HDFSClient(FS): def _test_match(self, lines): for l in lines: m = self._bd_err_re.match(l) - if m != None: + if m is not None: return m return None diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py index 6dd100a6f9e..a56c71fc40b 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py @@ -466,7 +466,7 @@ class HybridParallelInferenceHelper(object): variable named var_name. 
""" prev_ops = self._output_var_to_op[var_name] - if prev_ops == None: + if prev_ops is None: return None result_op = None for prev_op, prev_idx in reversed(prev_ops): diff --git a/python/paddle/distributed/ps/coordinator.py b/python/paddle/distributed/ps/coordinator.py index 3a6e0756df4..a357d9677ab 100755 --- a/python/paddle/distributed/ps/coordinator.py +++ b/python/paddle/distributed/ps/coordinator.py @@ -287,7 +287,7 @@ class FLClient(FLClientBase): fleet.init_worker() def callback_initialize_model_params(self): - if self.exe == None or self.main_program == None: + if self.exe is None or self.main_program is None: raise AssertionError("exe or main_program not set") self.exe.run(self.startup_program) diff --git a/python/paddle/distributed/ps/the_one_ps.py b/python/paddle/distributed/ps/the_one_ps.py index d341a95b24b..825801f17ae 100755 --- a/python/paddle/distributed/ps/the_one_ps.py +++ b/python/paddle/distributed/ps/the_one_ps.py @@ -1326,7 +1326,7 @@ class TheOnePSRuntime(RuntimeBase): ) # --> HeterClient::GetInstance def _init_coordinator(self, scopes=None): - if self._coordinator == None: + if self._coordinator is None: self._coordinator = Coordinator(self.string_hosts) print(">>> curr node ip: {}".format(self.coordinator_hosts[0])) @@ -1336,7 +1336,7 @@ class TheOnePSRuntime(RuntimeBase): ) def _make_fl_strategy(self): - if self._coordinator == None: + if self._coordinator is None: assert "Coordinator py object is null!" else: self._coordinator.make_fl_strategy() @@ -1401,7 +1401,7 @@ class TheOnePSRuntime(RuntimeBase): self._worker.stop_worker() if self.is_heter_ps_mode: assert ( - self._heter_client != None + self._heter_client is not None ), "heter client should not be None in heterps mode" self._heter_client.stop() diff --git a/python/paddle/distributed/ps/utils/public.py b/python/paddle/distributed/ps/utils/public.py index 53628ad7e50..578d664dc41 100755 --- a/python/paddle/distributed/ps/utils/public.py +++ b/python/paddle/distributed/ps/utils/public.py @@ -671,7 +671,7 @@ def find_heter_ops(program, default_device="cpu"): # Todo: need update this method # op._set_attr('op_device', current_heter_device) return True - elif op_device == None or op_device == default_device: + elif op_device is None or op_device == default_device: op._set_attr('op_device', default_device) return False return False diff --git a/python/paddle/distributed/rpc/rpc.py b/python/paddle/distributed/rpc/rpc.py index 6a93f27517c..e01446a5374 100644 --- a/python/paddle/distributed/rpc/rpc.py +++ b/python/paddle/distributed/rpc/rpc.py @@ -106,7 +106,7 @@ def init_rpc(name, rank=None, world_size=None, master_endpoint=None): logger.info("Trainer {}: worker endpoint: {}".format(rank, worker_endpoint)) master_endpoint = ( master_endpoint - if master_endpoint != None + if master_endpoint is not None else os.environ["PADDLE_MASTER_ENDPOINT"] ) master_addr, master_port = master_endpoint.split(":") diff --git a/python/paddle/distributed/utils/launch_utils.py b/python/paddle/distributed/utils/launch_utils.py index 88acc643ead..975f5d49356 100644 --- a/python/paddle/distributed/utils/launch_utils.py +++ b/python/paddle/distributed/utils/launch_utils.py @@ -180,7 +180,7 @@ class Cluster(object): for pod in self.pods: ep = "{}:{}".format(pod.addr, pod.port) assert ( - pod.port != None and pod.addr != None + pod.port is not None and pod.addr is not None ), "{} not a valid endpoint".format(ep) r.append(ep) diff --git a/python/paddle/fluid/backward.py b/python/paddle/fluid/backward.py index 7823b3f7bd4..e73e2fe1ab1 
100755 --- a/python/paddle/fluid/backward.py +++ b/python/paddle/fluid/backward.py @@ -1942,7 +1942,7 @@ def append_backward( # sub-block (control flow) is_recompute = False if ( - checkpoints != None + checkpoints is not None and isinstance(checkpoints, list) and len(checkpoints) > 0 ): diff --git a/python/paddle/fluid/communicator.py b/python/paddle/fluid/communicator.py index be3fb5ea22d..d947d7b1543 100755 --- a/python/paddle/fluid/communicator.py +++ b/python/paddle/fluid/communicator.py @@ -63,8 +63,8 @@ class Communicator(object): """ # set all recv op to not_run mode - if kwargs == None: - if envs == None: + if kwargs is None: + if envs is None: envs = {} else: if mode == DistributedMode.SYNC: @@ -97,7 +97,7 @@ class Communicator(object): def init_with_ctx( self, send_ctx, recv_ctx, proto_txt, unit64_hosts, scope=None ): - if scope == None: + if scope is None: scope = global_scope() self.communicator_ = core.DistCommunicator( self.mode, @@ -144,7 +144,7 @@ class Communicator(object): comm.start() comm.stop() """ - if self.communicator_ == None: + if self.communicator_ is None: print('you must call init_with_ctx first to init comm before start') return self.communicator_.start() @@ -166,7 +166,7 @@ class Communicator(object): comm.start() comm.stop() """ - if self.communicator_ == None: + if self.communicator_ is None: print('you must call init_with_ctx first to init comm before stop') return self.communicator_.stop() @@ -187,7 +187,7 @@ class Communicator(object): comm = fluid.communicator.Communicator(prog) comm.is_running() """ - if self.communicator_ == None: + if self.communicator_ is None: print('you must call init_with_ctx first to init comm before stop') return self.communicator_.is_running() @@ -202,7 +202,7 @@ class Communicator(object): self.communicator_.pull_dense(context) def push_sparse_param(self, var_name, table_id=-1, scope=None): - if scope == None: + if scope is None: scope = global_scope() if not self.is_running(): raise ValueError( @@ -226,14 +226,14 @@ class FLCommunicator(Communicator): ## only for coordinator self.init_with_ctx(send_ctx, dense_map, prototxt, ps_hosts) def start_coordinator(self, self_endpoint, trainer_endpoints): - if self.communicator_ != None: + if self.communicator_ is not None: self.communicator_.start_coordinator( self_endpoint, trainer_endpoints ) return def save_fl_strategy(self, mp): - if self.communicator_ != None: + if self.communicator_ is not None: self.communicator_.save_fl_strategy(mp) else: raise ValueError("self.communicator_ is null") @@ -241,7 +241,7 @@ class FLCommunicator(Communicator): ## only for coordinator def query_fl_clients_info(self): info_mp = {} - if self.communicator_ != None: + if self.communicator_ is not None: info_mp = self.communicator_.query_fl_clients_info() return info_mp diff --git a/python/paddle/fluid/contrib/layers/nn.py b/python/paddle/fluid/contrib/layers/nn.py index 08ab46dfac9..3695f8cad20 100644 --- a/python/paddle/fluid/contrib/layers/nn.py +++ b/python/paddle/fluid/contrib/layers/nn.py @@ -1242,7 +1242,7 @@ def sparse_embedding( ) entry_str = entry._to_attr() - if slot == None: + if slot is None: slot = 0 helper.append_op( diff --git a/python/paddle/fluid/contrib/layers/rnn_impl.py b/python/paddle/fluid/contrib/layers/rnn_impl.py index 668ce445f78..a84ddcc968a 100644 --- a/python/paddle/fluid/contrib/layers/rnn_impl.py +++ b/python/paddle/fluid/contrib/layers/rnn_impl.py @@ -375,7 +375,7 @@ def basic_gru( rnn.step_output(new_hidden) step_input = new_hidden - if dropout_prob != None and 
dropout_prob > 0.0: + if dropout_prob is not None and dropout_prob > 0.0: step_input = layers.dropout( step_input, dropout_prob=dropout_prob, @@ -677,7 +677,7 @@ def basic_lstm( rnn.step_output(new_cell) step_input = new_hidden - if dropout_prob != None and dropout_prob > 0.0: + if dropout_prob is not None and dropout_prob > 0.0: step_input = layers.dropout( step_input, dropout_prob=dropout_prob, diff --git a/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py b/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py index c959fc29bc9..3db16060e0e 100644 --- a/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py +++ b/python/paddle/fluid/contrib/slim/quantization/post_training_quantization.py @@ -344,7 +344,7 @@ class PostTrainingQuantization(object): # Save input params self._bias_correction = bias_correction self._executor = executor - self._scope = global_scope() if scope == None else scope + self._scope = global_scope() if scope is None else scope self._model_dir = model_dir self._model_filename = model_filename self._params_filename = params_filename diff --git a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py index 81aac8823d8..7a9b89866eb 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py +++ b/python/paddle/fluid/contrib/slim/quantization/quantization_pass.py @@ -1874,8 +1874,8 @@ class AddQuantDequantPass(object): '%s_grad' % (op) for op in self._quantizable_op_type ] - assert self._scope != None, "scope must not be None." - assert self._place != None, "place must not be None." + assert self._scope is not None, "scope must not be None." + assert self._place is not None, "place must not be None." def apply(self, graph): """ @@ -2737,8 +2737,8 @@ class AddQuantDequantPassV2(object): '%s_grad' % (op) for op in self._quantizable_op_type ] - assert self._scope != None, "scope must not be None." - assert self._place != None, "place must not be None." + assert self._scope is not None, "scope must not be None." + assert self._place is not None, "place must not be None." self.persistable_vars = [] def apply(self, graph): @@ -2878,8 +2878,8 @@ class ReplaceFakeQuantDequantPass(object): self._place = _get_paddle_place(place) self._scope = scope self._quant_bits = quant_bits - assert self._scope != None, "scope must not be None." - assert self._place != None, "place must not be None." + assert self._scope is not None, "scope must not be None." + assert self._place is not None, "place must not be None." def apply(self, graph): assert isinstance( @@ -3027,8 +3027,8 @@ class QuantWeightPass(object): self._bias_correction = bias_correction self._quant_bits = quant_bits self._save_int_weight = save_int_weight - assert self._scope != None, "scope must not be None." - assert self._place != None, "place must not be None." + assert self._scope is not None, "scope must not be None." + assert self._place is not None, "place must not be None." 
def apply(self, graph): assert isinstance( diff --git a/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py b/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py index 12f6f3c53d2..dbc6277a3bf 100644 --- a/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py +++ b/python/paddle/fluid/contrib/slim/quantization/quantize_transpiler_v2.py @@ -162,7 +162,7 @@ class QuantizeTranspilerV2(object): scope(fluid.Scope, optional): The scope of the program, use it to load and save variables. If scope=None, get scope by global_scope(). """ - scope = global_scope() if scope == None else scope + scope = global_scope() if scope is None else scope for block in test_program.blocks: for op in block.ops: diff --git a/python/paddle/fluid/contrib/slim/quantization/utils.py b/python/paddle/fluid/contrib/slim/quantization/utils.py index 11e39116389..9862772c64a 100644 --- a/python/paddle/fluid/contrib/slim/quantization/utils.py +++ b/python/paddle/fluid/contrib/slim/quantization/utils.py @@ -332,7 +332,7 @@ def set_variable_data(scope, place, var_name, np_value): np_value, np.ndarray ), 'The type of value should be numpy array.' var_node = scope.find_var(var_name) - if var_node != None: + if var_node is not None: tensor = var_node.get_tensor() tensor.set(np_value, place) diff --git a/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py b/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py index 96635700666..cc8136e3b7b 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py +++ b/python/paddle/fluid/contrib/slim/tests/test_user_defined_quantization.py @@ -219,7 +219,7 @@ class TestUserDefinedQuantization(unittest.TestCase): mapping_table = load_dict(mapping_table_path) test_graph.out_node_mapping_table = mapping_table - if act_quantize_func == None and weight_quantize_func == None: + if act_quantize_func is None and weight_quantize_func is None: freeze_pass.apply(test_graph) tempdir.cleanup() diff --git a/python/paddle/fluid/contrib/slim/tests/test_weight_quantization_mobilenetv1.py b/python/paddle/fluid/contrib/slim/tests/test_weight_quantization_mobilenetv1.py index 929eb34994b..8a8099df945 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_weight_quantization_mobilenetv1.py +++ b/python/paddle/fluid/contrib/slim/tests/test_weight_quantization_mobilenetv1.py @@ -40,7 +40,7 @@ def _set_variable_data(scope, place, var_name, np_value): np_value, np.ndarray ), 'The type of value should be numpy array.' 
var_node = scope.find_var(var_name) - if var_node != None: + if var_node is not None: tensor = var_node.get_tensor() tensor.set(np_value, place) diff --git a/python/paddle/fluid/device_worker.py b/python/paddle/fluid/device_worker.py index ee82c7ebbdc..9b23a942928 100644 --- a/python/paddle/fluid/device_worker.py +++ b/python/paddle/fluid/device_worker.py @@ -109,7 +109,7 @@ class Hogwild(DeviceWorker): dense_table_set = set() program_id = str(id(self._program)) print("device worker program id:", program_id) - if self._program == None: + if self._program is None: print("program of current device worker is not configured") exit(-1) opt_info = self._program._fleet_opt @@ -259,7 +259,7 @@ class DownpourLite(DeviceWorker): dense_table_set = set() program_id = str(id(self._program)) print("device worker program id:", program_id) - if self._program == None: + if self._program is None: print("program of current device worker is not configured") exit(-1) opt_info = self._program._fleet_opt @@ -392,7 +392,7 @@ class DownpourSGD(DeviceWorker): """ dense_table_set = set() program_id = str(id(self._program)) - if self._program == None: + if self._program is None: print("program of current device worker is not configured") exit(-1) opt_info = self._program._fleet_opt @@ -511,7 +511,7 @@ class DownpourSGDOPT(DeviceWorker): """ dense_table_set = set() program_id = str(id(self._program)) - if self._program == None: + if self._program is None: print("program of current device worker is not configured") exit(-1) opt_info = self._program._fleet_opt diff --git a/python/paddle/fluid/distributed/helper.py b/python/paddle/fluid/distributed/helper.py index 9511ce2db62..4a164373339 100644 --- a/python/paddle/fluid/distributed/helper.py +++ b/python/paddle/fluid/distributed/helper.py @@ -34,9 +34,9 @@ class FileSystem(object): passwd=None, hadoop_bin="", ): - assert user != None - assert passwd != None - assert hadoop_bin != None + assert user is not None + assert passwd is not None + assert hadoop_bin is not None import ps_pb2 as pslib self.fs_client = pslib.FsClientParameter() diff --git a/python/paddle/fluid/dygraph/dygraph_to_static/return_transformer.py b/python/paddle/fluid/dygraph/dygraph_to_static/return_transformer.py index 80bebbf501e..dd6d7feb558 100644 --- a/python/paddle/fluid/dygraph/dygraph_to_static/return_transformer.py +++ b/python/paddle/fluid/dygraph/dygraph_to_static/return_transformer.py @@ -85,7 +85,7 @@ class ReplaceReturnNoneTransformer(BaseTransformer): if isinstance(node.value, gast.Name) and node.value.id == 'None': node.value = None return node - if isinstance(node.value, gast.Constant) and node.value.value == None: + if isinstance(node.value, gast.Constant) and node.value.value is None: node.value = None return node return node diff --git a/python/paddle/fluid/dygraph/layers.py b/python/paddle/fluid/dygraph/layers.py index 889a910d8b9..661cfaaa001 100644 --- a/python/paddle/fluid/dygraph/layers.py +++ b/python/paddle/fluid/dygraph/layers.py @@ -1046,7 +1046,7 @@ class Layer(object): for prefix, layer in model.named_sublayers(): print(prefix, layer) """ - assert isinstance(sublayer, Layer) or sublayer == None + assert isinstance(sublayer, Layer) or sublayer is None self._sub_layers[name] = sublayer return sublayer diff --git a/python/paddle/fluid/executor.py b/python/paddle/fluid/executor.py index 55a0334c8e9..2dbf2d5ceda 100755 --- a/python/paddle/fluid/executor.py +++ b/python/paddle/fluid/executor.py @@ -622,7 +622,7 @@ def _as_lodtensor(data, place, dtype=None): class 
FetchHandler(object): def __init__(self, var_dict=None, period_secs=60): - assert var_dict != None + assert var_dict is not None self.var_dict = var_dict self.period_secs = period_secs @@ -2309,7 +2309,7 @@ class Executor(object): ) else: # cache trainer instance for heterps pipeline training - if fetch_list == None: + if fetch_list is None: fetch_list = [] cache_key = _get_strong_program_cache_key(program, None, fetch_list) trainer_instance = self._get_trainer_cache(cache_key) diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index 188ff9a8ea8..63ec07fe741 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -2880,7 +2880,8 @@ class Operator(object): ) if 'force_cpu' in op_attrs: if ( - type == 'less_than' and op_attrs['force_cpu'] != None + type == 'less_than' + and op_attrs['force_cpu'] is not None ) or op_attrs['force_cpu'] != False: warnings.warn( "The Attr(force_cpu) of Op(%s) will be deprecated in the future, " diff --git a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py b/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py index 134723cdbc0..5fa0ed085b1 100644 --- a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py +++ b/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py @@ -46,7 +46,7 @@ g_program_attr = {} # program_name->can_be_auto_checkpoint def _get_logger(log_level, name="auto_checkpoint"): global logger - if logger != None: + if logger is not None: return logger logger = logging.getLogger(name) @@ -683,12 +683,12 @@ def _get_valid_program(prog): def _auto_checkpoint(exe, prog): _get_checker() - assert exe._auto_checkpoint_name != None + assert exe._auto_checkpoint_name is not None if not _can_auto_checkpoint(prog): return program = _get_valid_program(prog) - assert program._auto_checkpoint_name != None + assert program._auto_checkpoint_name is not None exe_status = g_train_epoch_range._exe_status key = _get_running_key( diff --git a/python/paddle/fluid/incubate/data_generator/__init__.py b/python/paddle/fluid/incubate/data_generator/__init__.py index 10e4fba92dd..4729f44f2b1 100644 --- a/python/paddle/fluid/incubate/data_generator/__init__.py +++ b/python/paddle/fluid/incubate/data_generator/__init__.py @@ -80,7 +80,7 @@ class DataGenerator(object): batch_samples = [] line_iter = self.generate_sample(None) for user_parsed_line in line_iter(): - if user_parsed_line == None: + if user_parsed_line is None: continue batch_samples.append(user_parsed_line) if len(batch_samples) == self.batch_size_: @@ -117,7 +117,7 @@ class DataGenerator(object): for line in sys.stdin: line_iter = self.generate_sample(line) for user_parsed_line in line_iter(): - if user_parsed_line == None: + if user_parsed_line is None: continue batch_samples.append(user_parsed_line) if len(batch_samples) == self.batch_size_: diff --git a/python/paddle/fluid/incubate/fleet/base/role_maker.py b/python/paddle/fluid/incubate/fleet/base/role_maker.py index 3349dcf275b..341eea35d1c 100644 --- a/python/paddle/fluid/incubate/fleet/base/role_maker.py +++ b/python/paddle/fluid/incubate/fleet/base/role_maker.py @@ -1010,7 +1010,7 @@ class GeneralRoleMaker(RoleMakerBase): if "Gateway" in item and "Iface" in item: gateway_idx = item.index("Gateway") iface_idx = item.index("Iface") - elif gateway_idx != None and iface_idx != None: + elif gateway_idx is not None and iface_idx is not None: gateway = None if len(item) > gateway_idx: gateway = item[gateway_idx] diff --git 
a/python/paddle/fluid/incubate/fleet/collective/__init__.py b/python/paddle/fluid/incubate/fleet/collective/__init__.py index 229d6e44bd6..1b72d9db8aa 100644 --- a/python/paddle/fluid/incubate/fleet/collective/__init__.py +++ b/python/paddle/fluid/incubate/fleet/collective/__init__.py @@ -170,7 +170,7 @@ class Collective(Fleet): """ This function save persistables and current epoch num to path. """ - if main_program == None: + if main_program is None: main_program = self._transpiled_program m = PaddleModel(executor, main_program) @@ -203,7 +203,7 @@ class Collective(Fleet): This function load persistables and current epoch num from path. """ - if main_program == None: + if main_program is None: main_program = self._transpiled_program m = PaddleModel(executor, main_program) diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py b/python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py index fa818e3c413..66973f43559 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/ir/trainer_pass.py @@ -737,7 +737,7 @@ def find_heter_ops(program, default_device="cpu"): # Todo: need update this method # op._set_attr('op_device', current_heter_device) return True - elif op_device == None or op_device == default_device: + elif op_device is None or op_device == default_device: op._set_attr('op_device', default_device) return False return False diff --git a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py index ed9db255146..e48c199cc24 100644 --- a/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py +++ b/python/paddle/fluid/incubate/fleet/parameter_server/pslib/optimizer_factory.py @@ -499,7 +499,7 @@ class DistributedAdam(DistributedOptimizerImplBase): for num in range(len(losses)): loss = losses[num] parameters = None - if parameter_list != None: + if parameter_list is not None: parameters = parameter_list[num] prog_id = str(id(loss.block.program)) # param_grads of program diff --git a/python/paddle/fluid/incubate/fleet/utils/hdfs.py b/python/paddle/fluid/incubate/fleet/utils/hdfs.py index c35e266357e..e3f4c7c6acf 100644 --- a/python/paddle/fluid/incubate/fleet/utils/hdfs.py +++ b/python/paddle/fluid/incubate/fleet/utils/hdfs.py @@ -163,7 +163,7 @@ class HDFSClient(FS): def _test_match(self, lines): for l in lines: m = self._bd_err_re.match(l) - if m != None: + if m is not None: return m return None diff --git a/python/paddle/fluid/incubate/fleet/utils/utils.py b/python/paddle/fluid/incubate/fleet/utils/utils.py index c675fea39bc..ef022c96ecf 100644 --- a/python/paddle/fluid/incubate/fleet/utils/utils.py +++ b/python/paddle/fluid/incubate/fleet/utils/utils.py @@ -256,7 +256,7 @@ def try_load_model_vars( } for each_var in saved_params: var_temp = fluid.global_scope().find_var(each_var.name) - assert var_temp != None, "can't not find var: " + each_var.name + assert var_temp is not None, "can't not find var: " + each_var.name new_shape = (np.array(var_temp.get_tensor())).shape assert each_var.name in orig_para_shape, ( each_var.name + "MUST in var list" diff --git a/python/paddle/fluid/io.py b/python/paddle/fluid/io.py index c5e55a95186..175cbb6fe35 100644 --- a/python/paddle/fluid/io.py +++ b/python/paddle/fluid/io.py @@ -1013,7 +1013,7 @@ def load_vars( if not isinstance(each_var, Parameter): continue var_temp = 
paddle.fluid.global_scope().find_var(each_var.name) - assert var_temp != None, "can't not find var: " + each_var.name + assert var_temp is not None, "can't not find var: " + each_var.name new_shape = (np.array(var_temp.get_tensor())).shape assert each_var.name in orig_para_shape, ( each_var.name + "MUST in var list" @@ -2146,7 +2146,7 @@ def load(program, model_path, executor=None, var_list=None): return elif os.path.isfile(model_path): - if var_list == None: + if var_list is None: raise ValueError( "var_list is required when loading model file saved with [ save_params, save_persistables, save_vars ]" ) @@ -2479,7 +2479,7 @@ def set_program_state(program, state_dict): for para in parameter_list: var_temp = paddle.fluid.global_scope().find_var(para.name) assert ( - var_temp != None + var_temp is not None ), "Variable [ {} ] Not found, Please make sure run startup program".format( para.name ) diff --git a/python/paddle/fluid/layers/control_flow.py b/python/paddle/fluid/layers/control_flow.py index 5b79e3b86fa..6555fff7b85 100755 --- a/python/paddle/fluid/layers/control_flow.py +++ b/python/paddle/fluid/layers/control_flow.py @@ -1953,7 +1953,7 @@ def less_than(x, y, force_cpu=None, cond=None, name=None): ) if cond is not None: check_type(cond, "cond", Variable, "less_than") - if force_cpu != None: + if force_cpu is not None: check_type(force_cpu, "force_cpu", bool, "less_than") helper = LayerHelper("less_than", **locals()) diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py index b1a49e23cd7..6ef406202f8 100755 --- a/python/paddle/fluid/layers/nn.py +++ b/python/paddle/fluid/layers/nn.py @@ -3856,7 +3856,7 @@ def data_norm( bias_default = param_attr.get("bias", 0.0) # create scale and shift(bias) when enable_scale_and_shift is True - if name == None: + if name is None: name = "dn" if enable_scale_and_shift: scale_w = helper.create_parameter( @@ -5234,17 +5234,17 @@ def reduce_max(input, dim=None, keep_dim=False, name=None): dim = [dim] if in_dygraph_mode(): - return _C_ops.max(input, dim if dim != None else [], keep_dim) + return _C_ops.max(input, dim if dim is not None else [], keep_dim) helper.append_op( type='reduce_max', inputs={'X': input}, outputs={'Out': out}, attrs={ - 'dim': dim if dim != None and dim != [] else [0], + 'dim': dim if dim is not None and dim != [] else [0], 'keep_dim': keep_dim, 'reduce_all': True - if dim == None or dim == [] or len(dim) == len(input.shape) + if dim is None or dim == [] or len(dim) == len(input.shape) else False, }, ) @@ -5306,17 +5306,17 @@ def reduce_min(input, dim=None, keep_dim=False, name=None): dim = [dim] if in_dygraph_mode(): - return _C_ops.min(input, dim if dim != None else [], keep_dim) + return _C_ops.min(input, dim if dim is not None else [], keep_dim) helper.append_op( type='reduce_min', inputs={'X': input}, outputs={'Out': out}, attrs={ - 'dim': dim if dim != None and dim != [] else [0], + 'dim': dim if dim is not None and dim != [] else [0], 'keep_dim': keep_dim, 'reduce_all': True - if dim == None or dim == [] or len(dim) == len(input.shape) + if dim is None or dim == [] or len(dim) == len(input.shape) else False, }, ) @@ -5387,10 +5387,10 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None): if in_dygraph_mode(): return _C_ops.prod( input, - dim if dim != None and dim != [] else [0], + dim if dim is not None and dim != [] else [0], keep_dim, True - if dim == None or dim == [] or len(dim) == len(input.shape) + if dim is None or dim == [] or len(dim) == len(input.shape) else False, ) @@ -5404,10 
+5404,10 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None): inputs={'X': input}, outputs={'Out': out}, attrs={ - 'dim': dim if dim != None and dim != [] else [0], + 'dim': dim if dim is not None and dim != [] else [0], 'keep_dim': keep_dim, 'reduce_all': True - if dim == None or dim == [] or len(dim) == len(input.shape) + if dim is None or dim == [] or len(dim) == len(input.shape) else False, }, ) @@ -5462,7 +5462,7 @@ def reduce_all(input, dim=None, keep_dim=False, name=None): dim = [dim] if in_dygraph_mode(): - return _C_ops.all(input, dim if dim != None else [], keep_dim) + return _C_ops.all(input, dim if dim is not None else [], keep_dim) check_variable_and_dtype(input, 'input', ('bool'), 'reduce_all') helper = LayerHelper('reduce_all', **locals()) @@ -5472,10 +5472,10 @@ def reduce_all(input, dim=None, keep_dim=False, name=None): inputs={'X': input}, outputs={'Out': out}, attrs={ - 'dim': dim if dim != None and dim != [] else [0], + 'dim': dim if dim is not None and dim != [] else [0], 'keep_dim': keep_dim, 'reduce_all': True - if dim == None or dim == [] or len(dim) == len(input.shape) + if dim is None or dim == [] or len(dim) == len(input.shape) else False, }, ) @@ -5535,10 +5535,10 @@ def reduce_any(input, dim=None, keep_dim=False, name=None): inputs={'X': input}, outputs={'Out': out}, attrs={ - 'dim': dim if dim != None and dim != [] else [0], + 'dim': dim if dim is not None and dim != [] else [0], 'keep_dim': keep_dim, 'reduce_all': True - if dim == None or dim == [] or len(dim) == len(input.shape) + if dim is None or dim == [] or len(dim) == len(input.shape) else False, }, ) @@ -11386,7 +11386,7 @@ def unstack(x, axis=0, num=None): """ if _non_static_mode(): - if num == None: + if num is None: num = x.shape[axis] if num == 0: return [] diff --git a/python/paddle/fluid/metrics.py b/python/paddle/fluid/metrics.py index 8f15508e699..431e7a1481a 100644 --- a/python/paddle/fluid/metrics.py +++ b/python/paddle/fluid/metrics.py @@ -99,7 +99,7 @@ class MetricBase(object): The MetricBase or its succeed classes """ - self._name = str(name) if name != None else self.__class__.__name__ + self._name = str(name) if name is not None else self.__class__.__name__ def __str__(self): return self._name diff --git a/python/paddle/fluid/net_drawer.py b/python/paddle/fluid/net_drawer.py index cf5cbf60ea3..585ff39a82e 100644 --- a/python/paddle/fluid/net_drawer.py +++ b/python/paddle/fluid/net_drawer.py @@ -114,7 +114,7 @@ def draw_graph(startup_program, main_program, **kwargs): graph_id = unique_id() filename = kwargs.get("filename") - if filename == None: + if filename is None: filename = str(graph_id) + ".gv" g = Graph( name=str(graph_id), @@ -129,6 +129,6 @@ def draw_graph(startup_program, main_program, **kwargs): parse_graph(startup_program, g, var_dict) parse_graph(main_program, g, var_dict) - if filename != None: + if filename is not None: g.save() return g diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py index f06c380838f..b347aa48da2 100755 --- a/python/paddle/fluid/optimizer.py +++ b/python/paddle/fluid/optimizer.py @@ -707,7 +707,7 @@ class Optimizer(object): name, param.name ) ) - if shape == None: + if shape is None: shape = param.shape assert isinstance(self.helper, LayerHelper) @@ -770,7 +770,7 @@ class Optimizer(object): if framework._non_static_mode(): return self._global_accumulators[name] raise Exception("Global accumulator {} already exists".format(name)) - if shape == None: + if shape is None: shape = [1] # most case, global accumulator 
is of shape [1] assert isinstance(self.helper, LayerHelper) @@ -1268,7 +1268,7 @@ class Optimizer(object): # NOTE(zhiqiu): currently, only support ClipGradByGlobalNorm and without regularization. if self._flatten_param_grads and self.regularization is None: - if self._grad_clip == None or isinstance( + if self._grad_clip is None or isinstance( self._grad_clip, ClipGradByGlobalNorm ): params_grads = self.flatten_param_grads(params_grads) @@ -3344,7 +3344,7 @@ class DpsgdOptimizer(Optimizer): assert isinstance(block, framework.Block) # create the dpsgd optimize op - if self._seed == None: + if self._seed is None: self._seed = 0 if framework._non_static_mode(): @@ -4454,10 +4454,10 @@ class ModelAverage(Optimizer): tmp = layers.sum(x=[num_accumulates, old_num_accumulates]) sum = layers.sum(x=[sum_1, sum_2, sum_3]) tmp = layers.cast( - x=tmp, dtype='float32' if self._dtype == None else self._dtype + x=tmp, dtype='float32' if self._dtype is None else self._dtype ) sum = layers.cast( - x=sum, dtype='float32' if self._dtype == None else self._dtype + x=sum, dtype='float32' if self._dtype is None else self._dtype ) ops._elementwise_div(x=sum, y=tmp, out=param) @@ -5254,7 +5254,7 @@ class PipelineOptimizer(object): var_name = var_name.replace('.cast_fp16', '') post_ops = self.input_var_to_op[var_name] - if post_ops == None: + if post_ops is None: return None result_op = None for post_op, post_idx in reversed(post_ops): @@ -5269,7 +5269,7 @@ class PipelineOptimizer(object): variable named var_name. """ prev_ops = self.output_var_to_op[var_name] - if prev_ops == None: + if prev_ops is None: return None result_op = None for prev_op, prev_idx in reversed(prev_ops): @@ -7270,7 +7270,7 @@ class RecomputeOptimizer(Optimizer): if output_var in self.un_offload_checkpoint_names: # insert sync op if last checkpoint has not been sync - if last_offload_checkpoint != None: + if last_offload_checkpoint is not None: if ( self.checkpoint_usage_count_and_idx[ last_offload_checkpoint @@ -7400,7 +7400,7 @@ class RecomputeOptimizer(Optimizer): """ self._main_program = loss.block.program self.block = loss.block - if startup_program == None: + if startup_program is None: startup_program = paddle.static.default_startup_program() with program_guard(self._main_program, startup_program): diff --git a/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py b/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py index 33f98c219d8..195857b8a76 100644 --- a/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py +++ b/python/paddle/fluid/tests/custom_op/test_custom_raw_op_kernel_op.py @@ -66,7 +66,7 @@ class TestCustomRawReluOp(unittest.TestCase): def custom_raw_relu(self, x): module = importlib.import_module(MODULE_NAME) custom_raw_relu_op = getattr(module, "custom_raw_relu") - self.assertTrue(custom_raw_relu_op is not None) + self.assertIsNotNone(custom_raw_relu_op) return custom_raw_relu_op(x) def test_static(self): diff --git a/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_dynamic.py b/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_dynamic.py index 389645139c5..3e352c816bb 100644 --- a/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_dynamic.py +++ b/python/paddle/fluid/tests/unittests/asp/test_asp_optimize_dynamic.py @@ -139,9 +139,9 @@ class TestASPDynamicOptimize(unittest.TestCase): name, None ) if ASPHelper._is_supported_layer(program, name): - self.assertTrue(mask_var is not None) + self.assertIsNotNone(mask_var) else: - self.assertTrue(mask_var is 
None) + self.assertIsNone(mask_var) def test_asp_training(self): self.optimizer = paddle.incubate.asp.decorate(self.optimizer) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_strategy.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_strategy.py index 54b11c4934c..cbe899a7e6e 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_strategy.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_strategy.py @@ -23,7 +23,7 @@ class TestStrategy(unittest.TestCase): recompute = strategy.recompute self.assertEqual(recompute.enable, False) - self.assertEqual(recompute.checkpoints, None) + self.assertIsNone(recompute.checkpoints) amp = strategy.amp self.assertEqual(amp.enable, False) @@ -59,12 +59,12 @@ class TestStrategy(unittest.TestCase): self.assertEqual(qat.weight_bits, 8) self.assertEqual(qat.activation_bits, 8) self.assertEqual(qat.not_quant_pattern, ['skip_quant']) - self.assertEqual(qat.algo, None) + self.assertIsNone(qat.algo) tuning = strategy.tuning self.assertEqual(tuning.enable, False) self.assertEqual(tuning.batch_size, 1) - self.assertEqual(tuning.dataset, None) + self.assertIsNone(tuning.dataset) self.assertEqual(tuning.profile_start_step, 1) self.assertEqual(tuning.profile_end_step, 1) self.assertEqual(tuning.run_after_tuning, True) diff --git a/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py b/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py index 4bb9272748b..4a6df10ec54 100644 --- a/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py +++ b/python/paddle/fluid/tests/unittests/auto_parallel/test_while_op_partition.py @@ -395,7 +395,7 @@ class TestMLP(unittest.TestCase): # test fill_constant_batch_size_like - self.assertTrue(fill_op is not None) + self.assertIsNotNone(fill_op) ref_shape = [-1, 8, 0, 48] shape = fill_op.attr("shape") self.assertTrue(ref_shape == shape) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint.py index cb61e2c9a8a..4d4ed5e488f 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint.py @@ -71,15 +71,15 @@ class AutoCheckPointACLBase(AutoCheckpointBase): exe, main_prog, startup_prog ) for i in range(3): - self.assertEqual(acp._get_train_epoch_range(), None) - self.assertEqual(acp.g_acp_type, None) + self.assertIsNone(acp._get_train_epoch_range()) + self.assertIsNone(acp.g_acp_type) for data in data_loader(): - self.assertEqual(acp.g_acp_type, None) - self.assertEqual(acp._get_train_epoch_range(), None) + self.assertIsNone(acp.g_acp_type) + self.assertIsNone(acp._get_train_epoch_range()) fetch = exe.run(compiled, feed=data, fetch_list=[loss]) - self.assertEqual(acp.g_acp_type, None) - self.assertEqual(acp._get_train_epoch_range(), None) + self.assertIsNone(acp.g_acp_type) + self.assertIsNone(acp._get_train_epoch_range()) m1 = PaddleModel(exe, compiled) m1.serialize(save_dir) @@ -136,7 +136,7 @@ class AutoCheckPointACLBase(AutoCheckpointBase): break o = acp._get_train_epoch_range() - assert o == None, "now train epoch must not exits now" + assert o is None, "now train epoch must not exits now" if break_epoch_no is None: self.assertEqual(i, 2) else: @@ -169,7 +169,7 @@ class AutoCheckPointACLBase(AutoCheckpointBase): fetch = exe.run(compiled, feed=data, fetch_list=[loss]) o = acp._get_train_epoch_range() 
- self.assertTrue(o == None, "now train epoch must not exits now") + self.assertTrue(o is None, "now train epoch must not exits now") self.assertEqual(i, 2) if break_epoch_no is not None: diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_dist_basic.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_dist_basic.py index 702c3eb24a3..7c0d444acc8 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_dist_basic.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_dist_basic.py @@ -98,7 +98,7 @@ class AutoCheckpointTestDist(AutoCheckPointACLBase): self.assertEqual(len(o._exe_status), 1) o = acp._get_train_epoch_range() - assert o == None, "now train epoch must not exits now" + assert o is None, "now train epoch must not exits now" self.assertEqual(i, 2) fs.delete(save_dir) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_multiple.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_multiple.py index 6bb59c5d2aa..82bee87b55f 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_multiple.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_auto_checkpoint_multiple.py @@ -94,7 +94,7 @@ class AutoCheckpointTestMul(AutoCheckPointACLBase): epochs.append(i) o = acp._get_train_epoch_range() - self.assertTrue(o == None, "now train epoch must not exits now") + self.assertTrue(o is None, "now train epoch must not exits now") self.assertEqual(i, 2) self.assertEqual(epochs, [0, 1, 2]) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_rolemaker_new.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_rolemaker_new.py index 18c2487b66a..4152dbf3f81 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_rolemaker_new.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_fleet_rolemaker_new.py @@ -45,8 +45,8 @@ class TestRoleMakerBase(unittest.TestCase): self.assertTrue(len(pserver_endpoints) == 0) print(role.to_string()) - self.assertTrue(role._all_gather(1, "worker") is None) - self.assertTrue(role._all_reduce(1, "sum", "worker") is None) + self.assertIsNone(role._all_gather(1, "worker")) + self.assertIsNone(role._all_reduce(1, "sum", "worker")) role._barrier("worker") diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py b/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py index b141b1ed65b..0b1f7b34b31 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py @@ -68,7 +68,7 @@ java.io.IOException: Input/output error """ # fmt: off, avoid remove tabs in string print("split lines:", s.splitlines()) - self.assertTrue(fs._test_match(s.splitlines()) != None) + self.assertIsNotNone(fs._test_match(s.splitlines())) def test_config(self): config = {"fs.default.name": "hdfs://xxx", "hadoop.job.ugi": "ugi"} diff --git a/python/paddle/fluid/tests/unittests/collective/init_process_group.py b/python/paddle/fluid/tests/unittests/collective/init_process_group.py index f45e4004831..2cdd029b90c 100644 --- a/python/paddle/fluid/tests/unittests/collective/init_process_group.py +++ b/python/paddle/fluid/tests/unittests/collective/init_process_group.py @@ -30,11 +30,11 @@ class TestProcessGroupFp32(unittest.TestCase): paddle.distributed.init_parallel_env() 
paddle.distributed.new_group() group = paddle.distributed.new_group([-1, -2]) - assert group.process_group == None + assert group.process_group is None group = paddle.distributed.collective.Group(-1, 2, 0, [-1, -2]) ret = paddle.distributed.barrier(group) - assert ret == None + assert ret is None paddle.enable_static() in_tensor = paddle.empty((1, 2)) in_tensor2 = paddle.empty((1, 2)) diff --git a/python/paddle/fluid/tests/unittests/dist_text_classification.py b/python/paddle/fluid/tests/unittests/dist_text_classification.py index 417ff66e0cb..eece2c32f8c 100644 --- a/python/paddle/fluid/tests/unittests/dist_text_classification.py +++ b/python/paddle/fluid/tests/unittests/dist_text_classification.py @@ -167,7 +167,7 @@ def tokenize(pattern): # tarfile.extractfile, which does random access and might # destroy hard disks. tf = tarf.next() - while tf != None: + while tf is not None: if bool(pattern.match(tf.name)): # newline and punctuations removal and ad-hoc tokenization. yield tarf.extractfile(tf).read().rstrip(b'\n\r').translate( diff --git a/python/paddle/fluid/tests/unittests/distributed_passes/dist_pass_test_base.py b/python/paddle/fluid/tests/unittests/distributed_passes/dist_pass_test_base.py index 5f62faeb422..2b19e974169 100644 --- a/python/paddle/fluid/tests/unittests/distributed_passes/dist_pass_test_base.py +++ b/python/paddle/fluid/tests/unittests/distributed_passes/dist_pass_test_base.py @@ -101,7 +101,7 @@ class DistPassTestBase(unittest.TestCase): zip(no_pass_ret, pass_ret) ): if out_var_no_pass is None: - self.assertTrue(out_var_pass is None) + self.assertIsNone(out_var_pass) else: np.testing.assert_allclose( out_var_no_pass, diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py index 266bcf4e7b7..3e5aae4d311 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/seq2seq_dygraph_model.py @@ -246,7 +246,7 @@ class BaseModel(fluid.dygraph.Layer): enc_new_hidden, enc_new_cell = self.enc_units[i]( enc_step_input, enc_hidden[i], enc_cell[i] ) - if self.dropout != None and self.dropout > 0.0: + if self.dropout is not None and self.dropout > 0.0: enc_step_input = fluid.layers.dropout( enc_new_hidden, dropout_prob=self.dropout, @@ -278,7 +278,7 @@ class BaseModel(fluid.dygraph.Layer): ) new_dec_hidden.append(new_hidden) new_dec_cell.append(new_cell) - if self.dropout != None and self.dropout > 0.0: + if self.dropout is not None and self.dropout > 0.0: step_input = fluid.layers.dropout( new_hidden, dropout_prob=self.dropout, @@ -346,7 +346,7 @@ class BaseModel(fluid.dygraph.Layer): enc_new_hidden, enc_new_cell = self.enc_units[i]( enc_step_input, enc_hidden[i], enc_cell[i] ) - if self.dropout != None and self.dropout > 0.0: + if self.dropout is not None and self.dropout > 0.0: enc_step_input = fluid.layers.dropout( enc_new_hidden, dropout_prob=self.dropout, @@ -418,7 +418,7 @@ class BaseModel(fluid.dygraph.Layer): ) new_dec_hidden.append(new_hidden) new_dec_cell.append(new_cell) - if self.dropout != None and self.dropout > 0.0: + if self.dropout is not None and self.dropout > 0.0: step_input = fluid.layers.dropout( new_hidden, dropout_prob=self.dropout, @@ -760,7 +760,7 @@ class AttentionModel(fluid.dygraph.Layer): enc_new_hidden, enc_new_cell = self.enc_units[i]( enc_step_input, enc_hidden[i], enc_cell[i] ) - if self.dropout != None and self.dropout > 0.0: + if 
self.dropout is not None and self.dropout > 0.0: enc_step_input = fluid.layers.dropout( enc_new_hidden, dropout_prob=self.dropout, @@ -803,7 +803,7 @@ class AttentionModel(fluid.dygraph.Layer): ) new_dec_hidden.append(new_hidden) new_dec_cell.append(new_cell) - if self.dropout != None and self.dropout > 0.0: + if self.dropout is not None and self.dropout > 0.0: step_input = fluid.layers.dropout( new_hidden, dropout_prob=self.dropout, diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_decorator_transform.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_decorator_transform.py index 2b73a1075e5..13fd569c920 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_decorator_transform.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_decorator_transform.py @@ -73,7 +73,7 @@ def deco4(func=None, x=0): return inner_deco - if func == None: + if func is None: return decorated return decorated(func) diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_function_spec.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_function_spec.py index 37bc3f5dc12..2f0672f7185 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_function_spec.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_function_spec.py @@ -29,7 +29,7 @@ class TestFunctionSpec(unittest.TestCase): args_name = foo_spec.args_name self.assertListEqual(args_name, ['a', 'b', 'c', 'd']) self.assertTrue(foo_spec.dygraph_function == foo_func) - self.assertTrue(foo_spec.input_spec is None) + self.assertIsNone(foo_spec.input_spec) def test_verify_input_spec(self): a_spec = InputSpec([None, 10], name='a') diff --git a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_place.py b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_place.py index 58e8e7b6728..bd414685604 100644 --- a/python/paddle/fluid/tests/unittests/dygraph_to_static/test_place.py +++ b/python/paddle/fluid/tests/unittests/dygraph_to_static/test_place.py @@ -21,7 +21,7 @@ class TestPlace(unittest.TestCase): paddle.enable_static() x = paddle.to_tensor([1, 2, 3, 4]) - self.assertTrue(x.place() == None) + self.assertIsNone(x.place()) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py b/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py index 88c2024cff1..5a7ca3bc204 100644 --- a/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py +++ b/python/paddle/fluid/tests/unittests/ir_memory_optimize_net_base.py @@ -89,7 +89,7 @@ class BuildIrMemOptBase(unittest.TestCase): first_loss, last_loss = None, None step_id = 0 custom_iter = getattr(self, "iter", None) - if not custom_iter == None: + if custom_iter is not None: iter = custom_iter for data in reader(): ret = exe.run(train_cp, feed=data, fetch_list=fetch_list) diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py index 9c12d5977d4..ef1051c377a 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool2d.py @@ -43,17 +43,17 @@ def adaptive_pool2d_forward( else [x.shape[3], x.shape[1], x.shape[2]] ) - if isinstance(output_size, int) or output_size == None: + if isinstance(output_size, int) or output_size is None: H_out = output_size W_out = output_size output_size = [H_out, W_out] else: H_out, W_out = output_size - if output_size[0] == None: + 
if output_size[0] is None: output_size[0] = H H_out = H - if output_size[1] == None: + if output_size[1] is None: output_size[1] = W W_out = W diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py index e7a8685a8f1..065a27c90e8 100755 --- a/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_avg_pool3d.py @@ -39,7 +39,7 @@ def adaptive_pool3d_forward( else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]] ) - if isinstance(output_size, int) or output_size == None: + if isinstance(output_size, int) or output_size is None: H_out = output_size W_out = output_size D_out = output_size @@ -47,13 +47,13 @@ def adaptive_pool3d_forward( else: D_out, H_out, W_out = output_size - if output_size[0] == None: + if output_size[0] is None: output_size[0] = D D_out = D - if output_size[1] == None: + if output_size[1] is None: output_size[1] = H H_out = H - if output_size[2] == None: + if output_size[2] is None: output_size[2] = W W_out = W diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py index 9884cb6c90d..decbfbfa012 100644 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool2d.py @@ -41,17 +41,17 @@ def adaptive_pool2d_forward( else [x.shape[3], x.shape[1], x.shape[2]] ) - if isinstance(output_size, int) or output_size == None: + if isinstance(output_size, int) or output_size is None: H_out = output_size W_out = output_size output_size = [H_out, W_out] else: H_out, W_out = output_size - if output_size[0] == None: + if output_size[0] is None: output_size[0] = H H_out = H - if output_size[1] == None: + if output_size[1] is None: output_size[1] = W W_out = W diff --git a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py index e90c4061980..21400576c47 100755 --- a/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py +++ b/python/paddle/fluid/tests/unittests/test_adaptive_max_pool3d.py @@ -41,7 +41,7 @@ def adaptive_pool3d_forward( else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]] ) - if isinstance(output_size, int) or output_size == None: + if isinstance(output_size, int) or output_size is None: H_out = output_size W_out = output_size D_out = output_size @@ -49,13 +49,13 @@ def adaptive_pool3d_forward( else: D_out, H_out, W_out = output_size - if output_size[0] == None: + if output_size[0] is None: output_size[0] = D D_out = D - if output_size[1] == None: + if output_size[1] is None: output_size[1] = H H_out = H - if output_size[2] == None: + if output_size[2] is None: output_size[2] = W W_out = W diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py index e18a585d33f..62ab2124a5e 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_mapper.py @@ -602,7 +602,7 @@ class TestAutoParallelMapper(unittest.TestCase): outputs={'Out': output}, ) self.assertEqual(get_comm_volume(broadcast_op, 0, 1), 400) - self.assertEqual(get_comm_volume(broadcast_op, 1, 0), None) + self.assertIsNone(get_comm_volume(broadcast_op, 1, 0)) allgather_op = train_program.global_block().append_op( type="c_allgather", inputs={'X': input}, @@ -610,14 
+610,14 @@ class TestAutoParallelMapper(unittest.TestCase): outputs={'Out': output}, ) self.assertEqual(get_comm_volume(allgather_op, 0, 1), 400) - self.assertEqual(get_comm_volume(allgather_op, 0, 0), None) + self.assertIsNone(get_comm_volume(allgather_op, 0, 0)) reduce_op = train_program.global_block().append_op( type="c_reduce_sum", inputs={'X': input}, attrs={'ring_id': ring_id, 'root_id': root_id}, outputs={'Out': output}, ) - self.assertEqual(get_comm_volume(reduce_op, 0, 1), None) + self.assertIsNone(get_comm_volume(reduce_op, 0, 1)) self.assertEqual(get_comm_volume(reduce_op, 1, 0), 400) cast_op = train_program.global_block().append_op( type="cast", diff --git a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py index 852c6ab74b1..f235d136cd6 100644 --- a/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py +++ b/python/paddle/fluid/tests/unittests/test_auto_parallel_partitioner_gpt.py @@ -53,26 +53,26 @@ def is_valid_completed_program(dist_context, program): vars_ = program.list_vars() for op in ops: op_dist_attrs = dist_context.get_op_dist_attr_for_program(op) - if op_dist_attrs == None: + if op_dist_attrs is None: return False - if op_dist_attrs.process_mesh == None: + if op_dist_attrs.process_mesh is None: return False for tensor_dist_attr in op_dist_attrs.inputs_dist_attrs.values(): - if None == tensor_dist_attr.dims_mapping: + if tensor_dist_attr.dims_mapping is None: return False for tensor_dist_attr in op_dist_attrs.outputs_dist_attrs.values(): - if None == tensor_dist_attr.dims_mapping: + if tensor_dist_attr.dims_mapping is None: return False for var in vars_: var_dist_attrs = dist_context.get_tensor_dist_attr_for_program(var) - if var_dist_attrs == None: + if var_dist_attrs is None: return False - elif var_dist_attrs.process_mesh == None: + elif var_dist_attrs.process_mesh is None: return False - elif var_dist_attrs.dims_mapping == None: + elif var_dist_attrs.dims_mapping is None: return False return True diff --git a/python/paddle/fluid/tests/unittests/test_base_layer.py b/python/paddle/fluid/tests/unittests/test_base_layer.py index 66cf1b489f6..fda58617373 100644 --- a/python/paddle/fluid/tests/unittests/test_base_layer.py +++ b/python/paddle/fluid/tests/unittests/test_base_layer.py @@ -606,7 +606,7 @@ class TestLayerTo(unittest.TestCase): buffer = None model.register_buffer("buf_name", buffer, persistable=True) model.to(dtype='float64') - self.assertEqual(model._buffers['buf_name'], None) + self.assertIsNone(model._buffers['buf_name']) def test_main(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py index 107792a892d..90c2e84d0ee 100644 --- a/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py +++ b/python/paddle/fluid/tests/unittests/test_decoupled_py_reader_data_check.py @@ -106,9 +106,9 @@ class TestClass(unittest.TestCase): break if break_beforehand: - self.assertTrue(next(gen, None) is not None) + self.assertIsNotNone(next(gen, None)) else: - self.assertTrue(next(gen, None) is None) + self.assertIsNone(next(gen, None)) class TestClass2(TestClass): diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_spectral_norm.py b/python/paddle/fluid/tests/unittests/test_dygraph_spectral_norm.py index b865a1b1799..b8968d3fdd0 100644 --- 
a/python/paddle/fluid/tests/unittests/test_dygraph_spectral_norm.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_spectral_norm.py @@ -67,7 +67,7 @@ class TestDygraphSpectralNorm(unittest.TestCase): def test_check_output(self): linear = paddle.nn.Conv2D(2, 1, 3) before_weight = linear.weight.numpy().copy() - if self.dim == None: + if self.dim is None: if isinstance( linear, ( diff --git a/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py b/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py index ebae140f88c..1a3e4a2e7b5 100644 --- a/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py +++ b/python/paddle/fluid/tests/unittests/test_dygraph_weight_norm.py @@ -122,7 +122,7 @@ class TestDygraphWeightNorm(unittest.TestCase): fluid.enable_imperative() linear = paddle.nn.Conv2D(2, 3, 3) before_weight = linear.weight.numpy() - if self.dim == None: + if self.dim is None: self.dim = -1 if self.dim != -1: diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py index eebc321b1a8..684322c5292 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_delete_vars.py @@ -102,7 +102,7 @@ class TestExecutor(unittest.TestCase): outline_p_vars = [] for name in persitables: var = scope.find_var(name) - self.assertTrue(var is not None) + self.assertIsNotNone(var) t = var.get_tensor() if not t._is_initialized(): outline_p_vars.append(name) @@ -110,7 +110,7 @@ class TestExecutor(unittest.TestCase): outline_np_vars = [] for name in non_persistables: var = scope.find_var(name) - self.assertTrue(var is not None) + self.assertIsNotNone(var) t = var.get_tensor() if t._is_initialized(): outline_np_vars.append(name) diff --git a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py index e48a8056d03..622e36abd80 100644 --- a/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py +++ b/python/paddle/fluid/tests/unittests/test_eager_deletion_padding_rnn.py @@ -204,7 +204,7 @@ def lm_model( input = m - if dropout != None and dropout > 0.0: + if dropout is not None and dropout > 0.0: input = layers.dropout( input, dropout_prob=dropout, @@ -308,7 +308,7 @@ def lm_model( cell_array[k] = c input = m - if dropout != None and dropout > 0.0: + if dropout is not None and dropout > 0.0: input = layers.dropout( input, dropout_prob=dropout, @@ -390,7 +390,7 @@ def lm_model( x_emb = layers.reshape( x_emb, shape=[-1, num_steps, hidden_size], inplace=True ) - if dropout != None and dropout > 0.0: + if dropout is not None and dropout > 0.0: x_emb = layers.dropout( x_emb, dropout_prob=dropout, diff --git a/python/paddle/fluid/tests/unittests/test_fleet_base.py b/python/paddle/fluid/tests/unittests/test_fleet_base.py index 49dd5d6928b..686c7fa1ef7 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_base.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_base.py @@ -111,7 +111,7 @@ class TestFleetBase(unittest.TestCase): def test_util(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - self.assertNotEqual(fleet.util, None) + self.assertIsNotNone(fleet.util) def test_barrier_worker(self): role = role_maker.PaddleCloudRoleMaker(is_collective=True) diff --git a/python/paddle/fluid/tests/unittests/test_fleet_util.py 
b/python/paddle/fluid/tests/unittests/test_fleet_util.py index ff12ef64b5f..3dbe084960f 100644 --- a/python/paddle/fluid/tests/unittests/test_fleet_util.py +++ b/python/paddle/fluid/tests/unittests/test_fleet_util.py @@ -50,7 +50,7 @@ class TestFleetUtil(unittest.TestCase): context["role_maker"] = role_maker context["valid_strategy"] = strategy util = factory._create_util(context) - self.assertEqual(util.role_maker, None) + self.assertIsNone(util.role_maker) def test_get_util(self): import paddle.distributed.fleet as fleet @@ -58,7 +58,7 @@ class TestFleetUtil(unittest.TestCase): role = role_maker.PaddleCloudRoleMaker(is_collective=True) fleet.init(role) - self.assertNotEqual(fleet.util, None) + self.assertIsNotNone(fleet.util) def test_set_user_defined_util(self): import paddle.distributed.fleet as fleet diff --git a/python/paddle/fluid/tests/unittests/test_fused_matmul_bias.py b/python/paddle/fluid/tests/unittests/test_fused_matmul_bias.py index a4d152c75b9..53ef3610d6f 100644 --- a/python/paddle/fluid/tests/unittests/test_fused_matmul_bias.py +++ b/python/paddle/fluid/tests/unittests/test_fused_matmul_bias.py @@ -112,7 +112,7 @@ class TestFusedMatmulBias(unittest.TestCase): if need_bias: np.testing.assert_array_equal(bias.grad.numpy(), bias_grad_np) else: - self.assertTrue(bias_grad_np is None) + self.assertIsNone(bias_grad_np) def rand_test(self, m, n, k, dtype): seed = int(np.random.randint(low=0, high=1000, size=[1])) diff --git a/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py b/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py index 3394a08de8b..9e32b43ad42 100644 --- a/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py +++ b/python/paddle/fluid/tests/unittests/test_global_var_getter_setter.py @@ -36,7 +36,7 @@ class TestGlobalVarGetterSetter(unittest.TestCase): self.assertTrue(var.name in g.keys()) value1 = g[var.name] value2 = g.get(var.name, None) - self.assertTrue(value1 is not None) + self.assertIsNotNone(value1) self.assertEqual(value1, value2) self.assertEqual(type(value1), var.type) self.assertEqual(type(value2), var.type) @@ -53,7 +53,7 @@ class TestGlobalVarGetterSetter(unittest.TestCase): name = "__any_non_exist_name__" self.assertFalse(name in g) self.assertFalse(name in g.keys()) - self.assertTrue(g.get(name, None) is None) + self.assertIsNone(g.get(name, None)) self.assertEquals(g.get(name, -1), -1) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py b/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py index 3cda7d5d216..ba533bf720a 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_auto_prune.py @@ -162,8 +162,8 @@ class TestImperativeAutoPrune(unittest.TestCase): v2 = fluid.dygraph.to_variable(value2) loss = case1(v1, v2) loss.backward() - self.assertTrue(case1.linear2.weight._grad_ivar() is not None) - self.assertTrue(case1.linear1.weight._grad_ivar() is not None) + self.assertIsNotNone(case1.linear2.weight._grad_ivar()) + self.assertIsNotNone(case1.linear1.weight._grad_ivar()) def test_auto_prune(self): with _test_eager_guard(): @@ -180,8 +180,8 @@ class TestImperativeAutoPrune(unittest.TestCase): loss = case2(v1, v2) loss.backward() - self.assertTrue(case2.linear2.weight._grad_ivar() is None) - self.assertTrue(case2.linear1.weight._grad_ivar() is not None) + self.assertIsNone(case2.linear2.weight._grad_ivar()) + self.assertIsNotNone(case2.linear1.weight._grad_ivar()) def 
test_auto_prune2(self): with _test_eager_guard(): @@ -198,7 +198,7 @@ class TestImperativeAutoPrune(unittest.TestCase): v2 = fluid.dygraph.to_variable(value2) loss, part2 = case3(v1, v2, 1) loss.backward() - self.assertTrue(case3.linear.weight._grad_ivar() is not None) + self.assertIsNotNone(case3.linear.weight._grad_ivar()) self.assertTrue((part2.gradient() == 0).all()) def test_auto_prune3(self): @@ -217,7 +217,7 @@ class TestImperativeAutoPrune(unittest.TestCase): v2 = fluid.dygraph.to_variable(value2) loss, part2 = case4(v1, v2, 1) part2.backward() - self.assertTrue(case4.linear.weight._grad_ivar() is not None) + self.assertIsNotNone(case4.linear.weight._grad_ivar()) self.assertTrue((part2.gradient() == 1).all()) def test_auto_prune4(self): @@ -236,7 +236,7 @@ class TestImperativeAutoPrune(unittest.TestCase): v2 = fluid.dygraph.to_variable(value2) loss, part1, part2 = case4(v1, v2, 2) part1.backward() - self.assertTrue(case4.linear.weight._grad_ivar() is not None) + self.assertIsNotNone(case4.linear.weight._grad_ivar()) self.assertTrue((part2.gradient() == 0).all()) def test_auto_prune5(self): @@ -261,8 +261,8 @@ class TestImperativeAutoPrune(unittest.TestCase): out1.stop_gradient = True out = fluid.layers.concat(input=[out1, out2, c], axis=1) out.backward() - self.assertTrue(linear.weight.gradient() is None) - self.assertTrue(out1.gradient() is None) + self.assertIsNone(linear.weight.gradient()) + self.assertIsNone(out1.gradient()) def test_auto_prune6(self): with _test_eager_guard(): @@ -284,8 +284,8 @@ class TestImperativeAutoPrune(unittest.TestCase): out1.stop_gradient = True out = fluid.layers.concat(input=[out1, out2, c], axis=1) out.backward() - self.assertTrue(linear.weight.gradient() is None) - self.assertTrue(out1.gradient() is None) + self.assertIsNone(linear.weight.gradient()) + self.assertIsNone(out1.gradient()) def test_auto_prune7(self): with _test_eager_guard(): @@ -377,8 +377,8 @@ class TestImperativeAutoPrune(unittest.TestCase): # TODO(jiabin): In Eager Mode we don't actually need sort_sum_gradient, this test should be removed when we don't support fluid anymore. 
fluid.set_flags({'FLAGS_sort_sum_gradient': True}) out.backward() - self.assertTrue(linear.weight.gradient() is None) - self.assertTrue(out1.gradient() is None) + self.assertIsNone(linear.weight.gradient()) + self.assertIsNone(out1.gradient()) def test_auto_prune10(self): with _test_eager_guard(): @@ -449,8 +449,8 @@ class TestImperativeAutoPrune(unittest.TestCase): case3 = AutoPruneLayer2(input_size=784) loss = case3(v1, v2) loss.backward() - self.assertTrue(case3.linear2.weight._grad_ivar() is None) - self.assertTrue(case3.linear.weight._grad_ivar() is not None) + self.assertIsNone(case3.linear2.weight._grad_ivar()) + self.assertIsNotNone(case3.linear.weight._grad_ivar()) def test_case2_prune_no_grad_branch(self): with _test_eager_guard(): @@ -468,7 +468,7 @@ class TestImperativeAutoPrune(unittest.TestCase): out = fluid.layers.one_hot(input=label, depth=100) loss = paddle.mean(out) loss.backward() - self.assertTrue(linear.weight._grad_ivar() is None) + self.assertIsNone(linear.weight._grad_ivar()) def test_case3_prune_no_grad_branch2(self): with _test_eager_guard(): @@ -480,7 +480,7 @@ class TestImperativeAutoPrune(unittest.TestCase): out = fluid.layers.gaussian_random(shape=[20, 30]) loss = paddle.mean(out) loss.backward() - self.assertTrue(out._grad_ivar() is None) + self.assertIsNone(out._grad_ivar()) def test_case4_with_no_grad_op_maker(self): with _test_eager_guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_basic.py b/python/paddle/fluid/tests/unittests/test_imperative_basic.py index 62afffb59ab..72f0a599385 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_basic.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_basic.py @@ -272,7 +272,7 @@ class TestImperative(unittest.TestCase): data = np.array([[2, 3], [4, 5]]).astype('float32') with fluid.dygraph.guard(): l0 = fluid.Linear(2, 2) - self.assertTrue(l0.weight._grad_ivar() is None) + self.assertIsNone(l0.weight._grad_ivar()) l1 = fluid.Linear(2, 2) with fluid.dygraph.no_grad(): self.assertTrue(l1.weight.stop_gradient is False) @@ -283,14 +283,14 @@ class TestImperative(unittest.TestCase): o = l1(y) o.backward() - self.assertTrue(tmp._grad_ivar() is None) - self.assertTrue(l0.weight._grad_ivar() is not None) + self.assertIsNone(tmp._grad_ivar()) + self.assertIsNotNone(l0.weight._grad_ivar()) def test_paddle_imperative_no_grad_guard(self): data = np.array([[2, 3], [4, 5]]).astype('float32') with fluid.dygraph.guard(): l0 = fluid.Linear(2, 2) - self.assertTrue(l0.weight._grad_ivar() is None) + self.assertIsNone(l0.weight._grad_ivar()) l1 = fluid.Linear(2, 2) with paddle.no_grad(): self.assertTrue(l1.weight.stop_gradient is False) @@ -301,14 +301,14 @@ class TestImperative(unittest.TestCase): o = l1(y) o.backward() - self.assertTrue(tmp._grad_ivar() is None) - self.assertTrue(l0.weight._grad_ivar() is not None) + self.assertIsNone(tmp._grad_ivar()) + self.assertIsNotNone(l0.weight._grad_ivar()) def test_paddle_imperative_set_grad_enabled(self): data = np.array([[2, 3], [4, 5]]).astype('float32') with fluid.dygraph.guard(): l0 = fluid.Linear(2, 2) - self.assertTrue(l0.weight._grad_ivar() is None) + self.assertIsNone(l0.weight._grad_ivar()) l1 = fluid.Linear(2, 2) with paddle.set_grad_enabled(False): self.assertTrue(l1.weight.stop_gradient is False) @@ -322,9 +322,9 @@ class TestImperative(unittest.TestCase): o = l1(y) o.backward() - self.assertTrue(tmp._grad_ivar() is None) - self.assertTrue(tmp2._grad_ivar() is not None) - self.assertTrue(l0.weight._grad_ivar() is not None) + 
self.assertIsNone(tmp._grad_ivar()) + self.assertIsNotNone(tmp2._grad_ivar()) + self.assertIsNotNone(l0.weight._grad_ivar()) def test_paddle_imperative_is_grad_enabled(self): with fluid.dygraph.guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py index 140f1191c96..f121bacb2a5 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py @@ -83,7 +83,7 @@ class TestEagerGrad(TestCase): # stop_gradient = !create_graph, create_graph default false self.assertEqual(dx[0].stop_gradient, True) # x is unused input in the graph - self.assertEqual(dx[1], None) + self.assertIsNone(dx[1]) def test_simple_example_eager_grad_allow_unused(self): with _test_eager_guard(): @@ -292,7 +292,7 @@ class TestDygraphDoubleGrad(TestCase): (none_grad,) = self.grad( [x], [y], create_graph=create_graph, allow_unused=True ) - self.assertTrue(none_grad is None) + self.assertIsNone(none_grad) (grad_with_none_and_not_none,) = self.grad( [x, y], [y], create_graph=create_graph diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py index 0219cc99479..c1e6c3b3b3a 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py @@ -84,7 +84,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): def _check_exception(self, exception_message, place=None): seed = 90 batch_size = 128 - if place == None: + if place is None: place = ( fluid.CUDAPlace(0) if core.is_compiled_with_cuda() @@ -106,7 +106,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): seed = 90 batch_size = 128 - if place == None: + if place is None: place = ( fluid.CPUPlace() if not core.is_compiled_with_cuda() @@ -161,7 +161,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - if place == None: + if place is None: place = ( fluid.CPUPlace() if not core.is_compiled_with_cuda() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py index 251ac5b6b93..c531374478b 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer_v2.py @@ -81,7 +81,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): def _check_exception(self, exception_message, place=None): seed = 90 batch_size = 128 - if place == None: + if place is None: place = ( fluid.CUDAPlace(0) if core.is_compiled_with_cuda() @@ -105,7 +105,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): seed = 90 batch_size = 128 - if place == None: + if place is None: place = ( fluid.CPUPlace() if not core.is_compiled_with_cuda() @@ -170,7 +170,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): paddle.seed(seed) paddle.framework.random._manual_program_seed(seed) - if place == None: + if place is None: place = ( fluid.CPUPlace() if not core.is_compiled_with_cuda() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py index 55cc00b12ab..10c69cbc43d 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_save_load.py +++ 
b/python/paddle/fluid/tests/unittests/test_imperative_save_load.py @@ -1003,7 +1003,7 @@ class TestDygraphPtbRnn(unittest.TestCase): os.path.join('saved_dy', 'emb_dy') ) - self.assertTrue(opti_state_dict == None) + self.assertIsNone(opti_state_dict) para_state_dict, opti_state_dict = fluid.load_dygraph( os.path.join('saved_dy', 'emb_dy.pdparams') @@ -1022,8 +1022,8 @@ class TestDygraphPtbRnn(unittest.TestCase): para_state_dict, opti_state_dict = fluid.load_dygraph( os.path.join('saved_dy', 'emb_dy'), keep_name_table=True ) - self.assertTrue(para_state_dict != None) - self.assertTrue(opti_state_dict == None) + self.assertIsNotNone(para_state_dict) + self.assertIsNone(opti_state_dict) def test_main(self): self.func_setUp() diff --git a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py index 93a04a36115..3b5cab4ce97 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_selected_rows.py @@ -62,18 +62,18 @@ class TestSimpleNet(unittest.TestCase): ) # grad_clip=grad_clip input_emb, emb = simplenet(input) - self.assertTrue(emb.weight.gradient() is None) - self.assertTrue(input_emb.gradient() is None) + self.assertIsNone(emb.weight.gradient()) + self.assertIsNone(input_emb.gradient()) input_emb.backward() adam.minimize(input_emb) - self.assertTrue(emb.weight.gradient() is not None) + self.assertIsNotNone(emb.weight.gradient()) emb.clear_gradients() - self.assertTrue(emb.weight.gradient() is None) + self.assertIsNone(emb.weight.gradient()) input_emb.clear_gradient() - self.assertTrue(input_emb.gradient() is not None) + self.assertIsNotNone(input_emb.gradient()) paddle.enable_static() def test_selectedrows_gradient1(self): @@ -107,18 +107,18 @@ class TestSimpleNet(unittest.TestCase): ) input_emb, emb = simplenet(input) - self.assertTrue(emb.weight.gradient() is None) - self.assertTrue(input_emb.gradient() is None) + self.assertIsNone(emb.weight.gradient()) + self.assertIsNone(input_emb.gradient()) input_emb.backward() adam.minimize(input_emb) - self.assertTrue(emb.weight.gradient() is not None) + self.assertIsNotNone(emb.weight.gradient()) emb.clear_gradients() - self.assertTrue(emb.weight.gradient() is None) + self.assertIsNone(emb.weight.gradient()) input_emb.clear_gradient() - self.assertTrue(input_emb.gradient() is not None) + self.assertIsNotNone(input_emb.gradient()) def test_selectedrows_gradient2(self): fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py index bf9ac53f99c..4e3288fd037 100644 --- a/python/paddle/fluid/tests/unittests/test_inference_model_io.py +++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py @@ -449,7 +449,7 @@ class TestSaveInferenceModelNew(unittest.TestCase): self.assertTrue(isinstance(res2, bytes)) # test if variables in program is empty res = paddle.static.io._serialize_persistables(Program(), None) - self.assertEqual(res, None) + self.assertIsNone(res) self.assertRaises( TypeError, paddle.static.io.deserialize_persistables, diff --git a/python/paddle/fluid/tests/unittests/test_input_spec.py b/python/paddle/fluid/tests/unittests/test_input_spec.py index 9d805d64e93..5f3670aa50f 100644 --- a/python/paddle/fluid/tests/unittests/test_input_spec.py +++ b/python/paddle/fluid/tests/unittests/test_input_spec.py @@ -32,7 +32,7 @@ class 
TestInputSpec(unittest.TestCase): self.assertEqual( tensor_spec.dtype, convert_np_dtype_to_dtype_('float32') ) - self.assertEqual(tensor_spec.name, None) + self.assertIsNone(tensor_spec.name) def test_from_tensor(self): x_bool = fluid.layers.fill_constant(shape=[1], dtype='bool', value=True) @@ -51,7 +51,7 @@ class TestInputSpec(unittest.TestCase): x_np_spec.dtype, convert_np_dtype_to_dtype_(x_numpy.dtype) ) self.assertEqual(x_np_spec.shape, x_numpy.shape) - self.assertEqual(x_np_spec.name, None) + self.assertIsNone(x_np_spec.name) x_numpy2 = np.array([1, 2, 3, 4]).astype('int64') x_np_spec2 = InputSpec.from_numpy(x_numpy2, name='x_np_int64') diff --git a/python/paddle/fluid/tests/unittests/test_lambv2_op.py b/python/paddle/fluid/tests/unittests/test_lambv2_op.py index 475866fb903..bb2aee88739 100644 --- a/python/paddle/fluid/tests/unittests/test_lambv2_op.py +++ b/python/paddle/fluid/tests/unittests/test_lambv2_op.py @@ -241,8 +241,8 @@ class TestLambOpMultiPrecision(unittest.TestCase): ) return params[0].astype(np.float32) else: - self.assertTrue(params[0] is not None) - self.assertTrue(params[1] is None) + self.assertIsNotNone(params[0]) + self.assertIsNone(params[1]) params[0] = np.array(params[0]) return params[0] diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 14754e347f9..93c630888be 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -502,7 +502,7 @@ class TestLayer(LayerTest): bias_attr=False, ) dy_ret = conv2d(base.to_variable(images)) - self.assertTrue(conv2d.bias is None) + self.assertIsNone(conv2d.bias) images = np.ones([2, 3, 5, 5], dtype='float32') conv2d = nn.Conv2D( @@ -512,7 +512,7 @@ class TestLayer(LayerTest): bias_attr=False, ) dy_ret = conv2d(base.to_variable(images)) - self.assertTrue(conv2d.bias is None) + self.assertIsNone(conv2d.bias) with self.static_graph(): # the input of Conv2D must be Variable. 
@@ -4359,8 +4359,8 @@ class TestBook(LayerTest): crf_decode = layers.crf_decoding( input=emission, param_attr=ParamAttr(name="crfw") ) - self.assertFalse(crf is None) - self.assertFalse(crf_decode is None) + self.assertIsNotNone(crf) + self.assertIsNotNone(crf_decode) return layers.chunk_eval( input=crf_decode, label=label, @@ -4386,8 +4386,8 @@ class TestBook(LayerTest): crf_decode = layers.crf_decoding( input=emission, length=length, param_attr=ParamAttr(name="crfw") ) - self.assertFalse(crf is None) - self.assertFalse(crf_decode is None) + self.assertIsNotNone(crf) + self.assertIsNotNone(crf_decode) return layers.chunk_eval( input=crf_decode, label=label, diff --git a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py index dd958243fad..e6e0e50ac76 100644 --- a/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py +++ b/python/paddle/fluid/tests/unittests/test_paddle_imperative_double_grad.py @@ -128,7 +128,7 @@ class TestDygraphDoubleGrad(TestCase): (none_grad,) = self.grad( [x], [y], create_graph=create_graph, allow_unused=True ) - self.assertTrue(none_grad is None) + self.assertIsNone(none_grad) (grad_with_none_and_not_none,) = self.grad( [x, y], [y], create_graph=create_graph diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py index b7dd11f7b8a..e888d3c09c8 100644 --- a/python/paddle/fluid/tests/unittests/test_profiler.py +++ b/python/paddle/fluid/tests/unittests/test_profiler.py @@ -205,8 +205,8 @@ class TestProfiler(unittest.TestCase): class TestProfilerAPIError(unittest.TestCase): def test_errors(self): options = utils.ProfilerOptions() - self.assertTrue(options['profile_path'] is None) - self.assertTrue(options['timeline_path'] is None) + self.assertIsNone(options['profile_path']) + self.assertIsNone(options['timeline_path']) options = options.with_state('All') self.assertTrue(options['state'] == 'All') diff --git a/python/paddle/fluid/tests/unittests/test_pylayer_op.py b/python/paddle/fluid/tests/unittests/test_pylayer_op.py index becab9796df..eaa000e3582 100644 --- a/python/paddle/fluid/tests/unittests/test_pylayer_op.py +++ b/python/paddle/fluid/tests/unittests/test_pylayer_op.py @@ -187,12 +187,12 @@ class TestPyLayer(unittest.TestCase): for dtype in dtypes: input1 = paddle.randn([2, 3]) input1.stop_gradient = False - self.assertTrue(input1.grad is None) + self.assertIsNone(input1.grad) z = tanh.apply(input1, dtype) z = paddle.cast(z, "float32") z.sum().backward() - self.assertTrue(input1.grad is not None) + self.assertIsNotNone(input1.grad) def test_pylayer_dtype(self): with _test_eager_guard(): @@ -283,7 +283,7 @@ class TestPyLayer(unittest.TestCase): input1 = paddle.randn([2, 3]).astype("float64") z = tanh.apply(input1, paddle.tanh, paddle.square) z.mean().backward() - self.assertTrue(z.grad is None) + self.assertIsNone(z.grad) def test_pylayer_nograd(self): with _test_eager_guard(): @@ -472,7 +472,7 @@ class TestPyLayer(unittest.TestCase): layer = Layer() z = layer(data) z.backward() - self.assertTrue(data.grad is not None) + self.assertIsNotNone(data.grad) def test_pylayer_inplace(self): with _test_eager_guard(): @@ -547,7 +547,7 @@ class TestPyLayer(unittest.TestCase): layer = Layer() z = layer(data) z.backward() - self.assertTrue(data.grad is not None) + self.assertIsNotNone(data.grad) def test_pylayer_inplace_backward_success_2(self): with _test_eager_guard(): @@ -580,7 +580,7 @@ 
class TestPyLayer(unittest.TestCase): layer = Layer() z = layer(data) z.backward() - self.assertTrue(data.grad is not None) + self.assertIsNotNone(data.grad) def func_test_pylayer_inplace_and_leaf_exception(self): class cus_pylayer_op( @@ -630,7 +630,7 @@ class TestPyLayer(unittest.TestCase): temp.stop_gradient = False z = paddle.tanh(temp) z.backward() - self.assertTrue(temp.grad is not None) + self.assertIsNotNone(temp.grad) return paddle.to_tensor(temp.grad) for i in range(2): diff --git a/python/paddle/fluid/tests/unittests/test_regularizer.py b/python/paddle/fluid/tests/unittests/test_regularizer.py index a3f1697032b..35e7f47fefb 100644 --- a/python/paddle/fluid/tests/unittests/test_regularizer.py +++ b/python/paddle/fluid/tests/unittests/test_regularizer.py @@ -37,7 +37,7 @@ class TestL2DecayRegularizer(unittest.TestCase): name="mul.x", regularizer=regularizer.L2DecayRegularizer(0.5), ) - self.assertTrue(mul_x.regularizer is not None) + self.assertIsNotNone(mul_x.regularizer) self.assertTrue( isinstance(mul_x.regularizer, regularizer.L2DecayRegularizer) ) @@ -82,7 +82,7 @@ class TestL1DecayRegularizer(unittest.TestCase): name="mul.x", regularizer=regularizer.L1DecayRegularizer(0.5), ) - self.assertTrue(mul_x.regularizer is not None) + self.assertIsNotNone(mul_x.regularizer) self.assertTrue( isinstance(mul_x.regularizer, regularizer.L1DecayRegularizer) ) diff --git a/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py b/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py index 8e7de658969..bfe7f16cea1 100644 --- a/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py +++ b/python/paddle/fluid/tests/unittests/test_tensor_register_hook.py @@ -494,7 +494,7 @@ class TestTensorRegisterHook(unittest.TestCase): )[0] z = y + dx - self.assertTrue(x.grad is None) + self.assertIsNone(x.grad) # If create_graph = True, the gradient of dx # would be backpropagated. Therefore, diff --git a/python/paddle/fluid/tests/unittests/test_var_base.py b/python/paddle/fluid/tests/unittests/test_var_base.py index 38e65744a81..6adf1c74180 100644 --- a/python/paddle/fluid/tests/unittests/test_var_base.py +++ b/python/paddle/fluid/tests/unittests/test_var_base.py @@ -125,7 +125,7 @@ class TestVarBase(unittest.TestCase): ) np.testing.assert_array_equal(x.numpy(), [1.0, 2.0]) self.assertEqual(x.dtype, core.VarDesc.VarType.FP32) - self.assertEqual(x.grad, None) + self.assertIsNone(x.grad) self.assertEqual(x.shape, [2]) self.assertEqual(x.stop_gradient, False) self.assertEqual(x.type, core.VarDesc.VarType.LOD_TENSOR) @@ -447,7 +447,7 @@ class TestVarBase(unittest.TestCase): y = x**2 y.backward() self.assertTrue(cmp_float(x.grad.numpy(), [20.0])) - self.assertEqual(detach_x.grad, None) + self.assertIsNone(detach_x.grad) detach_x.stop_gradient = ( False # Set stop_gradient to be False, supported auto-grad @@ -1844,10 +1844,10 @@ class TestEagerTensorGradNameValue(unittest.TestCase): a = paddle.to_tensor(a_np) a.stop_gradient = False b = a**2 - self.assertEqual(a._grad_value(), None) + self.assertIsNone(a._grad_value()) b.backward() # Note, for new dygraph, there are no generated grad name, so we skip the name check. 
- self.assertNotEqual(a._grad_value(), None) + self.assertIsNotNone(a._grad_value()) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_var_info.py b/python/paddle/fluid/tests/unittests/test_var_info.py index 4bb6648488e..d03fb2387bd 100644 --- a/python/paddle/fluid/tests/unittests/test_var_info.py +++ b/python/paddle/fluid/tests/unittests/test_var_info.py @@ -32,7 +32,7 @@ class TestVarInfo(unittest.TestCase): ret = var._get_info("name") assert ret == "test" ret = var._get_info("not_exist") - assert ret == None + assert ret is None if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/testsuite.py b/python/paddle/fluid/tests/unittests/testsuite.py index a13b73f501b..f40e111a6e3 100644 --- a/python/paddle/fluid/tests/unittests/testsuite.py +++ b/python/paddle/fluid/tests/unittests/testsuite.py @@ -41,7 +41,7 @@ def create_op(scope, op_type, inputs, outputs, attrs, cache_list=None): __create_var__(in_name, sub_in_name) else: __create_var__(in_name, in_name) - if cache_list != None and isinstance(cache_list, list): + if cache_list is not None and isinstance(cache_list, list): for name in cache_list: kwargs[name] = [] scope.var(name) diff --git a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py index 21e46e31783..b439ffb5d20 100644 --- a/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py +++ b/python/paddle/fluid/tests/unittests/xpu/test_matmul_op_xpu.py @@ -72,7 +72,7 @@ def generate_compatible_shapes( dim_X, dim_Y, transpose_X, transpose_Y, batch_size ): BATCH_SIZE = 2 - if batch_size != None: + if batch_size is not None: BATCH_SIZE = batch_size M = 3 diff --git a/python/paddle/fluid/trainer_desc.py b/python/paddle/fluid/trainer_desc.py index f8151297c2d..eaa60c7e3a9 100644 --- a/python/paddle/fluid/trainer_desc.py +++ b/python/paddle/fluid/trainer_desc.py @@ -354,7 +354,7 @@ class DistMultiTrainer(TrainerDesc): def _gen_trainer_desc(self): super(DistMultiTrainer, self)._gen_trainer_desc() self.proto_desc.class_name = "DistMultiTrainer" - if self._program == None: + if self._program is None: raise RuntimeError("None Program") self._device_worker._set_infer(self._infer) self._device_worker._set_program(self._program) @@ -378,7 +378,7 @@ class HeterXpuTrainer(TrainerDesc): def _gen_trainer_desc(self): super(HeterXpuTrainer, self)._gen_trainer_desc() self.proto_desc.class_name = "HeterXpuTrainer" - if self._program == None: + if self._program is None: raise RuntimeError("None Program") self._device_worker._set_infer(self._infer) self._device_worker._set_program(self._program) @@ -402,7 +402,7 @@ class PSGPUTrainer(TrainerDesc): def _gen_trainer_desc(self): super(PSGPUTrainer, self)._gen_trainer_desc() self.proto_desc.class_name = "PSGPUTrainer" - if self._program == None: + if self._program is None: raise RuntimeError("None Program") self._device_worker._set_infer(self._infer) self._device_worker._set_program(self._program) @@ -426,7 +426,7 @@ class HeterPipelineTrainer(TrainerDesc): def _gen_trainer_desc(self): super(HeterPipelineTrainer, self)._gen_trainer_desc() self.proto_desc.class_name = "HeterPipelineTrainer" - if self._program == None: + if self._program is None: raise RuntimeError("None Program") self._device_worker._set_infer(self._infer) self._device_worker._set_program(self._program) @@ -450,7 +450,7 @@ class PipelineTrainer(TrainerDesc): def _gen_trainer_desc(self): super(PipelineTrainer, self)._gen_trainer_desc() self.proto_desc.class_name = 
"PipelineTrainer" - if self._program == None: + if self._program is None: raise RuntimeError("None Program") self._device_worker._set_infer(self._infer) self._device_worker._set_program(self._program) diff --git a/python/paddle/fluid/trainer_factory.py b/python/paddle/fluid/trainer_factory.py index 7ac367b38fd..761895fe304 100644 --- a/python/paddle/fluid/trainer_factory.py +++ b/python/paddle/fluid/trainer_factory.py @@ -192,7 +192,7 @@ class FetchHandlerMonitor(object): for key in var_name_to_key: var = scope.find_var(key) fetch_dict[key] = var - if var == None: + if var is None: local_logger.warning( "{} value currently not available".format( var_name_to_key[key] @@ -201,7 +201,7 @@ class FetchHandlerMonitor(object): res_dict = {} for key in fetch_dict: user_name = var_name_to_key[key] - if fetch_dict[key] == None: + if fetch_dict[key] is None: res_dict[user_name] = None continue else: diff --git a/python/paddle/fluid/transpiler/distribute_transpiler.py b/python/paddle/fluid/transpiler/distribute_transpiler.py index 28dbf22153f..6bdddddd93f 100644 --- a/python/paddle/fluid/transpiler/distribute_transpiler.py +++ b/python/paddle/fluid/transpiler/distribute_transpiler.py @@ -692,7 +692,7 @@ WIKI: https://github.com/PaddlePaddle/Fleet/blob/develop/markdown_doc/transpiler ps_dispatcher = self.config.split_method(self.pserver_endpoints) self.table_name = find_distributed_lookup_table(self.origin_program) - self.has_distributed_lookup_table = self.table_name != None + self.has_distributed_lookup_table = self.table_name is not None self.param_name_to_grad_name = dict() self.grad_name_to_param_name = dict() for param_var, grad_var in self.params_grads: diff --git a/python/paddle/fluid/transpiler/geo_sgd_transpiler.py b/python/paddle/fluid/transpiler/geo_sgd_transpiler.py index 0163d591cb4..80b39dceaac 100644 --- a/python/paddle/fluid/transpiler/geo_sgd_transpiler.py +++ b/python/paddle/fluid/transpiler/geo_sgd_transpiler.py @@ -104,7 +104,7 @@ class GeoSgdTranspiler(DistributeTranspiler): # distribute lookup table self.table_name = find_distributed_lookup_table(self.origin_program) - self.has_distributed_lookup_table = self.table_name != None + self.has_distributed_lookup_table = self.table_name is not None self.origin_program._distributed_lookup_table = ( self.table_name if self.table_name else None ) diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index a61d0576130..10b980aaabf 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -1115,7 +1115,7 @@ def dropout( if axis and not isinstance(axis, (int, list, tuple)): raise TypeError("datatype of axis argument should be int or list") - if axis == None: # commonly used dropout + if axis is None: # commonly used dropout seed = None mode = ( 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py index 87d61e91809..25008f7e2dc 100644 --- a/python/paddle/nn/functional/norm.py +++ b/python/paddle/nn/functional/norm.py @@ -193,7 +193,7 @@ def batch_norm( data_format = 'NCHW' if data_format[1] == 'C' else 'NHWC' - if use_global_stats == None: + if use_global_stats is None: use_global_stats = not training trainable_statistics = False else: diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py index d81987fa9ee..daa5c8d841a 100755 --- a/python/paddle/nn/functional/pooling.py +++ b/python/paddle/nn/functional/pooling.py @@ -2174,9 +2174,9 
@@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None): output_size = utils.convert_to_list(output_size, 2, 'output_size') else: output_size = list(output_size) - if output_size[0] == None: + if output_size[0] is None: output_size[0] = in_h - if output_size[1] == None: + if output_size[1] is None: output_size[1] = in_w if in_dygraph_mode(): pool_out = _C_ops.max_pool2d_with_index( @@ -2269,11 +2269,11 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None): output_size = utils.convert_to_list(output_size, 3, 'output_size') else: output_size = list(output_size) - if output_size[0] == None: + if output_size[0] is None: output_size[0] = in_l - if output_size[1] == None: + if output_size[1] is None: output_size[1] = in_h - if output_size[2] == None: + if output_size[2] is None: output_size[2] = in_w if in_dynamic_mode(): diff --git a/python/paddle/nn/layer/distance.py b/python/paddle/nn/layer/distance.py index ef09a1cd5e2..a4f1b2b789d 100644 --- a/python/paddle/nn/layer/distance.py +++ b/python/paddle/nn/layer/distance.py @@ -78,6 +78,6 @@ class PairwiseDistance(Layer): main_str += ', epsilon={epsilon}' if self.keepdim is not False: main_str += ', keepdim={keepdim}' - if self.name != None: + if self.name is not None: main_str += ', name={name}' return main_str.format(**self.__dict__) diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py index 5f4a4d8d1d8..8864237a282 100644 --- a/python/paddle/nn/layer/norm.py +++ b/python/paddle/nn/layer/norm.py @@ -394,7 +394,7 @@ class GroupNorm(Layer): default_initializer=Constant(1.0), ) self.weight.stop_gradient = ( - self._weight_attr != None + self._weight_attr is not None and self._weight_attr.learning_rate == 0.0 ) @@ -411,7 +411,8 @@ class GroupNorm(Layer): attr=self._bias_attr, shape=param_shape, is_bias=True ) self.bias.stop_gradient = ( - self._bias_attr != None and self._bias_attr.learning_rate == 0.0 + self._bias_attr is not None + and self._bias_attr.learning_rate == 0.0 ) def forward(self, input): @@ -635,7 +636,7 @@ class _BatchNormBase(Layer): default_initializer=Constant(1.0), ) self.weight.stop_gradient = ( - self._weight_attr != None + self._weight_attr is not None and self._weight_attr.learning_rate == 0.0 ) @@ -656,7 +657,8 @@ class _BatchNormBase(Layer): is_bias=True, ) self.bias.stop_gradient = ( - self._bias_attr != None and self._bias_attr.learning_rate == 0.0 + self._bias_attr is not None + and self._bias_attr.learning_rate == 0.0 ) moving_mean_name = None @@ -1293,15 +1295,15 @@ class SyncBatchNorm(_BatchNormBase): layer_output = layer if isinstance(layer, _BatchNormBase): if ( - layer._weight_attr != None + layer._weight_attr is not None and not isinstance(layer._weight_attr, bool) - and layer._weight_attr.name != None + and layer._weight_attr.name is not None ): layer._weight_attr.name = layer._weight_attr.name + '_sync' if ( - layer._bias_attr != None + layer._bias_attr is not None and not isinstance(layer._bias_attr, bool) - and layer._bias_attr.name != None + and layer._bias_attr.name is not None ): layer._bias_attr.name = layer._bias_attr.name + '_sync' diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py index a7b672383a9..3e64c409559 100644 --- a/python/paddle/optimizer/optimizer.py +++ b/python/paddle/optimizer/optimizer.py @@ -704,7 +704,7 @@ class Optimizer(object): name, param.name ) ) - if shape == None: + if shape is None: shape = param.shape assert isinstance(self.helper, LayerHelper) diff --git 
a/python/paddle/profiler/profiler.py b/python/paddle/profiler/profiler.py index bcd87d9c826..97c3f73beae 100644 --- a/python/paddle/profiler/profiler.py +++ b/python/paddle/profiler/profiler.py @@ -522,7 +522,7 @@ class Profiler: else: self.scheduler = _default_state_scheduler - if on_trace_ready == None: + if on_trace_ready is None: self.on_trace_ready = export_chrome_tracing('./profiler_log/') else: self.on_trace_ready = on_trace_ready diff --git a/python/paddle/profiler/utils.py b/python/paddle/profiler/utils.py index 4d7b36554b5..d26e6de82f2 100644 --- a/python/paddle/profiler/utils.py +++ b/python/paddle/profiler/utils.py @@ -189,6 +189,6 @@ def wrap_optimizers(): for classname in optimizer.__all__: if classname != 'Optimizer': classobject = getattr(optimizer, classname) - if getattr(classobject, 'step', None) != None: + if getattr(classobject, 'step', None) is not None: classobject.step = optimizer_warpper(classobject.step) _has_optimizer_wrapped = True diff --git a/python/paddle/sparse/nn/layer/norm.py b/python/paddle/sparse/nn/layer/norm.py index 117fbf01a1d..b3993a2947b 100644 --- a/python/paddle/sparse/nn/layer/norm.py +++ b/python/paddle/sparse/nn/layer/norm.py @@ -129,7 +129,7 @@ class BatchNorm(paddle.nn.BatchNorm1D): "When training, we now always track global mean and variance." ) - if self._use_global_stats == None: + if self._use_global_stats is None: self._use_global_stats = not self.training trainable_statistics = False else: @@ -363,15 +363,15 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm): layer_output = layer if isinstance(layer, _BatchNormBase): if ( - layer._weight_attr != None + layer._weight_attr is not None and not isinstance(layer._weight_attr, bool) - and layer._weight_attr.name != None + and layer._weight_attr.name is not None ): layer._weight_attr.name = layer._weight_attr.name + '_sync' if ( - layer._bias_attr != None + layer._bias_attr is not None and not isinstance(layer._bias_attr, bool) - and layer._bias_attr.name != None + and layer._bias_attr.name is not None ): layer._bias_attr.name = layer._bias_attr.name + '_sync' diff --git a/python/paddle/static/io.py b/python/paddle/static/io.py index ac598328352..47e3dddbbd8 100644 --- a/python/paddle/static/io.py +++ b/python/paddle/static/io.py @@ -688,7 +688,7 @@ def deserialize_persistables(program, data, executor): if not isinstance(var, Parameter): continue var_tmp = paddle.fluid.global_scope().find_var(var.name) - assert var_tmp != None, "can't not find var: " + var.name + assert var_tmp is not None, "can't not find var: " + var.name new_shape = (np.array(var_tmp.get_tensor())).shape assert var.name in origin_shape_map, var.name + " MUST in var list." origin_shape = origin_shape_map.get(var.name) diff --git a/python/paddle/tensor/einsum.py b/python/paddle/tensor/einsum.py index 19a63d515be..edf316bbf50 100644 --- a/python/paddle/tensor/einsum.py +++ b/python/paddle/tensor/einsum.py @@ -218,7 +218,7 @@ def build_global_view(nop_labels, rhs, n_bcast_dims): else: count[-1] += 1 - if rhs != None: + if rhs is not None: validate_rhs(rhs, labels, n_bcast_dims) g_labels_out = rhs.replace('...', '.' 
* n_bcast_dims) else: diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index 5348681ad04..257fad6cafa 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -466,9 +466,9 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): if in_dygraph_mode(): out = _C_ops.abs(input) reduce_all = ( - True if axis == None or axis == [] or asvector else False + True if axis is None or axis == [] or asvector else False ) - axis = axis if axis != None and axis != [] else [0] + axis = axis if axis is not None and axis != [] else [0] if reduce_all: assert (axis == []) or (axis is None) if porder == np.float64('inf'): @@ -485,8 +485,8 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): dtype=helper.input_dtype() ) - reduce_all = True if axis == None or axis == [] or asvector else False - axis = axis if axis != None and axis != [] else [0] + reduce_all = True if axis is None or axis == [] or asvector else False + axis = axis if axis is not None and axis != [] else [0] reduce_type = ( 'reduce_max' if porder == np.float64('inf') else 'reduce_min' @@ -830,7 +830,7 @@ def cond(x, p=None, name=None): when porder is in (1, -1, inf, -inf) """ reduce_all = True if axis is None or axis == [] else False - axis = axis if axis != None and axis != [] else [0] + axis = axis if axis is not None and axis != [] else [0] keepdim = False if in_dygraph_mode(): @@ -1106,7 +1106,7 @@ def cond(x, p=None, name=None): "input should be a matrix or batches of matrices, " + "but the dimention of received input is {}".format(len(x_shape)) ) - if p == None: + if p is None: p = 2 x_size = 0 if (0 in x_shape) else 1 if p in ("fro", "nuc", 1, -1, np.inf, -np.inf): diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 3379a60a3bc..84a7b7a385a 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -503,14 +503,14 @@ def unstack(x, axis=0, num=None): """ if in_dygraph_mode(): - if num == None: + if num is None: num = x.shape[axis] if num == 0: return [] return _C_ops.unstack(x, axis, num) if _non_static_mode(): - if num == None: + if num is None: num = x.shape[axis] if num == 0: return [] diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index f34851fdcca..eafa9944c3f 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -3921,13 +3921,13 @@ def all(x, axis=None, keepdim=False, name=None): return _C_ops.all(x, axis, keepdim) if _in_legacy_dygraph(): - axis = axis if axis != None and axis != [] else [0] + axis = axis if axis is not None and axis != [] else [0] return _legacy_C_ops.reduce_all( x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all_flag ) attrs = { - 'dim': axis if axis != None and axis != [] and axis != () else [0], + 'dim': axis if axis is not None and axis != [] and axis != () else [0], 'keep_dim': keepdim, 'reduce_all': reduce_all_flag, } @@ -4010,13 +4010,13 @@ def any(x, axis=None, keepdim=False, name=None): return _C_ops.any(x, axis, keepdim) if _in_legacy_dygraph(): - axis = axis if axis != None and axis != [] else [0] + axis = axis if axis is not None and axis != [] else [0] return _legacy_C_ops.reduce_any( x, 'dim', axis, 'keep_dim', keepdim, 'reduce_all', reduce_all_flag ) attrs = { - 'dim': axis if axis != None and axis != [] and axis != () else [0], + 'dim': axis if axis is not None and axis != [] and axis != () else [0], 'keep_dim': keepdim, 'reduce_all': reduce_all_flag, } @@ -4322,7 +4322,7 @@ def logit(x, 
eps=None, name=None): """ - if eps == None: + if eps is None: eps = 0.0 if _in_legacy_dygraph(): return _legacy_C_ops.logit(x, 'eps', eps) diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py index 355bc63f037..62509aaedf8 100644 --- a/python/paddle/tensor/search.py +++ b/python/paddle/tensor/search.py @@ -904,7 +904,7 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None): """ if in_dygraph_mode(): - if axis == None: + if axis is None: axis = -1 out, indices = _C_ops.topk(x, k, axis, largest, sorted) return out, indices diff --git a/python/paddle/tests/test_utils_lazyimport.py b/python/paddle/tests/test_utils_lazyimport.py index 98324650bf0..1064bd80864 100644 --- a/python/paddle/tests/test_utils_lazyimport.py +++ b/python/paddle/tests/test_utils_lazyimport.py @@ -22,7 +22,7 @@ class TestUtilsLazyImport(unittest.TestCase): def func_test_lazy_import(self): paddle = try_import('paddle') - self.assertTrue(paddle.__version__ is not None) + self.assertIsNotNone(paddle.__version__) with self.assertRaises(ImportError) as context: paddle2 = try_import('paddle2') diff --git a/python/paddle/text/datasets/imdb.py b/python/paddle/text/datasets/imdb.py index d71d23ff692..cc93dc5f52d 100644 --- a/python/paddle/text/datasets/imdb.py +++ b/python/paddle/text/datasets/imdb.py @@ -112,7 +112,7 @@ class Imdb(Dataset): data = [] with tarfile.open(self.data_file) as tarf: tf = tarf.next() - while tf != None: + while tf is not None: if bool(pattern.match(tf.name)): # newline and punctuations removal and ad-hoc tokenization. data.append( diff --git a/python/paddle/vision/ops.py b/python/paddle/vision/ops.py index 1e8fc049efd..05eea5802b5 100755 --- a/python/paddle/vision/ops.py +++ b/python/paddle/vision/ops.py @@ -583,7 +583,7 @@ def prior_box( if in_dygraph_mode(): step_w, step_h = steps - if max_sizes == None: + if max_sizes is None: max_sizes = [] box, var = _C_ops.prior_box( input, diff --git a/tools/analysisPyXml.py b/tools/analysisPyXml.py index b184ef76fcc..200116779db 100644 --- a/tools/analysisPyXml.py +++ b/tools/analysisPyXml.py @@ -67,7 +67,7 @@ def analysisPyXml(rootPath, ut): ) ): pattern = r"""(.*) = ('*')|(.*) = ("*")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()""" # a='b'/a="b"/a=0 - if re.match(pattern, output.strip()) == None: + if re.match(pattern, output.strip()) is None: pyCov_file.append(clazz_filename) coverageMessage = 'RELATED' break diff --git a/tools/check_op_desc.py b/tools/check_op_desc.py index 1c1ff14f7b6..3bb1f238253 100644 --- a/tools/check_op_desc.py +++ b/tools/check_op_desc.py @@ -173,7 +173,7 @@ def diff_attr(ori_attrs, new_attrs): for attr_name in attrs_only_in_new: attr_added_error_massage.append(attr_name) - if new_attrs.get(attr_name).get(DEFAULT_VALUE) == None: + if new_attrs.get(attr_name).get(DEFAULT_VALUE) is None: error, attr_error = True, True attr_added_def_error_massage.append(attr_name) diff --git a/tools/get_single_test_cov.py b/tools/get_single_test_cov.py index ee5f2d9fd50..10aaa96ce7b 100644 --- a/tools/get_single_test_cov.py +++ b/tools/get_single_test_cov.py @@ -88,7 +88,7 @@ def analysisFNDAFile(rootPath, test): fn, re.I, ) - if matchObj == None: + if matchObj is None: OP_REGIST = False break if not OP_REGIST: diff --git a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py index 7ece773aa78..6c5d879df6c 100644 --- 
a/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py +++ b/tools/infrt/generate_pd_op_dialect_from_paddle_op_maker.py @@ -354,7 +354,7 @@ def convert_op_proto_into_mlir(op_descs): attr in skipped_attr_list ): continue - if op_proto[ATTRS][attr][DEFAULT_VALUE] != None: + if op_proto[ATTRS][attr][DEFAULT_VALUE] is not None: if op_proto[ATTRS][attr][TYPE] in attr_mlir_converter: default_value = str( op_proto[ATTRS][attr][DEFAULT_VALUE] diff --git a/tools/test_runner.py b/tools/test_runner.py index 65da72b539d..2f1b9a22ab3 100644 --- a/tools/test_runner.py +++ b/tools/test_runner.py @@ -28,7 +28,7 @@ import static_mode_white_list def main(): sys.path.append(os.getcwd()) if core.is_compiled_with_cuda() or core.is_compiled_with_rocm(): - if os.getenv('FLAGS_enable_gpu_memory_usage_log') == None: + if os.getenv('FLAGS_enable_gpu_memory_usage_log') is None: os.environ['FLAGS_enable_gpu_memory_usage_log'] = 'true' os.environ['FLAGS_enable_gpu_memory_usage_log_mb'] = 'false' -- GitLab
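Background for the rule applied throughout this patch, as a minimal illustrative sketch (not part of the diff): `x == None` dispatches to a user-defined `__eq__`, which may return something other than a plain bool, so E711 requires the identity checks `x is None` / `x is not None`. The `ElementwiseEq` class below is a hypothetical stand-in for such a type, loosely modeled on how array libraries overload `==`.

    class ElementwiseEq:
        # Hypothetical container whose `==` is overloaded elementwise,
        # similar to what tensor/array libraries do.
        def __init__(self, values):
            self.values = values

        def __eq__(self, other):
            # Returns a list, not a bool.
            return [v == other for v in self.values]


    x = ElementwiseEq([1, None, 3])
    print(x == None)  # [False, True, False] -- truthy, so `if x == None:` misfires (and E711 flags it)
    print(x is None)  # False -- unambiguous identity test

The same motivation applies to the unittest rewrites in this patch: `self.assertIsNone(obj)` and `self.assertIsNotNone(obj)` state the intent directly and report the offending value on failure, whereas `self.assertTrue(obj is None)` only reports "False is not true".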