Unverified commit a35a4a53, authored by Nyakku Shigure, committed by GitHub

[CodeStyle][E711] use `is`/`is not` for comparison with `None` (#47452)

* [CodeStyle][E711] use `is`/`is not` for comparison with `None`

* `self.assertTrue($A is None)` -> `self.assertIsNone($A)`

* `self.assertTrue($A is not None)` -> `self.assertIsNotNone($A)`

* `self.assertFalse($A is None)` -> `self.assertIsNotNone($A)`

* `self.assertEqual($A, None)` -> `self.assertIsNone($A)`

* `self.assertNotEqual($A, None)` -> `self.assertIsNotNone($A)`
Parent 9d801855
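For context on why E711 matters beyond style: `x == None` dispatches to the left operand's `__eq__`, so a class with a permissive equality method can report equality with `None` even when `x` is a live object, whereas `x is None` always compares identity and cannot be overridden. The snippet below is a minimal illustration only; the `Box` class is hypothetical and not part of this patch, and the trailing comments summarize how the unittest-assertion rewrites listed above map onto the dedicated assertions.

class Box:
    """Hypothetical example class; a permissive __eq__ makes `== None` unreliable."""

    def __init__(self, value=None):
        self.value = value

    def __eq__(self, other):
        # Treat an empty Box as equal to None -- legal, but surprising.
        if other is None:
            return self.value is None
        return isinstance(other, Box) and self.value == other.value


empty = Box()
print(empty == None)  # noqa: E711 -- True: the overridden __eq__ intercepts the comparison
print(empty is None)  # False: identity comparison cannot be customized

# The unittest rewrites above express the same intent with dedicated assertions:
#   self.assertTrue(x is None)      -> self.assertIsNone(x)
#   self.assertEqual(x, None)       -> self.assertIsNone(x)
#   self.assertTrue(x is not None)  -> self.assertIsNotNone(x)
#   self.assertFalse(x is None)     -> self.assertIsNotNone(x)
#   self.assertNotEqual(x, None)    -> self.assertIsNotNone(x)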
@@ -171,7 +171,7 @@ package:
 def meta_build_linux(
     var, python_str, paddle_version, build_var, build_name_str, cuda_str=None
 ):
-    if cuda_str == None:
+    if cuda_str is None:
         package_str = (
             """
 package:
@@ -192,7 +192,7 @@ package:
     )
     meta_build = var.build + build_name_str
     meta_str = package_str + meta_build + requirement
-    if not (cuda_str == None):
+    if not (cuda_str is None):
         meta_str = meta_str + cuda_str
     meta_str = meta_str + var.test + var.about
@@ -209,7 +209,7 @@ package:
 def meta_build_windows(
     var, python_str, paddle_version, blt_var, build_name_str, cuda_str=None
 ):
-    if cuda_str == None:
+    if cuda_str is None:
         package_str = (
             """
 package:
@@ -235,7 +235,7 @@ package:
     meta_build = var.build + build_name_str
     meta_str = package_str + meta_build + requirement
-    if not (cuda_str == None):
+    if not (cuda_str is None):
         meta_str = meta_str + cuda_str
     blt_str = var.blt_const + blt_var
...
@@ -74,7 +74,7 @@ class CostModel:
     def get_static_op_time(self, op_name, forward=True, dtype="float32"):
         # if forward is True, return op forward time, otherwise return op backward time.
-        if op_name == None:
+        if op_name is None:
             raise ValueError(
                 'op_name should not be empty when you want to get static op time'
             )
...
@@ -45,7 +45,7 @@ def tokenize(pattern):
         # tarfile.extractfile, which does random access and might
         # destroy hard disks.
         tf = tarf.next()
-        while tf != None:
+        while tf is not None:
             if bool(pattern.match(tf.name)):
                 # newline and punctuations removal and ad-hoc tokenization.
                 yield tarf.extractfile(tf).read().rstrip(b'\n\r').translate(
...
@@ -31,13 +31,13 @@ class TestIMDB(unittest.TestCase):
     word_idx = None
     def test_build_dict(self):
-        if self.word_idx == None:
+        if self.word_idx is None:
             self.word_idx = paddle.dataset.imdb.build_dict(TRAIN_PATTERN, 150)
         self.assertEqual(len(self.word_idx), 7036)
     def check_dataset(self, dataset, expected_size):
-        if self.word_idx == None:
+        if self.word_idx is None:
             self.word_idx = paddle.dataset.imdb.build_dict(TRAIN_PATTERN, 150)
         sum = 0
...
@@ -587,7 +587,7 @@ class CommContext:
                 if forward_order_beta > backward_order_beta
                 else backward_order_beta
             )
-            if max_beta == None:
+            if max_beta is None:
                 max_beta = beta
             else:
                 if beta > max_beta:
...
@@ -84,7 +84,7 @@ class Partitioner(object):
         dist_op_context.rank_id = self._rank_id
         # partition startup program
-        if serial_startup_program == None:
+        if serial_startup_program is None:
             partitioned_startup_prog = None
         else:
             partitioned_startup_prog = self.partition_startup_program(
...
@@ -61,7 +61,7 @@ def new_process_group(ranks, group_id=None):
     num_groups = len(_g_process_group_map)
     # Note: our process group may interfere with the original implementation
     # so the created group id should start from the original _new_ring_id()
-    if group_id == None:
+    if group_id is None:
         group_id = _new_ring_id() + num_groups + 1
     new_pg = ProcessGroup(group_id, ranks)
...
@@ -530,7 +530,7 @@ class OptimizationTuner:
             self._finished_trials.append(trial)
             cur_mertic = get_metric(results)
-            if self._best_metric == None or cur_mertic > self._best_metric:
+            if self._best_metric is None or cur_mertic > self._best_metric:
                 self._best_metric = cur_mertic
                 self._best_iter = i
...
@@ -31,7 +31,7 @@ class Command(object):
         self.etcd.put(self.np_path, '{}'.format(np).encode('latin-1'))
     def scale_np(self, np):
-        if self.etcd.get(self.np_path)[0] != None:
+        if self.etcd.get(self.np_path)[0] is not None:
             self.set_np(np)
             return True
         return False
...
@@ -293,7 +293,7 @@ class Gloo(object):
             if "Gateway" in item and "Iface" in item:
                 gateway_idx = item.index("Gateway")
                 iface_idx = item.index("Iface")
-            elif gateway_idx != None and iface_idx != None:
+            elif gateway_idx is not None and iface_idx is not None:
                 gateway = None
                 if len(item) > gateway_idx:
                     gateway = item[gateway_idx]
@@ -845,7 +845,7 @@ class PaddleCloudRoleMaker(RoleMakerBase):
             self._server_endpoints = self._server_endpoints.split(",")
         self._worker_endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS", None)
-        if self._worker_endpoints != None:
+        if self._worker_endpoints is not None:
             self._worker_endpoints = self._worker_endpoints.split(",")
         else:
             self._worker_endpoints = []
@@ -860,14 +860,14 @@ class PaddleCloudRoleMaker(RoleMakerBase):
             self._coordinator_endpoints = self._coordinator_endpoints.split(",")
         trainers_num = os.getenv("PADDLE_TRAINERS_NUM", None)
-        if trainers_num == None:
+        if trainers_num is None:
             raise ValueError(
                 "Can not find PADDLE_TRAINERS_NUM, please check your environment."
             )
         trainers_num = int(trainers_num)
         training_role = os.getenv("TRAINING_ROLE", None)
-        if training_role == None:
+        if training_role is None:
             raise ValueError(
                 "Can not find TRAINING_ROLE, please check your environment."
             )
@@ -937,20 +937,20 @@ class PaddleCloudRoleMaker(RoleMakerBase):
         if training_role == "TRAINER":
             role = Role.WORKER
             current_id = os.getenv("PADDLE_TRAINER_ID", None)
-            if current_id == None:
+            if current_id is None:
                 raise ValueError(
                     "Can not find PADDLE_TRAINER_ID, please check your environment."
                 )
             current_id = int(current_id)
             if self._is_heter_parameter_server_mode:
                 self._stage_id = os.getenv("STAGE_ID", None)
-                if self._stage_id == None:
+                if self._stage_id is None:
                     raise ValueError(
                         "Can not find STAGE_ID, please check your environment."
                     )
                 self._stage_id = int(self._stage_id)
                 self._stage_num = os.getenv("STAGE_NUM", None)
-                if self._stage_num == None:
+                if self._stage_num is None:
                     raise ValueError(
                         "Can not find STAGE_NUM, please check your environment."
                     )
@@ -958,18 +958,18 @@ class PaddleCloudRoleMaker(RoleMakerBase):
                 self._stage_trainers = os.getenv(
                     "PADDLE_STAGE_TRAINERS_NUM", None
                 )
-                if self._stage_trainers == None:
+                if self._stage_trainers is None:
                     raise ValueError(
                         "Can not find PADDLE_STAGE_TRAINERS_NUM, please check your environment."
                     )
                 self._stage_trainers = eval(self._stage_trainers)
             cur_port = os.getenv("PADDLE_PORT", None)
-            if cur_port == None:
+            if cur_port is None:
                 raise ValueError(
                     "Can not find PADDLE_PORT, please check your environment."
                 )
             cur_ip = os.getenv("POD_IP", None)
-            if cur_ip == None:
+            if cur_ip is None:
                 raise ValueError(
                     "Can not find POD_IP, please check your environment."
                 )
@@ -982,12 +982,12 @@ class PaddleCloudRoleMaker(RoleMakerBase):
         elif training_role == "PSERVER":
             role = Role.SERVER
             cur_port = os.getenv("PADDLE_PORT", None)
-            if cur_port == None:
+            if cur_port is None:
                 raise ValueError(
                     "Can not find PADDLE_PORT, please check your environment."
                 )
             cur_ip = os.getenv("POD_IP", None)
-            if cur_ip == None:
+            if cur_ip is None:
                 raise ValueError(
                     "Can not find POD_IP, please check your environment."
                 )
@@ -997,20 +997,20 @@ class PaddleCloudRoleMaker(RoleMakerBase):
         elif training_role == "HETER_TRAINER":
             role = Role.HETER_WORKER
             self._stage_id = os.getenv("STAGE_ID", None)
-            if self._stage_id == None:
+            if self._stage_id is None:
                 raise ValueError(
                     "Can not find STAGE_ID, please check your environment."
                 )
             self._stage_id = int(self._stage_id)
             self._stage_num = os.getenv("STAGE_NUM", None)
-            if self._stage_num == None:
+            if self._stage_num is None:
                 raise ValueError(
                     "Can not find STAGE_NUM, please check your environment."
                 )
             self._stage_num = int(self._stage_num)
             self._stage_trainers = os.getenv("PADDLE_STAGE_TRAINERS_NUM", None)
-            if self._stage_trainers == None:
+            if self._stage_trainers is None:
                 raise ValueError(
                     "Can not find PADDLE_STAGE_TRAINERS_NUM, please check your environment."
                 )
@@ -1019,7 +1019,7 @@ class PaddleCloudRoleMaker(RoleMakerBase):
             self._heter_trainer_device_type = os.getenv(
                 "HETER_DEVICE_TYPE", None
             )
-            if self._heter_trainer_device_type == None:
+            if self._heter_trainer_device_type is None:
                 raise ValueError(
                     "Can not find HETER_DEVICE_TYPE, please check your environment."
                 )
@@ -1040,12 +1040,12 @@ class PaddleCloudRoleMaker(RoleMakerBase):
             )
             cur_port = os.getenv("PADDLE_PORT", None)
-            if cur_port == None:
+            if cur_port is None:
                 raise ValueError(
                     "Can not find PADDLE_PORT, please check your environment."
                 )
             cur_ip = os.getenv("POD_IP", None)
-            if cur_ip == None:
+            if cur_ip is None:
                 raise ValueError(
                     "Can not find POD_IP, please check your environment."
                 )
...
@@ -204,13 +204,13 @@ class StrategyCompiler(StrategyCompilerBase):
         )
         return_meta = (
-            None if meta_optimizers == None else meta_optimizers[0]
+            None if meta_optimizers is None else meta_optimizers[0]
         )
         return_graph = (
-            None if graph_optimizers == None else graph_optimizers[0]
+            None if graph_optimizers is None else graph_optimizers[0]
         )
-        if meta_optimizers == None or graph_optimizers == None:
+        if meta_optimizers is None or graph_optimizers is None:
             return return_meta, return_graph
         # do heuristic filter here, if any meta optimizer in graph optimizers is in
...
@@ -509,7 +509,9 @@ class UtilBase(object):
             }
             for each_var in saved_params:
                 var_temp = fluid.global_scope().find_var(each_var.name)
-                assert var_temp != None, "can't not find var: " + each_var.name
+                assert var_temp is not None, (
+                    "can't not find var: " + each_var.name
+                )
                 new_shape = (np.array(var_temp.get_tensor())).shape
                 assert each_var.name in orig_para_shape, (
                     each_var.name + "MUST in var list"
...
@@ -79,7 +79,7 @@ class DataGenerator(object):
             batch_samples = []
             line_iter = self.generate_sample(None)
             for user_parsed_line in line_iter():
-                if user_parsed_line == None:
+                if user_parsed_line is None:
                     continue
                 batch_samples.append(user_parsed_line)
                 if len(batch_samples) == self.batch_size_:
@@ -121,7 +121,7 @@ class DataGenerator(object):
             for line in sys.stdin:
                 line_iter = self.generate_sample(line)
                 for user_parsed_line in line_iter():
-                    if user_parsed_line == None:
+                    if user_parsed_line is None:
                         continue
                     batch_samples.append(user_parsed_line)
                     if len(batch_samples) == self.batch_size_:
...
@@ -1285,7 +1285,7 @@ class Fleet(object):
             context["origin_main_program"] = self.origin_main_program
             context["origin_main_programs"] = [self.origin_main_program]
             context["loss"] = loss
-            if startup_program == None:
+            if startup_program is None:
                 self.origin_startup_program = (
                     paddle.static.default_startup_program().clone(for_test=False)
                 )
...
@@ -796,7 +796,7 @@ def launch():
         )  # which_distributed_mode must modify args.backend
     else:
         assert (
-            args.run_mode == 'collective' or args.run_mode == None
+            args.run_mode == 'collective' or args.run_mode is None
         ), "When backend is not 'auto', run mode must be collective"
         check_backend(args.backend)
         distribute_mode = DistributeMode.COLLECTIVE
...
@@ -120,7 +120,7 @@ class Cluster(object):
         for pod in self.pods:
             ep = "{}:{}".format(pod.addr, pod.port)
             assert (
-                pod.port != None and pod.addr != None
+                pod.port is not None and pod.addr is not None
             ), "{} not a valid endpoint".format(ep)
             r.append(ep)
         return r
@@ -979,7 +979,7 @@ def get_custom_endpoints(origin_endpoints, offset=0):
        origin_endpoint: ip:port
        user_define_endpoint: ip:(port+offset)
    """
-    assert origin_endpoints != None
+    assert origin_endpoints is not None
    paddle_user_define_endpoints_list = []
    for ip_port in origin_endpoints.split(","):
        ip = ip_port.split(":")[0]
@@ -1625,7 +1625,7 @@ class ParameterServerLauncher(object):
         else:
             self.is_local = False
             pod_ip = os.getenv("POD_IP", None)
-            if pod_ip == None:
+            if pod_ip is None:
                 _, self.current_node_ip = get_host_name_ip()
             else:
                 self.current_node_ip = pod_ip
...
@@ -269,7 +269,7 @@ class GraphExecutionOptimizer(MetaOptimizerBase):
     def minimize(
         self, loss, startup_program=None, parameter_list=None, no_grad_set=None
     ):
-        if startup_program == None:
+        if startup_program is None:
             startup_program = paddle.static.default_startup_program()
         compiled_program = self._try_to_compile(
             startup_program, loss.block.program, loss
...
@@ -133,7 +133,7 @@ class ParameterServerOptimizer(MetaOptimizerBase):
         self.inner_opt.minimize(
             loss, startup_program, parameter_list, no_grad_set
         )
-        if startup_program == None:
+        if startup_program is None:
             startup_program = paddle.static.default_startup_program()
         # print("program after inner optimizer minimize:",
...
@@ -82,7 +82,7 @@ class ParameterServerRuntime(RuntimeBase):
     def _load_sparse_params(
         self, executor, dirname, varnames, main_program=None
     ):
-        assert vars != None
+        assert vars is not None
         check_vars = []
         load_prog = Program()
         load_block = load_prog.global_block()
...
@@ -997,7 +997,7 @@ class TheOnePSRuntime(RuntimeBase):
             tensor_table_dict = self.compiled_strategy.get_tensor_table_dict()
             program_idx = 0
             for table_name in tensor_table_dict:
-                if tensor_table_dict[table_name]["startup_program"] != None:
+                if tensor_table_dict[table_name]["startup_program"] is not None:
                     tensor_table_dict[table_name][
                         "startup_program_id"
                     ] = program_idx
@@ -1005,7 +1005,7 @@ class TheOnePSRuntime(RuntimeBase):
                         tensor_table_dict[table_name]["startup_program"].desc
                     )
                     program_idx += 1
-                if tensor_table_dict[table_name]["main_program"] != None:
+                if tensor_table_dict[table_name]["main_program"] is not None:
                     tensor_table_dict[table_name][
                         "main_program_id"
                     ] = program_idx
@@ -1241,7 +1241,7 @@ class TheOnePSRuntime(RuntimeBase):
             self._communicator.stop()
         if self.role_maker._is_heter_parameter_server_mode:
             assert (
-                self._heter_client != None
+                self._heter_client is not None
             ), "heter client should not be None in heterps mode"
             self._heter_client.stop()
         # executor = self._get_executor()
...
@@ -574,7 +574,7 @@ class HDFSClient(FS):
     def _test_match(self, lines):
         for l in lines:
             m = self._bd_err_re.match(l)
-            if m != None:
+            if m is not None:
                 return m
         return None
...
@@ -466,7 +466,7 @@ class HybridParallelInferenceHelper(object):
            variable named var_name.
        """
        prev_ops = self._output_var_to_op[var_name]
-        if prev_ops == None:
+        if prev_ops is None:
            return None
        result_op = None
        for prev_op, prev_idx in reversed(prev_ops):
...
@@ -287,7 +287,7 @@ class FLClient(FLClientBase):
         fleet.init_worker()
     def callback_initialize_model_params(self):
-        if self.exe == None or self.main_program == None:
+        if self.exe is None or self.main_program is None:
             raise AssertionError("exe or main_program not set")
         self.exe.run(self.startup_program)
...
@@ -1326,7 +1326,7 @@ class TheOnePSRuntime(RuntimeBase):
         )  # --> HeterClient::GetInstance
     def _init_coordinator(self, scopes=None):
-        if self._coordinator == None:
+        if self._coordinator is None:
             self._coordinator = Coordinator(self.string_hosts)
         print(">>> curr node ip: {}".format(self.coordinator_hosts[0]))
@@ -1336,7 +1336,7 @@ class TheOnePSRuntime(RuntimeBase):
         )
     def _make_fl_strategy(self):
-        if self._coordinator == None:
+        if self._coordinator is None:
             assert "Coordinator py object is null!"
         else:
             self._coordinator.make_fl_strategy()
@@ -1401,7 +1401,7 @@ class TheOnePSRuntime(RuntimeBase):
             self._worker.stop_worker()
         if self.is_heter_ps_mode:
             assert (
                self._heter_client != None
-                self._heter_client != None
+                self._heter_client is not None
             ), "heter client should not be None in heterps mode"
             self._heter_client.stop()
...
@@ -671,7 +671,7 @@ def find_heter_ops(program, default_device="cpu"):
             # Todo: need update this method
             # op._set_attr('op_device', current_heter_device)
             return True
-        elif op_device == None or op_device == default_device:
+        elif op_device is None or op_device == default_device:
             op._set_attr('op_device', default_device)
             return False
         return False
...
@@ -106,7 +106,7 @@ def init_rpc(name, rank=None, world_size=None, master_endpoint=None):
     logger.info("Trainer {}: worker endpoint: {}".format(rank, worker_endpoint))
     master_endpoint = (
         master_endpoint
-        if master_endpoint != None
+        if master_endpoint is not None
         else os.environ["PADDLE_MASTER_ENDPOINT"]
     )
     master_addr, master_port = master_endpoint.split(":")
...
@@ -180,7 +180,7 @@ class Cluster(object):
         for pod in self.pods:
             ep = "{}:{}".format(pod.addr, pod.port)
             assert (
-                pod.port != None and pod.addr != None
+                pod.port is not None and pod.addr is not None
             ), "{} not a valid endpoint".format(ep)
             r.append(ep)
...
@@ -1942,7 +1942,7 @@ def append_backward(
     # sub-block (control flow)
     is_recompute = False
     if (
-        checkpoints != None
+        checkpoints is not None
         and isinstance(checkpoints, list)
         and len(checkpoints) > 0
     ):
...
@@ -63,8 +63,8 @@ class Communicator(object):
         """
         # set all recv op to not_run mode
-        if kwargs == None:
-            if envs == None:
+        if kwargs is None:
+            if envs is None:
                 envs = {}
         else:
             if mode == DistributedMode.SYNC:
@@ -97,7 +97,7 @@ class Communicator(object):
     def init_with_ctx(
         self, send_ctx, recv_ctx, proto_txt, unit64_hosts, scope=None
     ):
-        if scope == None:
+        if scope is None:
             scope = global_scope()
         self.communicator_ = core.DistCommunicator(
             self.mode,
@@ -144,7 +144,7 @@ class Communicator(object):
                 comm.start()
                 comm.stop()
         """
-        if self.communicator_ == None:
+        if self.communicator_ is None:
            print('you must call init_with_ctx first to init comm before start')
            return
        self.communicator_.start()
@@ -166,7 +166,7 @@ class Communicator(object):
                comm.start()
                comm.stop()
        """
-        if self.communicator_ == None:
+        if self.communicator_ is None:
            print('you must call init_with_ctx first to init comm before stop')
            return
        self.communicator_.stop()
@@ -187,7 +187,7 @@ class Communicator(object):
                comm = fluid.communicator.Communicator(prog)
                comm.is_running()
        """
-        if self.communicator_ == None:
+        if self.communicator_ is None:
            print('you must call init_with_ctx first to init comm before stop')
            return
        self.communicator_.is_running()
@@ -202,7 +202,7 @@ class Communicator(object):
        self.communicator_.pull_dense(context)
    def push_sparse_param(self, var_name, table_id=-1, scope=None):
-        if scope == None:
+        if scope is None:
            scope = global_scope()
        if not self.is_running():
            raise ValueError(
@@ -226,14 +226,14 @@ class FLCommunicator(Communicator):  ## only for coordinator
        self.init_with_ctx(send_ctx, dense_map, prototxt, ps_hosts)
    def start_coordinator(self, self_endpoint, trainer_endpoints):
-        if self.communicator_ != None:
+        if self.communicator_ is not None:
            self.communicator_.start_coordinator(
                self_endpoint, trainer_endpoints
            )
        return
    def save_fl_strategy(self, mp):
-        if self.communicator_ != None:
+        if self.communicator_ is not None:
            self.communicator_.save_fl_strategy(mp)
        else:
            raise ValueError("self.communicator_ is null")
@@ -241,7 +241,7 @@ class FLCommunicator(Communicator):  ## only for coordinator
    def query_fl_clients_info(self):
        info_mp = {}
-        if self.communicator_ != None:
+        if self.communicator_ is not None:
            info_mp = self.communicator_.query_fl_clients_info()
        return info_mp
...
@@ -1242,7 +1242,7 @@ def sparse_embedding(
     )
     entry_str = entry._to_attr()
-    if slot == None:
+    if slot is None:
         slot = 0
     helper.append_op(
...
@@ -375,7 +375,7 @@ def basic_gru(
             rnn.step_output(new_hidden)
             step_input = new_hidden
-            if dropout_prob != None and dropout_prob > 0.0:
+            if dropout_prob is not None and dropout_prob > 0.0:
                 step_input = layers.dropout(
                     step_input,
                     dropout_prob=dropout_prob,
@@ -677,7 +677,7 @@ def basic_lstm(
             rnn.step_output(new_cell)
             step_input = new_hidden
-            if dropout_prob != None and dropout_prob > 0.0:
+            if dropout_prob is not None and dropout_prob > 0.0:
                 step_input = layers.dropout(
                     step_input,
                     dropout_prob=dropout_prob,
...
@@ -344,7 +344,7 @@ class PostTrainingQuantization(object):
         # Save input params
         self._bias_correction = bias_correction
         self._executor = executor
-        self._scope = global_scope() if scope == None else scope
+        self._scope = global_scope() if scope is None else scope
         self._model_dir = model_dir
         self._model_filename = model_filename
         self._params_filename = params_filename
...
@@ -1874,8 +1874,8 @@ class AddQuantDequantPass(object):
             '%s_grad' % (op) for op in self._quantizable_op_type
         ]
-        assert self._scope != None, "scope must not be None."
-        assert self._place != None, "place must not be None."
+        assert self._scope is not None, "scope must not be None."
+        assert self._place is not None, "place must not be None."
     def apply(self, graph):
         """
@@ -2737,8 +2737,8 @@ class AddQuantDequantPassV2(object):
             '%s_grad' % (op) for op in self._quantizable_op_type
         ]
-        assert self._scope != None, "scope must not be None."
-        assert self._place != None, "place must not be None."
+        assert self._scope is not None, "scope must not be None."
+        assert self._place is not None, "place must not be None."
         self.persistable_vars = []
     def apply(self, graph):
@@ -2878,8 +2878,8 @@ class ReplaceFakeQuantDequantPass(object):
         self._place = _get_paddle_place(place)
         self._scope = scope
         self._quant_bits = quant_bits
-        assert self._scope != None, "scope must not be None."
-        assert self._place != None, "place must not be None."
+        assert self._scope is not None, "scope must not be None."
+        assert self._place is not None, "place must not be None."
     def apply(self, graph):
         assert isinstance(
@@ -3027,8 +3027,8 @@ class QuantWeightPass(object):
         self._bias_correction = bias_correction
         self._quant_bits = quant_bits
         self._save_int_weight = save_int_weight
-        assert self._scope != None, "scope must not be None."
-        assert self._place != None, "place must not be None."
+        assert self._scope is not None, "scope must not be None."
+        assert self._place is not None, "place must not be None."
     def apply(self, graph):
         assert isinstance(
...
@@ -162,7 +162,7 @@ class QuantizeTranspilerV2(object):
             scope(fluid.Scope, optional): The scope of the program, use it to load
                 and save variables. If scope=None, get scope by global_scope().
         """
-        scope = global_scope() if scope == None else scope
+        scope = global_scope() if scope is None else scope
        for block in test_program.blocks:
            for op in block.ops:
...
@@ -332,7 +332,7 @@ def set_variable_data(scope, place, var_name, np_value):
         np_value, np.ndarray
     ), 'The type of value should be numpy array.'
     var_node = scope.find_var(var_name)
-    if var_node != None:
+    if var_node is not None:
         tensor = var_node.get_tensor()
         tensor.set(np_value, place)
...
@@ -219,7 +219,7 @@ class TestUserDefinedQuantization(unittest.TestCase):
             mapping_table = load_dict(mapping_table_path)
             test_graph.out_node_mapping_table = mapping_table
-            if act_quantize_func == None and weight_quantize_func == None:
+            if act_quantize_func is None and weight_quantize_func is None:
                 freeze_pass.apply(test_graph)
         tempdir.cleanup()
...
@@ -40,7 +40,7 @@ def _set_variable_data(scope, place, var_name, np_value):
         np_value, np.ndarray
     ), 'The type of value should be numpy array.'
     var_node = scope.find_var(var_name)
-    if var_node != None:
+    if var_node is not None:
         tensor = var_node.get_tensor()
         tensor.set(np_value, place)
...
@@ -109,7 +109,7 @@ class Hogwild(DeviceWorker):
         dense_table_set = set()
         program_id = str(id(self._program))
         print("device worker program id:", program_id)
-        if self._program == None:
+        if self._program is None:
             print("program of current device worker is not configured")
             exit(-1)
         opt_info = self._program._fleet_opt
@@ -259,7 +259,7 @@ class DownpourLite(DeviceWorker):
         dense_table_set = set()
         program_id = str(id(self._program))
         print("device worker program id:", program_id)
-        if self._program == None:
+        if self._program is None:
             print("program of current device worker is not configured")
             exit(-1)
         opt_info = self._program._fleet_opt
@@ -392,7 +392,7 @@ class DownpourSGD(DeviceWorker):
         """
         dense_table_set = set()
         program_id = str(id(self._program))
-        if self._program == None:
+        if self._program is None:
             print("program of current device worker is not configured")
             exit(-1)
         opt_info = self._program._fleet_opt
@@ -511,7 +511,7 @@ class DownpourSGDOPT(DeviceWorker):
         """
         dense_table_set = set()
         program_id = str(id(self._program))
-        if self._program == None:
+        if self._program is None:
             print("program of current device worker is not configured")
             exit(-1)
         opt_info = self._program._fleet_opt
...
@@ -34,9 +34,9 @@ class FileSystem(object):
         passwd=None,
         hadoop_bin="",
     ):
-        assert user != None
-        assert passwd != None
-        assert hadoop_bin != None
+        assert user is not None
+        assert passwd is not None
+        assert hadoop_bin is not None
         import ps_pb2 as pslib
         self.fs_client = pslib.FsClientParameter()
...
@@ -85,7 +85,7 @@ class ReplaceReturnNoneTransformer(BaseTransformer):
         if isinstance(node.value, gast.Name) and node.value.id == 'None':
             node.value = None
             return node
-        if isinstance(node.value, gast.Constant) and node.value.value == None:
+        if isinstance(node.value, gast.Constant) and node.value.value is None:
             node.value = None
             return node
         return node
...
@@ -1046,7 +1046,7 @@ class Layer(object):
                 for prefix, layer in model.named_sublayers():
                     print(prefix, layer)
         """
-        assert isinstance(sublayer, Layer) or sublayer == None
+        assert isinstance(sublayer, Layer) or sublayer is None
         self._sub_layers[name] = sublayer
         return sublayer
...
@@ -622,7 +622,7 @@ def _as_lodtensor(data, place, dtype=None):
 class FetchHandler(object):
     def __init__(self, var_dict=None, period_secs=60):
-        assert var_dict != None
+        assert var_dict is not None
         self.var_dict = var_dict
         self.period_secs = period_secs
@@ -2309,7 +2309,7 @@ class Executor(object):
             )
         else:
             # cache trainer instance for heterps pipeline training
-            if fetch_list == None:
+            if fetch_list is None:
                 fetch_list = []
             cache_key = _get_strong_program_cache_key(program, None, fetch_list)
             trainer_instance = self._get_trainer_cache(cache_key)
...
@@ -2880,7 +2880,8 @@ class Operator(object):
                 )
             if 'force_cpu' in op_attrs:
                 if (
-                    type == 'less_than' and op_attrs['force_cpu'] != None
+                    type == 'less_than'
+                    and op_attrs['force_cpu'] is not None
                 ) or op_attrs['force_cpu'] != False:
                     warnings.warn(
                         "The Attr(force_cpu) of Op(%s) will be deprecated in the future, "
...
@@ -46,7 +46,7 @@ g_program_attr = {}  # program_name->can_be_auto_checkpoint
 def _get_logger(log_level, name="auto_checkpoint"):
     global logger
-    if logger != None:
+    if logger is not None:
         return logger
     logger = logging.getLogger(name)
@@ -683,12 +683,12 @@ def _get_valid_program(prog):
 def _auto_checkpoint(exe, prog):
     _get_checker()
-    assert exe._auto_checkpoint_name != None
+    assert exe._auto_checkpoint_name is not None
     if not _can_auto_checkpoint(prog):
         return
     program = _get_valid_program(prog)
-    assert program._auto_checkpoint_name != None
+    assert program._auto_checkpoint_name is not None
     exe_status = g_train_epoch_range._exe_status
     key = _get_running_key(
...
@@ -80,7 +80,7 @@ class DataGenerator(object):
             batch_samples = []
             line_iter = self.generate_sample(None)
             for user_parsed_line in line_iter():
-                if user_parsed_line == None:
+                if user_parsed_line is None:
                     continue
                 batch_samples.append(user_parsed_line)
                 if len(batch_samples) == self.batch_size_:
@@ -117,7 +117,7 @@ class DataGenerator(object):
             for line in sys.stdin:
                 line_iter = self.generate_sample(line)
                 for user_parsed_line in line_iter():
-                    if user_parsed_line == None:
+                    if user_parsed_line is None:
                         continue
                     batch_samples.append(user_parsed_line)
                     if len(batch_samples) == self.batch_size_:
...
@@ -1010,7 +1010,7 @@ class GeneralRoleMaker(RoleMakerBase):
             if "Gateway" in item and "Iface" in item:
                 gateway_idx = item.index("Gateway")
                 iface_idx = item.index("Iface")
-            elif gateway_idx != None and iface_idx != None:
+            elif gateway_idx is not None and iface_idx is not None:
                 gateway = None
                 if len(item) > gateway_idx:
                     gateway = item[gateway_idx]
...
@@ -170,7 +170,7 @@ class Collective(Fleet):
         """
         This function save persistables and current epoch num to path.
         """
-        if main_program == None:
+        if main_program is None:
             main_program = self._transpiled_program
         m = PaddleModel(executor, main_program)
@@ -203,7 +203,7 @@ class Collective(Fleet):
         This function load persistables and current epoch num from path.
         """
-        if main_program == None:
+        if main_program is None:
             main_program = self._transpiled_program
         m = PaddleModel(executor, main_program)
...
@@ -737,7 +737,7 @@ def find_heter_ops(program, default_device="cpu"):
             # Todo: need update this method
             # op._set_attr('op_device', current_heter_device)
             return True
-        elif op_device == None or op_device == default_device:
+        elif op_device is None or op_device == default_device:
             op._set_attr('op_device', default_device)
             return False
         return False
...
@@ -499,7 +499,7 @@ class DistributedAdam(DistributedOptimizerImplBase):
         for num in range(len(losses)):
             loss = losses[num]
             parameters = None
-            if parameter_list != None:
+            if parameter_list is not None:
                 parameters = parameter_list[num]
             prog_id = str(id(loss.block.program))
             # param_grads of program
...
@@ -163,7 +163,7 @@ class HDFSClient(FS):
     def _test_match(self, lines):
         for l in lines:
             m = self._bd_err_re.match(l)
-            if m != None:
+            if m is not None:
                 return m
         return None
...
@@ -256,7 +256,7 @@ def try_load_model_vars(
         }
         for each_var in saved_params:
             var_temp = fluid.global_scope().find_var(each_var.name)
-            assert var_temp != None, "can't not find var: " + each_var.name
+            assert var_temp is not None, "can't not find var: " + each_var.name
             new_shape = (np.array(var_temp.get_tensor())).shape
             assert each_var.name in orig_para_shape, (
                 each_var.name + "MUST in var list"
...
@@ -1013,7 +1013,7 @@ def load_vars(
             if not isinstance(each_var, Parameter):
                 continue
             var_temp = paddle.fluid.global_scope().find_var(each_var.name)
-            assert var_temp != None, "can't not find var: " + each_var.name
+            assert var_temp is not None, "can't not find var: " + each_var.name
             new_shape = (np.array(var_temp.get_tensor())).shape
             assert each_var.name in orig_para_shape, (
                 each_var.name + "MUST in var list"
@@ -2146,7 +2146,7 @@ def load(program, model_path, executor=None, var_list=None):
         return
     elif os.path.isfile(model_path):
-        if var_list == None:
+        if var_list is None:
             raise ValueError(
                 "var_list is required when loading model file saved with [ save_params, save_persistables, save_vars ]"
             )
@@ -2479,7 +2479,7 @@ def set_program_state(program, state_dict):
     for para in parameter_list:
         var_temp = paddle.fluid.global_scope().find_var(para.name)
         assert (
-            var_temp != None
+            var_temp is not None
         ), "Variable [ {} ] Not found, Please make sure run startup program".format(
             para.name
         )
...
@@ -1953,7 +1953,7 @@ def less_than(x, y, force_cpu=None, cond=None, name=None):
     )
     if cond is not None:
         check_type(cond, "cond", Variable, "less_than")
-    if force_cpu != None:
+    if force_cpu is not None:
         check_type(force_cpu, "force_cpu", bool, "less_than")
     helper = LayerHelper("less_than", **locals())
...
@@ -3856,7 +3856,7 @@ def data_norm(
     bias_default = param_attr.get("bias", 0.0)
     # create scale and shift(bias) when enable_scale_and_shift is True
-    if name == None:
+    if name is None:
         name = "dn"
     if enable_scale_and_shift:
         scale_w = helper.create_parameter(
@@ -5234,17 +5234,17 @@ def reduce_max(input, dim=None, keep_dim=False, name=None):
         dim = [dim]
     if in_dygraph_mode():
-        return _C_ops.max(input, dim if dim != None else [], keep_dim)
+        return _C_ops.max(input, dim if dim is not None else [], keep_dim)
     helper.append_op(
         type='reduce_max',
         inputs={'X': input},
         outputs={'Out': out},
         attrs={
-            'dim': dim if dim != None and dim != [] else [0],
+            'dim': dim if dim is not None and dim != [] else [0],
             'keep_dim': keep_dim,
             'reduce_all': True
-            if dim == None or dim == [] or len(dim) == len(input.shape)
+            if dim is None or dim == [] or len(dim) == len(input.shape)
             else False,
         },
     )
@@ -5306,17 +5306,17 @@ def reduce_min(input, dim=None, keep_dim=False, name=None):
         dim = [dim]
     if in_dygraph_mode():
-        return _C_ops.min(input, dim if dim != None else [], keep_dim)
+        return _C_ops.min(input, dim if dim is not None else [], keep_dim)
     helper.append_op(
         type='reduce_min',
         inputs={'X': input},
         outputs={'Out': out},
         attrs={
-            'dim': dim if dim != None and dim != [] else [0],
+            'dim': dim if dim is not None and dim != [] else [0],
             'keep_dim': keep_dim,
             'reduce_all': True
-            if dim == None or dim == [] or len(dim) == len(input.shape)
+            if dim is None or dim == [] or len(dim) == len(input.shape)
             else False,
         },
     )
@@ -5387,10 +5387,10 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
     if in_dygraph_mode():
         return _C_ops.prod(
             input,
-            dim if dim != None and dim != [] else [0],
+            dim if dim is not None and dim != [] else [0],
             keep_dim,
             True
-            if dim == None or dim == [] or len(dim) == len(input.shape)
+            if dim is None or dim == [] or len(dim) == len(input.shape)
             else False,
         )
@@ -5404,10 +5404,10 @@ def reduce_prod(input, dim=None, keep_dim=False, name=None):
         inputs={'X': input},
         outputs={'Out': out},
         attrs={
-            'dim': dim if dim != None and dim != [] else [0],
+            'dim': dim if dim is not None and dim != [] else [0],
             'keep_dim': keep_dim,
             'reduce_all': True
-            if dim == None or dim == [] or len(dim) == len(input.shape)
+            if dim is None or dim == [] or len(dim) == len(input.shape)
             else False,
         },
     )
@@ -5462,7 +5462,7 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
         dim = [dim]
     if in_dygraph_mode():
-        return _C_ops.all(input, dim if dim != None else [], keep_dim)
+        return _C_ops.all(input, dim if dim is not None else [], keep_dim)
     check_variable_and_dtype(input, 'input', ('bool'), 'reduce_all')
     helper = LayerHelper('reduce_all', **locals())
@@ -5472,10 +5472,10 @@ def reduce_all(input, dim=None, keep_dim=False, name=None):
         inputs={'X': input},
         outputs={'Out': out},
         attrs={
-            'dim': dim if dim != None and dim != [] else [0],
+            'dim': dim if dim is not None and dim != [] else [0],
             'keep_dim': keep_dim,
             'reduce_all': True
-            if dim == None or dim == [] or len(dim) == len(input.shape)
+            if dim is None or dim == [] or len(dim) == len(input.shape)
             else False,
         },
     )
@@ -5535,10 +5535,10 @@ def reduce_any(input, dim=None, keep_dim=False, name=None):
         inputs={'X': input},
         outputs={'Out': out},
         attrs={
-            'dim': dim if dim != None and dim != [] else [0],
+            'dim': dim if dim is not None and dim != [] else [0],
             'keep_dim': keep_dim,
             'reduce_all': True
-            if dim == None or dim == [] or len(dim) == len(input.shape)
+            if dim is None or dim == [] or len(dim) == len(input.shape)
             else False,
         },
     )
@@ -11386,7 +11386,7 @@ def unstack(x, axis=0, num=None):
     """
     if _non_static_mode():
-        if num == None:
+        if num is None:
             num = x.shape[axis]
         if num == 0:
             return []
...
...@@ -99,7 +99,7 @@ class MetricBase(object): ...@@ -99,7 +99,7 @@ class MetricBase(object):
The MetricBase or its succeed classes The MetricBase or its succeed classes
""" """
self._name = str(name) if name != None else self.__class__.__name__ self._name = str(name) if name is not None else self.__class__.__name__
def __str__(self): def __str__(self):
return self._name return self._name
......
...@@ -114,7 +114,7 @@ def draw_graph(startup_program, main_program, **kwargs): ...@@ -114,7 +114,7 @@ def draw_graph(startup_program, main_program, **kwargs):
graph_id = unique_id() graph_id = unique_id()
filename = kwargs.get("filename") filename = kwargs.get("filename")
if filename == None: if filename is None:
filename = str(graph_id) + ".gv" filename = str(graph_id) + ".gv"
g = Graph( g = Graph(
name=str(graph_id), name=str(graph_id),
...@@ -129,6 +129,6 @@ def draw_graph(startup_program, main_program, **kwargs): ...@@ -129,6 +129,6 @@ def draw_graph(startup_program, main_program, **kwargs):
parse_graph(startup_program, g, var_dict) parse_graph(startup_program, g, var_dict)
parse_graph(main_program, g, var_dict) parse_graph(main_program, g, var_dict)
if filename != None: if filename is not None:
g.save() g.save()
return g return g
...@@ -707,7 +707,7 @@ class Optimizer(object): ...@@ -707,7 +707,7 @@ class Optimizer(object):
name, param.name name, param.name
) )
) )
if shape == None: if shape is None:
shape = param.shape shape = param.shape
assert isinstance(self.helper, LayerHelper) assert isinstance(self.helper, LayerHelper)
...@@ -770,7 +770,7 @@ class Optimizer(object): ...@@ -770,7 +770,7 @@ class Optimizer(object):
if framework._non_static_mode(): if framework._non_static_mode():
return self._global_accumulators[name] return self._global_accumulators[name]
raise Exception("Global accumulator {} already exists".format(name)) raise Exception("Global accumulator {} already exists".format(name))
if shape == None: if shape is None:
shape = [1] # most case, global accumulator is of shape [1] shape = [1] # most case, global accumulator is of shape [1]
assert isinstance(self.helper, LayerHelper) assert isinstance(self.helper, LayerHelper)
...@@ -1268,7 +1268,7 @@ class Optimizer(object): ...@@ -1268,7 +1268,7 @@ class Optimizer(object):
# NOTE(zhiqiu): currently, only support ClipGradByGlobalNorm and without regularization. # NOTE(zhiqiu): currently, only support ClipGradByGlobalNorm and without regularization.
if self._flatten_param_grads and self.regularization is None: if self._flatten_param_grads and self.regularization is None:
if self._grad_clip == None or isinstance( if self._grad_clip is None or isinstance(
self._grad_clip, ClipGradByGlobalNorm self._grad_clip, ClipGradByGlobalNorm
): ):
params_grads = self.flatten_param_grads(params_grads) params_grads = self.flatten_param_grads(params_grads)
...@@ -3344,7 +3344,7 @@ class DpsgdOptimizer(Optimizer): ...@@ -3344,7 +3344,7 @@ class DpsgdOptimizer(Optimizer):
assert isinstance(block, framework.Block) assert isinstance(block, framework.Block)
# create the dpsgd optimize op # create the dpsgd optimize op
if self._seed == None: if self._seed is None:
self._seed = 0 self._seed = 0
if framework._non_static_mode(): if framework._non_static_mode():
...@@ -4454,10 +4454,10 @@ class ModelAverage(Optimizer): ...@@ -4454,10 +4454,10 @@ class ModelAverage(Optimizer):
tmp = layers.sum(x=[num_accumulates, old_num_accumulates]) tmp = layers.sum(x=[num_accumulates, old_num_accumulates])
sum = layers.sum(x=[sum_1, sum_2, sum_3]) sum = layers.sum(x=[sum_1, sum_2, sum_3])
tmp = layers.cast( tmp = layers.cast(
x=tmp, dtype='float32' if self._dtype == None else self._dtype x=tmp, dtype='float32' if self._dtype is None else self._dtype
) )
sum = layers.cast( sum = layers.cast(
x=sum, dtype='float32' if self._dtype == None else self._dtype x=sum, dtype='float32' if self._dtype is None else self._dtype
) )
ops._elementwise_div(x=sum, y=tmp, out=param) ops._elementwise_div(x=sum, y=tmp, out=param)
...@@ -5254,7 +5254,7 @@ class PipelineOptimizer(object): ...@@ -5254,7 +5254,7 @@ class PipelineOptimizer(object):
var_name = var_name.replace('.cast_fp16', '') var_name = var_name.replace('.cast_fp16', '')
post_ops = self.input_var_to_op[var_name] post_ops = self.input_var_to_op[var_name]
if post_ops == None: if post_ops is None:
return None return None
result_op = None result_op = None
for post_op, post_idx in reversed(post_ops): for post_op, post_idx in reversed(post_ops):
...@@ -5269,7 +5269,7 @@ class PipelineOptimizer(object): ...@@ -5269,7 +5269,7 @@ class PipelineOptimizer(object):
variable named var_name. variable named var_name.
""" """
prev_ops = self.output_var_to_op[var_name] prev_ops = self.output_var_to_op[var_name]
if prev_ops == None: if prev_ops is None:
return None return None
result_op = None result_op = None
for prev_op, prev_idx in reversed(prev_ops): for prev_op, prev_idx in reversed(prev_ops):
...@@ -7270,7 +7270,7 @@ class RecomputeOptimizer(Optimizer): ...@@ -7270,7 +7270,7 @@ class RecomputeOptimizer(Optimizer):
if output_var in self.un_offload_checkpoint_names: if output_var in self.un_offload_checkpoint_names:
# insert sync op if last checkpoint has not been sync # insert sync op if last checkpoint has not been sync
if last_offload_checkpoint != None: if last_offload_checkpoint is not None:
if ( if (
self.checkpoint_usage_count_and_idx[ self.checkpoint_usage_count_and_idx[
last_offload_checkpoint last_offload_checkpoint
...@@ -7400,7 +7400,7 @@ class RecomputeOptimizer(Optimizer): ...@@ -7400,7 +7400,7 @@ class RecomputeOptimizer(Optimizer):
""" """
self._main_program = loss.block.program self._main_program = loss.block.program
self.block = loss.block self.block = loss.block
if startup_program == None: if startup_program is None:
startup_program = paddle.static.default_startup_program() startup_program = paddle.static.default_startup_program()
with program_guard(self._main_program, startup_program): with program_guard(self._main_program, startup_program):
......
...@@ -66,7 +66,7 @@ class TestCustomRawReluOp(unittest.TestCase): ...@@ -66,7 +66,7 @@ class TestCustomRawReluOp(unittest.TestCase):
def custom_raw_relu(self, x): def custom_raw_relu(self, x):
module = importlib.import_module(MODULE_NAME) module = importlib.import_module(MODULE_NAME)
custom_raw_relu_op = getattr(module, "custom_raw_relu") custom_raw_relu_op = getattr(module, "custom_raw_relu")
self.assertTrue(custom_raw_relu_op is not None) self.assertIsNotNone(custom_raw_relu_op)
return custom_raw_relu_op(x) return custom_raw_relu_op(x)
def test_static(self): def test_static(self):
......
...@@ -139,9 +139,9 @@ class TestASPDynamicOptimize(unittest.TestCase): ...@@ -139,9 +139,9 @@ class TestASPDynamicOptimize(unittest.TestCase):
name, None name, None
) )
if ASPHelper._is_supported_layer(program, name): if ASPHelper._is_supported_layer(program, name):
self.assertTrue(mask_var is not None) self.assertIsNotNone(mask_var)
else: else:
self.assertTrue(mask_var is None) self.assertIsNone(mask_var)
def test_asp_training(self): def test_asp_training(self):
self.optimizer = paddle.incubate.asp.decorate(self.optimizer) self.optimizer = paddle.incubate.asp.decorate(self.optimizer)
......
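A minimal, self-contained sketch of what the assertTrue(... is None) -> assertIsNone(...) rewrites in the hunks above and below buy in practice (the test class, method, and values here are hypothetical placeholders, not identifiers from the Paddle test suite): both forms check the same condition, but the dedicated asserts report the offending value on failure instead of the generic "False is not true".

import unittest


class AssertNoneDemo(unittest.TestCase):
    # Hypothetical example; names are placeholders, not Paddle identifiers.
    def test_is_none_asserts(self):
        mask_var = None        # stands in for a looked-up mask variable
        fill_op = object()     # stands in for an op found in the program

        # Same checks as assertTrue(mask_var is None) / assertTrue(fill_op is not None),
        # but on failure these print the actual value, e.g. "'foo' is not None".
        self.assertIsNone(mask_var)
        self.assertIsNotNone(fill_op)


if __name__ == '__main__':
    unittest.main()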
...@@ -23,7 +23,7 @@ class TestStrategy(unittest.TestCase): ...@@ -23,7 +23,7 @@ class TestStrategy(unittest.TestCase):
recompute = strategy.recompute recompute = strategy.recompute
self.assertEqual(recompute.enable, False) self.assertEqual(recompute.enable, False)
self.assertEqual(recompute.checkpoints, None) self.assertIsNone(recompute.checkpoints)
amp = strategy.amp amp = strategy.amp
self.assertEqual(amp.enable, False) self.assertEqual(amp.enable, False)
...@@ -59,12 +59,12 @@ class TestStrategy(unittest.TestCase): ...@@ -59,12 +59,12 @@ class TestStrategy(unittest.TestCase):
self.assertEqual(qat.weight_bits, 8) self.assertEqual(qat.weight_bits, 8)
self.assertEqual(qat.activation_bits, 8) self.assertEqual(qat.activation_bits, 8)
self.assertEqual(qat.not_quant_pattern, ['skip_quant']) self.assertEqual(qat.not_quant_pattern, ['skip_quant'])
self.assertEqual(qat.algo, None) self.assertIsNone(qat.algo)
tuning = strategy.tuning tuning = strategy.tuning
self.assertEqual(tuning.enable, False) self.assertEqual(tuning.enable, False)
self.assertEqual(tuning.batch_size, 1) self.assertEqual(tuning.batch_size, 1)
self.assertEqual(tuning.dataset, None) self.assertIsNone(tuning.dataset)
self.assertEqual(tuning.profile_start_step, 1) self.assertEqual(tuning.profile_start_step, 1)
self.assertEqual(tuning.profile_end_step, 1) self.assertEqual(tuning.profile_end_step, 1)
self.assertEqual(tuning.run_after_tuning, True) self.assertEqual(tuning.run_after_tuning, True)
......
...@@ -395,7 +395,7 @@ class TestMLP(unittest.TestCase): ...@@ -395,7 +395,7 @@ class TestMLP(unittest.TestCase):
# test fill_constant_batch_size_like # test fill_constant_batch_size_like
self.assertTrue(fill_op is not None) self.assertIsNotNone(fill_op)
ref_shape = [-1, 8, 0, 48] ref_shape = [-1, 8, 0, 48]
shape = fill_op.attr("shape") shape = fill_op.attr("shape")
self.assertTrue(ref_shape == shape) self.assertTrue(ref_shape == shape)
......
...@@ -71,15 +71,15 @@ class AutoCheckPointACLBase(AutoCheckpointBase): ...@@ -71,15 +71,15 @@ class AutoCheckPointACLBase(AutoCheckpointBase):
exe, main_prog, startup_prog exe, main_prog, startup_prog
) )
for i in range(3): for i in range(3):
self.assertEqual(acp._get_train_epoch_range(), None) self.assertIsNone(acp._get_train_epoch_range())
self.assertEqual(acp.g_acp_type, None) self.assertIsNone(acp.g_acp_type)
for data in data_loader(): for data in data_loader():
self.assertEqual(acp.g_acp_type, None) self.assertIsNone(acp.g_acp_type)
self.assertEqual(acp._get_train_epoch_range(), None) self.assertIsNone(acp._get_train_epoch_range())
fetch = exe.run(compiled, feed=data, fetch_list=[loss]) fetch = exe.run(compiled, feed=data, fetch_list=[loss])
self.assertEqual(acp.g_acp_type, None) self.assertIsNone(acp.g_acp_type)
self.assertEqual(acp._get_train_epoch_range(), None) self.assertIsNone(acp._get_train_epoch_range())
m1 = PaddleModel(exe, compiled) m1 = PaddleModel(exe, compiled)
m1.serialize(save_dir) m1.serialize(save_dir)
...@@ -136,7 +136,7 @@ class AutoCheckPointACLBase(AutoCheckpointBase): ...@@ -136,7 +136,7 @@ class AutoCheckPointACLBase(AutoCheckpointBase):
break break
o = acp._get_train_epoch_range() o = acp._get_train_epoch_range()
assert o == None, "now train epoch must not exits now" assert o is None, "now train epoch must not exits now"
if break_epoch_no is None: if break_epoch_no is None:
self.assertEqual(i, 2) self.assertEqual(i, 2)
else: else:
...@@ -169,7 +169,7 @@ class AutoCheckPointACLBase(AutoCheckpointBase): ...@@ -169,7 +169,7 @@ class AutoCheckPointACLBase(AutoCheckpointBase):
fetch = exe.run(compiled, feed=data, fetch_list=[loss]) fetch = exe.run(compiled, feed=data, fetch_list=[loss])
o = acp._get_train_epoch_range() o = acp._get_train_epoch_range()
self.assertTrue(o == None, "now train epoch must not exits now") self.assertTrue(o is None, "now train epoch must not exits now")
self.assertEqual(i, 2) self.assertEqual(i, 2)
if break_epoch_no is not None: if break_epoch_no is not None:
......
...@@ -98,7 +98,7 @@ class AutoCheckpointTestDist(AutoCheckPointACLBase): ...@@ -98,7 +98,7 @@ class AutoCheckpointTestDist(AutoCheckPointACLBase):
self.assertEqual(len(o._exe_status), 1) self.assertEqual(len(o._exe_status), 1)
o = acp._get_train_epoch_range() o = acp._get_train_epoch_range()
assert o == None, "now train epoch must not exits now" assert o is None, "now train epoch must not exits now"
self.assertEqual(i, 2) self.assertEqual(i, 2)
fs.delete(save_dir) fs.delete(save_dir)
......
...@@ -94,7 +94,7 @@ class AutoCheckpointTestMul(AutoCheckPointACLBase): ...@@ -94,7 +94,7 @@ class AutoCheckpointTestMul(AutoCheckPointACLBase):
epochs.append(i) epochs.append(i)
o = acp._get_train_epoch_range() o = acp._get_train_epoch_range()
self.assertTrue(o == None, "now train epoch must not exits now") self.assertTrue(o is None, "now train epoch must not exits now")
self.assertEqual(i, 2) self.assertEqual(i, 2)
self.assertEqual(epochs, [0, 1, 2]) self.assertEqual(epochs, [0, 1, 2])
......
...@@ -45,8 +45,8 @@ class TestRoleMakerBase(unittest.TestCase): ...@@ -45,8 +45,8 @@ class TestRoleMakerBase(unittest.TestCase):
self.assertTrue(len(pserver_endpoints) == 0) self.assertTrue(len(pserver_endpoints) == 0)
print(role.to_string()) print(role.to_string())
self.assertTrue(role._all_gather(1, "worker") is None) self.assertIsNone(role._all_gather(1, "worker"))
self.assertTrue(role._all_reduce(1, "sum", "worker") is None) self.assertIsNone(role._all_reduce(1, "sum", "worker"))
role._barrier("worker") role._barrier("worker")
......
...@@ -68,7 +68,7 @@ java.io.IOException: Input/output error ...@@ -68,7 +68,7 @@ java.io.IOException: Input/output error
""" # fmt: off, avoid remove tabs in string """ # fmt: off, avoid remove tabs in string
print("split lines:", s.splitlines()) print("split lines:", s.splitlines())
self.assertTrue(fs._test_match(s.splitlines()) != None) self.assertIsNotNone(fs._test_match(s.splitlines()))
def test_config(self): def test_config(self):
config = {"fs.default.name": "hdfs://xxx", "hadoop.job.ugi": "ugi"} config = {"fs.default.name": "hdfs://xxx", "hadoop.job.ugi": "ugi"}
......
...@@ -30,11 +30,11 @@ class TestProcessGroupFp32(unittest.TestCase): ...@@ -30,11 +30,11 @@ class TestProcessGroupFp32(unittest.TestCase):
paddle.distributed.init_parallel_env() paddle.distributed.init_parallel_env()
paddle.distributed.new_group() paddle.distributed.new_group()
group = paddle.distributed.new_group([-1, -2]) group = paddle.distributed.new_group([-1, -2])
assert group.process_group == None assert group.process_group is None
group = paddle.distributed.collective.Group(-1, 2, 0, [-1, -2]) group = paddle.distributed.collective.Group(-1, 2, 0, [-1, -2])
ret = paddle.distributed.barrier(group) ret = paddle.distributed.barrier(group)
assert ret == None assert ret is None
paddle.enable_static() paddle.enable_static()
in_tensor = paddle.empty((1, 2)) in_tensor = paddle.empty((1, 2))
in_tensor2 = paddle.empty((1, 2)) in_tensor2 = paddle.empty((1, 2))
......
...@@ -167,7 +167,7 @@ def tokenize(pattern): ...@@ -167,7 +167,7 @@ def tokenize(pattern):
# tarfile.extractfile, which does random access and might # tarfile.extractfile, which does random access and might
# destroy hard disks. # destroy hard disks.
tf = tarf.next() tf = tarf.next()
while tf != None: while tf is not None:
if bool(pattern.match(tf.name)): if bool(pattern.match(tf.name)):
# newline and punctuations removal and ad-hoc tokenization. # newline and punctuations removal and ad-hoc tokenization.
yield tarf.extractfile(tf).read().rstrip(b'\n\r').translate( yield tarf.extractfile(tf).read().rstrip(b'\n\r').translate(
......
...@@ -101,7 +101,7 @@ class DistPassTestBase(unittest.TestCase): ...@@ -101,7 +101,7 @@ class DistPassTestBase(unittest.TestCase):
zip(no_pass_ret, pass_ret) zip(no_pass_ret, pass_ret)
): ):
if out_var_no_pass is None: if out_var_no_pass is None:
self.assertTrue(out_var_pass is None) self.assertIsNone(out_var_pass)
else: else:
np.testing.assert_allclose( np.testing.assert_allclose(
out_var_no_pass, out_var_no_pass,
......
...@@ -246,7 +246,7 @@ class BaseModel(fluid.dygraph.Layer): ...@@ -246,7 +246,7 @@ class BaseModel(fluid.dygraph.Layer):
enc_new_hidden, enc_new_cell = self.enc_units[i]( enc_new_hidden, enc_new_cell = self.enc_units[i](
enc_step_input, enc_hidden[i], enc_cell[i] enc_step_input, enc_hidden[i], enc_cell[i]
) )
if self.dropout != None and self.dropout > 0.0: if self.dropout is not None and self.dropout > 0.0:
enc_step_input = fluid.layers.dropout( enc_step_input = fluid.layers.dropout(
enc_new_hidden, enc_new_hidden,
dropout_prob=self.dropout, dropout_prob=self.dropout,
...@@ -278,7 +278,7 @@ class BaseModel(fluid.dygraph.Layer): ...@@ -278,7 +278,7 @@ class BaseModel(fluid.dygraph.Layer):
) )
new_dec_hidden.append(new_hidden) new_dec_hidden.append(new_hidden)
new_dec_cell.append(new_cell) new_dec_cell.append(new_cell)
if self.dropout != None and self.dropout > 0.0: if self.dropout is not None and self.dropout > 0.0:
step_input = fluid.layers.dropout( step_input = fluid.layers.dropout(
new_hidden, new_hidden,
dropout_prob=self.dropout, dropout_prob=self.dropout,
...@@ -346,7 +346,7 @@ class BaseModel(fluid.dygraph.Layer): ...@@ -346,7 +346,7 @@ class BaseModel(fluid.dygraph.Layer):
enc_new_hidden, enc_new_cell = self.enc_units[i]( enc_new_hidden, enc_new_cell = self.enc_units[i](
enc_step_input, enc_hidden[i], enc_cell[i] enc_step_input, enc_hidden[i], enc_cell[i]
) )
if self.dropout != None and self.dropout > 0.0: if self.dropout is not None and self.dropout > 0.0:
enc_step_input = fluid.layers.dropout( enc_step_input = fluid.layers.dropout(
enc_new_hidden, enc_new_hidden,
dropout_prob=self.dropout, dropout_prob=self.dropout,
...@@ -418,7 +418,7 @@ class BaseModel(fluid.dygraph.Layer): ...@@ -418,7 +418,7 @@ class BaseModel(fluid.dygraph.Layer):
) )
new_dec_hidden.append(new_hidden) new_dec_hidden.append(new_hidden)
new_dec_cell.append(new_cell) new_dec_cell.append(new_cell)
if self.dropout != None and self.dropout > 0.0: if self.dropout is not None and self.dropout > 0.0:
step_input = fluid.layers.dropout( step_input = fluid.layers.dropout(
new_hidden, new_hidden,
dropout_prob=self.dropout, dropout_prob=self.dropout,
...@@ -760,7 +760,7 @@ class AttentionModel(fluid.dygraph.Layer): ...@@ -760,7 +760,7 @@ class AttentionModel(fluid.dygraph.Layer):
enc_new_hidden, enc_new_cell = self.enc_units[i]( enc_new_hidden, enc_new_cell = self.enc_units[i](
enc_step_input, enc_hidden[i], enc_cell[i] enc_step_input, enc_hidden[i], enc_cell[i]
) )
if self.dropout != None and self.dropout > 0.0: if self.dropout is not None and self.dropout > 0.0:
enc_step_input = fluid.layers.dropout( enc_step_input = fluid.layers.dropout(
enc_new_hidden, enc_new_hidden,
dropout_prob=self.dropout, dropout_prob=self.dropout,
...@@ -803,7 +803,7 @@ class AttentionModel(fluid.dygraph.Layer): ...@@ -803,7 +803,7 @@ class AttentionModel(fluid.dygraph.Layer):
) )
new_dec_hidden.append(new_hidden) new_dec_hidden.append(new_hidden)
new_dec_cell.append(new_cell) new_dec_cell.append(new_cell)
if self.dropout != None and self.dropout > 0.0: if self.dropout is not None and self.dropout > 0.0:
step_input = fluid.layers.dropout( step_input = fluid.layers.dropout(
new_hidden, new_hidden,
dropout_prob=self.dropout, dropout_prob=self.dropout,
......
...@@ -73,7 +73,7 @@ def deco4(func=None, x=0): ...@@ -73,7 +73,7 @@ def deco4(func=None, x=0):
return inner_deco return inner_deco
if func == None: if func is None:
return decorated return decorated
return decorated(func) return decorated(func)
......
...@@ -29,7 +29,7 @@ class TestFunctionSpec(unittest.TestCase): ...@@ -29,7 +29,7 @@ class TestFunctionSpec(unittest.TestCase):
args_name = foo_spec.args_name args_name = foo_spec.args_name
self.assertListEqual(args_name, ['a', 'b', 'c', 'd']) self.assertListEqual(args_name, ['a', 'b', 'c', 'd'])
self.assertTrue(foo_spec.dygraph_function == foo_func) self.assertTrue(foo_spec.dygraph_function == foo_func)
self.assertTrue(foo_spec.input_spec is None) self.assertIsNone(foo_spec.input_spec)
def test_verify_input_spec(self): def test_verify_input_spec(self):
a_spec = InputSpec([None, 10], name='a') a_spec = InputSpec([None, 10], name='a')
......
...@@ -21,7 +21,7 @@ class TestPlace(unittest.TestCase): ...@@ -21,7 +21,7 @@ class TestPlace(unittest.TestCase):
paddle.enable_static() paddle.enable_static()
x = paddle.to_tensor([1, 2, 3, 4]) x = paddle.to_tensor([1, 2, 3, 4])
self.assertTrue(x.place() == None) self.assertIsNone(x.place())
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -89,7 +89,7 @@ class BuildIrMemOptBase(unittest.TestCase): ...@@ -89,7 +89,7 @@ class BuildIrMemOptBase(unittest.TestCase):
first_loss, last_loss = None, None first_loss, last_loss = None, None
step_id = 0 step_id = 0
custom_iter = getattr(self, "iter", None) custom_iter = getattr(self, "iter", None)
if not custom_iter == None: if custom_iter is not None:
iter = custom_iter iter = custom_iter
for data in reader(): for data in reader():
ret = exe.run(train_cp, feed=data, fetch_list=fetch_list) ret = exe.run(train_cp, feed=data, fetch_list=fetch_list)
......
...@@ -43,17 +43,17 @@ def adaptive_pool2d_forward( ...@@ -43,17 +43,17 @@ def adaptive_pool2d_forward(
else [x.shape[3], x.shape[1], x.shape[2]] else [x.shape[3], x.shape[1], x.shape[2]]
) )
if isinstance(output_size, int) or output_size == None: if isinstance(output_size, int) or output_size is None:
H_out = output_size H_out = output_size
W_out = output_size W_out = output_size
output_size = [H_out, W_out] output_size = [H_out, W_out]
else: else:
H_out, W_out = output_size H_out, W_out = output_size
if output_size[0] == None: if output_size[0] is None:
output_size[0] = H output_size[0] = H
H_out = H H_out = H
if output_size[1] == None: if output_size[1] is None:
output_size[1] = W output_size[1] = W
W_out = W W_out = W
......
...@@ -39,7 +39,7 @@ def adaptive_pool3d_forward( ...@@ -39,7 +39,7 @@ def adaptive_pool3d_forward(
else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]] else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]]
) )
if isinstance(output_size, int) or output_size == None: if isinstance(output_size, int) or output_size is None:
H_out = output_size H_out = output_size
W_out = output_size W_out = output_size
D_out = output_size D_out = output_size
...@@ -47,13 +47,13 @@ def adaptive_pool3d_forward( ...@@ -47,13 +47,13 @@ def adaptive_pool3d_forward(
else: else:
D_out, H_out, W_out = output_size D_out, H_out, W_out = output_size
if output_size[0] == None: if output_size[0] is None:
output_size[0] = D output_size[0] = D
D_out = D D_out = D
if output_size[1] == None: if output_size[1] is None:
output_size[1] = H output_size[1] = H
H_out = H H_out = H
if output_size[2] == None: if output_size[2] is None:
output_size[2] = W output_size[2] = W
W_out = W W_out = W
......
...@@ -41,17 +41,17 @@ def adaptive_pool2d_forward( ...@@ -41,17 +41,17 @@ def adaptive_pool2d_forward(
else [x.shape[3], x.shape[1], x.shape[2]] else [x.shape[3], x.shape[1], x.shape[2]]
) )
if isinstance(output_size, int) or output_size == None: if isinstance(output_size, int) or output_size is None:
H_out = output_size H_out = output_size
W_out = output_size W_out = output_size
output_size = [H_out, W_out] output_size = [H_out, W_out]
else: else:
H_out, W_out = output_size H_out, W_out = output_size
if output_size[0] == None: if output_size[0] is None:
output_size[0] = H output_size[0] = H
H_out = H H_out = H
if output_size[1] == None: if output_size[1] is None:
output_size[1] = W output_size[1] = W
W_out = W W_out = W
......
...@@ -41,7 +41,7 @@ def adaptive_pool3d_forward( ...@@ -41,7 +41,7 @@ def adaptive_pool3d_forward(
else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]] else [x.shape[4], x.shape[1], x.shape[2], x.shape[3]]
) )
if isinstance(output_size, int) or output_size == None: if isinstance(output_size, int) or output_size is None:
H_out = output_size H_out = output_size
W_out = output_size W_out = output_size
D_out = output_size D_out = output_size
...@@ -49,13 +49,13 @@ def adaptive_pool3d_forward( ...@@ -49,13 +49,13 @@ def adaptive_pool3d_forward(
else: else:
D_out, H_out, W_out = output_size D_out, H_out, W_out = output_size
if output_size[0] == None: if output_size[0] is None:
output_size[0] = D output_size[0] = D
D_out = D D_out = D
if output_size[1] == None: if output_size[1] is None:
output_size[1] = H output_size[1] = H
H_out = H H_out = H
if output_size[2] == None: if output_size[2] is None:
output_size[2] = W output_size[2] = W
W_out = W W_out = W
......
...@@ -602,7 +602,7 @@ class TestAutoParallelMapper(unittest.TestCase): ...@@ -602,7 +602,7 @@ class TestAutoParallelMapper(unittest.TestCase):
outputs={'Out': output}, outputs={'Out': output},
) )
self.assertEqual(get_comm_volume(broadcast_op, 0, 1), 400) self.assertEqual(get_comm_volume(broadcast_op, 0, 1), 400)
self.assertEqual(get_comm_volume(broadcast_op, 1, 0), None) self.assertIsNone(get_comm_volume(broadcast_op, 1, 0))
allgather_op = train_program.global_block().append_op( allgather_op = train_program.global_block().append_op(
type="c_allgather", type="c_allgather",
inputs={'X': input}, inputs={'X': input},
...@@ -610,14 +610,14 @@ class TestAutoParallelMapper(unittest.TestCase): ...@@ -610,14 +610,14 @@ class TestAutoParallelMapper(unittest.TestCase):
outputs={'Out': output}, outputs={'Out': output},
) )
self.assertEqual(get_comm_volume(allgather_op, 0, 1), 400) self.assertEqual(get_comm_volume(allgather_op, 0, 1), 400)
self.assertEqual(get_comm_volume(allgather_op, 0, 0), None) self.assertIsNone(get_comm_volume(allgather_op, 0, 0))
reduce_op = train_program.global_block().append_op( reduce_op = train_program.global_block().append_op(
type="c_reduce_sum", type="c_reduce_sum",
inputs={'X': input}, inputs={'X': input},
attrs={'ring_id': ring_id, 'root_id': root_id}, attrs={'ring_id': ring_id, 'root_id': root_id},
outputs={'Out': output}, outputs={'Out': output},
) )
self.assertEqual(get_comm_volume(reduce_op, 0, 1), None) self.assertIsNone(get_comm_volume(reduce_op, 0, 1))
self.assertEqual(get_comm_volume(reduce_op, 1, 0), 400) self.assertEqual(get_comm_volume(reduce_op, 1, 0), 400)
cast_op = train_program.global_block().append_op( cast_op = train_program.global_block().append_op(
type="cast", type="cast",
......
...@@ -53,26 +53,26 @@ def is_valid_completed_program(dist_context, program): ...@@ -53,26 +53,26 @@ def is_valid_completed_program(dist_context, program):
vars_ = program.list_vars() vars_ = program.list_vars()
for op in ops: for op in ops:
op_dist_attrs = dist_context.get_op_dist_attr_for_program(op) op_dist_attrs = dist_context.get_op_dist_attr_for_program(op)
if op_dist_attrs == None: if op_dist_attrs is None:
return False return False
if op_dist_attrs.process_mesh == None: if op_dist_attrs.process_mesh is None:
return False return False
for tensor_dist_attr in op_dist_attrs.inputs_dist_attrs.values(): for tensor_dist_attr in op_dist_attrs.inputs_dist_attrs.values():
if None == tensor_dist_attr.dims_mapping: if tensor_dist_attr.dims_mapping is None:
return False return False
for tensor_dist_attr in op_dist_attrs.outputs_dist_attrs.values(): for tensor_dist_attr in op_dist_attrs.outputs_dist_attrs.values():
if None == tensor_dist_attr.dims_mapping: if tensor_dist_attr.dims_mapping is None:
return False return False
for var in vars_: for var in vars_:
var_dist_attrs = dist_context.get_tensor_dist_attr_for_program(var) var_dist_attrs = dist_context.get_tensor_dist_attr_for_program(var)
if var_dist_attrs == None: if var_dist_attrs is None:
return False return False
elif var_dist_attrs.process_mesh == None: elif var_dist_attrs.process_mesh is None:
return False return False
elif var_dist_attrs.dims_mapping == None: elif var_dist_attrs.dims_mapping is None:
return False return False
return True return True
......
...@@ -606,7 +606,7 @@ class TestLayerTo(unittest.TestCase): ...@@ -606,7 +606,7 @@ class TestLayerTo(unittest.TestCase):
buffer = None buffer = None
model.register_buffer("buf_name", buffer, persistable=True) model.register_buffer("buf_name", buffer, persistable=True)
model.to(dtype='float64') model.to(dtype='float64')
self.assertEqual(model._buffers['buf_name'], None) self.assertIsNone(model._buffers['buf_name'])
def test_main(self): def test_main(self):
with _test_eager_guard(): with _test_eager_guard():
......
...@@ -106,9 +106,9 @@ class TestClass(unittest.TestCase): ...@@ -106,9 +106,9 @@ class TestClass(unittest.TestCase):
break break
if break_beforehand: if break_beforehand:
self.assertTrue(next(gen, None) is not None) self.assertIsNotNone(next(gen, None))
else: else:
self.assertTrue(next(gen, None) is None) self.assertIsNone(next(gen, None))
class TestClass2(TestClass): class TestClass2(TestClass):
......
...@@ -67,7 +67,7 @@ class TestDygraphSpectralNorm(unittest.TestCase): ...@@ -67,7 +67,7 @@ class TestDygraphSpectralNorm(unittest.TestCase):
def test_check_output(self): def test_check_output(self):
linear = paddle.nn.Conv2D(2, 1, 3) linear = paddle.nn.Conv2D(2, 1, 3)
before_weight = linear.weight.numpy().copy() before_weight = linear.weight.numpy().copy()
if self.dim == None: if self.dim is None:
if isinstance( if isinstance(
linear, linear,
( (
......
...@@ -122,7 +122,7 @@ class TestDygraphWeightNorm(unittest.TestCase): ...@@ -122,7 +122,7 @@ class TestDygraphWeightNorm(unittest.TestCase):
fluid.enable_imperative() fluid.enable_imperative()
linear = paddle.nn.Conv2D(2, 3, 3) linear = paddle.nn.Conv2D(2, 3, 3)
before_weight = linear.weight.numpy() before_weight = linear.weight.numpy()
if self.dim == None: if self.dim is None:
self.dim = -1 self.dim = -1
if self.dim != -1: if self.dim != -1:
......
...@@ -102,7 +102,7 @@ class TestExecutor(unittest.TestCase): ...@@ -102,7 +102,7 @@ class TestExecutor(unittest.TestCase):
outline_p_vars = [] outline_p_vars = []
for name in persitables: for name in persitables:
var = scope.find_var(name) var = scope.find_var(name)
self.assertTrue(var is not None) self.assertIsNotNone(var)
t = var.get_tensor() t = var.get_tensor()
if not t._is_initialized(): if not t._is_initialized():
outline_p_vars.append(name) outline_p_vars.append(name)
...@@ -110,7 +110,7 @@ class TestExecutor(unittest.TestCase): ...@@ -110,7 +110,7 @@ class TestExecutor(unittest.TestCase):
outline_np_vars = [] outline_np_vars = []
for name in non_persistables: for name in non_persistables:
var = scope.find_var(name) var = scope.find_var(name)
self.assertTrue(var is not None) self.assertIsNotNone(var)
t = var.get_tensor() t = var.get_tensor()
if t._is_initialized(): if t._is_initialized():
outline_np_vars.append(name) outline_np_vars.append(name)
......
...@@ -204,7 +204,7 @@ def lm_model( ...@@ -204,7 +204,7 @@ def lm_model(
input = m input = m
if dropout != None and dropout > 0.0: if dropout is not None and dropout > 0.0:
input = layers.dropout( input = layers.dropout(
input, input,
dropout_prob=dropout, dropout_prob=dropout,
...@@ -308,7 +308,7 @@ def lm_model( ...@@ -308,7 +308,7 @@ def lm_model(
cell_array[k] = c cell_array[k] = c
input = m input = m
if dropout != None and dropout > 0.0: if dropout is not None and dropout > 0.0:
input = layers.dropout( input = layers.dropout(
input, input,
dropout_prob=dropout, dropout_prob=dropout,
...@@ -390,7 +390,7 @@ def lm_model( ...@@ -390,7 +390,7 @@ def lm_model(
x_emb = layers.reshape( x_emb = layers.reshape(
x_emb, shape=[-1, num_steps, hidden_size], inplace=True x_emb, shape=[-1, num_steps, hidden_size], inplace=True
) )
if dropout != None and dropout > 0.0: if dropout is not None and dropout > 0.0:
x_emb = layers.dropout( x_emb = layers.dropout(
x_emb, x_emb,
dropout_prob=dropout, dropout_prob=dropout,
......
...@@ -111,7 +111,7 @@ class TestFleetBase(unittest.TestCase): ...@@ -111,7 +111,7 @@ class TestFleetBase(unittest.TestCase):
def test_util(self): def test_util(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True) role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role) fleet.init(role)
self.assertNotEqual(fleet.util, None) self.assertIsNotNone(fleet.util)
def test_barrier_worker(self): def test_barrier_worker(self):
role = role_maker.PaddleCloudRoleMaker(is_collective=True) role = role_maker.PaddleCloudRoleMaker(is_collective=True)
......
...@@ -50,7 +50,7 @@ class TestFleetUtil(unittest.TestCase): ...@@ -50,7 +50,7 @@ class TestFleetUtil(unittest.TestCase):
context["role_maker"] = role_maker context["role_maker"] = role_maker
context["valid_strategy"] = strategy context["valid_strategy"] = strategy
util = factory._create_util(context) util = factory._create_util(context)
self.assertEqual(util.role_maker, None) self.assertIsNone(util.role_maker)
def test_get_util(self): def test_get_util(self):
import paddle.distributed.fleet as fleet import paddle.distributed.fleet as fleet
...@@ -58,7 +58,7 @@ class TestFleetUtil(unittest.TestCase): ...@@ -58,7 +58,7 @@ class TestFleetUtil(unittest.TestCase):
role = role_maker.PaddleCloudRoleMaker(is_collective=True) role = role_maker.PaddleCloudRoleMaker(is_collective=True)
fleet.init(role) fleet.init(role)
self.assertNotEqual(fleet.util, None) self.assertIsNotNone(fleet.util)
def test_set_user_defined_util(self): def test_set_user_defined_util(self):
import paddle.distributed.fleet as fleet import paddle.distributed.fleet as fleet
......
...@@ -112,7 +112,7 @@ class TestFusedMatmulBias(unittest.TestCase): ...@@ -112,7 +112,7 @@ class TestFusedMatmulBias(unittest.TestCase):
if need_bias: if need_bias:
np.testing.assert_array_equal(bias.grad.numpy(), bias_grad_np) np.testing.assert_array_equal(bias.grad.numpy(), bias_grad_np)
else: else:
self.assertTrue(bias_grad_np is None) self.assertIsNone(bias_grad_np)
def rand_test(self, m, n, k, dtype): def rand_test(self, m, n, k, dtype):
seed = int(np.random.randint(low=0, high=1000, size=[1])) seed = int(np.random.randint(low=0, high=1000, size=[1]))
......
...@@ -36,7 +36,7 @@ class TestGlobalVarGetterSetter(unittest.TestCase): ...@@ -36,7 +36,7 @@ class TestGlobalVarGetterSetter(unittest.TestCase):
self.assertTrue(var.name in g.keys()) self.assertTrue(var.name in g.keys())
value1 = g[var.name] value1 = g[var.name]
value2 = g.get(var.name, None) value2 = g.get(var.name, None)
self.assertTrue(value1 is not None) self.assertIsNotNone(value1)
self.assertEqual(value1, value2) self.assertEqual(value1, value2)
self.assertEqual(type(value1), var.type) self.assertEqual(type(value1), var.type)
self.assertEqual(type(value2), var.type) self.assertEqual(type(value2), var.type)
...@@ -53,7 +53,7 @@ class TestGlobalVarGetterSetter(unittest.TestCase): ...@@ -53,7 +53,7 @@ class TestGlobalVarGetterSetter(unittest.TestCase):
name = "__any_non_exist_name__" name = "__any_non_exist_name__"
self.assertFalse(name in g) self.assertFalse(name in g)
self.assertFalse(name in g.keys()) self.assertFalse(name in g.keys())
self.assertTrue(g.get(name, None) is None) self.assertIsNone(g.get(name, None))
self.assertEquals(g.get(name, -1), -1) self.assertEquals(g.get(name, -1), -1)
......
...@@ -162,8 +162,8 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -162,8 +162,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
v2 = fluid.dygraph.to_variable(value2) v2 = fluid.dygraph.to_variable(value2)
loss = case1(v1, v2) loss = case1(v1, v2)
loss.backward() loss.backward()
self.assertTrue(case1.linear2.weight._grad_ivar() is not None) self.assertIsNotNone(case1.linear2.weight._grad_ivar())
self.assertTrue(case1.linear1.weight._grad_ivar() is not None) self.assertIsNotNone(case1.linear1.weight._grad_ivar())
def test_auto_prune(self): def test_auto_prune(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -180,8 +180,8 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -180,8 +180,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
loss = case2(v1, v2) loss = case2(v1, v2)
loss.backward() loss.backward()
self.assertTrue(case2.linear2.weight._grad_ivar() is None) self.assertIsNone(case2.linear2.weight._grad_ivar())
self.assertTrue(case2.linear1.weight._grad_ivar() is not None) self.assertIsNotNone(case2.linear1.weight._grad_ivar())
def test_auto_prune2(self): def test_auto_prune2(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -198,7 +198,7 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -198,7 +198,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
v2 = fluid.dygraph.to_variable(value2) v2 = fluid.dygraph.to_variable(value2)
loss, part2 = case3(v1, v2, 1) loss, part2 = case3(v1, v2, 1)
loss.backward() loss.backward()
self.assertTrue(case3.linear.weight._grad_ivar() is not None) self.assertIsNotNone(case3.linear.weight._grad_ivar())
self.assertTrue((part2.gradient() == 0).all()) self.assertTrue((part2.gradient() == 0).all())
def test_auto_prune3(self): def test_auto_prune3(self):
...@@ -217,7 +217,7 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -217,7 +217,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
v2 = fluid.dygraph.to_variable(value2) v2 = fluid.dygraph.to_variable(value2)
loss, part2 = case4(v1, v2, 1) loss, part2 = case4(v1, v2, 1)
part2.backward() part2.backward()
self.assertTrue(case4.linear.weight._grad_ivar() is not None) self.assertIsNotNone(case4.linear.weight._grad_ivar())
self.assertTrue((part2.gradient() == 1).all()) self.assertTrue((part2.gradient() == 1).all())
def test_auto_prune4(self): def test_auto_prune4(self):
...@@ -236,7 +236,7 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -236,7 +236,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
v2 = fluid.dygraph.to_variable(value2) v2 = fluid.dygraph.to_variable(value2)
loss, part1, part2 = case4(v1, v2, 2) loss, part1, part2 = case4(v1, v2, 2)
part1.backward() part1.backward()
self.assertTrue(case4.linear.weight._grad_ivar() is not None) self.assertIsNotNone(case4.linear.weight._grad_ivar())
self.assertTrue((part2.gradient() == 0).all()) self.assertTrue((part2.gradient() == 0).all())
def test_auto_prune5(self): def test_auto_prune5(self):
...@@ -261,8 +261,8 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -261,8 +261,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
out1.stop_gradient = True out1.stop_gradient = True
out = fluid.layers.concat(input=[out1, out2, c], axis=1) out = fluid.layers.concat(input=[out1, out2, c], axis=1)
out.backward() out.backward()
self.assertTrue(linear.weight.gradient() is None) self.assertIsNone(linear.weight.gradient())
self.assertTrue(out1.gradient() is None) self.assertIsNone(out1.gradient())
def test_auto_prune6(self): def test_auto_prune6(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -284,8 +284,8 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -284,8 +284,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
out1.stop_gradient = True out1.stop_gradient = True
out = fluid.layers.concat(input=[out1, out2, c], axis=1) out = fluid.layers.concat(input=[out1, out2, c], axis=1)
out.backward() out.backward()
self.assertTrue(linear.weight.gradient() is None) self.assertIsNone(linear.weight.gradient())
self.assertTrue(out1.gradient() is None) self.assertIsNone(out1.gradient())
def test_auto_prune7(self): def test_auto_prune7(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -377,8 +377,8 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -377,8 +377,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
# TODO(jiabin): In Eager Mode we don't actually need sort_sum_gradient, this test should be removed when we don't support fluid anymore. # TODO(jiabin): In Eager Mode we don't actually need sort_sum_gradient, this test should be removed when we don't support fluid anymore.
fluid.set_flags({'FLAGS_sort_sum_gradient': True}) fluid.set_flags({'FLAGS_sort_sum_gradient': True})
out.backward() out.backward()
self.assertTrue(linear.weight.gradient() is None) self.assertIsNone(linear.weight.gradient())
self.assertTrue(out1.gradient() is None) self.assertIsNone(out1.gradient())
def test_auto_prune10(self): def test_auto_prune10(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -449,8 +449,8 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -449,8 +449,8 @@ class TestImperativeAutoPrune(unittest.TestCase):
case3 = AutoPruneLayer2(input_size=784) case3 = AutoPruneLayer2(input_size=784)
loss = case3(v1, v2) loss = case3(v1, v2)
loss.backward() loss.backward()
self.assertTrue(case3.linear2.weight._grad_ivar() is None) self.assertIsNone(case3.linear2.weight._grad_ivar())
self.assertTrue(case3.linear.weight._grad_ivar() is not None) self.assertIsNotNone(case3.linear.weight._grad_ivar())
def test_case2_prune_no_grad_branch(self): def test_case2_prune_no_grad_branch(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -468,7 +468,7 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -468,7 +468,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
out = fluid.layers.one_hot(input=label, depth=100) out = fluid.layers.one_hot(input=label, depth=100)
loss = paddle.mean(out) loss = paddle.mean(out)
loss.backward() loss.backward()
self.assertTrue(linear.weight._grad_ivar() is None) self.assertIsNone(linear.weight._grad_ivar())
def test_case3_prune_no_grad_branch2(self): def test_case3_prune_no_grad_branch2(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -480,7 +480,7 @@ class TestImperativeAutoPrune(unittest.TestCase): ...@@ -480,7 +480,7 @@ class TestImperativeAutoPrune(unittest.TestCase):
out = fluid.layers.gaussian_random(shape=[20, 30]) out = fluid.layers.gaussian_random(shape=[20, 30])
loss = paddle.mean(out) loss = paddle.mean(out)
loss.backward() loss.backward()
self.assertTrue(out._grad_ivar() is None) self.assertIsNone(out._grad_ivar())
def test_case4_with_no_grad_op_maker(self): def test_case4_with_no_grad_op_maker(self):
with _test_eager_guard(): with _test_eager_guard():
......
...@@ -272,7 +272,7 @@ class TestImperative(unittest.TestCase): ...@@ -272,7 +272,7 @@ class TestImperative(unittest.TestCase):
data = np.array([[2, 3], [4, 5]]).astype('float32') data = np.array([[2, 3], [4, 5]]).astype('float32')
with fluid.dygraph.guard(): with fluid.dygraph.guard():
l0 = fluid.Linear(2, 2) l0 = fluid.Linear(2, 2)
self.assertTrue(l0.weight._grad_ivar() is None) self.assertIsNone(l0.weight._grad_ivar())
l1 = fluid.Linear(2, 2) l1 = fluid.Linear(2, 2)
with fluid.dygraph.no_grad(): with fluid.dygraph.no_grad():
self.assertTrue(l1.weight.stop_gradient is False) self.assertTrue(l1.weight.stop_gradient is False)
...@@ -283,14 +283,14 @@ class TestImperative(unittest.TestCase): ...@@ -283,14 +283,14 @@ class TestImperative(unittest.TestCase):
o = l1(y) o = l1(y)
o.backward() o.backward()
self.assertTrue(tmp._grad_ivar() is None) self.assertIsNone(tmp._grad_ivar())
self.assertTrue(l0.weight._grad_ivar() is not None) self.assertIsNotNone(l0.weight._grad_ivar())
def test_paddle_imperative_no_grad_guard(self): def test_paddle_imperative_no_grad_guard(self):
data = np.array([[2, 3], [4, 5]]).astype('float32') data = np.array([[2, 3], [4, 5]]).astype('float32')
with fluid.dygraph.guard(): with fluid.dygraph.guard():
l0 = fluid.Linear(2, 2) l0 = fluid.Linear(2, 2)
self.assertTrue(l0.weight._grad_ivar() is None) self.assertIsNone(l0.weight._grad_ivar())
l1 = fluid.Linear(2, 2) l1 = fluid.Linear(2, 2)
with paddle.no_grad(): with paddle.no_grad():
self.assertTrue(l1.weight.stop_gradient is False) self.assertTrue(l1.weight.stop_gradient is False)
...@@ -301,14 +301,14 @@ class TestImperative(unittest.TestCase): ...@@ -301,14 +301,14 @@ class TestImperative(unittest.TestCase):
o = l1(y) o = l1(y)
o.backward() o.backward()
self.assertTrue(tmp._grad_ivar() is None) self.assertIsNone(tmp._grad_ivar())
self.assertTrue(l0.weight._grad_ivar() is not None) self.assertIsNotNone(l0.weight._grad_ivar())
def test_paddle_imperative_set_grad_enabled(self): def test_paddle_imperative_set_grad_enabled(self):
data = np.array([[2, 3], [4, 5]]).astype('float32') data = np.array([[2, 3], [4, 5]]).astype('float32')
with fluid.dygraph.guard(): with fluid.dygraph.guard():
l0 = fluid.Linear(2, 2) l0 = fluid.Linear(2, 2)
self.assertTrue(l0.weight._grad_ivar() is None) self.assertIsNone(l0.weight._grad_ivar())
l1 = fluid.Linear(2, 2) l1 = fluid.Linear(2, 2)
with paddle.set_grad_enabled(False): with paddle.set_grad_enabled(False):
self.assertTrue(l1.weight.stop_gradient is False) self.assertTrue(l1.weight.stop_gradient is False)
...@@ -322,9 +322,9 @@ class TestImperative(unittest.TestCase): ...@@ -322,9 +322,9 @@ class TestImperative(unittest.TestCase):
o = l1(y) o = l1(y)
o.backward() o.backward()
self.assertTrue(tmp._grad_ivar() is None) self.assertIsNone(tmp._grad_ivar())
self.assertTrue(tmp2._grad_ivar() is not None) self.assertIsNotNone(tmp2._grad_ivar())
self.assertTrue(l0.weight._grad_ivar() is not None) self.assertIsNotNone(l0.weight._grad_ivar())
def test_paddle_imperative_is_grad_enabled(self): def test_paddle_imperative_is_grad_enabled(self):
with fluid.dygraph.guard(): with fluid.dygraph.guard():
......
...@@ -83,7 +83,7 @@ class TestEagerGrad(TestCase): ...@@ -83,7 +83,7 @@ class TestEagerGrad(TestCase):
# stop_gradient = !create_graph, create_graph default false # stop_gradient = !create_graph, create_graph default false
self.assertEqual(dx[0].stop_gradient, True) self.assertEqual(dx[0].stop_gradient, True)
# x is unused input in the graph # x is unused input in the graph
self.assertEqual(dx[1], None) self.assertIsNone(dx[1])
def test_simple_example_eager_grad_allow_unused(self): def test_simple_example_eager_grad_allow_unused(self):
with _test_eager_guard(): with _test_eager_guard():
...@@ -292,7 +292,7 @@ class TestDygraphDoubleGrad(TestCase): ...@@ -292,7 +292,7 @@ class TestDygraphDoubleGrad(TestCase):
(none_grad,) = self.grad( (none_grad,) = self.grad(
[x], [y], create_graph=create_graph, allow_unused=True [x], [y], create_graph=create_graph, allow_unused=True
) )
self.assertTrue(none_grad is None) self.assertIsNone(none_grad)
(grad_with_none_and_not_none,) = self.grad( (grad_with_none_and_not_none,) = self.grad(
[x, y], [y], create_graph=create_graph [x, y], [y], create_graph=create_graph
......
...@@ -84,7 +84,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): ...@@ -84,7 +84,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
def _check_exception(self, exception_message, place=None): def _check_exception(self, exception_message, place=None):
seed = 90 seed = 90
batch_size = 128 batch_size = 128
if place == None: if place is None:
place = ( place = (
fluid.CUDAPlace(0) fluid.CUDAPlace(0)
if core.is_compiled_with_cuda() if core.is_compiled_with_cuda()
...@@ -106,7 +106,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): ...@@ -106,7 +106,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
seed = 90 seed = 90
batch_size = 128 batch_size = 128
if place == None: if place is None:
place = ( place = (
fluid.CPUPlace() fluid.CPUPlace()
if not core.is_compiled_with_cuda() if not core.is_compiled_with_cuda()
...@@ -161,7 +161,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): ...@@ -161,7 +161,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
paddle.seed(seed) paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed) paddle.framework.random._manual_program_seed(seed)
if place == None: if place is None:
place = ( place = (
fluid.CPUPlace() fluid.CPUPlace()
if not core.is_compiled_with_cuda() if not core.is_compiled_with_cuda()
......
...@@ -81,7 +81,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): ...@@ -81,7 +81,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
def _check_exception(self, exception_message, place=None): def _check_exception(self, exception_message, place=None):
seed = 90 seed = 90
batch_size = 128 batch_size = 128
if place == None: if place is None:
place = ( place = (
fluid.CUDAPlace(0) fluid.CUDAPlace(0)
if core.is_compiled_with_cuda() if core.is_compiled_with_cuda()
...@@ -105,7 +105,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): ...@@ -105,7 +105,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
seed = 90 seed = 90
batch_size = 128 batch_size = 128
if place == None: if place is None:
place = ( place = (
fluid.CPUPlace() fluid.CPUPlace()
if not core.is_compiled_with_cuda() if not core.is_compiled_with_cuda()
...@@ -170,7 +170,7 @@ class TestImperativeOptimizerBase(unittest.TestCase): ...@@ -170,7 +170,7 @@ class TestImperativeOptimizerBase(unittest.TestCase):
paddle.seed(seed) paddle.seed(seed)
paddle.framework.random._manual_program_seed(seed) paddle.framework.random._manual_program_seed(seed)
if place == None: if place is None:
place = ( place = (
fluid.CPUPlace() fluid.CPUPlace()
if not core.is_compiled_with_cuda() if not core.is_compiled_with_cuda()
......
...@@ -1003,7 +1003,7 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -1003,7 +1003,7 @@ class TestDygraphPtbRnn(unittest.TestCase):
os.path.join('saved_dy', 'emb_dy') os.path.join('saved_dy', 'emb_dy')
) )
self.assertTrue(opti_state_dict == None) self.assertIsNone(opti_state_dict)
para_state_dict, opti_state_dict = fluid.load_dygraph( para_state_dict, opti_state_dict = fluid.load_dygraph(
os.path.join('saved_dy', 'emb_dy.pdparams') os.path.join('saved_dy', 'emb_dy.pdparams')
...@@ -1022,8 +1022,8 @@ class TestDygraphPtbRnn(unittest.TestCase): ...@@ -1022,8 +1022,8 @@ class TestDygraphPtbRnn(unittest.TestCase):
para_state_dict, opti_state_dict = fluid.load_dygraph( para_state_dict, opti_state_dict = fluid.load_dygraph(
os.path.join('saved_dy', 'emb_dy'), keep_name_table=True os.path.join('saved_dy', 'emb_dy'), keep_name_table=True
) )
self.assertTrue(para_state_dict != None) self.assertIsNotNone(para_state_dict)
self.assertTrue(opti_state_dict == None) self.assertIsNone(opti_state_dict)
def test_main(self): def test_main(self):
self.func_setUp() self.func_setUp()
......
...@@ -62,18 +62,18 @@ class TestSimpleNet(unittest.TestCase): ...@@ -62,18 +62,18 @@ class TestSimpleNet(unittest.TestCase):
) # grad_clip=grad_clip ) # grad_clip=grad_clip
input_emb, emb = simplenet(input) input_emb, emb = simplenet(input)
self.assertTrue(emb.weight.gradient() is None) self.assertIsNone(emb.weight.gradient())
self.assertTrue(input_emb.gradient() is None) self.assertIsNone(input_emb.gradient())
input_emb.backward() input_emb.backward()
adam.minimize(input_emb) adam.minimize(input_emb)
self.assertTrue(emb.weight.gradient() is not None) self.assertIsNotNone(emb.weight.gradient())
emb.clear_gradients() emb.clear_gradients()
self.assertTrue(emb.weight.gradient() is None) self.assertIsNone(emb.weight.gradient())
input_emb.clear_gradient() input_emb.clear_gradient()
self.assertTrue(input_emb.gradient() is not None) self.assertIsNotNone(input_emb.gradient())
paddle.enable_static() paddle.enable_static()
def test_selectedrows_gradient1(self): def test_selectedrows_gradient1(self):
...@@ -107,18 +107,18 @@ class TestSimpleNet(unittest.TestCase): ...@@ -107,18 +107,18 @@ class TestSimpleNet(unittest.TestCase):
) )
input_emb, emb = simplenet(input) input_emb, emb = simplenet(input)
self.assertTrue(emb.weight.gradient() is None) self.assertIsNone(emb.weight.gradient())
self.assertTrue(input_emb.gradient() is None) self.assertIsNone(input_emb.gradient())
input_emb.backward() input_emb.backward()
adam.minimize(input_emb) adam.minimize(input_emb)
self.assertTrue(emb.weight.gradient() is not None) self.assertIsNotNone(emb.weight.gradient())
emb.clear_gradients() emb.clear_gradients()
self.assertTrue(emb.weight.gradient() is None) self.assertIsNone(emb.weight.gradient())
input_emb.clear_gradient() input_emb.clear_gradient()
self.assertTrue(input_emb.gradient() is not None) self.assertIsNotNone(input_emb.gradient())
def test_selectedrows_gradient2(self): def test_selectedrows_gradient2(self):
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True}) fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
......
...@@ -449,7 +449,7 @@ class TestSaveInferenceModelNew(unittest.TestCase): ...@@ -449,7 +449,7 @@ class TestSaveInferenceModelNew(unittest.TestCase):
self.assertTrue(isinstance(res2, bytes)) self.assertTrue(isinstance(res2, bytes))
# test if variables in program is empty # test if variables in program is empty
res = paddle.static.io._serialize_persistables(Program(), None) res = paddle.static.io._serialize_persistables(Program(), None)
self.assertEqual(res, None) self.assertIsNone(res)
self.assertRaises( self.assertRaises(
TypeError, TypeError,
paddle.static.io.deserialize_persistables, paddle.static.io.deserialize_persistables,
......
...@@ -32,7 +32,7 @@ class TestInputSpec(unittest.TestCase): ...@@ -32,7 +32,7 @@ class TestInputSpec(unittest.TestCase):
self.assertEqual( self.assertEqual(
tensor_spec.dtype, convert_np_dtype_to_dtype_('float32') tensor_spec.dtype, convert_np_dtype_to_dtype_('float32')
) )
self.assertEqual(tensor_spec.name, None) self.assertIsNone(tensor_spec.name)
def test_from_tensor(self): def test_from_tensor(self):
x_bool = fluid.layers.fill_constant(shape=[1], dtype='bool', value=True) x_bool = fluid.layers.fill_constant(shape=[1], dtype='bool', value=True)
...@@ -51,7 +51,7 @@ class TestInputSpec(unittest.TestCase): ...@@ -51,7 +51,7 @@ class TestInputSpec(unittest.TestCase):
x_np_spec.dtype, convert_np_dtype_to_dtype_(x_numpy.dtype) x_np_spec.dtype, convert_np_dtype_to_dtype_(x_numpy.dtype)
) )
self.assertEqual(x_np_spec.shape, x_numpy.shape) self.assertEqual(x_np_spec.shape, x_numpy.shape)
self.assertEqual(x_np_spec.name, None) self.assertIsNone(x_np_spec.name)
x_numpy2 = np.array([1, 2, 3, 4]).astype('int64') x_numpy2 = np.array([1, 2, 3, 4]).astype('int64')
x_np_spec2 = InputSpec.from_numpy(x_numpy2, name='x_np_int64') x_np_spec2 = InputSpec.from_numpy(x_numpy2, name='x_np_int64')
......
...@@ -241,8 +241,8 @@ class TestLambOpMultiPrecision(unittest.TestCase): ...@@ -241,8 +241,8 @@ class TestLambOpMultiPrecision(unittest.TestCase):
) )
return params[0].astype(np.float32) return params[0].astype(np.float32)
else: else:
self.assertTrue(params[0] is not None) self.assertIsNotNone(params[0])
self.assertTrue(params[1] is None) self.assertIsNone(params[1])
params[0] = np.array(params[0]) params[0] = np.array(params[0])
return params[0] return params[0]
......
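For reference, a short sketch of why E711 prefers `is` / `is not` over `==` / `!=` when comparing with None (independent of the changed files; `dim` below is a made-up local variable, not a Paddle argument): `==` dispatches to `__eq__`, which objects such as NumPy arrays override, so the comparison may not even produce a plain bool, while the identity check always does.

import numpy as np

dim = np.array([0, 1])

print(dim is None)   # False -- identity check, always a plain bool
print(dim == None)   # noqa: E711 -- elementwise comparison: array([False, False])

# "if dim == None:" would raise "ValueError: The truth value of an array with
# more than one element is ambiguous", whereas the identity form is always safe:
if dim is not None:
    print("dim is set:", dim.tolist())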