"paddle/fluid/framework/program_utils.cc" does not exist at "4b085c579f9d2363d332fca40e45cb601a0246be"
Unverified · Commit 5a2ab683 · Authored by Nyakku Shigure · Committed by GitHub

[CodeStyle][E712] use `if cond`/`if cond is True` for comparison with `True` (#47464)

* [CodeStyle][E712] use `if cond`/`if cond is True` for comparison with `True`

* revert changes in fluid

* revert unrelated file

* revert changes in norm

* revert changes in auto_parallel_amp

* fix norm and auto_parallel_amp

* revert a typo fix since it was already fixed in #47477
Parent e12b6c04
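For context, the rule enforced by these edits is flake8 E712: equality comparisons against the literals `True`/`False` are replaced either with a plain truthiness check (`if cond:` / `if not cond:`) or, where the code must distinguish the literal booleans from other values such as `None` (as in the `_is_fp16_op` branches further down in this diff), with an identity check (`is True` / `is False`). A minimal illustrative sketch, not taken from this diff — the function and variable names are made up:

```python
def describe(status: bool, flag: object) -> str:
    """Hypothetical helper showing the E712-compliant forms."""
    # Before this kind of change: `if status == True:` (flagged by E712).
    # After: rely on truthiness directly.
    if status:
        result = "enabled"
    else:
        result = "disabled"

    # For tri-state values (True / False / None), identity checks keep
    # the three cases distinct, which a plain truthiness test would not.
    if flag is True:
        result += ", flag explicitly on"
    elif flag is False:
        result += ", flag explicitly off"
    else:
        result += ", flag unset"
    return result


print(describe(True, None))    # enabled, flag unset
print(describe(False, False))  # disabled, flag explicitly off
```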
...@@ -532,7 +532,7 @@ class PythonCGenerator(GeneratorBase): ...@@ -532,7 +532,7 @@ class PythonCGenerator(GeneratorBase):
) )
status = f_generator.run() status = f_generator.run()
if status == True: if status:
self.python_c_functions_str += ( self.python_c_functions_str += (
f_generator.python_c_function_str + "\n" f_generator.python_c_function_str + "\n"
) )
......
...@@ -45,7 +45,7 @@ def resize_short(img, target_size): ...@@ -45,7 +45,7 @@ def resize_short(img, target_size):
def crop_image(img, target_size, center): def crop_image(img, target_size, center):
width, height = img.size width, height = img.size
size = target_size size = target_size
if center == True: if center:
w_start = (width - size) // 2 w_start = (width - size) // 2
h_start = (height - size) // 2 h_start = (height - size) // 2
else: else:
......
...@@ -79,7 +79,7 @@ def list_available_backends() -> List[str]: ...@@ -79,7 +79,7 @@ def list_available_backends() -> List[str]:
if "paddleaudio" in sys.modules: if "paddleaudio" in sys.modules:
version = paddleaudio.__version__ version = paddleaudio.__version__
if _check_version(version) == False: if not _check_version(version):
err_msg = ( err_msg = (
"the version of paddleaudio installed is {},\n" "the version of paddleaudio installed is {},\n"
"please ensure the paddleaudio >= 1.0.2." "please ensure the paddleaudio >= 1.0.2."
......
...@@ -109,9 +109,9 @@ def corpus_reader(data_path, words_name, props_name): ...@@ -109,9 +109,9 @@ def corpus_reader(data_path, words_name, props_name):
lbl_seq = [] lbl_seq = []
verb_word = '' verb_word = ''
for l in lbl: for l in lbl:
if l == '*' and is_in_bracket == False: if l == '*' and not is_in_bracket:
lbl_seq.append('O') lbl_seq.append('O')
elif l == '*' and is_in_bracket == True: elif l == '*' and is_in_bracket:
lbl_seq.append('I-' + cur_tag) lbl_seq.append('I-' + cur_tag)
elif l == '*)': elif l == '*)':
lbl_seq.append('I-' + cur_tag) lbl_seq.append('I-' + cur_tag)
......
...@@ -106,7 +106,7 @@ class ProcessGroup: ...@@ -106,7 +106,7 @@ class ProcessGroup:
return return
else: else:
assert ( assert (
self.is_instantiate() == False not self.is_instantiate()
), "Cannot add new ranks after instantiating the process group" ), "Cannot add new ranks after instantiating the process group"
self._ranks.extend(new_ranks) self._ranks.extend(new_ranks)
self._ranks = sorted(list(set(self.ranks))) self._ranks = sorted(list(set(self.ranks)))
......
...@@ -268,7 +268,7 @@ class ParallelTuner: ...@@ -268,7 +268,7 @@ class ParallelTuner:
return return
for idx, dim in enumerate(dims_list): for idx, dim in enumerate(dims_list):
if visited[idx] == False: if not visited[idx]:
dims_mapping[start] = dim dims_mapping[start] = dim
visited[idx] = True visited[idx] = True
self._generate_dims_mapping_candidates_helper( self._generate_dims_mapping_candidates_helper(
......
...@@ -514,7 +514,7 @@ class InMemoryDataset(DatasetBase): ...@@ -514,7 +514,7 @@ class InMemoryDataset(DatasetBase):
self._set_fleet_send_batch_size(kwargs[key]) self._set_fleet_send_batch_size(kwargs[key])
elif key == "fleet_send_sleep_seconds": elif key == "fleet_send_sleep_seconds":
self._set_fleet_send_sleep_seconds(kwargs[key]) self._set_fleet_send_sleep_seconds(kwargs[key])
elif key == "fea_eval" and kwargs[key] == True: elif key == "fea_eval" and kwargs[key]:
candidate_size = kwargs.get("candidate_size", 10000) candidate_size = kwargs.get("candidate_size", 10000)
self._set_fea_eval(candidate_size, True) self._set_fea_eval(candidate_size, True)
......
...@@ -303,7 +303,7 @@ class Fleet(object): ...@@ -303,7 +303,7 @@ class Fleet(object):
paddle.distributed.init_parallel_env() paddle.distributed.init_parallel_env()
# hybrid parallel not support for npu/xpu # hybrid parallel not support for npu/xpu
if self._user_defined_strategy.heter_ccl_mode == False: if not self._user_defined_strategy.heter_ccl_mode:
# init hybrid parallel environment in dygraph # init hybrid parallel environment in dygraph
if tp._HYBRID_PARALLEL_GROUP is None: if tp._HYBRID_PARALLEL_GROUP is None:
self._init_hybrid_parallel_env() self._init_hybrid_parallel_env()
......
...@@ -369,7 +369,7 @@ def get_cluster_info(args): ...@@ -369,7 +369,7 @@ def get_cluster_info(args):
if os.environ.get('FLAGS_START_PORT') is not None: if os.environ.get('FLAGS_START_PORT') is not None:
start_port = os.environ.get('FLAGS_START_PORT') start_port = os.environ.get('FLAGS_START_PORT')
# auto mapping between processes and devices for auto-parallel # auto mapping between processes and devices for auto-parallel
if args.enable_auto_mapping == True: if args.enable_auto_mapping:
assert ( assert (
args.cluster_topo_path is not None args.cluster_topo_path is not None
), "The cluster topology must be provied when enabling auto mapping." ), "The cluster topology must be provied when enabling auto mapping."
......
...@@ -1582,7 +1582,7 @@ class ParameterServerLauncher(object): ...@@ -1582,7 +1582,7 @@ class ParameterServerLauncher(object):
x.strip().split(":")[0] for x in self.worker_endpoints.split(",") x.strip().split(":")[0] for x in self.worker_endpoints.split(",")
] ]
if self.with_coordinator == True: if self.with_coordinator:
self.coordinator_endpoints_ips = [ self.coordinator_endpoints_ips = [
x.strip().split(":")[0] x.strip().split(":")[0]
for x in self.coordinator_endpoints.split(",") for x in self.coordinator_endpoints.split(",")
......
...@@ -582,7 +582,7 @@ def _parallel_linear( ...@@ -582,7 +582,7 @@ def _parallel_linear(
# set is_distributed for splited bias # set is_distributed for splited bias
# if a linear layer is splited by row, each rank would hold a complete bias and they should be the same in each rank. # if a linear layer is splited by row, each rank would hold a complete bias and they should be the same in each rank.
# if a linear layer is splited by col, the bias would also be split into each rank as its weight # if a linear layer is splited by col, the bias would also be split into each rank as its weight
if axis == 1 and linear._bias_attr != False: if axis == 1 and linear._bias_attr is not False:
_set_var_distributed(linear.bias) _set_var_distributed(linear.bias)
if not gather_out: if not gather_out:
......
...@@ -53,7 +53,7 @@ class GradientMergeOptimizer(MetaOptimizerBase): ...@@ -53,7 +53,7 @@ class GradientMergeOptimizer(MetaOptimizerBase):
return False return False
can_apply = ( can_apply = (
self.user_defined_strategy.gradient_merge == True self.user_defined_strategy.gradient_merge
) and self.user_defined_strategy.gradient_merge_configs["k_steps"] > 1 ) and self.user_defined_strategy.gradient_merge_configs["k_steps"] > 1
return can_apply return can_apply
......
...@@ -177,7 +177,7 @@ class GraphExecutionOptimizer(MetaOptimizerBase): ...@@ -177,7 +177,7 @@ class GraphExecutionOptimizer(MetaOptimizerBase):
gradient_scale_configs['scale_strategy'] gradient_scale_configs['scale_strategy']
] ]
if self.user_defined_strategy.recompute == True: if self.user_defined_strategy.recompute:
logging.warn( logging.warn(
"set enable_sequential_execution=True since you have enable the recompute strategy" "set enable_sequential_execution=True since you have enable the recompute strategy"
) )
......
...@@ -66,7 +66,7 @@ class PipelineOptimizer(MetaOptimizerBase): ...@@ -66,7 +66,7 @@ class PipelineOptimizer(MetaOptimizerBase):
if self.use_sharding: if self.use_sharding:
return False return False
if self.user_defined_strategy.pipeline == True: if self.user_defined_strategy.pipeline:
return True return True
return False return False
......
...@@ -65,7 +65,7 @@ class RawProgramOptimizer(MetaOptimizerBase): ...@@ -65,7 +65,7 @@ class RawProgramOptimizer(MetaOptimizerBase):
if not self.role_maker._is_collective: if not self.role_maker._is_collective:
return False return False
if self.without_graph_optimization == True: if self.without_graph_optimization:
return True return True
return False return False
......
...@@ -55,7 +55,7 @@ class RecomputeOptimizer(MetaOptimizerBase): ...@@ -55,7 +55,7 @@ class RecomputeOptimizer(MetaOptimizerBase):
if not self.role_maker._is_collective: if not self.role_maker._is_collective:
return False return False
if self.user_defined_strategy.recompute == True: if self.user_defined_strategy.recompute:
if ( if (
len(self.user_defined_strategy.recompute_configs["checkpoints"]) len(self.user_defined_strategy.recompute_configs["checkpoints"])
== 0 == 0
......
...@@ -38,7 +38,7 @@ def check_broadcast(block): ...@@ -38,7 +38,7 @@ def check_broadcast(block):
broadcast_vars = {} broadcast_vars = {}
for idx, op in enumerate(block.ops): for idx, op in enumerate(block.ops):
if op.type == "c_broadcast": if op.type == "c_broadcast":
if op.all_attrs()["use_calc_stream"] == False: if not op.all_attrs()["use_calc_stream"]:
var_name = op.desc.input_arg_names()[0] var_name = op.desc.input_arg_names()[0]
if "@BroadCast" in var_name: if "@BroadCast" in var_name:
if var_name in broadcast_vars: if var_name in broadcast_vars:
...@@ -72,7 +72,7 @@ def check_broadcast(block): ...@@ -72,7 +72,7 @@ def check_broadcast(block):
last_sync_calc_op_idx = idx last_sync_calc_op_idx = idx
continue continue
if op.type == "c_broadcast": if op.type == "c_broadcast":
if op.all_attrs()["use_calc_stream"] == False: if not op.all_attrs()["use_calc_stream"]:
var_name = op.desc.input_arg_names()[0] var_name = op.desc.input_arg_names()[0]
if "@BroadCast" in var_name: if "@BroadCast" in var_name:
if broadcast_vars[var_name]["fill_constant_pos"] != -1: if broadcast_vars[var_name]["fill_constant_pos"] != -1:
...@@ -117,7 +117,7 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): ...@@ -117,7 +117,7 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1):
for idx, op in enumerate(block.ops): for idx, op in enumerate(block.ops):
# sharding use both allreduce and reduce to sync grad # sharding use both allreduce and reduce to sync grad
if op.type == "c_allreduce_sum" or op.type == "c_reduce_sum": if op.type == "c_allreduce_sum" or op.type == "c_reduce_sum":
if op.all_attrs()["use_calc_stream"] == False: if not op.all_attrs()["use_calc_stream"]:
ring_id = op.desc.attr("ring_id") ring_id = op.desc.attr("ring_id")
var_name = op.desc.input_arg_names()[0] var_name = op.desc.input_arg_names()[0]
param = var_name.split("@")[0] param = var_name.split("@")[0]
...@@ -153,7 +153,7 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1): ...@@ -153,7 +153,7 @@ def check_allreduce_sum(block, shard, sharding_ring_id, dp_ring_id=-1):
dp_grads_status[var_name] = 1 dp_grads_status[var_name] = 1
# check sharding allreduce and reduce but skip megatron allreduce # check sharding allreduce and reduce but skip megatron allreduce
elif op.type == "c_allreduce_sum" or op.type == "c_reduce_sum": elif op.type == "c_allreduce_sum" or op.type == "c_reduce_sum":
if op.all_attrs()["use_calc_stream"] == False: if not op.all_attrs()["use_calc_stream"]:
var_name = op.desc.input_arg_names()[0] var_name = op.desc.input_arg_names()[0]
ring_id = op.desc.attr("ring_id") ring_id = op.desc.attr("ring_id")
if ring_id == sharding_ring_id: if ring_id == sharding_ring_id:
......
...@@ -57,7 +57,7 @@ class TensorParallelOptimizer(MetaOptimizerBase): ...@@ -57,7 +57,7 @@ class TensorParallelOptimizer(MetaOptimizerBase):
if not self.role_maker._is_collective: if not self.role_maker._is_collective:
return False return False
if self.user_defined_strategy.tensor_parallel == True: if self.user_defined_strategy.tensor_parallel:
return True return True
return False return False
......
...@@ -720,7 +720,7 @@ class PipelineLayer(Layer): ...@@ -720,7 +720,7 @@ class PipelineLayer(Layer):
def _need_recompute(self, funcs, inputs): def _need_recompute(self, funcs, inputs):
if not any( if not any(
input_.stop_gradient == False not input_.stop_gradient
for input_ in inputs for input_ in inputs
if isinstance(input_, paddle.Tensor) if isinstance(input_, paddle.Tensor)
): ):
......
...@@ -90,7 +90,7 @@ def distributed_model(model): ...@@ -90,7 +90,7 @@ def distributed_model(model):
amp_enable = False amp_enable = False
strategy = fleet_env._user_defined_strategy strategy = fleet_env._user_defined_strategy
if strategy.amp == True: if strategy.amp:
amp_enable = True amp_enable = True
amp_level = "O2" if strategy.amp_configs['use_pure_fp16'] else "O1" amp_level = "O2" if strategy.amp_configs['use_pure_fp16'] else "O1"
if amp_level.upper() == "O2": if amp_level.upper() == "O2":
...@@ -122,7 +122,7 @@ def distributed_model(model): ...@@ -122,7 +122,7 @@ def distributed_model(model):
use_dynamic_loss_scaling=use_dynamic_loss_scaling, use_dynamic_loss_scaling=use_dynamic_loss_scaling,
) )
if strategy.heter_ccl_mode == True: if strategy.heter_ccl_mode:
distributed_model = paddle.DataParallel( distributed_model = paddle.DataParallel(
model, model,
comm_buffer_size=strategy.fuse_grad_size_in_MB, comm_buffer_size=strategy.fuse_grad_size_in_MB,
......
...@@ -59,7 +59,7 @@ def _dygraph_distributed_optimizer(optimizer, strategy=None): ...@@ -59,7 +59,7 @@ def _dygraph_distributed_optimizer(optimizer, strategy=None):
fleet_env._context = {} fleet_env._context = {}
if fleet_env.worker_num() > 1: if fleet_env.worker_num() > 1:
if fleet_env._user_defined_strategy.heter_ccl_mode == False: if not fleet_env._user_defined_strategy.heter_ccl_mode:
return HybridParallelOptimizer( return HybridParallelOptimizer(
optimizer, fleet_env._hcg, fleet_env._user_defined_strategy optimizer, fleet_env._hcg, fleet_env._user_defined_strategy
) )
......
...@@ -41,7 +41,7 @@ def detach_variable(inputs): ...@@ -41,7 +41,7 @@ def detach_variable(inputs):
def check_recompute_necessary(inputs): def check_recompute_necessary(inputs):
if not any( if not any(
input_.stop_gradient == False not input_.stop_gradient
for input_ in inputs for input_ in inputs
if isinstance(input_, (core.eager.Tensor, paddle.Tensor)) if isinstance(input_, (core.eager.Tensor, paddle.Tensor))
): ):
......
...@@ -337,7 +337,7 @@ class CommonAccessor: ...@@ -337,7 +337,7 @@ class CommonAccessor:
self.table_num = size self.table_num = size
self.table_dim = single_dim self.table_dim = single_dim
if oop.type != 'adam' and adam_d2sum == True: if oop.type != 'adam' and adam_d2sum:
print('optimization algorithm is not adam, set adam_d2sum False') print('optimization algorithm is not adam, set adam_d2sum False')
adam_d2sum = False adam_d2sum = False
print("adam_d2sum:", adam_d2sum) print("adam_d2sum:", adam_d2sum)
......
...@@ -231,7 +231,7 @@ class HybridParallelInferenceHelper(object): ...@@ -231,7 +231,7 @@ class HybridParallelInferenceHelper(object):
) )
else: else:
if isinstance(role_maker, fleet.base.role_maker.RoleMakerBase): if isinstance(role_maker, fleet.base.role_maker.RoleMakerBase):
assert role_maker._is_collective == True assert role_maker._is_collective
self.role_maker = role_maker self.role_maker = role_maker
# communication_group info # communication_group info
......
...@@ -210,7 +210,7 @@ class DistributedInfer: ...@@ -210,7 +210,7 @@ class DistributedInfer:
if found: if found:
break break
if found: if found:
if output_indexes[j] == True: if output_indexes[j]:
warnings.warn( warnings.warn(
"unable to re-arrange dags order to combine distributed embedding ops" "unable to re-arrange dags order to combine distributed embedding ops"
) )
......
...@@ -80,9 +80,9 @@ class AMPState(object): ...@@ -80,9 +80,9 @@ class AMPState(object):
fwd_op_id = dist_op_context.grad_op_id_to_op_id[ fwd_op_id = dist_op_context.grad_op_id_to_op_id[
op.desc.original_id() op.desc.original_id()
] ]
if self._is_fp16_op(fwd_op_id) == True: if self._is_fp16_op(fwd_op_id) is True:
self._op_fp16_dict[op.desc.original_id()] = True self._op_fp16_dict[op.desc.original_id()] = True
elif self._is_fp16_op(fwd_op_id) == False: elif self._is_fp16_op(fwd_op_id) is False:
self._op_fp16_dict[op.desc.original_id()] = False self._op_fp16_dict[op.desc.original_id()] = False
elif int(op.attr('op_role')) == int(OpRole.Optimize): elif int(op.attr('op_role')) == int(OpRole.Optimize):
break break
...@@ -132,13 +132,13 @@ class AMPState(object): ...@@ -132,13 +132,13 @@ class AMPState(object):
# if it's one of inputs # if it's one of inputs
if ( if (
self._is_fp16_op(prev_op.desc.original_id()) self._is_fp16_op(prev_op.desc.original_id())
== False is False
or prev_op.type in amp_lists.black_list or prev_op.type in amp_lists.black_list
): ):
is_black_op = True is_black_op = True
elif ( elif (
self._is_fp16_op(prev_op.desc.original_id()) self._is_fp16_op(prev_op.desc.original_id())
== True is True
or prev_op.type in amp_lists.white_list or prev_op.type in amp_lists.white_list
): ):
is_white_op = True is_white_op = True
...@@ -161,7 +161,7 @@ class AMPState(object): ...@@ -161,7 +161,7 @@ class AMPState(object):
num_cast_ops = 0 num_cast_ops = 0
if int(op.attr('op_role')) == int(OpRole.Backward): if int(op.attr('op_role')) == int(OpRole.Backward):
break break
if self._is_fp16_op(op.desc.original_id()) == False: if self._is_fp16_op(op.desc.original_id()) is False:
num_cast_ops = self._insert_cast_op_forward( num_cast_ops = self._insert_cast_op_forward(
op, op,
idx, idx,
...@@ -169,7 +169,7 @@ class AMPState(object): ...@@ -169,7 +169,7 @@ class AMPState(object):
core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP32,
dist_context, dist_context,
) )
elif self._is_fp16_op(op.desc.original_id()) == True: elif self._is_fp16_op(op.desc.original_id()) is True:
num_cast_ops = self._insert_cast_op_forward( num_cast_ops = self._insert_cast_op_forward(
op, op,
idx, idx,
...@@ -302,7 +302,7 @@ class AMPState(object): ...@@ -302,7 +302,7 @@ class AMPState(object):
grad_op_orig_id = grad_op.desc.original_id() grad_op_orig_id = grad_op.desc.original_id()
dist_op_context = dist_context.dist_op_context dist_op_context = dist_context.dist_op_context
if grad_op_orig_id in dist_op_context.grad_op_id_to_op_id: if grad_op_orig_id in dist_op_context.grad_op_id_to_op_id:
if self._is_fp16_op(grad_op_orig_id) == False: # fp32 if self._is_fp16_op(grad_op_orig_id) is False: # fp32
num_cast_ops = self._insert_cast_op_backward( num_cast_ops = self._insert_cast_op_backward(
grad_op, grad_op,
idx, idx,
...@@ -311,7 +311,7 @@ class AMPState(object): ...@@ -311,7 +311,7 @@ class AMPState(object):
dist_context, dist_context,
appended_grad_times, appended_grad_times,
) )
elif self._is_fp16_op(grad_op_orig_id) == True: # fp16 elif self._is_fp16_op(grad_op_orig_id) is True: # fp16
num_cast_ops = self._insert_cast_op_backward( num_cast_ops = self._insert_cast_op_backward(
grad_op, grad_op,
idx, idx,
......
...@@ -235,10 +235,7 @@ class FP16State(object): ...@@ -235,10 +235,7 @@ class FP16State(object):
for op in block.ops: for op in block.ops:
if is_forward_op(op): if is_forward_op(op):
# NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python # NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python
if ( if self._is_fp16_op(op.desc.original_id()) or op.type == "cast":
self._is_fp16_op(op.desc.original_id()) == True
or op.type == "cast"
):
for in_name in op.input_names: for in_name in op.input_names:
if _keep_fp32_input(op, in_name): if _keep_fp32_input(op, in_name):
continue continue
...@@ -255,7 +252,7 @@ class FP16State(object): ...@@ -255,7 +252,7 @@ class FP16State(object):
self.set_var_to_fp16(out_var_name, block) self.set_var_to_fp16(out_var_name, block)
set_op_dtype_to_fp16(op) set_op_dtype_to_fp16(op)
# NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python # NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python
elif self._is_fp16_op(op.desc.original_id()) == False: elif not self._is_fp16_op(op.desc.original_id()):
for out_var_name in op.output_arg_names: for out_var_name in op.output_arg_names:
out_var = block.vars.get(out_var_name) out_var = block.vars.get(out_var_name)
if out_var is None or out_var.type not in _valid_types: if out_var is None or out_var.type not in _valid_types:
...@@ -263,7 +260,7 @@ class FP16State(object): ...@@ -263,7 +260,7 @@ class FP16State(object):
if out_var.dtype == core.VarDesc.VarType.FP16: if out_var.dtype == core.VarDesc.VarType.FP16:
out_var.desc.set_dtype(core.VarDesc.VarType.FP32) out_var.desc.set_dtype(core.VarDesc.VarType.FP32)
elif is_backward_op(op): elif is_backward_op(op):
if self._is_fp16_op(op.desc.original_id()) == True: if self._is_fp16_op(op.desc.original_id()):
for out_name in op.output_names: for out_name in op.output_names:
if _keep_fp32_output(op, out_name): if _keep_fp32_output(op, out_name):
continue continue
...@@ -271,7 +268,7 @@ class FP16State(object): ...@@ -271,7 +268,7 @@ class FP16State(object):
self.set_var_to_fp16(out_var_name, block) self.set_var_to_fp16(out_var_name, block)
set_op_dtype_to_fp16(op) set_op_dtype_to_fp16(op)
# NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python # NOTE (JZ-LIANG) un-expected cast op when user call "+, -, *, /" in python
elif self._is_fp16_op(op.desc.original_id()) == False: elif not self._is_fp16_op(op.desc.original_id()):
for out_var_name in op.output_arg_names: for out_var_name in op.output_arg_names:
out_var = block.vars.get(out_var_name) out_var = block.vars.get(out_var_name)
if out_var is None or out_var.type not in _valid_types: if out_var is None or out_var.type not in _valid_types:
...@@ -290,7 +287,7 @@ class FP16State(object): ...@@ -290,7 +287,7 @@ class FP16State(object):
idx += 1 idx += 1
continue continue
elif is_forward_op(op): elif is_forward_op(op):
if self._is_fp16_op(op.desc.original_id()) == False: if not self._is_fp16_op(op.desc.original_id()):
num_cast_ops = self._insert_forward_cast_ops( num_cast_ops = self._insert_forward_cast_ops(
op, op,
idx, idx,
...@@ -299,7 +296,7 @@ class FP16State(object): ...@@ -299,7 +296,7 @@ class FP16State(object):
core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP32,
self.dist_context, self.dist_context,
) )
elif self._is_fp16_op(op.desc.original_id()) == True: elif self._is_fp16_op(op.desc.original_id()):
num_cast_ops = self._insert_forward_cast_ops( num_cast_ops = self._insert_forward_cast_ops(
op, op,
idx, idx,
...@@ -310,7 +307,7 @@ class FP16State(object): ...@@ -310,7 +307,7 @@ class FP16State(object):
) )
elif is_backward_op(op): elif is_backward_op(op):
if op.desc.original_id() in dist_op_context.grad_op_id_to_op_id: if op.desc.original_id() in dist_op_context.grad_op_id_to_op_id:
if self._is_fp16_op(op.desc.original_id()) == False: if not self._is_fp16_op(op.desc.original_id()):
num_cast_ops = self._insert_backward_cast_ops( num_cast_ops = self._insert_backward_cast_ops(
op, op,
idx, idx,
...@@ -319,7 +316,7 @@ class FP16State(object): ...@@ -319,7 +316,7 @@ class FP16State(object):
core.VarDesc.VarType.FP32, core.VarDesc.VarType.FP32,
self.dist_context, self.dist_context,
) )
elif self._is_fp16_op(op.desc.original_id()) == True: elif self._is_fp16_op(op.desc.original_id()):
num_cast_ops = self._insert_backward_cast_ops( num_cast_ops = self._insert_backward_cast_ops(
op, op,
idx, idx,
......
...@@ -140,7 +140,7 @@ class AddLrDecayTablePass(PassBase): ...@@ -140,7 +140,7 @@ class AddLrDecayTablePass(PassBase):
def _apply_single_impl(self, main_program, startup_program, pass_ctx): def _apply_single_impl(self, main_program, startup_program, pass_ctx):
attrs = pass_ctx._attrs attrs = pass_ctx._attrs
if hasattr(attrs['origin_main_program'], 'lr_sheduler') == False: if not hasattr(attrs['origin_main_program'], 'lr_sheduler'):
return return
assert isinstance( assert isinstance(
......
...@@ -304,7 +304,7 @@ class DistributedOpsPass(PassBase): ...@@ -304,7 +304,7 @@ class DistributedOpsPass(PassBase):
if found: if found:
break break
if found: if found:
if output_indexes[j] == True: if output_indexes[j]:
warnings.warn( warnings.warn(
"unable to re-arrange dags order to combine distributed embedding ops" "unable to re-arrange dags order to combine distributed embedding ops"
) )
......
...@@ -443,7 +443,7 @@ class CommonAccessor(Accessor): ...@@ -443,7 +443,7 @@ class CommonAccessor(Accessor):
self.table_num = size self.table_num = size
self.table_dim = single_dim self.table_dim = single_dim
if oop.type != 'adam' and adam_d2sum == True: if oop.type != 'adam' and adam_d2sum:
print('optimization algorithm is not adam, set adam_d2sum False') print('optimization algorithm is not adam, set adam_d2sum False')
adam_d2sum = False adam_d2sum = False
print("adam_d2sum:", adam_d2sum) print("adam_d2sum:", adam_d2sum)
...@@ -703,7 +703,7 @@ class SparseTable(Table): ...@@ -703,7 +703,7 @@ class SparseTable(Table):
if ( if (
ctx.is_tensor_table() ctx.is_tensor_table()
or len(ctx.origin_varnames()) < 1 or len(ctx.origin_varnames()) < 1
or (ctx.is_sparse() == False) or (not ctx.is_sparse())
): ):
return return
table_proto.table_id = ctx.table_id() table_proto.table_id = ctx.table_id()
...@@ -810,7 +810,7 @@ class GeoSparseTable(SparseTable): ...@@ -810,7 +810,7 @@ class GeoSparseTable(SparseTable):
if ( if (
ctx.is_tensor_table() ctx.is_tensor_table()
or len(ctx.origin_varnames()) < 1 or len(ctx.origin_varnames()) < 1
or (ctx.is_sparse() == False) or (not ctx.is_sparse())
): ):
return return
table_proto.table_id = ctx.table_id() table_proto.table_id = ctx.table_id()
...@@ -845,7 +845,7 @@ class DenseTable(Table): ...@@ -845,7 +845,7 @@ class DenseTable(Table):
if ( if (
ctx.is_tensor_table() ctx.is_tensor_table()
or len(ctx.origin_varnames()) < 1 or len(ctx.origin_varnames()) < 1
or (ctx.is_sparse() == True) or (ctx.is_sparse())
): ):
return return
...@@ -1281,7 +1281,7 @@ class TheOnePSRuntime(RuntimeBase): ...@@ -1281,7 +1281,7 @@ class TheOnePSRuntime(RuntimeBase):
if not is_test: if not is_test:
if ( if (
self.context['ps_mode'] == DistributedMode.GEO self.context['ps_mode'] == DistributedMode.GEO
or self.is_heter_ps_mode == True or self.is_heter_ps_mode
): ):
self._communicator.init_params(dense_map) self._communicator.init_params(dense_map)
else: else:
...@@ -1298,7 +1298,7 @@ class TheOnePSRuntime(RuntimeBase): ...@@ -1298,7 +1298,7 @@ class TheOnePSRuntime(RuntimeBase):
if ( if (
self.context['ps_mode'] == DistributedMode.GEO self.context['ps_mode'] == DistributedMode.GEO
or self.is_heter_ps_mode == True or self.is_heter_ps_mode
): ):
if not self._communicator.is_running(): if not self._communicator.is_running():
self._communicator.start() self._communicator.start()
......
...@@ -1744,7 +1744,7 @@ def create_backward_block( ...@@ -1744,7 +1744,7 @@ def create_backward_block(
): ):
is_skip = True is_skip = True
break break
if is_skip == True: if is_skip:
continue continue
block_append_op(program, origin_program, heter_block, op) block_append_op(program, origin_program, heter_block, op)
......
...@@ -237,7 +237,7 @@ def main(use_cuda, parallel, nn_type, combine): ...@@ -237,7 +237,7 @@ def main(use_cuda, parallel, nn_type, combine):
if not use_cuda and not parallel: if not use_cuda and not parallel:
save_dirname = "recognize_digits_" + nn_type + ".inference.model" save_dirname = "recognize_digits_" + nn_type + ".inference.model"
save_full_dirname = "recognize_digits_" + nn_type + ".train.model" save_full_dirname = "recognize_digits_" + nn_type + ".train.model"
if combine == True: if combine:
model_filename = "__model_combined__" model_filename = "__model_combined__"
params_filename = "__params_combined__" params_filename = "__params_combined__"
......
...@@ -144,7 +144,7 @@ class TestToStatic(unittest.TestCase): ...@@ -144,7 +144,7 @@ class TestToStatic(unittest.TestCase):
# inputs = InputSpec([batch_size, hidden_size], 'float32', 'x') # inputs = InputSpec([batch_size, hidden_size], 'float32', 'x')
# labels = InputSpec([batch_size], 'int64', 'label') # labels = InputSpec([batch_size], 'int64', 'label')
assert _non_static_mode() == True assert _non_static_mode()
engine = auto.Engine( engine = auto.Engine(
model=mlp, model=mlp,
loss=loss, loss=loss,
...@@ -155,7 +155,7 @@ class TestToStatic(unittest.TestCase): ...@@ -155,7 +155,7 @@ class TestToStatic(unittest.TestCase):
engine.fit(dataset, batch_size=batch_size) engine.fit(dataset, batch_size=batch_size)
engine.evaluate(dataset, batch_size=batch_size) engine.evaluate(dataset, batch_size=batch_size)
engine.predict(dataset, batch_size=batch_size) engine.predict(dataset, batch_size=batch_size)
assert _non_static_mode() == False assert not _non_static_mode()
class TestLazyInit(unittest.TestCase): class TestLazyInit(unittest.TestCase):
......
...@@ -593,7 +593,7 @@ class TestHessianNoBatch(unittest.TestCase): ...@@ -593,7 +593,7 @@ class TestHessianNoBatch(unittest.TestCase):
numerical_hessian = utils._np_concat_matrix_sequence(numerical_hessian) numerical_hessian = utils._np_concat_matrix_sequence(numerical_hessian)
self.x.stop_gradient = False self.x.stop_gradient = False
hessian = paddle.incubate.autograd.Hessian(func, self.x) hessian = paddle.incubate.autograd.Hessian(func, self.x)
assert hessian[:].stop_gradient == False assert not hessian[:].stop_gradient
np.testing.assert_allclose( np.testing.assert_allclose(
hessian[:].numpy(), numerical_hessian, self.rtol, self.atol hessian[:].numpy(), numerical_hessian, self.rtol, self.atol
) )
......
...@@ -116,9 +116,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): ...@@ -116,9 +116,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
self.optimizer(avg_cost, strategy, train_prog, startup_prog) self.optimizer(avg_cost, strategy, train_prog, startup_prog)
ops = [op.type for op in avg_cost.block.ops] ops = [op.type for op in avg_cost.block.ops]
vars = [x.name for x in train_prog.list_vars()] vars = [x.name for x in train_prog.list_vars()]
parameters = [ parameters = [x.name for x in train_prog.list_vars() if x.persistable]
x.name for x in train_prog.list_vars() if x.persistable == True
]
self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('@BroadCast', ''.join(vars))
self.assertIn('cast', ops) self.assertIn('cast', ops)
self.assertIn('check_finite_and_unscale', ops) self.assertIn('check_finite_and_unscale', ops)
...@@ -227,9 +225,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): ...@@ -227,9 +225,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
ops = [op.type for op in avg_cost.block.ops] ops = [op.type for op in avg_cost.block.ops]
vars = [x.name for x in train_prog.list_vars()] vars = [x.name for x in train_prog.list_vars()]
parameters = [ parameters = [x.name for x in train_prog.list_vars() if x.persistable]
x.name for x in train_prog.list_vars() if x.persistable == True
]
self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('@BroadCast', ''.join(vars))
self.assertIn('subprog', ''.join(vars)) self.assertIn('subprog', ''.join(vars))
...@@ -316,9 +312,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): ...@@ -316,9 +312,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
ops = [op.type for op in avg_cost.block.ops] ops = [op.type for op in avg_cost.block.ops]
vars = [x.name for x in train_prog.list_vars()] vars = [x.name for x in train_prog.list_vars()]
parameters = [ parameters = [x.name for x in train_prog.list_vars() if x.persistable]
x.name for x in train_prog.list_vars() if x.persistable == True
]
self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('@BroadCast', ''.join(vars))
self.assertIn('subprog', ''.join(vars)) self.assertIn('subprog', ''.join(vars))
...@@ -445,9 +439,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): ...@@ -445,9 +439,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
ops = [op.type for op in avg_cost.block.ops] ops = [op.type for op in avg_cost.block.ops]
vars = [x.name for x in train_prog.list_vars()] vars = [x.name for x in train_prog.list_vars()]
parameters = [ parameters = [x.name for x in train_prog.list_vars() if x.persistable]
x.name for x in train_prog.list_vars() if x.persistable == True
]
self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('@BroadCast', ''.join(vars))
self.assertIn('cast', ops) self.assertIn('cast', ops)
...@@ -564,9 +556,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): ...@@ -564,9 +556,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
startup_prog, startup_prog,
regularization=regularization, regularization=regularization,
) )
parameters = [ parameters = [x.name for x in train_prog.list_vars() if x.persistable]
x.name for x in train_prog.list_vars() if x.persistable == True
]
ops = [op.type for op in avg_cost.block.ops] ops = [op.type for op in avg_cost.block.ops]
vars = [x.name for x in train_prog.list_vars()] vars = [x.name for x in train_prog.list_vars()]
self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('@BroadCast', ''.join(vars))
...@@ -653,9 +643,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer): ...@@ -653,9 +643,7 @@ class TestFleetShardingMetaOptimizer(TestFleetMetaOptimizer):
self.optimizer( self.optimizer(
avg_cost, strategy, train_prog, startup_prog, grad_clip=clip avg_cost, strategy, train_prog, startup_prog, grad_clip=clip
) )
parameters = [ parameters = [x.name for x in train_prog.list_vars() if x.persistable]
x.name for x in train_prog.list_vars() if x.persistable == True
]
ops = [op.type for op in avg_cost.block.ops] ops = [op.type for op in avg_cost.block.ops]
vars = [x.name for x in train_prog.list_vars()] vars = [x.name for x in train_prog.list_vars()]
self.assertIn('@BroadCast', ''.join(vars)) self.assertIn('@BroadCast', ''.join(vars))
......
...@@ -420,13 +420,13 @@ class TestAmpScaler(unittest.TestCase): ...@@ -420,13 +420,13 @@ class TestAmpScaler(unittest.TestCase):
decr_every_n_nan_or_inf=2, decr_every_n_nan_or_inf=2,
use_dynamic_loss_scaling=True, use_dynamic_loss_scaling=True,
) )
self.assertEqual(scaler.is_enable() == True, True) self.assertEqual(scaler.is_enable(), True)
self.assertEqual(scaler.get_init_loss_scaling() == 1024, True) self.assertEqual(scaler.get_init_loss_scaling() == 1024, True)
self.assertEqual(scaler.get_incr_ratio() == 2.0, True) self.assertEqual(scaler.get_incr_ratio() == 2.0, True)
self.assertEqual(scaler.get_decr_ratio() == 0.5, True) self.assertEqual(scaler.get_decr_ratio() == 0.5, True)
self.assertEqual(scaler.get_incr_every_n_steps() == 1000, True) self.assertEqual(scaler.get_incr_every_n_steps() == 1000, True)
self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 2, True) self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 2, True)
self.assertEqual(scaler.is_use_dynamic_loss_scaling() == True, True) self.assertEqual(scaler.is_use_dynamic_loss_scaling(), True)
scaler.set_decr_every_n_nan_or_inf(4) scaler.set_decr_every_n_nan_or_inf(4)
self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 4, True) self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 4, True)
scaler.set_decr_ratio(0.1) scaler.set_decr_ratio(0.1)
...@@ -460,7 +460,7 @@ class TestAmpScaler(unittest.TestCase): ...@@ -460,7 +460,7 @@ class TestAmpScaler(unittest.TestCase):
scaler3 = paddle.amp.GradScaler(enable=False) scaler3 = paddle.amp.GradScaler(enable=False)
scaler3.load_state_dict(scaler_state) scaler3.load_state_dict(scaler_state)
self.assertEqual(scaler3.is_enable() == False, True) self.assertFalse(scaler3.is_enable())
def test_state_dict_and_load_state_dict_error(self): def test_state_dict_and_load_state_dict_error(self):
def test_error(): def test_error():
......
...@@ -419,13 +419,13 @@ class TestAmpScaler(unittest.TestCase): ...@@ -419,13 +419,13 @@ class TestAmpScaler(unittest.TestCase):
decr_every_n_nan_or_inf=2, decr_every_n_nan_or_inf=2,
use_dynamic_loss_scaling=True, use_dynamic_loss_scaling=True,
) )
self.assertEqual(scaler.is_enable() == True, True) self.assertEqual(scaler.is_enable(), True)
self.assertEqual(scaler.get_init_loss_scaling() == 1024, True) self.assertEqual(scaler.get_init_loss_scaling() == 1024, True)
self.assertEqual(scaler.get_incr_ratio() == 2.0, True) self.assertEqual(scaler.get_incr_ratio() == 2.0, True)
self.assertEqual(scaler.get_decr_ratio() == 0.5, True) self.assertEqual(scaler.get_decr_ratio() == 0.5, True)
self.assertEqual(scaler.get_incr_every_n_steps() == 1000, True) self.assertEqual(scaler.get_incr_every_n_steps() == 1000, True)
self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 2, True) self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 2, True)
self.assertEqual(scaler.is_use_dynamic_loss_scaling() == True, True) self.assertEqual(scaler.is_use_dynamic_loss_scaling(), True)
scaler.set_decr_every_n_nan_or_inf(4) scaler.set_decr_every_n_nan_or_inf(4)
self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 4, True) self.assertEqual(scaler.get_decr_every_n_nan_or_inf() == 4, True)
scaler.set_decr_ratio(0.1) scaler.set_decr_ratio(0.1)
...@@ -459,7 +459,7 @@ class TestAmpScaler(unittest.TestCase): ...@@ -459,7 +459,7 @@ class TestAmpScaler(unittest.TestCase):
scaler3 = paddle.amp.GradScaler(enable=False) scaler3 = paddle.amp.GradScaler(enable=False)
scaler3.load_state_dict(scaler_state) scaler3.load_state_dict(scaler_state)
self.assertEqual(scaler3.is_enable() == False, True) self.assertFalse(scaler3.is_enable())
def test_state_dict_and_load_state_dict_error(self): def test_state_dict_and_load_state_dict_error(self):
def test_error(): def test_error():
......
...@@ -356,7 +356,7 @@ class conv2d(fluid.dygraph.Layer): ...@@ -356,7 +356,7 @@ class conv2d(fluid.dygraph.Layer):
): ):
super(conv2d, self).__init__() super(conv2d, self).__init__()
if use_bias == False: if not use_bias:
con_bias_attr = False con_bias_attr = False
else: else:
con_bias_attr = fluid.ParamAttr( con_bias_attr = fluid.ParamAttr(
...@@ -426,7 +426,7 @@ class DeConv2D(fluid.dygraph.Layer): ...@@ -426,7 +426,7 @@ class DeConv2D(fluid.dygraph.Layer):
): ):
super(DeConv2D, self).__init__() super(DeConv2D, self).__init__()
if use_bias == False: if not use_bias:
de_bias_attr = False de_bias_attr = False
else: else:
de_bias_attr = fluid.ParamAttr( de_bias_attr = fluid.ParamAttr(
......
...@@ -93,7 +93,7 @@ class TestTensorSize(unittest.TestCase): ...@@ -93,7 +93,7 @@ class TestTensorSize(unittest.TestCase):
prog_trans = paddle.jit.ProgramTranslator() prog_trans = paddle.jit.ProgramTranslator()
prog_trans.enable(to_static) prog_trans.enable(to_static)
x = paddle.ones([1, 2, 3]) x = paddle.ones([1, 2, 3])
if to_static == False: if not to_static:
return tensor_size(x) return tensor_size(x)
return tensor_size(x).numpy() return tensor_size(x).numpy()
......
...@@ -134,7 +134,7 @@ class TestConvBnFusePass(PassAutoScanTest): ...@@ -134,7 +134,7 @@ class TestConvBnFusePass(PassAutoScanTest):
data_layout=data_format, data_layout=data_format,
is_test=True, is_test=True,
) )
if has_bias == True: if has_bias:
conv2d_op.inputs["Bias"] = ["conv2d_bias"] conv2d_op.inputs["Bias"] = ["conv2d_bias"]
ops = [conv2d_op, bn_op] ops = [conv2d_op, bn_op]
...@@ -156,7 +156,7 @@ class TestConvBnFusePass(PassAutoScanTest): ...@@ -156,7 +156,7 @@ class TestConvBnFusePass(PassAutoScanTest):
}, },
outputs=["batch_norm_Y"], outputs=["batch_norm_Y"],
) )
if has_bias == True: if has_bias:
program_config.weights["conv2d_bias"] = TensorConfig( program_config.weights["conv2d_bias"] = TensorConfig(
data_gen=partial(generate_conv2d_Bias) data_gen=partial(generate_conv2d_Bias)
) )
...@@ -202,7 +202,7 @@ class TestConvBnFusePass(PassAutoScanTest): ...@@ -202,7 +202,7 @@ class TestConvBnFusePass(PassAutoScanTest):
def teller2(program_config, predictor_config): def teller2(program_config, predictor_config):
return ( return (
predictor_config.mkldnn_enabled() predictor_config.mkldnn_enabled()
and program_config.ops[0].attrs['has_bias'] == True and program_config.ops[0].attrs['has_bias']
) )
self.add_ignore_check_case( self.add_ignore_check_case(
......
...@@ -43,11 +43,11 @@ class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest): ...@@ -43,11 +43,11 @@ class TestEmbeddingEltwiseLayerNormFusePass(PassAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool: def is_program_valid(self, program_config: ProgramConfig) -> bool:
# is_sparse is only support False # is_sparse is only support False
if program_config.ops[0].attrs['is_sparse'] == True: if program_config.ops[0].attrs['is_sparse']:
return False return False
# is_distributed only support False # is_distributed only support False
if program_config.ops[0].attrs['is_distributed'] == True: if program_config.ops[0].attrs['is_distributed']:
return False return False
# axis only support -1 and the last dim. # axis only support -1 and the last dim.
......
...@@ -100,7 +100,7 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): ...@@ -100,7 +100,7 @@ class TestConvAffineChannelFusePass(PassAutoScanTest):
outputs={"Out": ["affine_channel_ouput"]}, outputs={"Out": ["affine_channel_ouput"]},
data_layout=data_format, data_layout=data_format,
) )
if has_bias == True: if has_bias:
conv2d_op.inputs["Bias"] = ["conv2d_bias"] conv2d_op.inputs["Bias"] = ["conv2d_bias"]
ops = [conv2d_op, ac_op] ops = [conv2d_op, ac_op]
...@@ -123,7 +123,7 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): ...@@ -123,7 +123,7 @@ class TestConvAffineChannelFusePass(PassAutoScanTest):
}, },
outputs=["affine_channel_ouput"], outputs=["affine_channel_ouput"],
) )
if has_bias == True: if has_bias:
program_config.weights["conv2d_bias"] = TensorConfig( program_config.weights["conv2d_bias"] = TensorConfig(
data_gen=partial(generate_bias) data_gen=partial(generate_bias)
) )
...@@ -145,7 +145,7 @@ class TestConvAffineChannelFusePass(PassAutoScanTest): ...@@ -145,7 +145,7 @@ class TestConvAffineChannelFusePass(PassAutoScanTest):
def teller2(program_config, predictor_config): def teller2(program_config, predictor_config):
return ( return (
predictor_config.mkldnn_enabled() predictor_config.mkldnn_enabled()
and program_config.ops[0].attrs['has_bias'] == True and program_config.ops[0].attrs['has_bias']
) )
self.add_ignore_check_case( self.add_ignore_check_case(
......
...@@ -304,7 +304,7 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest): ...@@ -304,7 +304,7 @@ class TrtConvertConcatTest(TrtLayerAutoScanTest):
self.dynamic_shape.opt_input_shape = {} self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape): def generate_trt_nodes_num(attrs, dynamic_shape):
if dynamic_shape == True: if dynamic_shape:
return 1, 4 return 1, 4
else: else:
if attrs[0]['axis'] != 0: if attrs[0]['axis'] != 0:
......
...@@ -123,7 +123,7 @@ class TrtConvertDropoutTest(TrtLayerAutoScanTest): ...@@ -123,7 +123,7 @@ class TrtConvertDropoutTest(TrtLayerAutoScanTest):
def generate_trt_nodes_num(attrs, dynamic_shape): def generate_trt_nodes_num(attrs, dynamic_shape):
if attrs[0]['dropout_implementation'] == "upscale_in_train": if attrs[0]['dropout_implementation'] == "upscale_in_train":
return 0, 2 return 0, 2
elif self.dims == 1 and dynamic_shape == False: elif self.dims == 1 and not dynamic_shape:
return 0, 3 return 0, 3
else: else:
return 1, 2 return 1, 2
......
...@@ -85,7 +85,7 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest): ...@@ -85,7 +85,7 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
"index_data": TensorConfig( "index_data": TensorConfig(
data_gen=partial( data_gen=partial(
generate_input2 generate_input2
if index_type_int32 == True if index_type_int32
else generate_input4, else generate_input4,
index, index,
) )
...@@ -180,7 +180,7 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest): ...@@ -180,7 +180,7 @@ class TrtConvertGatherTest(TrtLayerAutoScanTest):
if self.input_num == 3: if self.input_num == 3:
return 0, 5 return 0, 5
else: else:
if dynamic_shape and self.index_type_int32 == True: if dynamic_shape and self.index_type_int32:
return 1, 3 return 1, 3
else: else:
return 0, 4 return 0, 4
......
...@@ -107,7 +107,7 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest): ...@@ -107,7 +107,7 @@ class TrtConvertGeluTest(TrtLayerAutoScanTest):
if compile_version >= valid_version: if compile_version >= valid_version:
return 1, 2 return 1, 2
else: else:
if attrs[0]['approximate'] == True: if attrs[0]['approximate']:
return 0, 3 return 0, 3
else: else:
return 1, 2 return 1, 2
......
...@@ -137,7 +137,7 @@ class TrtConvertNearestInterpTest(TrtLayerAutoScanTest): ...@@ -137,7 +137,7 @@ class TrtConvertNearestInterpTest(TrtLayerAutoScanTest):
and self.dynamic_shape.min_input_shape and self.dynamic_shape.min_input_shape
): ):
return True return True
if program_config.ops[0].attrs['align_corners'] == True: if program_config.ops[0].attrs['align_corners']:
return True return True
return False return False
......
...@@ -29,7 +29,7 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): ...@@ -29,7 +29,7 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest):
ksize = program_config.ops[0].attrs['ksize'] ksize = program_config.ops[0].attrs['ksize']
pooling_type = program_config.ops[0].attrs['pooling_type'] pooling_type = program_config.ops[0].attrs['pooling_type']
global_pooling = program_config.ops[0].attrs['global_pooling'] global_pooling = program_config.ops[0].attrs['global_pooling']
if global_pooling == False: if not global_pooling:
if pooling_type == 'avg': if pooling_type == 'avg':
for index in range(len(ksize)): for index in range(len(ksize)):
if ksize[index] <= paddings[index]: if ksize[index] <= paddings[index]:
...@@ -174,10 +174,10 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest): ...@@ -174,10 +174,10 @@ class TrtConvertPool2dTest(TrtLayerAutoScanTest):
def teller(program_config, predictor_config): def teller(program_config, predictor_config):
if ( if (
program_config.ops[0].attrs['pooling_type'] == 'avg' program_config.ops[0].attrs['pooling_type'] == 'avg'
and program_config.ops[0].attrs['global_pooling'] == False and not program_config.ops[0].attrs['global_pooling']
and program_config.ops[0].attrs['exclusive'] == True and program_config.ops[0].attrs['exclusive']
and program_config.ops[0].attrs['adaptive'] == False and not program_config.ops[0].attrs['adaptive']
and program_config.ops[0].attrs['ceil_mode'] == True and program_config.ops[0].attrs['ceil_mode']
): ):
return True return True
return False return False
......
...@@ -159,10 +159,10 @@ class TrtConvertRoiAlignTest(TrtLayerAutoScanTest): ...@@ -159,10 +159,10 @@ class TrtConvertRoiAlignTest(TrtLayerAutoScanTest):
def generate_trt_nodes_num(attrs, dynamic_shape): def generate_trt_nodes_num(attrs, dynamic_shape):
if self.num_input == 0: if self.num_input == 0:
if dynamic_shape == True: if dynamic_shape:
return 0, 5 return 0, 5
elif self.num_input == 1: elif self.num_input == 1:
if dynamic_shape == True: if dynamic_shape:
return 1, 3 return 1, 3
else: else:
return 0, 4 return 0, 4
......
...@@ -77,7 +77,7 @@ class TrtConvertShuffleChannelTest(TrtLayerAutoScanTest): ...@@ -77,7 +77,7 @@ class TrtConvertShuffleChannelTest(TrtLayerAutoScanTest):
ver = paddle_infer.get_trt_compile_version() ver = paddle_infer.get_trt_compile_version()
if ( if (
ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 8000 ver[0] * 1000 + ver[1] * 100 + ver[2] * 10 < 8000
and dynamic_shape == True and dynamic_shape
): ):
return 0, 3 return 0, 3
else: else:
......
...@@ -192,7 +192,7 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest): ...@@ -192,7 +192,7 @@ class TrtConvertSkipLayernormTest(TrtLayerAutoScanTest):
self.dynamic_shape.opt_input_shape = {} self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape): def generate_trt_nodes_num(attrs, dynamic_shape):
if dynamic_shape == True: if dynamic_shape:
return 1, 3 return 1, 3
else: else:
return 0, 4 return 0, 4
......
...@@ -181,7 +181,7 @@ class TrtConvertStackTest(TrtLayerAutoScanTest): ...@@ -181,7 +181,7 @@ class TrtConvertStackTest(TrtLayerAutoScanTest):
self.dynamic_shape.opt_input_shape = {} self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape): def generate_trt_nodes_num(attrs, dynamic_shape):
if dynamic_shape == True: if dynamic_shape:
return 1, 4 return 1, 4
else: else:
return 0, 5 return 0, 5
......
...@@ -81,7 +81,7 @@ class TrtConvertTileTest(TrtLayerAutoScanTest): ...@@ -81,7 +81,7 @@ class TrtConvertTileTest(TrtLayerAutoScanTest):
def generate_trt_nodes_num(attrs, dynamic_shape): def generate_trt_nodes_num(attrs, dynamic_shape):
ver = paddle_infer.get_trt_compile_version() ver = paddle_infer.get_trt_compile_version()
if ver[0] * 1000 + ver[1] * 100 + ver[0] * 10 >= 7000: if ver[0] * 1000 + ver[1] * 100 + ver[0] * 10 >= 7000:
if dynamic_shape == True: if dynamic_shape:
return 0, 3 return 0, 3
else: else:
return 1, 2 return 1, 2
......
...@@ -126,7 +126,7 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest): ...@@ -126,7 +126,7 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
def generate_trt_nodes_num(attrs, dynamic_shape): def generate_trt_nodes_num(attrs, dynamic_shape):
if self.dims == 1: if self.dims == 1:
return 0, 4 return 0, 4
if self.sort == False: if not self.sort:
return 0, 4 return 0, 4
return 1, 3 return 1, 3
......
...@@ -123,7 +123,7 @@ class TrtConvertTransposeTest(TrtLayerAutoScanTest): ...@@ -123,7 +123,7 @@ class TrtConvertTransposeTest(TrtLayerAutoScanTest):
self.dynamic_shape.opt_input_shape = {} self.dynamic_shape.opt_input_shape = {}
def generate_trt_nodes_num(attrs, dynamic_shape): def generate_trt_nodes_num(attrs, dynamic_shape):
if dynamic_shape == True: if dynamic_shape:
return 1, 2 return 1, 2
else: else:
if attrs[0]['axis'][0] == 0: if attrs[0]['axis'][0] == 0:
......
...@@ -28,7 +28,7 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest): ...@@ -28,7 +28,7 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest):
def sample_program_configs(self): def sample_program_configs(self):
def generate_input1(attrs: List[Dict[str, Any]], batch, channel): def generate_input1(attrs: List[Dict[str, Any]], batch, channel):
if attrs[0]['iou_aware'] == True: if attrs[0]['iou_aware']:
return np.ones([batch, 3 * (channel + 6), 13, 13]).astype( return np.ones([batch, 3 * (channel + 6), 13, 13]).astype(
np.float32 np.float32
) )
...@@ -108,7 +108,7 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest): ...@@ -108,7 +108,7 @@ class TrtConvertYoloBoxTest(TrtLayerAutoScanTest):
self, program_config self, program_config
) -> (paddle_infer.Config, List[int], float): ) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs): def generate_dynamic_shape(attrs):
if attrs[0]['iou_aware'] == True: if attrs[0]['iou_aware']:
channel = 3 * (attrs[0]['class_num'] + 6) channel = 3 * (attrs[0]['class_num'] + 6)
self.dynamic_shape.min_input_shape = { self.dynamic_shape.min_input_shape = {
"yolo_box_input": [1, channel, 12, 12], "yolo_box_input": [1, channel, 12, 12],
......
...@@ -128,7 +128,7 @@ class TestInt8(TestElementwiseAddOp): ...@@ -128,7 +128,7 @@ class TestInt8(TestElementwiseAddOp):
def test_check_output(self): def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
self.init_scales() self.init_scales()
self.check_output(check_dygraph=(self.use_mkldnn == False)) self.check_output(check_dygraph=(not self.use_mkldnn))
def test_check_grad_normal(self): def test_check_grad_normal(self):
pass pass
...@@ -165,9 +165,7 @@ class TestInt8Scales(TestInt8): ...@@ -165,9 +165,7 @@ class TestInt8Scales(TestInt8):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
self.init_scales() self.init_scales()
int_atol = 1 # different quantization techniques int_atol = 1 # different quantization techniques
self.check_output( self.check_output(check_dygraph=(not self.use_mkldnn), atol=int_atol)
check_dygraph=(self.use_mkldnn == False), atol=int_atol
)
class TestUint8Scales(TestInt8Scales): class TestUint8Scales(TestInt8Scales):
......
...@@ -101,7 +101,7 @@ class TestInt8(ElementwiseMulOp): ...@@ -101,7 +101,7 @@ class TestInt8(ElementwiseMulOp):
def test_check_output(self): def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
self.init_scales() self.init_scales()
self.check_output(check_dygraph=(self.use_mkldnn == False)) self.check_output(check_dygraph=(not self.use_mkldnn))
def test_check_grad_normal(self): def test_check_grad_normal(self):
pass pass
...@@ -138,9 +138,7 @@ class TestInt8Scales(TestInt8): ...@@ -138,9 +138,7 @@ class TestInt8Scales(TestInt8):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
self.init_scales() self.init_scales()
int_atol = 1 # different quantization techniques int_atol = 1 # different quantization techniques
self.check_output( self.check_output(check_dygraph=(not self.use_mkldnn), atol=int_atol)
check_dygraph=(self.use_mkldnn == False), atol=int_atol
)
class TestUint8Scales(TestInt8Scales): class TestUint8Scales(TestInt8Scales):
......
...@@ -49,7 +49,7 @@ def resize_short(img, target_size): ...@@ -49,7 +49,7 @@ def resize_short(img, target_size):
def crop_image(img, target_size, center): def crop_image(img, target_size, center):
width, height = img.size width, height = img.size
size = target_size size = target_size
if center == True: if center:
w_start = (width - size) / 2 w_start = (width - size) / 2
h_start = (height - size) / 2 h_start = (height - size) / 2
else: else:
......
...@@ -371,25 +371,22 @@ class OpTest(unittest.TestCase): ...@@ -371,25 +371,22 @@ class OpTest(unittest.TestCase):
return True return True
def is_xpu_op_test(): def is_xpu_op_test():
return hasattr(cls, "use_xpu") and cls.use_xpu == True return hasattr(cls, "use_xpu") and cls.use_xpu
def is_mkldnn_op_test(): def is_mkldnn_op_test():
return hasattr(cls, "use_mkldnn") and cls.use_mkldnn == True return hasattr(cls, "use_mkldnn") and cls.use_mkldnn
def is_rocm_op_test(): def is_rocm_op_test():
return core.is_compiled_with_rocm() return core.is_compiled_with_rocm()
def is_npu_op_test(): def is_npu_op_test():
return hasattr(cls, "use_npu") and cls.use_npu == True return hasattr(cls, "use_npu") and cls.use_npu
def is_mlu_op_test(): def is_mlu_op_test():
return hasattr(cls, "use_mlu") and cls.use_mlu == True return hasattr(cls, "use_mlu") and cls.use_mlu
def is_custom_device_op_test(): def is_custom_device_op_test():
return ( return hasattr(cls, "use_custom_device") and cls.use_custom_device
hasattr(cls, "use_custom_device")
and cls.use_custom_device == True
)
if not hasattr(cls, "op_type"): if not hasattr(cls, "op_type"):
raise AssertionError( raise AssertionError(
...@@ -465,17 +462,17 @@ class OpTest(unittest.TestCase): ...@@ -465,17 +462,17 @@ class OpTest(unittest.TestCase):
) )
def is_mkldnn_op(self): def is_mkldnn_op(self):
return (hasattr(self, "use_mkldnn") and self.use_mkldnn == True) or ( return (hasattr(self, "use_mkldnn") and self.use_mkldnn) or (
hasattr(self, "attrs") hasattr(self, "attrs")
and "use_mkldnn" in self.attrs and "use_mkldnn" in self.attrs
and self.attrs["use_mkldnn"] == True and self.attrs["use_mkldnn"]
) )
def is_xpu_op(self): def is_xpu_op(self):
return (hasattr(self, "use_xpu") and self.use_xpu == True) or ( return (hasattr(self, "use_xpu") and self.use_xpu) or (
hasattr(self, "attrs") hasattr(self, "attrs")
and "use_xpu" in self.attrs and "use_xpu" in self.attrs
and self.attrs["use_xpu"] == True and self.attrs["use_xpu"]
) )
# set the self.output_dtype . # set the self.output_dtype .
...@@ -1542,7 +1539,7 @@ class OpTest(unittest.TestCase): ...@@ -1542,7 +1539,7 @@ class OpTest(unittest.TestCase):
): ):
# disable legacy dygraph check when check_eager is True # disable legacy dygraph check when check_eager is True
if check_eager == True: if check_eager:
check_dygraph = False check_dygraph = False
def find_imperative_actual(target_name, dygraph_outs, place): def find_imperative_actual(target_name, dygraph_outs, place):
...@@ -1912,7 +1909,7 @@ class OpTest(unittest.TestCase): ...@@ -1912,7 +1909,7 @@ class OpTest(unittest.TestCase):
) )
if check_eager: if check_eager:
assert check_dygraph == False assert not check_dygraph
return outs, eager_dygraph_outs, fetch_list return outs, eager_dygraph_outs, fetch_list
elif check_dygraph: elif check_dygraph:
return outs, dygraph_outs, fetch_list return outs, dygraph_outs, fetch_list
...@@ -2002,7 +1999,7 @@ class OpTest(unittest.TestCase): ...@@ -2002,7 +1999,7 @@ class OpTest(unittest.TestCase):
): ):
# disable legacy dygraph check when check_eager is True # disable legacy dygraph check when check_eager is True
if check_eager == True: if check_eager:
check_dygraph = False check_dygraph = False
self.__class__.op_type = self.op_type self.__class__.op_type = self.op_type
...@@ -2024,7 +2021,7 @@ class OpTest(unittest.TestCase): ...@@ -2024,7 +2021,7 @@ class OpTest(unittest.TestCase):
check_eager=check_eager, check_eager=check_eager,
) )
if check_eager: if check_eager:
assert check_dygraph == False assert not check_dygraph
outs, eager_dygraph_outs, fetch_list = res outs, eager_dygraph_outs, fetch_list = res
elif check_dygraph: elif check_dygraph:
outs, dygraph_outs, fetch_list = res outs, dygraph_outs, fetch_list = res
...@@ -2143,7 +2140,7 @@ class OpTest(unittest.TestCase): ...@@ -2143,7 +2140,7 @@ class OpTest(unittest.TestCase):
): ):
# disable legacy dygraph check when check_eager is True # disable legacy dygraph check when check_eager is True
if check_eager == True: if check_eager:
check_dygraph = False check_dygraph = False
self._check_grad_helper() self._check_grad_helper()
...@@ -2180,7 +2177,7 @@ class OpTest(unittest.TestCase): ...@@ -2180,7 +2177,7 @@ class OpTest(unittest.TestCase):
): ):
# disable legacy dygraph check when check_eager is True # disable legacy dygraph check when check_eager is True
if check_eager == True: if check_eager:
check_dygraph = False check_dygraph = False
self.scope = core.Scope() self.scope = core.Scope()
...@@ -2207,7 +2204,7 @@ class OpTest(unittest.TestCase): ...@@ -2207,7 +2204,7 @@ class OpTest(unittest.TestCase):
# oneDNN numeric gradient should use CPU kernel # oneDNN numeric gradient should use CPU kernel
use_onednn = False use_onednn = False
if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"] == True: if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"]:
op_attrs["use_mkldnn"] = False op_attrs["use_mkldnn"] = False
use_onednn = True use_onednn = True
......
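Note: the `is_*_op_test` helpers above read class-level boolean flags, and dropping `== True` keeps the expression's value because `hasattr(...) and flag` still short-circuits to `False` when the attribute is absent and otherwise returns the flag itself. A minimal standalone sketch, not part of the patch (the `Cls` holder below is hypothetical):

    # Hypothetical flag holder mirroring the use_xpu / use_mkldnn class attributes.
    class Cls:
        use_xpu = True

    def is_xpu_op_test(cls):
        # Missing attribute short-circuits to False; otherwise the bool flag is returned.
        return hasattr(cls, "use_xpu") and cls.use_xpu

    assert is_xpu_op_test(Cls) is True
    assert is_xpu_op_test(object) is False   # no use_xpu attribute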
...@@ -51,7 +51,7 @@ class XPUOpTest(OpTest): ...@@ -51,7 +51,7 @@ class XPUOpTest(OpTest):
if cls.dtype == np.float16: if cls.dtype == np.float16:
place = paddle.XPUPlace(0) place = paddle.XPUPlace(0)
if core.is_float16_supported(place) == False: if not core.is_float16_supported(place):
return return
if cls.dtype == np.float64: if cls.dtype == np.float64:
...@@ -98,7 +98,7 @@ class XPUOpTest(OpTest): ...@@ -98,7 +98,7 @@ class XPUOpTest(OpTest):
return return
if self.dtype == np.float16: if self.dtype == np.float16:
if core.is_float16_supported(place) == False: if not core.is_float16_supported(place):
return return
if self.dtype == np.float16: if self.dtype == np.float16:
...@@ -172,7 +172,7 @@ class XPUOpTest(OpTest): ...@@ -172,7 +172,7 @@ class XPUOpTest(OpTest):
return return
if self.dtype == np.float16: if self.dtype == np.float16:
if core.is_float16_supported(place) == False: if not core.is_float16_supported(place):
return return
if self.dtype == np.float16: if self.dtype == np.float16:
...@@ -254,7 +254,7 @@ class XPUOpTest(OpTest): ...@@ -254,7 +254,7 @@ class XPUOpTest(OpTest):
# oneDNN numeric gradient should use CPU kernel # oneDNN numeric gradient should use CPU kernel
use_onednn = False use_onednn = False
if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"] == True: if "use_mkldnn" in op_attrs and op_attrs["use_mkldnn"]:
op_attrs["use_mkldnn"] = False op_attrs["use_mkldnn"] = False
use_onednn = True use_onednn = True
......
...@@ -167,7 +167,7 @@ def get_user_defined_strategy(config): ...@@ -167,7 +167,7 @@ def get_user_defined_strategy(config):
strategy.is_fl_ps_mode = ( strategy.is_fl_ps_mode = (
True if config.get("runner.is_fl_ps_mode") == 1 else False True if config.get("runner.is_fl_ps_mode") == 1 else False
) )
if strategy.is_fl_ps_mode == True: if strategy.is_fl_ps_mode:
strategy.pipeline = False strategy.pipeline = False
micro_num = 1 micro_num = 1
strategy.pipeline_configs = { strategy.pipeline_configs = {
......
...@@ -1126,11 +1126,11 @@ class TestMultiTensorAdam(unittest.TestCase): ...@@ -1126,11 +1126,11 @@ class TestMultiTensorAdam(unittest.TestCase):
) )
for idx in range(2): for idx in range(2):
if place == 'gpu' and use_amp == True: if place == 'gpu' and use_amp:
model = paddle.amp.decorate(models=model, level='O2') model = paddle.amp.decorate(models=model, level='O2')
scaler = paddle.amp.GradScaler(init_loss_scaling=1024) scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
if place == 'gpu' and use_amp == True: if place == 'gpu' and use_amp:
with paddle.amp.auto_cast(level='O2'): with paddle.amp.auto_cast(level='O2'):
output = model(input) output = model(input)
loss = paddle.mean(output) loss = paddle.mean(output)
......
...@@ -302,11 +302,11 @@ class TestAdamWOpMultiPrecison(unittest.TestCase): ...@@ -302,11 +302,11 @@ class TestAdamWOpMultiPrecison(unittest.TestCase):
) )
for idx in range(2): for idx in range(2):
if place == 'gpu' and use_amp == True: if place == 'gpu' and use_amp:
model = paddle.amp.decorate(models=model, level='O2') model = paddle.amp.decorate(models=model, level='O2')
scaler = paddle.amp.GradScaler(init_loss_scaling=1024) scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
if place == 'gpu' and use_amp == True: if place == 'gpu' and use_amp:
with paddle.amp.auto_cast(level='O2'): with paddle.amp.auto_cast(level='O2'):
output = model(input) output = model(input)
loss = paddle.mean(output) loss = paddle.mean(output)
......
...@@ -177,8 +177,8 @@ def train(use_cuda, thread_num, cpu_num): ...@@ -177,8 +177,8 @@ def train(use_cuda, thread_num, cpu_num):
fetch_list=[array, acc, prediction, avg_loss.name] fetch_list=[array, acc, prediction, avg_loss.name]
) )
assert numpy.allclose(array_v[0], prediction_v) == True assert numpy.allclose(array_v[0], prediction_v)
assert numpy.allclose(array_v[1], acc_v) == True assert numpy.allclose(array_v[1], acc_v)
loss_val = numpy.mean(loss_val) loss_val = numpy.mean(loss_val)
if step % 10 == 0: if step % 10 == 0:
......
...@@ -313,7 +313,7 @@ class TestBatchNormOpInference(unittest.TestCase): ...@@ -313,7 +313,7 @@ class TestBatchNormOpInference(unittest.TestCase):
# dims will be in NCHW order as it is MKL-DNN way # dims will be in NCHW order as it is MKL-DNN way
# of memory descripting. So we need to convert NCHW # of memory descripting. So we need to convert NCHW
# dims into NHWC. # dims into NHWC.
if data_layout == "NHWC" and self.use_mkldnn == True: if data_layout == "NHWC" and self.use_mkldnn:
# Create executor to have MKL-DNN cache # Create executor to have MKL-DNN cache
# cleared after NHWC unit test # cleared after NHWC unit test
place = core.CPUPlace() place = core.CPUPlace()
......
...@@ -391,7 +391,7 @@ class TestBatchNormUseGlobalStats(unittest.TestCase): ...@@ -391,7 +391,7 @@ class TestBatchNormUseGlobalStats(unittest.TestCase):
) )
net2.weight = net1.weight net2.weight = net1.weight
net2.bias = net1.bias net2.bias = net1.bias
if self.trainable_statistics == True: if self.trainable_statistics:
net1.training = False net1.training = False
net2.training = False net2.training = False
y1 = net1(x) y1 = net1(x)
......
...@@ -20,8 +20,8 @@ import paddle.fluid.core as core ...@@ -20,8 +20,8 @@ import paddle.fluid.core as core
def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0): def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0):
pb_w = p_box[:, 2] - p_box[:, 0] + (norm == False) pb_w = p_box[:, 2] - p_box[:, 0] + (not norm)
pb_h = p_box[:, 3] - p_box[:, 1] + (norm == False) pb_h = p_box[:, 3] - p_box[:, 1] + (not norm)
pb_x = pb_w * 0.5 + p_box[:, 0] pb_x = pb_w * 0.5 + p_box[:, 0]
pb_y = pb_h * 0.5 + p_box[:, 1] pb_y = pb_h * 0.5 + p_box[:, 1]
shape = (1, p_box.shape[0]) if axis == 0 else (p_box.shape[0], 1) shape = (1, p_box.shape[0]) if axis == 0 else (p_box.shape[0], 1)
...@@ -55,8 +55,8 @@ def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0): ...@@ -55,8 +55,8 @@ def box_decoder(t_box, p_box, pb_v, output_box, norm, axis=0):
def box_encoder(t_box, p_box, pb_v, output_box, norm): def box_encoder(t_box, p_box, pb_v, output_box, norm):
pb_w = p_box[:, 2] - p_box[:, 0] + (norm == False) pb_w = p_box[:, 2] - p_box[:, 0] + (not norm)
pb_h = p_box[:, 3] - p_box[:, 1] + (norm == False) pb_h = p_box[:, 3] - p_box[:, 1] + (not norm)
pb_x = pb_w * 0.5 + p_box[:, 0] pb_x = pb_w * 0.5 + p_box[:, 0]
pb_y = pb_h * 0.5 + p_box[:, 1] pb_y = pb_h * 0.5 + p_box[:, 1]
shape = (1, p_box.shape[0]) shape = (1, p_box.shape[0])
......
...@@ -58,7 +58,7 @@ class TestCenterLossOp(OpTest): ...@@ -58,7 +58,7 @@ class TestCenterLossOp(OpTest):
'CenterUpdateRate': rate, 'CenterUpdateRate': rate,
} }
if self.need_update == True: if self.need_update:
self.outputs = { self.outputs = {
'SampleCenterDiff': output, 'SampleCenterDiff': output,
'Loss': loss, 'Loss': loss,
......
...@@ -115,7 +115,7 @@ class TestEqualReduceAPI(unittest.TestCase): ...@@ -115,7 +115,7 @@ class TestEqualReduceAPI(unittest.TestCase):
x = paddle.ones(shape=[10, 10], dtype="int32") x = paddle.ones(shape=[10, 10], dtype="int32")
y = paddle.ones(shape=[10, 10], dtype="int32") y = paddle.ones(shape=[10, 10], dtype="int32")
out = paddle.equal_all(x, y) out = paddle.equal_all(x, y)
assert out.numpy()[0] == True assert out.numpy()[0] is np.True_
paddle.enable_static() paddle.enable_static()
......
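Note: the `equal_all` assertion now uses an identity check against `np.True_`. NumPy keeps exactly two cached boolean scalars, so for a `np.bool_` result the identity check and the old `== True` comparison agree. A minimal standalone sketch of the behaviour this relies on, not part of the patch:

    import numpy as np

    flag = np.array([True])[0]       # a np.bool_ scalar, like out.numpy()[0]
    # np.bool_ has only two instances, np.True_ and np.False_, so identity,
    # equality and truthiness all agree for scalar boolean results.
    assert flag is np.True_
    assert flag == np.True_
    assert bool(flag)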
...@@ -477,13 +477,12 @@ class TestConv2DOp(OpTest): ...@@ -477,13 +477,12 @@ class TestConv2DOp(OpTest):
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_output_with_place( self.check_output_with_place(
place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
) )
def test_check_grad(self): def test_check_grad(self):
if self.dtype == np.float16 or ( if self.dtype == np.float16 or (
hasattr(self, "no_need_check_grad") hasattr(self, "no_need_check_grad") and self.no_need_check_grad
and self.no_need_check_grad == True
): ):
return return
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
...@@ -493,13 +492,12 @@ class TestConv2DOp(OpTest): ...@@ -493,13 +492,12 @@ class TestConv2DOp(OpTest):
{'Input', 'Filter'}, {'Input', 'Filter'},
'Output', 'Output',
max_relative_error=0.02, max_relative_error=0.02,
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def test_check_grad_no_filter(self): def test_check_grad_no_filter(self):
if self.dtype == np.float16 or ( if self.dtype == np.float16 or (
hasattr(self, "no_need_check_grad") hasattr(self, "no_need_check_grad") and self.no_need_check_grad
and self.no_need_check_grad == True
): ):
return return
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
...@@ -510,13 +508,12 @@ class TestConv2DOp(OpTest): ...@@ -510,13 +508,12 @@ class TestConv2DOp(OpTest):
'Output', 'Output',
max_relative_error=0.02, max_relative_error=0.02,
no_grad_set=set(['Filter']), no_grad_set=set(['Filter']),
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def test_check_grad_no_input(self): def test_check_grad_no_input(self):
if self.dtype == np.float16 or ( if self.dtype == np.float16 or (
hasattr(self, "no_need_check_grad") hasattr(self, "no_need_check_grad") and self.no_need_check_grad
and self.no_need_check_grad == True
): ):
return return
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
...@@ -526,7 +523,7 @@ class TestConv2DOp(OpTest): ...@@ -526,7 +523,7 @@ class TestConv2DOp(OpTest):
['Filter'], ['Filter'],
'Output', 'Output',
no_grad_set=set(['Input']), no_grad_set=set(['Input']),
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def init_test_case(self): def init_test_case(self):
...@@ -804,7 +801,7 @@ class TestConv2DOp_v2(OpTest): ...@@ -804,7 +801,7 @@ class TestConv2DOp_v2(OpTest):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cuda() else core.CPUPlace()
self.check_output_with_place( self.check_output_with_place(
place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
) )
def test_check_grad(self): def test_check_grad(self):
...@@ -817,7 +814,7 @@ class TestConv2DOp_v2(OpTest): ...@@ -817,7 +814,7 @@ class TestConv2DOp_v2(OpTest):
{'Input', 'Filter'}, {'Input', 'Filter'},
'Output', 'Output',
max_relative_error=0.02, max_relative_error=0.02,
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def test_check_grad_no_filter(self): def test_check_grad_no_filter(self):
...@@ -831,7 +828,7 @@ class TestConv2DOp_v2(OpTest): ...@@ -831,7 +828,7 @@ class TestConv2DOp_v2(OpTest):
'Output', 'Output',
max_relative_error=0.02, max_relative_error=0.02,
no_grad_set=set(['Filter']), no_grad_set=set(['Filter']),
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def test_check_grad_no_input(self): def test_check_grad_no_input(self):
...@@ -844,7 +841,7 @@ class TestConv2DOp_v2(OpTest): ...@@ -844,7 +841,7 @@ class TestConv2DOp_v2(OpTest):
['Filter'], ['Filter'],
'Output', 'Output',
no_grad_set=set(['Input']), no_grad_set=set(['Input']),
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def init_test_case(self): def init_test_case(self):
......
...@@ -183,10 +183,10 @@ class TestConv2DTransposeOp(OpTest): ...@@ -183,10 +183,10 @@ class TestConv2DTransposeOp(OpTest):
if self.use_cudnn: if self.use_cudnn:
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place( self.check_output_with_place(
place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
) )
else: else:
self.check_output(check_dygraph=(self.use_mkldnn == False)) self.check_output(check_dygraph=(not self.use_mkldnn))
def test_check_grad_no_input(self): def test_check_grad_no_input(self):
if self.need_check_grad: if self.need_check_grad:
...@@ -724,10 +724,10 @@ class TestCUDNN_FP16(TestConv2DTransposeOp): ...@@ -724,10 +724,10 @@ class TestCUDNN_FP16(TestConv2DTransposeOp):
if self.use_cudnn: if self.use_cudnn:
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place( self.check_output_with_place(
place, atol=0.02, check_dygraph=(self.use_mkldnn == False) place, atol=0.02, check_dygraph=(not self.use_mkldnn)
) )
else: else:
self.check_output(check_dygraph=(self.use_mkldnn == False)) self.check_output(check_dygraph=(not self.use_mkldnn))
@unittest.skipIf( @unittest.skipIf(
......
...@@ -327,7 +327,7 @@ class TestConv3DOp(OpTest): ...@@ -327,7 +327,7 @@ class TestConv3DOp(OpTest):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace() place = core.CUDAPlace(0) if self.has_cudnn() else core.CPUPlace()
self.check_output_with_place( self.check_output_with_place(
place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
) )
def test_check_grad(self): def test_check_grad(self):
...@@ -340,7 +340,7 @@ class TestConv3DOp(OpTest): ...@@ -340,7 +340,7 @@ class TestConv3DOp(OpTest):
{'Input', 'Filter'}, {'Input', 'Filter'},
'Output', 'Output',
max_relative_error=0.03, max_relative_error=0.03,
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def test_check_grad_no_filter(self): def test_check_grad_no_filter(self):
...@@ -354,7 +354,7 @@ class TestConv3DOp(OpTest): ...@@ -354,7 +354,7 @@ class TestConv3DOp(OpTest):
'Output', 'Output',
max_relative_error=0.03, max_relative_error=0.03,
no_grad_set=set(['Filter']), no_grad_set=set(['Filter']),
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def test_check_grad_no_input(self): def test_check_grad_no_input(self):
...@@ -368,7 +368,7 @@ class TestConv3DOp(OpTest): ...@@ -368,7 +368,7 @@ class TestConv3DOp(OpTest):
'Output', 'Output',
max_relative_error=0.03, max_relative_error=0.03,
no_grad_set=set(['Input']), no_grad_set=set(['Input']),
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def init_test_case(self): def init_test_case(self):
......
...@@ -34,7 +34,7 @@ class TestDataSetDownload(unittest.TestCase): ...@@ -34,7 +34,7 @@ class TestDataSetDownload(unittest.TestCase):
except Exception as e: except Exception as e:
catch_exp = True catch_exp = True
self.assertTrue(catch_exp == False) self.assertTrue(not catch_exp)
file_path = DATA_HOME + "/flowers/imagelabels.mat" file_path = DATA_HOME + "/flowers/imagelabels.mat"
......
...@@ -1330,8 +1330,8 @@ class TestDistBase(unittest.TestCase): ...@@ -1330,8 +1330,8 @@ class TestDistBase(unittest.TestCase):
tr_cmd += " --diff_batch" tr_cmd += " --diff_batch"
self.__use_cuda = False self.__use_cuda = False
self.__use_xpu = False self.__use_xpu = False
assert self.__use_cuda == False, "gloo not support use cuda" assert not self.__use_cuda, "gloo not support use cuda"
assert self.__use_xpu == False, "gloo not support use xpu" assert not self.__use_xpu, "gloo not support use xpu"
tr_cmd += " --use_cpu" tr_cmd += " --use_cpu"
env.update( env.update(
{ {
...@@ -1345,7 +1345,7 @@ class TestDistBase(unittest.TestCase): ...@@ -1345,7 +1345,7 @@ class TestDistBase(unittest.TestCase):
} }
) )
assert self._use_dgc == False, "gloo not support use dgc" assert not self._use_dgc, "gloo not support use dgc"
if self._accumulate_gradient: if self._accumulate_gradient:
tr_cmd += " --accumulate_gradient" tr_cmd += " --accumulate_gradient"
...@@ -1353,7 +1353,7 @@ class TestDistBase(unittest.TestCase): ...@@ -1353,7 +1353,7 @@ class TestDistBase(unittest.TestCase):
if self._find_unused_parameters: if self._find_unused_parameters:
tr_cmd += " --find_unused_parameters" tr_cmd += " --find_unused_parameters"
assert self._pipeline_mode == False, "gloo not support use pipeline" assert not self._pipeline_mode, "gloo not support use pipeline"
if self._enable_backward_deps: # build strategy, save it if self._enable_backward_deps: # build strategy, save it
tr_cmd += " --enable_backward_deps" tr_cmd += " --enable_backward_deps"
...@@ -1361,8 +1361,8 @@ class TestDistBase(unittest.TestCase): ...@@ -1361,8 +1361,8 @@ class TestDistBase(unittest.TestCase):
if self._fuse_all_reduce is not None: if self._fuse_all_reduce is not None:
tr_cmd += " --fuse_all_reduce {}".format(self._fuse_all_reduce) tr_cmd += " --fuse_all_reduce {}".format(self._fuse_all_reduce)
assert self._use_fleet_api == False, "gloo not support use fleet api" assert not self._use_fleet_api, "gloo not support use fleet api"
assert self._use_fleet_api_20 == False, "gloo not support use fleet api" assert not self._use_fleet_api_20, "gloo not support use fleet api"
return tr_cmd, env return tr_cmd, env
def _get_nccl2_trainer_cmd( def _get_nccl2_trainer_cmd(
......
...@@ -46,12 +46,12 @@ class TestElementwiseAddOp(OpTest): ...@@ -46,12 +46,12 @@ class TestElementwiseAddOp(OpTest):
self.outputs = {'Out': self.out} self.outputs = {'Out': self.out}
def check_eager(self): def check_eager(self):
return self.use_mkldnn == False and self.axis == -1 return not self.use_mkldnn and self.axis == -1
def test_check_output(self): def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_output( self.check_output(
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
check_eager=self.check_eager(), check_eager=self.check_eager(),
) )
...@@ -62,7 +62,7 @@ class TestElementwiseAddOp(OpTest): ...@@ -62,7 +62,7 @@ class TestElementwiseAddOp(OpTest):
self.check_grad( self.check_grad(
['X', 'Y'], ['X', 'Y'],
'Out', 'Out',
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
check_eager=self.check_eager(), check_eager=self.check_eager(),
) )
...@@ -74,7 +74,7 @@ class TestElementwiseAddOp(OpTest): ...@@ -74,7 +74,7 @@ class TestElementwiseAddOp(OpTest):
['Y'], ['Y'],
'Out', 'Out',
no_grad_set=set("X"), no_grad_set=set("X"),
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
check_eager=self.check_eager(), check_eager=self.check_eager(),
) )
...@@ -86,7 +86,7 @@ class TestElementwiseAddOp(OpTest): ...@@ -86,7 +86,7 @@ class TestElementwiseAddOp(OpTest):
['X'], ['X'],
'Out', 'Out',
no_grad_set=set('Y'), no_grad_set=set('Y'),
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
check_eager=self.check_eager(), check_eager=self.check_eager(),
) )
...@@ -115,7 +115,7 @@ class TestFP16ElementwiseAddOp(TestElementwiseAddOp): ...@@ -115,7 +115,7 @@ class TestFP16ElementwiseAddOp(TestElementwiseAddOp):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
if core.is_float16_supported(place): if core.is_float16_supported(place):
self.check_output_with_place( self.check_output_with_place(
place, atol=1e-3, check_dygraph=(self.use_mkldnn == False) place, atol=1e-3, check_dygraph=(not self.use_mkldnn)
) )
......
...@@ -49,13 +49,11 @@ class ElementwiseMulOp(OpTest): ...@@ -49,13 +49,11 @@ class ElementwiseMulOp(OpTest):
def test_check_output(self): def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_output(check_dygraph=(self.use_mkldnn == False)) self.check_output(check_dygraph=(not self.use_mkldnn))
def test_check_grad_normal(self): def test_check_grad_normal(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
self.check_grad( self.check_grad(['X', 'Y'], 'Out', check_dygraph=(not self.use_mkldnn))
['X', 'Y'], 'Out', check_dygraph=(self.use_mkldnn == False)
)
def test_check_grad_ingore_x(self): def test_check_grad_ingore_x(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
...@@ -63,7 +61,7 @@ class ElementwiseMulOp(OpTest): ...@@ -63,7 +61,7 @@ class ElementwiseMulOp(OpTest):
['Y'], ['Y'],
'Out', 'Out',
no_grad_set=set("X"), no_grad_set=set("X"),
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def test_check_grad_ingore_y(self): def test_check_grad_ingore_y(self):
...@@ -72,7 +70,7 @@ class ElementwiseMulOp(OpTest): ...@@ -72,7 +70,7 @@ class ElementwiseMulOp(OpTest):
['X'], ['X'],
'Out', 'Out',
no_grad_set=set('Y'), no_grad_set=set('Y'),
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def init_input_output(self): def init_input_output(self):
......
...@@ -47,8 +47,8 @@ class TestEmptyLikeAPICommon(unittest.TestCase): ...@@ -47,8 +47,8 @@ class TestEmptyLikeAPICommon(unittest.TestCase):
) )
elif data_type in ['bool']: elif data_type in ['bool']:
total_num = out.size total_num = out.size
true_num = np.sum(out == True) true_num = np.sum(out)
false_num = np.sum(out == False) false_num = np.sum(~out)
self.assertTrue( self.assertTrue(
total_num == true_num + false_num, total_num == true_num + false_num,
'The value should always be True or False.', 'The value should always be True or False.',
......
...@@ -43,8 +43,8 @@ class TestEmptyOp(OpTest): ...@@ -43,8 +43,8 @@ class TestEmptyOp(OpTest):
) )
elif data_type in ['bool']: elif data_type in ['bool']:
total_num = outs[0].size total_num = outs[0].size
true_num = np.sum(outs[0] == True) true_num = np.sum(outs[0])
false_num = np.sum(outs[0] == False) false_num = np.sum(~outs[0])
self.assertTrue( self.assertTrue(
total_num == true_num + false_num, total_num == true_num + false_num,
'The value should always be True or False.', 'The value should always be True or False.',
...@@ -132,8 +132,8 @@ class TestEmptyOp_ShapeTensor(OpTest): ...@@ -132,8 +132,8 @@ class TestEmptyOp_ShapeTensor(OpTest):
) )
elif data_type in ['bool']: elif data_type in ['bool']:
total_num = outs[0].size total_num = outs[0].size
true_num = np.sum(outs[0] == True) true_num = np.sum(outs[0])
false_num = np.sum(outs[0] == False) false_num = np.sum(~outs[0])
self.assertTrue( self.assertTrue(
total_num == true_num + false_num, total_num == true_num + false_num,
'The value should always be True or False.', 'The value should always be True or False.',
...@@ -182,8 +182,8 @@ class TestEmptyOp_ShapeTensorList(OpTest): ...@@ -182,8 +182,8 @@ class TestEmptyOp_ShapeTensorList(OpTest):
) )
elif data_type in ['bool']: elif data_type in ['bool']:
total_num = outs[0].size total_num = outs[0].size
true_num = np.sum(outs[0] == True) true_num = np.sum(outs[0])
false_num = np.sum(outs[0] == False) false_num = np.sum(~outs[0])
self.assertTrue( self.assertTrue(
total_num == true_num + false_num, total_num == true_num + false_num,
'The value should always be True or False.', 'The value should always be True or False.',
......
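Note: these `empty`/`empty_like` checks count the True and False entries of a boolean output. For a bool-dtype array, summing counts the True elements directly and `~out` is its elementwise negation, so the rewritten counts match the old comparisons. A small standalone sketch, assuming a bool array as the tests construct:

    import numpy as np

    out = np.array([True, False, True, True])     # bool dtype

    assert np.sum(out) == 3            # counts True entries, same as np.sum(out == True)
    assert np.sum(~out) == 1           # counts False entries, same as np.sum(out == False)
    # ~ is only a logical negation for boolean arrays; on integer arrays it is
    # bitwise NOT, so the rewrite relies on the bool dtype.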
...@@ -29,16 +29,16 @@ class TestImperativeLayerTrainable(unittest.TestCase): ...@@ -29,16 +29,16 @@ class TestImperativeLayerTrainable(unittest.TestCase):
linear = dygraph.Linear(10, 10) linear = dygraph.Linear(10, 10)
y = linear(label) y = linear(label)
self.assertTrue(y.stop_gradient == False) self.assertFalse(y.stop_gradient)
linear.weight.trainable = False linear.weight.trainable = False
linear.bias.trainable = False linear.bias.trainable = False
self.assertTrue(linear.weight.trainable == False) self.assertFalse(linear.weight.trainable)
self.assertTrue(linear.weight.stop_gradient == True) self.assertTrue(linear.weight.stop_gradient)
y = linear(label) y = linear(label)
self.assertTrue(y.stop_gradient == True) self.assertTrue(y.stop_gradient)
with self.assertRaises(ValueError): with self.assertRaises(ValueError):
linear.weight.trainable = "1" linear.weight.trainable = "1"
......
...@@ -28,13 +28,13 @@ np.random.seed(10) ...@@ -28,13 +28,13 @@ np.random.seed(10)
def mean_wrapper(x, axis=None, keepdim=False, reduce_all=False): def mean_wrapper(x, axis=None, keepdim=False, reduce_all=False):
if reduce_all == True: if reduce_all:
return paddle.mean(x, range(len(x.shape)), keepdim) return paddle.mean(x, range(len(x.shape)), keepdim)
return paddle.mean(x, axis, keepdim) return paddle.mean(x, axis, keepdim)
def reduce_mean_wrapper(x, axis=0, keepdim=False, reduce_all=False): def reduce_mean_wrapper(x, axis=0, keepdim=False, reduce_all=False):
if reduce_all == True: if reduce_all:
return paddle.mean(x, range(len(x.shape)), keepdim) return paddle.mean(x, range(len(x.shape)), keepdim)
return paddle.mean(x, axis, keepdim) return paddle.mean(x, axis, keepdim)
......
...@@ -910,10 +910,10 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase): ...@@ -910,10 +910,10 @@ class TestMultiTensorMomentumDygraph(unittest.TestCase):
multi_precision=use_amp, multi_precision=use_amp,
) )
for idx in range(5): for idx in range(5):
if place == 'gpu' and use_amp == True: if place == 'gpu' and use_amp:
model = paddle.amp.decorate(models=model, level='O2') model = paddle.amp.decorate(models=model, level='O2')
scaler = paddle.amp.GradScaler(init_loss_scaling=1024) scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
if place == 'gpu' and use_amp == True: if place == 'gpu' and use_amp:
with paddle.amp.auto_cast(level='O2'): with paddle.amp.auto_cast(level='O2'):
output = model(input) output = model(input)
loss = paddle.mean(output) loss = paddle.mean(output)
......
...@@ -146,12 +146,8 @@ def iou(box_a, box_b, norm): ...@@ -146,12 +146,8 @@ def iou(box_a, box_b, norm):
xmax_b = max(box_b[0], box_b[2]) xmax_b = max(box_b[0], box_b[2])
ymax_b = max(box_b[1], box_b[3]) ymax_b = max(box_b[1], box_b[3])
area_a = (ymax_a - ymin_a + (norm == False)) * ( area_a = (ymax_a - ymin_a + (not norm)) * (xmax_a - xmin_a + (not norm))
xmax_a - xmin_a + (norm == False)
)
area_b = (ymax_b - ymin_b + (norm == False)) * ( area_b = (ymax_b - ymin_b + (not norm)) * (xmax_b - xmin_b + (not norm))
xmax_b - xmin_b + (norm == False)
)
if area_a <= 0 and area_b <= 0: if area_a <= 0 and area_b <= 0:
return 0.0 return 0.0
...@@ -160,9 +156,7 @@ def iou(box_a, box_b, norm): ...@@ -160,9 +156,7 @@ def iou(box_a, box_b, norm):
xb = min(xmax_a, xmax_b) xb = min(xmax_a, xmax_b)
yb = min(ymax_a, ymax_b) yb = min(ymax_a, ymax_b)
inter_area = max(xb - xa + (norm == False), 0.0) * max( inter_area = max(xb - xa + (not norm), 0.0) * max(yb - ya + (not norm), 0.0)
yb - ya + (norm == False), 0.0
)
iou_ratio = inter_area / (area_a + area_b - inter_area) iou_ratio = inter_area / (area_a + area_b - inter_area)
......
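Note: in `box_decoder` and `iou` the comparison result is used as a 0/1 arithmetic offset. With `norm` being a plain Python bool, as these helpers assume, `(not norm)` and `(norm == False)` contribute the same value in arithmetic. A standalone sketch with a hypothetical `width` helper, not part of the patch:

    def width(xmin, xmax, norm):
        # +1 inclusive offset for pixel-coordinate boxes, no offset when normalized
        return xmax - xmin + (not norm)

    assert width(0.0, 2.0, norm=True) == 2.0
    assert width(0.0, 2.0, norm=False) == 3.0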
...@@ -55,7 +55,7 @@ def multiclass_nms(boxes, scores, category_idxs, iou_threshold, top_k): ...@@ -55,7 +55,7 @@ def multiclass_nms(boxes, scores, category_idxs, iou_threshold, top_k):
mask[cur_category_boxes_idxs[cur_category_keep_boxes_sub_idxs]] = True mask[cur_category_boxes_idxs[cur_category_keep_boxes_sub_idxs]] = True
keep_boxes_idxs = _find(mask == True) keep_boxes_idxs = _find(mask)
topK_sub_indices = np.argsort(-scores[keep_boxes_idxs])[:top_k] topK_sub_indices = np.argsort(-scores[keep_boxes_idxs])[:top_k]
return keep_boxes_idxs[topK_sub_indices] return keep_boxes_idxs[topK_sub_indices]
......
...@@ -784,7 +784,7 @@ class TestRecomputeOptimizer(unittest.TestCase): ...@@ -784,7 +784,7 @@ class TestRecomputeOptimizer(unittest.TestCase):
type="mean", inputs={"X": b2_out}, outputs={"Out": mean_out} type="mean", inputs={"X": b2_out}, outputs={"Out": mean_out}
) )
if return_input == True: if return_input:
return mul_x, mul_out, b1_out, b2_out, mean_out return mul_x, mul_out, b1_out, b2_out, mean_out
return mul_out, b1_out, b2_out, mean_out return mul_out, b1_out, b2_out, mean_out
......
...@@ -58,8 +58,8 @@ class TestParallelExecutorDropExeScope(unittest.TestCase): ...@@ -58,8 +58,8 @@ class TestParallelExecutorDropExeScope(unittest.TestCase):
train_exe.run(feed={"X": x}, fetch_list=[loss.name]) train_exe.run(feed={"X": x}, fetch_list=[loss.name])
test_exe.run(feed={"X": x}, fetch_list=[loss.name]) test_exe.run(feed={"X": x}, fetch_list=[loss.name])
assert train_exe._need_create_local_exe_scopes() == False assert not train_exe._need_create_local_exe_scopes()
assert test_exe._need_create_local_exe_scopes() == False assert not test_exe._need_create_local_exe_scopes()
# drop the local execution scope immediately # drop the local execution scope immediately
train_exe.drop_local_exe_scopes() train_exe.drop_local_exe_scopes()
......
...@@ -181,7 +181,7 @@ def pool2D_forward_naive( ...@@ -181,7 +181,7 @@ def pool2D_forward_naive(
if padding_algorithm == "VALID": if padding_algorithm == "VALID":
paddings = [0, 0, 0, 0] paddings = [0, 0, 0, 0]
if ceil_mode != False: if ceil_mode is not False:
raise ValueError( raise ValueError(
"When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)"
" must be False. " " must be False. "
...@@ -346,10 +346,10 @@ class TestPool2D_Op_Mixin(object): ...@@ -346,10 +346,10 @@ class TestPool2D_Op_Mixin(object):
if self.has_cudnn(): if self.has_cudnn():
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place( self.check_output_with_place(
place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
) )
else: else:
self.check_output(check_dygraph=(self.use_mkldnn == False)) self.check_output(check_dygraph=(not self.use_mkldnn))
def test_check_grad(self): def test_check_grad(self):
if self.dtype == np.float16: if self.dtype == np.float16:
...@@ -362,14 +362,14 @@ class TestPool2D_Op_Mixin(object): ...@@ -362,14 +362,14 @@ class TestPool2D_Op_Mixin(object):
set(['X']), set(['X']),
'Out', 'Out',
max_relative_error=0.07, max_relative_error=0.07,
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
elif self.pool_type != "max": elif self.pool_type != "max":
self.check_grad( self.check_grad(
set(['X']), set(['X']),
'Out', 'Out',
max_relative_error=0.07, max_relative_error=0.07,
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def init_data_format(self): def init_data_format(self):
...@@ -512,7 +512,7 @@ def create_test_cudnn_fp16_class(parent, check_grad=True): ...@@ -512,7 +512,7 @@ def create_test_cudnn_fp16_class(parent, check_grad=True):
self.check_output_with_place( self.check_output_with_place(
place, place,
atol=1e-3, atol=1e-3,
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def test_check_grad(self): def test_check_grad(self):
...@@ -528,7 +528,7 @@ def create_test_cudnn_fp16_class(parent, check_grad=True): ...@@ -528,7 +528,7 @@ def create_test_cudnn_fp16_class(parent, check_grad=True):
set(['X']), set(['X']),
'Out', 'Out',
max_relative_error=0.07, max_relative_error=0.07,
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16Op") cls_name = "{0}_{1}".format(parent.__name__, "CUDNNFp16Op")
...@@ -553,7 +553,7 @@ def create_test_fp16_class(parent, check_grad=True): ...@@ -553,7 +553,7 @@ def create_test_fp16_class(parent, check_grad=True):
self.check_output_with_place( self.check_output_with_place(
place, place,
atol=1e-3, atol=1e-3,
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def test_check_grad(self): def test_check_grad(self):
...@@ -569,7 +569,7 @@ def create_test_fp16_class(parent, check_grad=True): ...@@ -569,7 +569,7 @@ def create_test_fp16_class(parent, check_grad=True):
set(['X']), set(['X']),
'Out', 'Out',
max_relative_error=0.07, max_relative_error=0.07,
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op") cls_name = "{0}_{1}".format(parent.__name__, "Fp16Op")
......
...@@ -68,7 +68,7 @@ def pool3D_forward_naive( ...@@ -68,7 +68,7 @@ def pool3D_forward_naive(
if padding_algorithm == "VALID": if padding_algorithm == "VALID":
paddings = [0, 0, 0, 0, 0, 0] paddings = [0, 0, 0, 0, 0, 0]
if ceil_mode != False: if ceil_mode is not False:
raise ValueError( raise ValueError(
"When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)"
" must be False. " " must be False. "
......
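Note: the VALID-padding guard keeps an explicit comparison, now spelled `ceil_mode is not False`. For real booleans this matches the old `ceil_mode != False`; the two spellings only diverge for falsy non-bools such as `0`, which compares equal to `False` but is a different object. A standalone sketch of the guard's behaviour, assuming boolean callers as in these tests:

    def check_valid_padding(ceil_mode):
        # mirrors the guard in pool2D/pool3D_forward_naive for "VALID" padding
        if ceil_mode is not False:
            raise ValueError(
                'When Attr(pool_padding) is "VALID", Attr(ceil_mode) must be False.'
            )

    check_valid_padding(False)        # accepted
    # check_valid_padding(True)       # would raise ValueError
    zero = 0
    assert (zero != False) is False     # equality treats 0 as equal to False ...
    assert (zero is not False) is True  # ... identity does not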
...@@ -321,12 +321,12 @@ class TestSGDMultiPrecision2_0(unittest.TestCase): ...@@ -321,12 +321,12 @@ class TestSGDMultiPrecision2_0(unittest.TestCase):
optimizer = paddle.optimizer.SGD( optimizer = paddle.optimizer.SGD(
parameters=model.parameters(), multi_precision=mp parameters=model.parameters(), multi_precision=mp
) )
if mp == True: if mp:
model = paddle.amp.decorate(models=model, level='O2') model = paddle.amp.decorate(models=model, level='O2')
scaler = paddle.amp.GradScaler(init_loss_scaling=1024) scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
for idx in range(5): for idx in range(5):
if mp == True: if mp:
with paddle.amp.auto_cast(level='O2'): with paddle.amp.auto_cast(level='O2'):
output = model(input) output = model(input)
loss = paddle.mean(output) loss = paddle.mean(output)
...@@ -429,12 +429,12 @@ class TestSGDMultiPrecision1_0(unittest.TestCase): ...@@ -429,12 +429,12 @@ class TestSGDMultiPrecision1_0(unittest.TestCase):
parameter_list=model.parameters(), parameter_list=model.parameters(),
multi_precision=mp, multi_precision=mp,
) )
if mp == True: if mp:
model = paddle.amp.decorate(models=model, level='O2') model = paddle.amp.decorate(models=model, level='O2')
scaler = paddle.amp.GradScaler(init_loss_scaling=1024) scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
for idx in range(5): for idx in range(5):
if mp == True: if mp:
with paddle.amp.auto_cast(level='O2'): with paddle.amp.auto_cast(level='O2'):
output = model(input) output = model(input)
loss = paddle.mean(output) loss = paddle.mean(output)
......
...@@ -78,10 +78,10 @@ class TestSoftmaxOp(OpTest): ...@@ -78,10 +78,10 @@ class TestSoftmaxOp(OpTest):
if self.use_cudnn: if self.use_cudnn:
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place( self.check_output_with_place(
place, atol=1e-5, check_dygraph=(self.use_mkldnn == False) place, atol=1e-5, check_dygraph=(not self.use_mkldnn)
) )
else: else:
self.check_output(check_dygraph=(self.use_mkldnn == False)) self.check_output(check_dygraph=(not self.use_mkldnn))
def test_check_grad(self): def test_check_grad(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
...@@ -93,14 +93,14 @@ class TestSoftmaxOp(OpTest): ...@@ -93,14 +93,14 @@ class TestSoftmaxOp(OpTest):
["X"], ["X"],
"Out", "Out",
max_relative_error=0.01, max_relative_error=0.01,
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
else: else:
self.check_grad( self.check_grad(
["X"], ["X"],
"Out", "Out",
max_relative_error=0.01, max_relative_error=0.01,
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
...@@ -389,9 +389,7 @@ class TestSoftmaxBF16Op(OpTest): ...@@ -389,9 +389,7 @@ class TestSoftmaxBF16Op(OpTest):
def test_check_output(self): def test_check_output(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
self.check_output_with_place( self.check_output_with_place(place, check_dygraph=(not self.use_mkldnn))
place, check_dygraph=(self.use_mkldnn == False)
)
def test_check_grad(self): def test_check_grad(self):
place = core.CUDAPlace(0) place = core.CUDAPlace(0)
...@@ -400,7 +398,7 @@ class TestSoftmaxBF16Op(OpTest): ...@@ -400,7 +398,7 @@ class TestSoftmaxBF16Op(OpTest):
["X"], ["X"],
"Out", "Out",
numeric_grad_delta=0.05, numeric_grad_delta=0.05,
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
......
...@@ -131,7 +131,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest): ...@@ -131,7 +131,7 @@ class TestSoftmaxWithCrossEntropyOp(OpTest):
softmax, labels, self.soft_label, self.axis, self.ignore_index softmax, labels, self.soft_label, self.axis, self.ignore_index
) )
if self.use_softmax == False: if not self.use_softmax:
self.inputs = {"Logits": softmax, "Label": labels} self.inputs = {"Logits": softmax, "Label": labels}
else: else:
self.inputs = {"Logits": logits, "Label": labels} self.inputs = {"Logits": logits, "Label": labels}
......
...@@ -221,7 +221,7 @@ class TestSparseAttentionOp(OpTest): ...@@ -221,7 +221,7 @@ class TestSparseAttentionOp(OpTest):
self.key_padding_mask = key_padding_mask.astype(self.dtype) self.key_padding_mask = key_padding_mask.astype(self.dtype)
self.attn_mask = attn_mask.astype(self.dtype) self.attn_mask = attn_mask.astype(self.dtype)
if self.use_mask == True: if self.use_mask:
result, result_sdd, result_softmax = ref_batch_sparse_attention( result, result_sdd, result_softmax = ref_batch_sparse_attention(
self.q, self.q,
self.k, self.k,
...@@ -236,7 +236,7 @@ class TestSparseAttentionOp(OpTest): ...@@ -236,7 +236,7 @@ class TestSparseAttentionOp(OpTest):
self.q, self.k, self.v, self.offset, self.columns self.q, self.k, self.v, self.offset, self.columns
) )
if self.use_mask == True: if self.use_mask:
self.inputs = { self.inputs = {
'Q': self.q, 'Q': self.q,
'K': self.k, 'K': self.k,
...@@ -326,7 +326,7 @@ class TestSparseAttentionAPI(unittest.TestCase): ...@@ -326,7 +326,7 @@ class TestSparseAttentionAPI(unittest.TestCase):
) )
key_padding_mask_shape = (self.shape[0], self.shape[2]) key_padding_mask_shape = (self.shape[0], self.shape[2])
attn_mask_shape = (self.shape[2], self.shape[2]) attn_mask_shape = (self.shape[2], self.shape[2])
if self.use_mask == True: if self.use_mask:
key_padding_mask = paddle.static.data( key_padding_mask = paddle.static.data(
name="KeyPaddingMask", name="KeyPaddingMask",
shape=key_padding_mask_shape, shape=key_padding_mask_shape,
...@@ -367,7 +367,7 @@ class TestSparseAttentionAPI(unittest.TestCase): ...@@ -367,7 +367,7 @@ class TestSparseAttentionAPI(unittest.TestCase):
attn_mask_np = attn_mask_np.astype(self.dtype) attn_mask_np = attn_mask_np.astype(self.dtype)
exe = fluid.Executor(self.place) exe = fluid.Executor(self.place)
if self.use_mask == True: if self.use_mask:
fetches_result = exe.run( fetches_result = exe.run(
feed={ feed={
"Q": Q_np, "Q": Q_np,
...@@ -436,7 +436,7 @@ class TestSparseAttentionAPI(unittest.TestCase): ...@@ -436,7 +436,7 @@ class TestSparseAttentionAPI(unittest.TestCase):
paddle_kp_mask = paddle.to_tensor(key_padding_mask, place=self.place) paddle_kp_mask = paddle.to_tensor(key_padding_mask, place=self.place)
paddle_attn_mask = paddle.to_tensor(attn_mask, place=self.place) paddle_attn_mask = paddle.to_tensor(attn_mask, place=self.place)
if self.use_mask == True: if self.use_mask:
paddle_result = F.sparse_attention( paddle_result = F.sparse_attention(
paddle_query, paddle_query,
paddle_key, paddle_key,
......
...@@ -1147,10 +1147,10 @@ class TestVarBase(unittest.TestCase): ...@@ -1147,10 +1147,10 @@ class TestVarBase(unittest.TestCase):
if var2: if var2:
var2_bool = True var2_bool = True
assert var1_bool == False, "if var1 should be false" assert not var1_bool, "if var1 should be false"
assert var2_bool == True, "if var2 should be true" assert var2_bool, "if var2 should be true"
assert bool(var1) == False, "bool(var1) is False" assert not bool(var1), "bool(var1) is False"
assert bool(var2) == True, "bool(var2) is True" assert bool(var2), "bool(var2) is True"
def test_if(self): def test_if(self):
with _test_eager_guard(): with _test_eager_guard():
......
...@@ -68,10 +68,10 @@ class TestWhereAPI(unittest.TestCase): ...@@ -68,10 +68,10 @@ class TestWhereAPI(unittest.TestCase):
self.out = np.where(self.cond, self.x, self.y) self.out = np.where(self.cond, self.x, self.y)
def ref_x_backward(self, dout): def ref_x_backward(self, dout):
return np.where((self.cond == True), dout, 0) return np.where(self.cond, dout, 0)
def ref_y_backward(self, dout): def ref_y_backward(self, dout):
return np.where((self.cond == False), dout, 0) return np.where(~self.cond, dout, 0)
def test_api(self, use_cuda=False): def test_api(self, use_cuda=False):
for x_stop_gradient in [False, True]: for x_stop_gradient in [False, True]:
......
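Note: the reference backward helpers select gradient entries with a boolean mask. For a bool-dtype `cond`, `np.where(cond, dout, 0)` equals the old `np.where(cond == True, dout, 0)`, and `~cond` negates the mask elementwise for the False branch. A small standalone sketch, assuming the bool mask these tests build:

    import numpy as np

    cond = np.array([True, False, True])
    dout = np.array([1.0, 2.0, 3.0])

    # gradient w.r.t. x flows where cond is True, w.r.t. y where it is False
    assert np.array_equal(np.where(cond, dout, 0), [1.0, 0.0, 3.0])
    assert np.array_equal(np.where(~cond, dout, 0), [0.0, 2.0, 0.0])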
...@@ -377,7 +377,7 @@ class XPUTestBatchNormOp(XPUOpTestWrapper): ...@@ -377,7 +377,7 @@ class XPUTestBatchNormOp(XPUOpTestWrapper):
) )
net2.weight = net1.weight net2.weight = net1.weight
net2.bias = net1.bias net2.bias = net1.bias
if self.trainable_statistics == True: if self.trainable_statistics:
net1.training = False net1.training = False
net2.training = False net2.training = False
y1 = net1(x) y1 = net1(x)
......
...@@ -261,10 +261,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper): ...@@ -261,10 +261,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper):
self.check_output_with_place(self.place) self.check_output_with_place(self.place)
def test_check_grad(self): def test_check_grad(self):
if ( if hasattr(self, "no_need_check_grad") and self.no_need_check_grad:
hasattr(self, "no_need_check_grad")
and self.no_need_check_grad == True
):
return return
if core.is_compiled_with_xpu(): if core.is_compiled_with_xpu():
paddle.enable_static() paddle.enable_static()
...@@ -273,10 +270,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper): ...@@ -273,10 +270,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper):
) )
def test_check_grad_no_filter(self): def test_check_grad_no_filter(self):
if ( if hasattr(self, "no_need_check_grad") and self.no_need_check_grad:
hasattr(self, "no_need_check_grad")
and self.no_need_check_grad == True
):
return return
if core.is_compiled_with_xpu(): if core.is_compiled_with_xpu():
paddle.enable_static() paddle.enable_static()
...@@ -285,10 +279,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper): ...@@ -285,10 +279,7 @@ class XPUTestConv2DOp(XPUOpTestWrapper):
) )
def test_check_grad_no_input(self): def test_check_grad_no_input(self):
if ( if hasattr(self, "no_need_check_grad") and self.no_need_check_grad:
hasattr(self, "no_need_check_grad")
and self.no_need_check_grad == True
):
return return
if core.is_compiled_with_xpu(): if core.is_compiled_with_xpu():
paddle.enable_static() paddle.enable_static()
...@@ -433,10 +424,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): ...@@ -433,10 +424,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper):
def test_check_grad(self): def test_check_grad(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
if ( if hasattr(self, "no_need_check_grad") and self.no_need_check_grad:
hasattr(self, "no_need_check_grad")
and self.no_need_check_grad == True
):
return return
if core.is_compiled_with_xpu(): if core.is_compiled_with_xpu():
paddle.enable_static() paddle.enable_static()
...@@ -446,10 +434,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): ...@@ -446,10 +434,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper):
def test_check_grad_no_filter(self): def test_check_grad_no_filter(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
if ( if hasattr(self, "no_need_check_grad") and self.no_need_check_grad:
hasattr(self, "no_need_check_grad")
and self.no_need_check_grad == True
):
return return
if core.is_compiled_with_xpu(): if core.is_compiled_with_xpu():
paddle.enable_static() paddle.enable_static()
...@@ -459,10 +444,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper): ...@@ -459,10 +444,7 @@ class XPUTestConv2DOp_v2(XPUOpTestWrapper):
def test_check_grad_no_input(self): def test_check_grad_no_input(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode # TODO(wangzhongpu): support mkldnn op in dygraph mode
if ( if hasattr(self, "no_need_check_grad") and self.no_need_check_grad:
hasattr(self, "no_need_check_grad")
and self.no_need_check_grad == True
):
return return
if core.is_compiled_with_xpu(): if core.is_compiled_with_xpu():
paddle.enable_static() paddle.enable_static()
......
...@@ -52,7 +52,7 @@ class XPUTestDropoutOp(XPUOpTestWrapper): ...@@ -52,7 +52,7 @@ class XPUTestDropoutOp(XPUOpTestWrapper):
} }
out = self.inputs['X'] * (1.0 - self.dropout_prob) out = self.inputs['X'] * (1.0 - self.dropout_prob)
if self.is_test == False: if not self.is_test:
mask = None mask = None
if self.dropout_prob == 0.0: if self.dropout_prob == 0.0:
mask = np.ones(self.shape).astype(self.dtype) mask = np.ones(self.shape).astype(self.dtype)
...@@ -78,7 +78,7 @@ class XPUTestDropoutOp(XPUOpTestWrapper): ...@@ -78,7 +78,7 @@ class XPUTestDropoutOp(XPUOpTestWrapper):
def test_check_grad_normal(self): def test_check_grad_normal(self):
if ( if (
hasattr(self.__class__, "no_need_check_grad") hasattr(self.__class__, "no_need_check_grad")
and self.__class__.no_need_check_grad == True and self.__class__.no_need_check_grad
): ):
return return
......
...@@ -61,7 +61,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): ...@@ -61,7 +61,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper):
place, place,
['X', 'Y'], ['X', 'Y'],
'Out', 'Out',
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def test_check_grad_ingore_x(self): def test_check_grad_ingore_x(self):
...@@ -72,7 +72,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): ...@@ -72,7 +72,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper):
['Y'], ['Y'],
'Out', 'Out',
no_grad_set=set("X"), no_grad_set=set("X"),
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def test_check_grad_ingore_y(self): def test_check_grad_ingore_y(self):
...@@ -83,7 +83,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper): ...@@ -83,7 +83,7 @@ class XPUTestElementwiseMulOp(XPUOpTestWrapper):
['X'], ['X'],
'Out', 'Out',
no_grad_set=set('Y'), no_grad_set=set('Y'),
check_dygraph=(self.use_mkldnn == False), check_dygraph=(not self.use_mkldnn),
) )
def init_input_output(self): def init_input_output(self):
......
...@@ -72,8 +72,8 @@ class XPUTestEmptyOp(XPUOpTestWrapper): ...@@ -72,8 +72,8 @@ class XPUTestEmptyOp(XPUOpTestWrapper):
) )
elif data_type in ['bool']: elif data_type in ['bool']:
total_num = outs[0].size total_num = outs[0].size
true_num = np.sum(outs[0] == True) true_num = np.sum(outs[0])
false_num = np.sum(outs[0] == False) false_num = np.sum(~outs[0])
self.assertTrue( self.assertTrue(
total_num == true_num + false_num, total_num == true_num + false_num,
'The value should always be True or False.', 'The value should always be True or False.',
......
...@@ -106,14 +106,14 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper): ...@@ -106,14 +106,14 @@ class XPUTestFuseGemmOp(XPUOpTestWrapper):
- 0.5, - 0.5,
} }
if self.trans_x == True: if self.trans_x:
numpy_input_x = ( numpy_input_x = (
self.inputs['X'].reshape((self.x_shape[0], -1)).T self.inputs['X'].reshape((self.x_shape[0], -1)).T
) )
else: else:
numpy_input_x = self.inputs['X'].reshape((-1, self.x_shape[-1])) numpy_input_x = self.inputs['X'].reshape((-1, self.x_shape[-1]))
if self.trans_y == True: if self.trans_y:
numpy_input_y = self.inputs['Y'].T numpy_input_y = self.inputs['Y'].T
else: else:
numpy_input_y = self.inputs['Y'] numpy_input_y = self.inputs['Y']
......
...@@ -106,7 +106,7 @@ def generate_compatible_shapes( ...@@ -106,7 +106,7 @@ def generate_compatible_shapes(
shape_Y = [BATCH_SIZE] + shape_Y shape_Y = [BATCH_SIZE] + shape_Y
if dim_Y == 3 and dim_X == 2: if dim_Y == 3 and dim_X == 2:
if transpose_X == False: if not transpose_X:
shape_X[1] = shape_X[1] * BATCH_SIZE shape_X[1] = shape_X[1] * BATCH_SIZE
else: else:
shape_X[0] = shape_X[0] * BATCH_SIZE shape_X[0] = shape_X[0] * BATCH_SIZE
...@@ -326,7 +326,7 @@ class TestMatmulBaseGenerator(XPUOpTest): ...@@ -326,7 +326,7 @@ class TestMatmulBaseGenerator(XPUOpTest):
def test_check_grad_normal(self): def test_check_grad_normal(self):
if ( if (
hasattr(self.__class__, "no_need_check_grad") hasattr(self.__class__, "no_need_check_grad")
and self.__class__.no_need_check_grad == True and self.__class__.no_need_check_grad
): ):
return return
...@@ -338,7 +338,7 @@ class TestMatmulBaseGenerator(XPUOpTest): ...@@ -338,7 +338,7 @@ class TestMatmulBaseGenerator(XPUOpTest):
def test_check_grad_ignore_x(self): def test_check_grad_ignore_x(self):
if ( if (
hasattr(self.__class__, "no_need_check_grad") hasattr(self.__class__, "no_need_check_grad")
and self.__class__.no_need_check_grad == True and self.__class__.no_need_check_grad
): ):
return return
...@@ -350,7 +350,7 @@ class TestMatmulBaseGenerator(XPUOpTest): ...@@ -350,7 +350,7 @@ class TestMatmulBaseGenerator(XPUOpTest):
def test_check_grad_ignore_y(self): def test_check_grad_ignore_y(self):
if ( if (
hasattr(self.__class__, "no_need_check_grad") hasattr(self.__class__, "no_need_check_grad")
and self.__class__.no_need_check_grad == True and self.__class__.no_need_check_grad
): ):
return return
......
...@@ -101,7 +101,7 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper): ...@@ -101,7 +101,7 @@ class XPUTestMatmulV2Op(XPUOpTestWrapper):
def test_check_grad(self): def test_check_grad(self):
if ( if (
hasattr(self.__class__, "no_need_check_grad") hasattr(self.__class__, "no_need_check_grad")
and self.__class__.no_need_check_grad == True and self.__class__.no_need_check_grad
): ):
return return
place = paddle.XPUPlace(0) place = paddle.XPUPlace(0)
......
...@@ -178,7 +178,7 @@ def pool2D_forward_naive( ...@@ -178,7 +178,7 @@ def pool2D_forward_naive(
if padding_algorithm == "VALID": if padding_algorithm == "VALID":
paddings = [0, 0, 0, 0] paddings = [0, 0, 0, 0]
if ceil_mode != False: if ceil_mode is not False:
raise ValueError( raise ValueError(
"When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)" "When Attr(pool_padding) is \"VALID\", Attr(ceil_mode)"
" must be False. " " must be False. "
......
...@@ -91,10 +91,10 @@ class TestXPUWhereAPI(unittest.TestCase): ...@@ -91,10 +91,10 @@ class TestXPUWhereAPI(unittest.TestCase):
self.out = np.where(self.cond, self.x, self.y) self.out = np.where(self.cond, self.x, self.y)
def ref_x_backward(self, dout): def ref_x_backward(self, dout):
return np.where(self.cond == True, dout, 0) return np.where(self.cond, dout, 0)
def ref_y_backward(self, dout): def ref_y_backward(self, dout):
return np.where(self.cond == False, dout, 0) return np.where(~self.cond, dout, 0)
def test_api(self): def test_api(self):
for x_stop_gradient in [False, True]: for x_stop_gradient in [False, True]:
......
...@@ -450,7 +450,7 @@ def summary_string(model, input_size=None, dtypes=None, input=None): ...@@ -450,7 +450,7 @@ def summary_string(model, input_size=None, dtypes=None, input=None):
total_output += np.sum(np.prod(output_shape, axis=-1)) total_output += np.sum(np.prod(output_shape, axis=-1))
if "trainable" in summary[layer]: if "trainable" in summary[layer]:
if summary[layer]["trainable"] == True: if summary[layer]["trainable"]:
trainable_params += summary[layer]["trainable_params"] trainable_params += summary[layer]["trainable_params"]
summary_str += line_new + "\n" summary_str += line_new + "\n"
......
...@@ -515,7 +515,7 @@ def dropout_orig2prim(op, seed_t, x): ...@@ -515,7 +515,7 @@ def dropout_orig2prim(op, seed_t, x):
), 'Can not lower dropout into prim ops with seedtensor.' ), 'Can not lower dropout into prim ops with seedtensor.'
mask = bernoulli(shape=x.shape, dtype=x.dtype, p=op.attr('dropout_prob')) mask = bernoulli(shape=x.shape, dtype=x.dtype, p=op.attr('dropout_prob'))
if op.attr('dropout_implementation') == 'upscale_in_train': if op.attr('dropout_implementation') == 'upscale_in_train':
if op.attr('is_test') == False: if not op.attr('is_test'):
out = div( out = div(
mul(x, mask), mul(x, mask),
fill_const(1.0 - op.attr('dropout_prob'), x.shape, x.dtype), fill_const(1.0 - op.attr('dropout_prob'), x.shape, x.dtype),
...@@ -524,7 +524,7 @@ def dropout_orig2prim(op, seed_t, x): ...@@ -524,7 +524,7 @@ def dropout_orig2prim(op, seed_t, x):
else: else:
return primops.cast(mask, dtype=paddle.uint8), x return primops.cast(mask, dtype=paddle.uint8), x
elif op.attr('dropout_implementation') == 'downgrade_in_infer': elif op.attr('dropout_implementation') == 'downgrade_in_infer':
if op.attr('is_test') == False: if not op.attr('is_test'):
return primops.cast(mask, dtype=paddle.uint8), mul(x, mask) return primops.cast(mask, dtype=paddle.uint8), mul(x, mask)
else: else:
return primops.cast(mask, dtype=paddle.uint8), mul( return primops.cast(mask, dtype=paddle.uint8), mul(
......
...@@ -2109,7 +2109,7 @@ def class_center_sample(label, num_classes, num_samples, group=None): ...@@ -2109,7 +2109,7 @@ def class_center_sample(label, num_classes, num_samples, group=None):
#Tensor(shape=[7], dtype=int64, place=CUDAPlace(1), stop_gradient=True, #Tensor(shape=[7], dtype=int64, place=CUDAPlace(1), stop_gradient=True,
# [0, 1, 2, 3, 5, 7, 8]) # [0, 1, 2, 3, 5, 7, 8])
""" """
if not (group == False or group is None or hasattr(group, 'is_member')): if not (group is False or group is None or hasattr(group, 'is_member')):
raise ValueError( raise ValueError(
'Expected group is False, None or instance of paddle.distributed.collective.Group \ 'Expected group is False, None or instance of paddle.distributed.collective.Group \
(got group: {})'.format( (got group: {})'.format(
...@@ -2124,7 +2124,7 @@ def class_center_sample(label, num_classes, num_samples, group=None): ...@@ -2124,7 +2124,7 @@ def class_center_sample(label, num_classes, num_samples, group=None):
ring_id = 0 ring_id = 0
rank = 0 rank = 0
nranks = 1 nranks = 1
if group != False: if group is not False:
if core.is_compiled_with_dist(): if core.is_compiled_with_dist():
parallel_env = paddle.distributed.ParallelEnv() parallel_env = paddle.distributed.ParallelEnv()
global_rank = parallel_env.rank global_rank = parallel_env.rank
......
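Note: in `class_center_sample` (and `margin_cross_entropy` below) `group` may legitimately be `False`, `None`, or a `Group` instance, so the guards keep an identity test: only an explicit `False` opts out of the collective path, while `None` still means the default group. Plain truthiness would conflate `False` and `None`, which is why the fix is `is False`/`is not False` rather than dropping the comparison. A standalone sketch with a hypothetical `Group` stand-in, not part of the patch:

    class Group:
        # hypothetical stand-in for paddle.distributed.collective.Group
        def is_member(self):
            return True

    def uses_collective_path(group):
        # mirrors the `if group is not False:` guard above
        return group is not False

    assert uses_collective_path(None) is True      # default group still communicates
    assert uses_collective_path(Group()) is True   # explicit group communicates
    assert uses_collective_path(False) is False    # only an explicit False disables it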
...@@ -2033,7 +2033,7 @@ def margin_cross_entropy( ...@@ -2033,7 +2033,7 @@ def margin_cross_entropy(
""" """
assert reduction in ['mean', 'sum', 'none', None] assert reduction in ['mean', 'sum', 'none', None]
if not (group == False or group is None or hasattr(group, 'is_member')): if not (group is False or group is None or hasattr(group, 'is_member')):
raise ValueError( raise ValueError(
'Expected group is False, None or instance of paddle.distributed.collective.Group \ 'Expected group is False, None or instance of paddle.distributed.collective.Group \
(got group: {})'.format( (got group: {})'.format(
...@@ -2048,7 +2048,7 @@ def margin_cross_entropy( ...@@ -2048,7 +2048,7 @@ def margin_cross_entropy(
ring_id = 0 ring_id = 0
rank = 0 rank = 0
nranks = 1 nranks = 1
if group != False: if group is not False:
ring_id = 0 if group is None else group.id ring_id = 0 if group is None else group.id
if core.is_compiled_with_dist(): if core.is_compiled_with_dist():
parallel_env = paddle.distributed.ParallelEnv() parallel_env = paddle.distributed.ParallelEnv()
...@@ -2537,7 +2537,7 @@ def cross_entropy( ...@@ -2537,7 +2537,7 @@ def cross_entropy(
"should be 'sum', 'mean' or 'none', but received %s, which is not allowed." "should be 'sum', 'mean' or 'none', but received %s, which is not allowed."
% reduction % reduction
) )
if ignore_index > 0 and soft_label == True: if ignore_index > 0 and soft_label:
raise ValueError( raise ValueError(
"When soft_label == True, the value of 'ignore_index' in softmax_cross_entropy" "When soft_label == True, the value of 'ignore_index' in softmax_cross_entropy"
"should be '-100', but received %s, which is not allowed." "should be '-100', but received %s, which is not allowed."
...@@ -2560,12 +2560,12 @@ def cross_entropy( ...@@ -2560,12 +2560,12 @@ def cross_entropy(
label = paddle.unsqueeze(label, axis=axis) label = paddle.unsqueeze(label, axis=axis)
if in_dygraph_mode(): if in_dygraph_mode():
if soft_label == False: if not soft_label:
valid_label = ( valid_label = (
paddle.cast(label != ignore_index, dtype=label.dtype) * label paddle.cast(label != ignore_index, dtype=label.dtype) * label
) )
if core.is_compiled_with_npu() or core.is_compiled_with_mlu(): if core.is_compiled_with_npu() or core.is_compiled_with_mlu():
if soft_label == False: if not soft_label:
_, _, out = _legacy_C_ops.softmax_with_cross_entropy( _, _, out = _legacy_C_ops.softmax_with_cross_entropy(
input, input,
valid_label, valid_label,
...@@ -2603,7 +2603,7 @@ def cross_entropy( ...@@ -2603,7 +2603,7 @@ def cross_entropy(
if weight is not None: if weight is not None:
# trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases. # trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases.
if soft_label == True: if soft_label:
# chajchaj: # chajchaj:
# weight's shape is C, where C is class num. # weight's shape is C, where C is class num.
# for 1d case: label's shape is [N,C], weight_gather's shape is N. # for 1d case: label's shape is [N,C], weight_gather's shape is N.
...@@ -2710,7 +2710,7 @@ def cross_entropy( ...@@ -2710,7 +2710,7 @@ def cross_entropy(
return out return out
elif _in_legacy_dygraph(): elif _in_legacy_dygraph():
if soft_label == False: if not soft_label:
valid_label = ( valid_label = (
paddle.cast(label != ignore_index, dtype=label.dtype) * label paddle.cast(label != ignore_index, dtype=label.dtype) * label
) )
...@@ -2725,7 +2725,7 @@ def cross_entropy( ...@@ -2725,7 +2725,7 @@ def cross_entropy(
"Target {} is out of upper bound.".format(label_max.item()) "Target {} is out of upper bound.".format(label_max.item())
) )
if core.is_compiled_with_npu() or core.is_compiled_with_mlu(): if core.is_compiled_with_npu() or core.is_compiled_with_mlu():
if soft_label == False: if not soft_label:
_, _, out = _legacy_C_ops.softmax_with_cross_entropy( _, _, out = _legacy_C_ops.softmax_with_cross_entropy(
input, input,
valid_label, valid_label,
...@@ -2774,7 +2774,7 @@ def cross_entropy( ...@@ -2774,7 +2774,7 @@ def cross_entropy(
if weight is not None: if weight is not None:
# trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases. # trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases.
if soft_label == True: if soft_label:
# chajchaj: # chajchaj:
# weight's shape is C, where C is class num. # weight's shape is C, where C is class num.
# for 1d case: label's shape is [N,C], weight_gather's shape is N. # for 1d case: label's shape is [N,C], weight_gather's shape is N.
...@@ -2921,7 +2921,7 @@ def cross_entropy( ...@@ -2921,7 +2921,7 @@ def cross_entropy(
weight, 'weight', ['float32', 'float64'], 'softmax_cross_entropy' weight, 'weight', ['float32', 'float64'], 'softmax_cross_entropy'
) )
weight_name = name if reduction == 'none' else None weight_name = name if reduction == 'none' else None
if soft_label == True: if soft_label:
# chajchaj: # chajchaj:
# trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases. # trans weight from class to sample, shape:N or [N,H,W] for 1d and 2d cases.
# weight's shape is C, where C is class num. # weight's shape is C, where C is class num.
......
...@@ -110,7 +110,7 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False): ...@@ -110,7 +110,7 @@ def _update_padding_nd(padding, num_dims, channel_last=False, ceil_mode=False):
) )
) )
if padding == "VALID": if padding == "VALID":
if ceil_mode != False: if ceil_mode is not False:
raise ValueError( raise ValueError(
"When Attr(padding) is \"VALID\", Attr(ceil_mode) must be False. " "When Attr(padding) is \"VALID\", Attr(ceil_mode) must be False. "
"Received ceil_mode: True." "Received ceil_mode: True."
......
...@@ -76,7 +76,7 @@ class PairwiseDistance(Layer): ...@@ -76,7 +76,7 @@ class PairwiseDistance(Layer):
main_str = 'p={p}' main_str = 'p={p}'
if self.epsilon != 1e-6: if self.epsilon != 1e-6:
main_str += ', epsilon={epsilon}' main_str += ', epsilon={epsilon}'
if self.keepdim != False: if self.keepdim is not False:
main_str += ', keepdim={keepdim}' main_str += ', keepdim={keepdim}'
if self.name != None: if self.name != None:
main_str += ', name={name}' main_str += ', name={name}'
......
...@@ -71,7 +71,7 @@ class _InstanceNormBase(Layer): ...@@ -71,7 +71,7 @@ class _InstanceNormBase(Layer):
): ):
super(_InstanceNormBase, self).__init__() super(_InstanceNormBase, self).__init__()
if weight_attr == False or bias_attr == False: if weight_attr is False or bias_attr is False:
assert ( assert (
weight_attr == bias_attr weight_attr == bias_attr
), "weight_attr and bias_attr must be set to False at the same time in InstanceNorm" ), "weight_attr and bias_attr must be set to False at the same time in InstanceNorm"
...@@ -80,7 +80,7 @@ class _InstanceNormBase(Layer): ...@@ -80,7 +80,7 @@ class _InstanceNormBase(Layer):
self._bias_attr = bias_attr self._bias_attr = bias_attr
self._num_features = num_features self._num_features = num_features
if weight_attr != False and bias_attr != False: if weight_attr is not False and bias_attr is not False:
self.scale = self.create_parameter( self.scale = self.create_parameter(
attr=self._weight_attr, attr=self._weight_attr,
shape=[num_features], shape=[num_features],
...@@ -382,7 +382,7 @@ class GroupNorm(Layer): ...@@ -382,7 +382,7 @@ class GroupNorm(Layer):
param_shape = [self._num_channels] param_shape = [self._num_channels]
if weight_attr == False: if weight_attr is False:
self.weight = self.create_parameter( self.weight = self.create_parameter(
attr=None, shape=param_shape, default_initializer=Constant(1.0) attr=None, shape=param_shape, default_initializer=Constant(1.0)
) )
...@@ -398,7 +398,7 @@ class GroupNorm(Layer): ...@@ -398,7 +398,7 @@ class GroupNorm(Layer):
and self._weight_attr.learning_rate == 0.0 and self._weight_attr.learning_rate == 0.0
) )
if bias_attr == False: if bias_attr is False:
self.bias = self.create_parameter( self.bias = self.create_parameter(
attr=None, attr=None,
shape=param_shape, shape=param_shape,
...@@ -619,7 +619,7 @@ class _BatchNormBase(Layer): ...@@ -619,7 +619,7 @@ class _BatchNormBase(Layer):
param_shape = [num_features] param_shape = [num_features]
# create parameter # create parameter
if weight_attr == False: if weight_attr is False:
self.weight = self.create_parameter( self.weight = self.create_parameter(
attr=None, attr=None,
shape=param_shape, shape=param_shape,
...@@ -639,7 +639,7 @@ class _BatchNormBase(Layer): ...@@ -639,7 +639,7 @@ class _BatchNormBase(Layer):
and self._weight_attr.learning_rate == 0.0 and self._weight_attr.learning_rate == 0.0
) )
if bias_attr == False: if bias_attr is False:
self.bias = self.create_parameter( self.bias = self.create_parameter(
attr=None, attr=None,
shape=param_shape, shape=param_shape,
...@@ -1315,7 +1315,10 @@ class SyncBatchNorm(_BatchNormBase): ...@@ -1315,7 +1315,10 @@ class SyncBatchNorm(_BatchNormBase):
layer._name, layer._name,
) )
if layer._weight_attr != False and layer._bias_attr != False: if (
layer._weight_attr is not False
and layer._bias_attr is not False
):
with no_grad(): with no_grad():
layer_output.weight = layer.weight layer_output.weight = layer.weight
layer_output.bias = layer.bias layer_output.bias = layer.bias
......
...@@ -964,9 +964,9 @@ class RNNBase(LayerList): ...@@ -964,9 +964,9 @@ class RNNBase(LayerList):
for direction in range(self.num_directions): for direction in range(self.num_directions):
suffix = '_reverse' if direction == 1 else '' suffix = '_reverse' if direction == 1 else ''
param_names.extend(['weight_ih_l{}{}', 'weight_hh_l{}{}']) param_names.extend(['weight_ih_l{}{}', 'weight_hh_l{}{}'])
if bias_ih_attr != False: if bias_ih_attr is not False:
param_names.append('bias_ih_l{}{}') param_names.append('bias_ih_l{}{}')
if bias_hh_attr != False: if bias_hh_attr is not False:
param_names.append('bias_hh_l{}{}') param_names.append('bias_hh_l{}{}')
param_names = [x.format(layer, suffix) for x in param_names] param_names = [x.format(layer, suffix) for x in param_names]
for name, param in zip(param_names, self.parameters()): for name, param in zip(param_names, self.parameters()):
...@@ -1187,7 +1187,7 @@ class RNNBase(LayerList): ...@@ -1187,7 +1187,7 @@ class RNNBase(LayerList):
main_str = '{input_size}, {hidden_size}' main_str = '{input_size}, {hidden_size}'
if self.num_layers != 1: if self.num_layers != 1:
main_str += ', num_layers={num_layers}' main_str += ', num_layers={num_layers}'
if self.time_major != False: if self.time_major is not False:
main_str += ', time_major={time_major}' main_str += ', time_major={time_major}'
if self.dropout != 0: if self.dropout != 0:
main_str += ', dropout={dropout}' main_str += ', dropout={dropout}'
......
...@@ -298,7 +298,7 @@ class FakeQuantChannelWiseAbsMax(Layer): ...@@ -298,7 +298,7 @@ class FakeQuantChannelWiseAbsMax(Layer):
reduce_type=None, reduce_type=None,
): ):
assert ( assert (
quant_on_weight == True quant_on_weight
), "Channel_wise only can be used on weight quantization." ), "Channel_wise only can be used on weight quantization."
super(FakeQuantChannelWiseAbsMax, self).__init__() super(FakeQuantChannelWiseAbsMax, self).__init__()
self._quant_bits = quant_bits self._quant_bits = quant_bits
......
...@@ -1237,7 +1237,7 @@ def _build_table( ...@@ -1237,7 +1237,7 @@ def _build_table(
if statistic_data.event_summary.items: if statistic_data.event_summary.items:
all_row_values = [] all_row_values = []
name_column_width = 52 name_column_width = 52
if thread_sep == True: if thread_sep:
thread_items = statistic_data.event_summary.thread_items thread_items = statistic_data.event_summary.thread_items
else: else:
thread_items = { thread_items = {
...@@ -1721,7 +1721,7 @@ def _build_table( ...@@ -1721,7 +1721,7 @@ def _build_table(
'ProfileStep' 'ProfileStep'
].general_gpu_time ].general_gpu_time
) )
if thread_sep == True: if thread_sep:
userdefined_thread_items = ( userdefined_thread_items = (
statistic_data.event_summary.userdefined_thread_items statistic_data.event_summary.userdefined_thread_items
) )
......
...@@ -164,7 +164,7 @@ def load_profiler_result(filename: str): ...@@ -164,7 +164,7 @@ def load_profiler_result(filename: str):
def in_profiler_mode(): def in_profiler_mode():
return _is_profiler_used == True return _is_profiler_used
def wrap_optimizers(): def wrap_optimizers():
...@@ -182,7 +182,7 @@ def wrap_optimizers(): ...@@ -182,7 +182,7 @@ def wrap_optimizers():
return warpper return warpper
global _has_optimizer_wrapped global _has_optimizer_wrapped
if _has_optimizer_wrapped == True: if _has_optimizer_wrapped:
return return
import paddle.optimizer as optimizer import paddle.optimizer as optimizer
......
...@@ -398,7 +398,10 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm): ...@@ -398,7 +398,10 @@ class SyncBatchNorm(paddle.nn.SyncBatchNorm):
layer._name, layer._name,
) )
if layer._weight_attr != False and layer._bias_attr != False: if (
layer._weight_attr is not False
and layer._bias_attr is not False
):
with no_grad(): with no_grad():
layer_output.weight = layer.weight layer_output.weight = layer.weight
layer_output.bias = layer.bias layer_output.bias = layer.bias
......
...@@ -466,9 +466,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): ...@@ -466,9 +466,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
if in_dygraph_mode(): if in_dygraph_mode():
out = _C_ops.abs(input) out = _C_ops.abs(input)
reduce_all = ( reduce_all = (
True True if axis == None or axis == [] or asvector else False
if axis == None or axis == [] or asvector == True
else False
) )
axis = axis if axis != None and axis != [] else [0] axis = axis if axis != None and axis != [] else [0]
if reduce_all: if reduce_all:
...@@ -487,9 +485,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): ...@@ -487,9 +485,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None):
dtype=helper.input_dtype() dtype=helper.input_dtype()
) )
reduce_all = ( reduce_all = True if axis == None or axis == [] or asvector else False
True if axis == None or axis == [] or asvector == True else False
)
axis = axis if axis != None and axis != [] else [0] axis = axis if axis != None and axis != [] else [0]
reduce_type = ( reduce_type = (
...@@ -1322,7 +1318,7 @@ def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None): ...@@ -1322,7 +1318,7 @@ def cov(x, rowvar=True, ddof=True, fweights=None, aweights=None, name=None):
avg = nx.sum(axis=1) / w_sum avg = nx.sum(axis=1) / w_sum
nx_w = nx nx_w = nx
if w is not None and aweights is not None and ddof == True: if w is not None and aweights is not None and ddof:
norm_factor = w_sum - (w * aweights).sum() / w_sum norm_factor = w_sum - (w * aweights).sum() / w_sum
else: else:
norm_factor = w_sum - ddof norm_factor = w_sum - ddof
......
...@@ -3206,7 +3206,7 @@ def tile(x, repeat_times, name=None): ...@@ -3206,7 +3206,7 @@ def tile(x, repeat_times, name=None):
check_variable_and_dtype( check_variable_and_dtype(
x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile' x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'tile'
) )
if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False: if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
raise ValueError( raise ValueError(
"When the date type is bool for the input 'x' of tile op, you " "When the date type is bool for the input 'x' of tile op, you "
"must set its stop_gradient to be True by " "must set its stop_gradient to be True by "
...@@ -3288,7 +3288,7 @@ def expand_as(x, y, name=None): ...@@ -3288,7 +3288,7 @@ def expand_as(x, y, name=None):
) )
check_type(y, 'y', Variable, 'expand_as') check_type(y, 'y', Variable, 'expand_as')
if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False: if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
raise ValueError( raise ValueError(
"When the data type of input 'x' for expand_as is bool, " "When the data type of input 'x' for expand_as is bool, "
"you must set its stop_gradient to be False by " "you must set its stop_gradient to be False by "
...@@ -3359,7 +3359,7 @@ def broadcast_to(x, shape, name=None): ...@@ -3359,7 +3359,7 @@ def broadcast_to(x, shape, name=None):
x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'broadcast_to' x, 'x', ['bool', 'float32', 'float64', 'int32', 'int64'], 'broadcast_to'
) )
check_type(shape, 'shape', (list, tuple, Variable), 'broadcast_to') check_type(shape, 'shape', (list, tuple, Variable), 'broadcast_to')
if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False: if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
raise ValueError( raise ValueError(
"When the data type of input 'x' for broadcast_to is bool, " "When the data type of input 'x' for broadcast_to is bool, "
"you must set its stop_gradient to be False by " "you must set its stop_gradient to be False by "
...@@ -3457,7 +3457,7 @@ def expand(x, shape, name=None): ...@@ -3457,7 +3457,7 @@ def expand(x, shape, name=None):
'expand', 'expand',
) )
check_type(shape, 'shape', (list, tuple, Variable), 'expand') check_type(shape, 'shape', (list, tuple, Variable), 'expand')
if convert_dtype(x.dtype) == 'bool' and x.stop_gradient == False: if convert_dtype(x.dtype) == 'bool' and not x.stop_gradient:
raise ValueError( raise ValueError(
"When the data type of input 'x' for expand is bool, " "When the data type of input 'x' for expand is bool, "
"you must set its stop_gradient to be False by " "you must set its stop_gradient to be False by "
......
...@@ -188,7 +188,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None): ...@@ -188,7 +188,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None):
""" """
assert ( assert (
core.is_compiled_with_rocm() == False not core.is_compiled_with_rocm()
), "multinomial op is not supported on ROCM yet." ), "multinomial op is not supported on ROCM yet."
if in_dygraph_mode(): if in_dygraph_mode():
......
...@@ -228,9 +228,9 @@ class Conll05st(Dataset): ...@@ -228,9 +228,9 @@ class Conll05st(Dataset):
lbl_seq = [] lbl_seq = []
verb_word = '' verb_word = ''
for l in lbl: for l in lbl:
if l == '*' and is_in_bracket == False: if l == '*' and not is_in_bracket:
lbl_seq.append('O') lbl_seq.append('O')
elif l == '*' and is_in_bracket == True: elif l == '*' and is_in_bracket:
lbl_seq.append('I-' + cur_tag) lbl_seq.append('I-' + cur_tag)
elif l == '*)': elif l == '*)':
lbl_seq.append('I-' + cur_tag) lbl_seq.append('I-' + cur_tag)
......
...@@ -46,28 +46,25 @@ def analysisPyXml(rootPath, ut): ...@@ -46,28 +46,25 @@ def analysisPyXml(rootPath, ut):
command = 'sed -n %sp %s' % (line_number, clazz_filename) command = 'sed -n %sp %s' % (line_number, clazz_filename)
_code, output = commands.getstatusoutput(command) _code, output = commands.getstatusoutput(command)
if _code == 0: if _code == 0:
if ( if not output.strip().startswith(
output.strip().startswith( (
( 'from',
'from', 'import',
'import', '__all__',
'__all__', 'def',
'def', 'class',
'class', '"""',
'"""', '@',
'@', '\'\'\'',
'\'\'\'', 'logger',
'logger', '_logger',
'_logger', 'logging',
'logging', 'r"""',
'r"""', 'pass',
'pass', 'try',
'try', 'except',
'except', 'if __name__ == "__main__"',
'if __name__ == "__main__"',
)
) )
== False
): ):
pattern = r"""(.*) = ('*')|(.*) = ("*")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()""" # a='b'/a="b"/a=0 pattern = r"""(.*) = ('*')|(.*) = ("*")|(.*) = (\d)|(.*) = (-\d)|(.*) = (None)|(.*) = (True)|(.*) = (False)|(.*) = (URL_PREFIX*)|(.*) = (\[)|(.*) = (\{)|(.*) = (\()""" # a='b'/a="b"/a=0
if re.match(pattern, output.strip()) == None: if re.match(pattern, output.strip()) == None:
......
...@@ -40,7 +40,7 @@ def parse_log_file(log_file): ...@@ -40,7 +40,7 @@ def parse_log_file(log_file):
for line in f.read().strip().split('\n')[::-1]: for line in f.read().strip().split('\n')[::-1]:
try: try:
result = json.loads(line) result = json.loads(line)
if result.get("disabled", False) == True: if result.get("disabled", False):
return None return None
return result return result
except ValueError: except ValueError:
......
...@@ -349,7 +349,7 @@ class PRChecker(object): ...@@ -349,7 +349,7 @@ class PRChecker(object):
file_list.append(filename) file_list.append(filename)
else: else:
isWhiteFile = self.get_is_white_file(filename) isWhiteFile = self.get_is_white_file(filename)
if isWhiteFile == False: if not isWhiteFile:
file_list.append(filename) file_list.append(filename)
else: else:
filterFiles.append(filename) filterFiles.append(filename)
...@@ -417,7 +417,7 @@ class PRChecker(object): ...@@ -417,7 +417,7 @@ class PRChecker(object):
== tempfilename.split(".")[0] == tempfilename.split(".")[0]
): ):
f_judge_in_added_ut = True f_judge_in_added_ut = True
if f_judge_in_added_ut == True: if f_judge_in_added_ut:
print( print(
"Adding new unit tests not hit mapFiles: %s" "Adding new unit tests not hit mapFiles: %s"
% f_judge % f_judge
......
...@@ -91,7 +91,7 @@ def analysisFNDAFile(rootPath, test): ...@@ -91,7 +91,7 @@ def analysisFNDAFile(rootPath, test):
if matchObj == None: if matchObj == None:
OP_REGIST = False OP_REGIST = False
break break
if OP_REGIST == False: if not OP_REGIST:
related_file_list.append(clazz_filename) related_file_list.append(clazz_filename)
os.system( os.system(
'echo %s >> %s' % (clazz_filename, related_ut_map_file) 'echo %s >> %s' % (clazz_filename, related_ut_map_file)
......
...@@ -122,14 +122,14 @@ def generate_all_ops_inputs_outputs_map(op_descs): ...@@ -122,14 +122,14 @@ def generate_all_ops_inputs_outputs_map(op_descs):
outpus = list() outpus = list()
for input_ in op_proto[INPUTS]: for input_ in op_proto[INPUTS]:
if ( if (
op_proto[INPUTS][input_][EXTRA] != True not op_proto[INPUTS][input_][EXTRA]
and op_proto[INPUTS][input_][INTERMEDIATE] != True and not op_proto[INPUTS][input_][INTERMEDIATE]
): ):
inputs.append(input_) inputs.append(input_)
for output_ in op_proto[OUTPUTS]: for output_ in op_proto[OUTPUTS]:
if ( if (
op_proto[OUTPUTS][output_][EXTRA] != True not op_proto[OUTPUTS][output_][EXTRA]
and op_proto[OUTPUTS][output_][INTERMEDIATE] != True and not op_proto[OUTPUTS][output_][INTERMEDIATE]
): ):
outpus.append(output_) outpus.append(output_)
ops_inputs_map[op_type] = inputs ops_inputs_map[op_type] = inputs
...@@ -214,9 +214,9 @@ def get_constraint(op_type, op_proto): ...@@ -214,9 +214,9 @@ def get_constraint(op_type, op_proto):
optional_input_num_ = 0 optional_input_num_ = 0
for input_ in op_proto[INPUTS]: for input_ in op_proto[INPUTS]:
if ( if (
op_proto[INPUTS][input_][EXTRA] != True not op_proto[INPUTS][input_][EXTRA]
and op_proto[INPUTS][input_][INTERMEDIATE] != True and not op_proto[INPUTS][input_][INTERMEDIATE]
and op_proto[INPUTS][input_][DISPENSABLE] == True and op_proto[INPUTS][input_][DISPENSABLE]
): ):
optional_input_num_ += 1 optional_input_num_ += 1
if optional_input_num_ > 1: if optional_input_num_ > 1:
...@@ -306,11 +306,11 @@ def convert_op_proto_into_mlir(op_descs): ...@@ -306,11 +306,11 @@ def convert_op_proto_into_mlir(op_descs):
# 2.3.1 inputs # 2.3.1 inputs
for input_ in op_proto[INPUTS]: for input_ in op_proto[INPUTS]:
if ( if (
op_proto[INPUTS][input_][EXTRA] != True not op_proto[INPUTS][input_][EXTRA]
and op_proto[INPUTS][input_][INTERMEDIATE] != True and not op_proto[INPUTS][input_][INTERMEDIATE]
): ):
if op_proto[INPUTS][input_][DISPENSABLE] != True: if not op_proto[INPUTS][input_][DISPENSABLE]:
if op_proto[INPUTS][input_][DUPLICABLE] != True: if not op_proto[INPUTS][input_][DUPLICABLE]:
ARGUMENTS = ( ARGUMENTS = (
ARGUMENTS + " PD_Tensor:$" + input_ + "," ARGUMENTS + " PD_Tensor:$" + input_ + ","
) )
...@@ -319,7 +319,7 @@ def convert_op_proto_into_mlir(op_descs): ...@@ -319,7 +319,7 @@ def convert_op_proto_into_mlir(op_descs):
ARGUMENTS + " PD_Tensor_Array:$" + input_ + "," ARGUMENTS + " PD_Tensor_Array:$" + input_ + ","
) )
else: else:
if op_proto[INPUTS][input_][DUPLICABLE] != True: if not op_proto[INPUTS][input_][DUPLICABLE]:
ARGUMENTS = ( ARGUMENTS = (
ARGUMENTS ARGUMENTS
+ " Optional<PD_Tensor>:$" + " Optional<PD_Tensor>:$"
...@@ -350,7 +350,7 @@ def convert_op_proto_into_mlir(op_descs): ...@@ -350,7 +350,7 @@ def convert_op_proto_into_mlir(op_descs):
# 2.3.2 attributes # 2.3.2 attributes
for attr in op_proto[ATTRS]: for attr in op_proto[ATTRS]:
if (op_proto[ATTRS][attr][EXTRA] == True) or ( if (op_proto[ATTRS][attr][EXTRA]) or (
attr in skipped_attr_list attr in skipped_attr_list
): ):
continue continue
...@@ -434,10 +434,10 @@ def convert_op_proto_into_mlir(op_descs): ...@@ -434,10 +434,10 @@ def convert_op_proto_into_mlir(op_descs):
outputs = "" outputs = ""
for output_ in op_proto[OUTPUTS]: for output_ in op_proto[OUTPUTS]:
if ( if (
op_proto[OUTPUTS][output_][EXTRA] != True not op_proto[OUTPUTS][output_][EXTRA]
and op_proto[OUTPUTS][output_][INTERMEDIATE] != True and not op_proto[OUTPUTS][output_][INTERMEDIATE]
): ):
if op_proto[OUTPUTS][output_][DUPLICABLE] != True: if not op_proto[OUTPUTS][output_][DUPLICABLE]:
outputs = outputs + "PD_Tensor:${},".format(output_) outputs = outputs + "PD_Tensor:${},".format(output_)
else: else:
outputs = outputs + "PD_Tensor_Array:${},".format( outputs = outputs + "PD_Tensor_Array:${},".format(
......
...@@ -376,7 +376,7 @@ Please use '.. code-block:: python' to format the sample code.""" ...@@ -376,7 +376,7 @@ Please use '.. code-block:: python' to format the sample code."""
# None - no sample code found; # None - no sample code found;
# False - it need other special equipment or environment. # False - it need other special equipment or environment.
# so, the following conditional statements are intentionally arranged. # so, the following conditional statements are intentionally arranged.
if matched == True: if matched:
tfname = os.path.join( tfname = os.path.join(
SAMPLECODE_TEMPDIR, SAMPLECODE_TEMPDIR,
'{}_example{}'.format( '{}_example{}'.format(
...@@ -395,7 +395,7 @@ Please use '.. code-block:: python' to format the sample code.""" ...@@ -395,7 +395,7 @@ Please use '.. code-block:: python' to format the sample code."""
) )
) )
SUMMARY_INFO['skiptest'].append("{}-{}".format(name, cb['id'])) SUMMARY_INFO['skiptest'].append("{}-{}".format(name, cb['id']))
elif matched == False: elif not matched:
logger.info( logger.info(
'{}\' code block (name:{}, id:{}) required({}) not match capacity({}).'.format( '{}\' code block (name:{}, id:{}) required({}) not match capacity({}).'.format(
name, name,
......