From 22abb6b3d023fd305a58b90738f8201aa549709e Mon Sep 17 00:00:00 2001
From: Leo Chen
Date: Wed, 23 Feb 2022 10:36:02 +0800
Subject: [PATCH] fix 'is with a literal' warning (#39798)

* fix 'is with a literal'

* fix typo
---
 python/paddle/fluid/dygraph/amp/auto_cast.py   | 2 +-
 python/paddle/fluid/framework.py               | 2 +-
 python/paddle/fluid/tests/unittests/op_test.py | 2 +-
 python/paddle/nn/functional/loss.py            | 4 ++--
 python/paddle/nn/functional/norm.py            | 2 +-
 python/paddle/nn/layer/conv.py                 | 2 +-
 python/paddle/nn/layer/norm.py                 | 4 ++--
 python/paddle/nn/layer/rnn.py                  | 2 +-
 python/paddle/nn/layer/vision.py               | 2 +-
 python/paddle/tensor/linalg.py                 | 2 +-
 python/paddle/tensor/math.py                   | 4 ++--
 11 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/python/paddle/fluid/dygraph/amp/auto_cast.py b/python/paddle/fluid/dygraph/amp/auto_cast.py
index 37134764e9..41a7d3d774 100644
--- a/python/paddle/fluid/dygraph/amp/auto_cast.py
+++ b/python/paddle/fluid/dygraph/amp/auto_cast.py
@@ -161,7 +161,7 @@ def pure_fp16_initialize(models):
     for idx in range(len(models)):
         for layer in models[idx].sublayers(include_self=True):
             layer._casted_by_pure_fp16 = True
-            if (layer._dtype is 'float16') or isinstance(
+            if (layer._dtype == 'float16') or isinstance(
                     layer, (paddle.nn.BatchNorm, paddle.nn.BatchNorm1D,
                             paddle.nn.BatchNorm2D, paddle.nn.BatchNorm3D,
                             paddle.nn.LayerNorm)):
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 780b8acc4f..d0a94238a7 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -2544,7 +2544,7 @@ class Operator(object):
                 warnings.warn("The Op(%s) is not support to set device." %
                               type)
             if 'force_cpu' in op_attrs:
-                if (type is 'less_than' and op_attrs['force_cpu'] != None
+                if (type == 'less_than' and op_attrs['force_cpu'] != None
                     ) or op_attrs['force_cpu'] != False:
                     warnings.warn(
                         "The Attr(force_cpu) of Op(%s) will be deprecated in the future, "
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index ec3b68086b..92cba4fca5 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -380,7 +380,7 @@ class OpTest(unittest.TestCase):
             hasattr(self, 'output_dtype') and
             self.output_dtype == np.uint16) or (
                 hasattr(self, 'mkldnn_data_type') and
-                getattr(self, 'mkldnn_data_type') is "bfloat16") or (
+                getattr(self, 'mkldnn_data_type') == "bfloat16") or (
                     hasattr(self, 'attrs') and
                     'mkldnn_data_type' in self.attrs and
                     self.attrs['mkldnn_data_type'] == 'bfloat16')
diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index 636d2f645c..94c516f476 100755
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -131,7 +131,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
         fluid.data_feeder.check_variable_and_dtype(
             label, 'label', ['float32', 'float64'], 'binary_cross_entropy')
 
-        sub_name = name if weight is None and reduction is 'none' else None
+        sub_name = name if weight is None and reduction == 'none' else None
         helper = LayerHelper("binary_cross_entropy", name=sub_name)
         out = helper.create_variable_for_type_inference(dtype=input.dtype)
         helper.append_op(
@@ -144,7 +144,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
 
         if weight is not None:
             if isinstance(weight, paddle.static.Variable):
-                weight_name = name if reduction is 'none' else None
+                weight_name = name if reduction == 'none' else None
                 out = paddle.multiply(out, weight, name=weight_name)
             else:
                 raise ValueError(
diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py
index c59d0eb5e6..a5de268ec2 100644
--- a/python/paddle/nn/functional/norm.py
+++ b/python/paddle/nn/functional/norm.py
@@ -217,7 +217,7 @@ def batch_norm(x,
 
     helper = LayerHelper('batch_norm', **locals())
 
-    param_dtype = x.dtype if x.dtype is not 'float16' else 'float32'
+    param_dtype = x.dtype if x.dtype != 'float16' else 'float32'
     saved_mean = helper.create_variable_for_type_inference(
         dtype=param_dtype, stop_gradient=True)
     saved_variance = helper.create_variable_for_type_inference(
diff --git a/python/paddle/nn/layer/conv.py b/python/paddle/nn/layer/conv.py
index fd7355e162..bb1cbbfc03 100644
--- a/python/paddle/nn/layer/conv.py
+++ b/python/paddle/nn/layer/conv.py
@@ -162,7 +162,7 @@ class _ConvNd(Layer):
             main_str += ', stride={_stride}'
         if self._padding != 0:
             main_str += ', padding={_padding}'
-        if self._padding_mode is not 'zeros':
+        if self._padding_mode != 'zeros':
             main_str += ', padding_mode={_padding_mode}'
         if self.output_padding != 0:
             main_str += ', output_padding={output_padding}'
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index 8113073d75..7c3e3ad8de 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -668,7 +668,7 @@ class _BatchNormBase(Layer):
     def extra_repr(self):
         main_str = 'num_features={}, momentum={}, epsilon={}'.format(
             self._num_features, self._momentum, self._epsilon)
-        if self._data_format is not 'NCHW':
+        if self._data_format != 'NCHW':
             main_str += ', data_format={}'.format(self._data_format)
         if self._name is not None:
             main_str += ', name={}'.format(self._name)
@@ -1252,7 +1252,7 @@ class LocalResponseNorm(Layer):
     def extra_repr(self):
         main_str = 'size={}, alpha={}, beta={}, k={}'.format(
             self.size, self.alpha, self.beta, self.k)
-        if self.data_format is not 'NCHW':
+        if self.data_format != 'NCHW':
             main_str += ', data_format={}'.format(self.data_format)
         if self.name is not None:
             main_str += ', name={}'.format(self.name)
diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py
index 2bb1f13111..09a0d3cb41 100644
--- a/python/paddle/nn/layer/rnn.py
+++ b/python/paddle/nn/layer/rnn.py
@@ -391,7 +391,7 @@ class SimpleRNNCell(RNNCellBase):
 
     def extra_repr(self):
         s = '{input_size}, {hidden_size}'
-        if self.activation is not "tanh":
+        if self.activation != "tanh":
             s += ', activation={activation}'
         return s.format(**self.__dict__)
 
diff --git a/python/paddle/nn/layer/vision.py b/python/paddle/nn/layer/vision.py
index 7f8b51ca10..0531afb4ee 100644
--- a/python/paddle/nn/layer/vision.py
+++ b/python/paddle/nn/layer/vision.py
@@ -82,7 +82,7 @@ class PixelShuffle(Layer):
 
     def extra_repr(self):
         main_str = 'upscale_factor={}'.format(self._upscale_factor)
-        if self._data_format is not 'NCHW':
+        if self._data_format != 'NCHW':
            main_str += ', data_format={}'.format(self._data_format)
         if self._name is not None:
             main_str += ', name={}'.format(self._name)
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 91d688b761..fef1652040 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -2792,7 +2792,7 @@ def eigvalsh(x, UPLO='L', name=None):
             raise ValueError(
                 "The input matrix must be batches of square matrices. But received x's dimention: {}".
                 format(x_shape))
-        if UPLO is not 'L' and UPLO is not 'U':
+        if UPLO != 'L' and UPLO != 'U':
             raise ValueError(
                 "UPLO must be L or U. But received UPLO is: {}".format(UPLO))
 
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index a36bf1c432..ce29e9dce8 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -3439,7 +3439,7 @@ def erfinv_(x, name=None):
     return _C_ops.erfinv_(x)
 
 def rad2deg(x, name=None):
-    """
+    r"""
     Convert each of the elements of input x from angles in radians to degrees.
 
     Equation:
@@ -3498,7 +3498,7 @@ def rad2deg(x, name=None):
     return out
 
 def deg2rad(x, name=None):
-    """
+    r"""
     Convert each of the elements of input x from degrees to angles in radians.
 
     Equation:
-- 
GitLab
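Background on the warning this patch fixes: in Python, `is` compares object
identity while `==` compares value, so a check like `layer._dtype is 'float16'`
only passes when both operands happen to be the same interned string object,
and Python 3.8+ flags the pattern at compile time with
`SyntaxWarning: "is" with a literal. Did you mean "=="?`. A minimal standalone
sketch of the pitfall (the variable names below are illustrative, not from the
patch):

    dtype = "float16"                   # string literal, interned by CPython
    built = "".join(["float", "16"])    # equal value, but a new runtime object

    print(dtype == built)   # True  -- value comparison is always reliable
    print(dtype is built)   # False -- identity depends on interning details

Switching to `==`/`!=`, as every hunk above does, keeps the comparisons correct
for any equal-valued string and silences the SyntaxWarning.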