diff --git a/python/paddle/fluid/dygraph/amp/auto_cast.py b/python/paddle/fluid/dygraph/amp/auto_cast.py
index 37134764e9d1c8a7ffbd0a2f53668d16f90a1683..41a7d3d774793140e5942f936ba4538d728db207 100644
--- a/python/paddle/fluid/dygraph/amp/auto_cast.py
+++ b/python/paddle/fluid/dygraph/amp/auto_cast.py
@@ -161,7 +161,7 @@ def pure_fp16_initialize(models):
     for idx in range(len(models)):
         for layer in models[idx].sublayers(include_self=True):
             layer._casted_by_pure_fp16 = True
-            if (layer._dtype is 'float16') or isinstance(
+            if (layer._dtype == 'float16') or isinstance(
                     layer, (paddle.nn.BatchNorm, paddle.nn.BatchNorm1D,
                             paddle.nn.BatchNorm2D, paddle.nn.BatchNorm3D,
                             paddle.nn.LayerNorm)):
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index 780b8acc4fde67f4b47589869b258dd99a022125..d0a94238a7aeb21f9d1baf8154cbe3b7f2b77a72 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -2544,7 +2544,7 @@ class Operator(object):
                 warnings.warn("The Op(%s) is not support to set device." %
                               type)
             if 'force_cpu' in op_attrs:
-                if (type is 'less_than' and op_attrs['force_cpu'] != None
+                if (type == 'less_than' and op_attrs['force_cpu'] != None
                     ) or op_attrs['force_cpu'] != False:
                     warnings.warn(
                         "The Attr(force_cpu) of Op(%s) will be deprecated in the future, "
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index ec3b68086b06593b035b60825a52b0ec32b8281d..92cba4fca5aba07df065b0a7927f239ae0c505ea 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -380,7 +380,7 @@ class OpTest(unittest.TestCase):
             hasattr(self, 'output_dtype') and
             self.output_dtype == np.uint16) or (
                 hasattr(self, 'mkldnn_data_type') and
-                getattr(self, 'mkldnn_data_type') is "bfloat16") or (
+                getattr(self, 'mkldnn_data_type') == "bfloat16") or (
                     hasattr(self, 'attrs') and
                     'mkldnn_data_type' in self.attrs and
                     self.attrs['mkldnn_data_type'] == 'bfloat16')
diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index 636d2f645c5b0bd156ee7491ab847ec5c89f9ad7..94c516f476ede38e60de0bb6d01aed0a61850572 100755
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -131,7 +131,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
         fluid.data_feeder.check_variable_and_dtype(
             label, 'label', ['float32', 'float64'], 'binary_cross_entropy')

-        sub_name = name if weight is None and reduction is 'none' else None
+        sub_name = name if weight is None and reduction == 'none' else None
         helper = LayerHelper("binary_cross_entropy", name=sub_name)
         out = helper.create_variable_for_type_inference(dtype=input.dtype)
         helper.append_op(
@@ -144,7 +144,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',

         if weight is not None:
             if isinstance(weight, paddle.static.Variable):
-                weight_name = name if reduction is 'none' else None
+                weight_name = name if reduction == 'none' else None
                 out = paddle.multiply(out, weight, name=weight_name)
             else:
                 raise ValueError(
diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py
index c59d0eb5e6d115cb06ba27091180178cb813fa35..a5de268ec2314a6e7ff8014c9ce1bb8efa598b2b 100644
--- a/python/paddle/nn/functional/norm.py
+++ b/python/paddle/nn/functional/norm.py
@@ -217,7 +217,7 @@ def batch_norm(x,

     helper = LayerHelper('batch_norm', **locals())

-    param_dtype = x.dtype if x.dtype is not 'float16' else 'float32'
+    param_dtype = x.dtype if x.dtype != 'float16' else 'float32'
     saved_mean = helper.create_variable_for_type_inference(
         dtype=param_dtype, stop_gradient=True)
     saved_variance = helper.create_variable_for_type_inference(
diff --git a/python/paddle/nn/layer/conv.py b/python/paddle/nn/layer/conv.py
index fd7355e162ae77fdca683436e5cd69fa2f3c3351..bb1cbbfc03e550f0d57c7dc30aecfa6e12ce2f75 100644
--- a/python/paddle/nn/layer/conv.py
+++ b/python/paddle/nn/layer/conv.py
@@ -162,7 +162,7 @@ class _ConvNd(Layer):
             main_str += ', stride={_stride}'
         if self._padding != 0:
             main_str += ', padding={_padding}'
-        if self._padding_mode is not 'zeros':
+        if self._padding_mode != 'zeros':
             main_str += ', padding_mode={_padding_mode}'
         if self.output_padding != 0:
             main_str += ', output_padding={output_padding}'
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index 8113073d757d6e978caa99c7199e61a2cb8a79d4..7c3e3ad8dee9f66bffe07249e17a64cc4d8fa513 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -668,7 +668,7 @@ class _BatchNormBase(Layer):
     def extra_repr(self):
         main_str = 'num_features={}, momentum={}, epsilon={}'.format(
             self._num_features, self._momentum, self._epsilon)
-        if self._data_format is not 'NCHW':
+        if self._data_format != 'NCHW':
             main_str += ', data_format={}'.format(self._data_format)
         if self._name is not None:
             main_str += ', name={}'.format(self._name)
@@ -1252,7 +1252,7 @@ class LocalResponseNorm(Layer):
     def extra_repr(self):
         main_str = 'size={}, alpha={}, beta={}, k={}'.format(
             self.size, self.alpha, self.beta, self.k)
-        if self.data_format is not 'NCHW':
+        if self.data_format != 'NCHW':
             main_str += ', data_format={}'.format(self.data_format)
         if self.name is not None:
             main_str += ', name={}'.format(self.name)
diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py
index 2bb1f1311107b18f2a5e30ca8f1661a45e9b4c92..09a0d3cb41cbcb1a867e2e61e37946bf0d059805 100644
--- a/python/paddle/nn/layer/rnn.py
+++ b/python/paddle/nn/layer/rnn.py
@@ -391,7 +391,7 @@ class SimpleRNNCell(RNNCellBase):

     def extra_repr(self):
         s = '{input_size}, {hidden_size}'
-        if self.activation is not "tanh":
+        if self.activation != "tanh":
             s += ', activation={activation}'
         return s.format(**self.__dict__)

diff --git a/python/paddle/nn/layer/vision.py b/python/paddle/nn/layer/vision.py
index 7f8b51ca10818ec10a794f2910f066f16cf26278..0531afb4eeeeb92c4e888bb2df972e4920b971cd 100644
--- a/python/paddle/nn/layer/vision.py
+++ b/python/paddle/nn/layer/vision.py
@@ -82,7 +82,7 @@ class PixelShuffle(Layer):

     def extra_repr(self):
         main_str = 'upscale_factor={}'.format(self._upscale_factor)
-        if self._data_format is not 'NCHW':
+        if self._data_format != 'NCHW':
             main_str += ', data_format={}'.format(self._data_format)
         if self._name is not None:
             main_str += ', name={}'.format(self._name)
diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py
index 91d688b761a11c1bf66319d04b8d8a7ea4f20e28..fef1652040835091b127324b1a5f6048f6a40bae 100644
--- a/python/paddle/tensor/linalg.py
+++ b/python/paddle/tensor/linalg.py
@@ -2792,7 +2792,7 @@ def eigvalsh(x, UPLO='L', name=None):
             raise ValueError(
                 "The input matrix must be batches of square matrices. But received x's dimention: {}".
                 format(x_shape))
-        if UPLO is not 'L' and UPLO is not 'U':
+        if UPLO != 'L' and UPLO != 'U':
             raise ValueError(
                 "UPLO must be L or U. But received UPLO is: {}".format(UPLO))

diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index a36bf1c432515348397042b1ab01a2c5e7fd89f2..ce29e9dce81809a9745d6efbe6da419878423e00 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -3439,7 +3439,7 @@ def erfinv_(x, name=None):
     return _C_ops.erfinv_(x)

 def rad2deg(x, name=None):
-    """
+    r"""
     Convert each of the elements of input x from angles in radians to degrees.

     Equation:
@@ -3498,7 +3498,7 @@ def rad2deg(x, name=None):
     return out

 def deg2rad(x, name=None):
-    """
+    r"""
     Convert each of the elements of input x from degrees to angles in radians.

     Equation:
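Note on the `is` -> `==` changes above (appended commentary, not part of the patch): `is` tests object identity while `==` tests value equality, and CPython interns string literals at compile time but not strings built at runtime, so an identity check against a literal such as 'float16' can fail even when the values match. A minimal standalone sketch:

    a = "float16"                    # literal, interned at compile time
    b = "".join(["float", "16"])     # equal value, but constructed at runtime
    print(a == b)                    # True:  value equality, the correct check
    print(a is b)                    # False: distinct objects, identity fails

CPython 3.8+ also emits "SyntaxWarning: 'is' with a literal" for the removed lines, which is what this patch silences.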
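Likewise for the `"""` -> `r"""` changes in math.py: the rad2deg/deg2rad docstrings contain backslash (LaTeX-style) math notation, and without the `r` prefix Python parses backslash sequences as string escapes. A quick sketch of the difference:

    print(len(r"\frac"))   # 5: the raw string keeps the backslash verbatim
    print(len("\frac"))    # 4: "\f" collapses into a single form-feed character

Unrecognized escapes such as "\p" additionally trigger a DeprecationWarning on modern Pythons, so raw docstrings are the safe form wherever LaTeX appears.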