Unverified commit 22abb6b3, authored by Leo Chen, committed by GitHub

fix 'is with a literal' warning (#39798)

* fix 'is with a literal'

* fix typo
Parent commit: 9880595a
...@@ -161,7 +161,7 @@ def pure_fp16_initialize(models): ...@@ -161,7 +161,7 @@ def pure_fp16_initialize(models):
for idx in range(len(models)): for idx in range(len(models)):
for layer in models[idx].sublayers(include_self=True): for layer in models[idx].sublayers(include_self=True):
layer._casted_by_pure_fp16 = True layer._casted_by_pure_fp16 = True
if (layer._dtype is 'float16') or isinstance( if (layer._dtype == 'float16') or isinstance(
layer, (paddle.nn.BatchNorm, paddle.nn.BatchNorm1D, layer, (paddle.nn.BatchNorm, paddle.nn.BatchNorm1D,
paddle.nn.BatchNorm2D, paddle.nn.BatchNorm3D, paddle.nn.BatchNorm2D, paddle.nn.BatchNorm3D,
paddle.nn.LayerNorm)): paddle.nn.LayerNorm)):
......
...@@ -2544,7 +2544,7 @@ class Operator(object): ...@@ -2544,7 +2544,7 @@ class Operator(object):
warnings.warn("The Op(%s) is not support to set device." % warnings.warn("The Op(%s) is not support to set device." %
type) type)
if 'force_cpu' in op_attrs: if 'force_cpu' in op_attrs:
if (type is 'less_than' and op_attrs['force_cpu'] != None if (type == 'less_than' and op_attrs['force_cpu'] != None
) or op_attrs['force_cpu'] != False: ) or op_attrs['force_cpu'] != False:
warnings.warn( warnings.warn(
"The Attr(force_cpu) of Op(%s) will be deprecated in the future, " "The Attr(force_cpu) of Op(%s) will be deprecated in the future, "
......
...@@ -380,7 +380,7 @@ class OpTest(unittest.TestCase): ...@@ -380,7 +380,7 @@ class OpTest(unittest.TestCase):
hasattr(self, 'output_dtype') and hasattr(self, 'output_dtype') and
self.output_dtype == np.uint16) or ( self.output_dtype == np.uint16) or (
hasattr(self, 'mkldnn_data_type') and hasattr(self, 'mkldnn_data_type') and
getattr(self, 'mkldnn_data_type') is "bfloat16") or ( getattr(self, 'mkldnn_data_type') == "bfloat16") or (
hasattr(self, 'attrs') and hasattr(self, 'attrs') and
'mkldnn_data_type' in self.attrs and 'mkldnn_data_type' in self.attrs and
self.attrs['mkldnn_data_type'] == 'bfloat16') self.attrs['mkldnn_data_type'] == 'bfloat16')
......
...@@ -131,7 +131,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean', ...@@ -131,7 +131,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
fluid.data_feeder.check_variable_and_dtype( fluid.data_feeder.check_variable_and_dtype(
label, 'label', ['float32', 'float64'], 'binary_cross_entropy') label, 'label', ['float32', 'float64'], 'binary_cross_entropy')
sub_name = name if weight is None and reduction is 'none' else None sub_name = name if weight is None and reduction == 'none' else None
helper = LayerHelper("binary_cross_entropy", name=sub_name) helper = LayerHelper("binary_cross_entropy", name=sub_name)
out = helper.create_variable_for_type_inference(dtype=input.dtype) out = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op( helper.append_op(
...@@ -144,7 +144,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean', ...@@ -144,7 +144,7 @@ def binary_cross_entropy(input, label, weight=None, reduction='mean',
if weight is not None: if weight is not None:
if isinstance(weight, paddle.static.Variable): if isinstance(weight, paddle.static.Variable):
weight_name = name if reduction is 'none' else None weight_name = name if reduction == 'none' else None
out = paddle.multiply(out, weight, name=weight_name) out = paddle.multiply(out, weight, name=weight_name)
else: else:
raise ValueError( raise ValueError(
......
...@@ -217,7 +217,7 @@ def batch_norm(x, ...@@ -217,7 +217,7 @@ def batch_norm(x,
helper = LayerHelper('batch_norm', **locals()) helper = LayerHelper('batch_norm', **locals())
param_dtype = x.dtype if x.dtype is not 'float16' else 'float32' param_dtype = x.dtype if x.dtype != 'float16' else 'float32'
saved_mean = helper.create_variable_for_type_inference( saved_mean = helper.create_variable_for_type_inference(
dtype=param_dtype, stop_gradient=True) dtype=param_dtype, stop_gradient=True)
saved_variance = helper.create_variable_for_type_inference( saved_variance = helper.create_variable_for_type_inference(
......
...@@ -162,7 +162,7 @@ class _ConvNd(Layer): ...@@ -162,7 +162,7 @@ class _ConvNd(Layer):
main_str += ', stride={_stride}' main_str += ', stride={_stride}'
if self._padding != 0: if self._padding != 0:
main_str += ', padding={_padding}' main_str += ', padding={_padding}'
if self._padding_mode is not 'zeros': if self._padding_mode != 'zeros':
main_str += ', padding_mode={_padding_mode}' main_str += ', padding_mode={_padding_mode}'
if self.output_padding != 0: if self.output_padding != 0:
main_str += ', output_padding={output_padding}' main_str += ', output_padding={output_padding}'
......
...@@ -668,7 +668,7 @@ class _BatchNormBase(Layer): ...@@ -668,7 +668,7 @@ class _BatchNormBase(Layer):
def extra_repr(self): def extra_repr(self):
main_str = 'num_features={}, momentum={}, epsilon={}'.format( main_str = 'num_features={}, momentum={}, epsilon={}'.format(
self._num_features, self._momentum, self._epsilon) self._num_features, self._momentum, self._epsilon)
if self._data_format is not 'NCHW': if self._data_format != 'NCHW':
main_str += ', data_format={}'.format(self._data_format) main_str += ', data_format={}'.format(self._data_format)
if self._name is not None: if self._name is not None:
main_str += ', name={}'.format(self._name) main_str += ', name={}'.format(self._name)
...@@ -1252,7 +1252,7 @@ class LocalResponseNorm(Layer): ...@@ -1252,7 +1252,7 @@ class LocalResponseNorm(Layer):
def extra_repr(self): def extra_repr(self):
main_str = 'size={}, alpha={}, beta={}, k={}'.format( main_str = 'size={}, alpha={}, beta={}, k={}'.format(
self.size, self.alpha, self.beta, self.k) self.size, self.alpha, self.beta, self.k)
if self.data_format is not 'NCHW': if self.data_format != 'NCHW':
main_str += ', data_format={}'.format(self.data_format) main_str += ', data_format={}'.format(self.data_format)
if self.name is not None: if self.name is not None:
main_str += ', name={}'.format(self.name) main_str += ', name={}'.format(self.name)
......
...@@ -391,7 +391,7 @@ class SimpleRNNCell(RNNCellBase): ...@@ -391,7 +391,7 @@ class SimpleRNNCell(RNNCellBase):
def extra_repr(self): def extra_repr(self):
s = '{input_size}, {hidden_size}' s = '{input_size}, {hidden_size}'
if self.activation is not "tanh": if self.activation != "tanh":
s += ', activation={activation}' s += ', activation={activation}'
return s.format(**self.__dict__) return s.format(**self.__dict__)
......
...@@ -82,7 +82,7 @@ class PixelShuffle(Layer): ...@@ -82,7 +82,7 @@ class PixelShuffle(Layer):
def extra_repr(self): def extra_repr(self):
main_str = 'upscale_factor={}'.format(self._upscale_factor) main_str = 'upscale_factor={}'.format(self._upscale_factor)
if self._data_format is not 'NCHW': if self._data_format != 'NCHW':
main_str += ', data_format={}'.format(self._data_format) main_str += ', data_format={}'.format(self._data_format)
if self._name is not None: if self._name is not None:
main_str += ', name={}'.format(self._name) main_str += ', name={}'.format(self._name)
......
...@@ -2792,7 +2792,7 @@ def eigvalsh(x, UPLO='L', name=None): ...@@ -2792,7 +2792,7 @@ def eigvalsh(x, UPLO='L', name=None):
raise ValueError( raise ValueError(
"The input matrix must be batches of square matrices. But received x's dimention: {}". "The input matrix must be batches of square matrices. But received x's dimention: {}".
format(x_shape)) format(x_shape))
if UPLO is not 'L' and UPLO is not 'U': if UPLO != 'L' and UPLO != 'U':
raise ValueError( raise ValueError(
"UPLO must be L or U. But received UPLO is: {}".format(UPLO)) "UPLO must be L or U. But received UPLO is: {}".format(UPLO))
......
...@@ -3439,7 +3439,7 @@ def erfinv_(x, name=None): ...@@ -3439,7 +3439,7 @@ def erfinv_(x, name=None):
return _C_ops.erfinv_(x) return _C_ops.erfinv_(x)
def rad2deg(x, name=None): def rad2deg(x, name=None):
""" r"""
Convert each of the elements of input x from angles in radians to degrees. Convert each of the elements of input x from angles in radians to degrees.
Equation: Equation:
...@@ -3498,7 +3498,7 @@ def rad2deg(x, name=None): ...@@ -3498,7 +3498,7 @@ def rad2deg(x, name=None):
return out return out
def deg2rad(x, name=None): def deg2rad(x, name=None):
""" r"""
Convert each of the elements of input x from degrees to angles in radians. Convert each of the elements of input x from degrees to angles in radians.
Equation: Equation:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register.