Unverified commit c1838da6, authored by Kim and committed by GitHub

[CodeStyle][PLR1701] unify multiple isinstance expressions as one (#52150)

Parent c05feb90
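PLR1701 is the lint rule named in the commit title: it flags repeated isinstance calls on the same object joined with "or". Because isinstance accepts a tuple of classes as its second argument, every such chain can be collapsed into a single call with identical behavior. A minimal sketch of the before/after pattern applied throughout this diff (illustrative function and values, not taken from the changed files):

    # Before: repeated isinstance calls on one object (flagged by PLR1701).
    def is_number_old(value):
        return isinstance(value, float) or isinstance(value, int)

    # After: a single call with a tuple of types; isinstance(x, (A, B))
    # is True when x is an instance of any type in the tuple.
    def is_number_new(value):
        return isinstance(value, (float, int))

    # The two forms agree on every input.
    for sample in (1.5, 2, "2", [2], None):
        assert is_number_old(sample) == is_number_new(sample)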
@@ -250,7 +250,7 @@ def compute_fbank_matrix(
     if norm == 'slaney':
         enorm = 2.0 / (mel_f[2 : n_mels + 2] - mel_f[:n_mels])
         weights *= enorm.unsqueeze(1)
-    elif isinstance(norm, int) or isinstance(norm, float):
+    elif isinstance(norm, (int, float)):
         weights = paddle.nn.functional.normalize(weights, p=norm, axis=-1)
 
     return weights
...
@@ -323,7 +323,7 @@ class DistributedOperatorHelper:
         output = self._serial_op(*args, **kwargs)
         new_op_size = len(cur_block.ops)
-        if isinstance(output, tuple) or isinstance(output, list):
+        if isinstance(output, (tuple, list)):
             new_output = list(output)
         elif isinstance(output, Variable):
             new_output = [output]
...
@@ -1948,9 +1948,7 @@ class Resharder:
                     )
                     idx = idx_list[0]
-                elif isinstance(op_desc, SliceOpDesc) or isinstance(
-                    op_desc, AllGatherConcatOpDesc
-                ):
+                elif isinstance(op_desc, (SliceOpDesc, AllGatherConcatOpDesc)):
                     target_tensor = None
                     if isinstance(op_desc, SliceOpDesc):
                         assert (
...
@@ -425,7 +425,7 @@ class UtilBase:
         def feed_gen(batch_size, feeded_vars_dims, feeded_vars_filelist):
             def reader(batch_size, fn, dim):
                 data = []
-                if isinstance(dim, list) or isinstance(dim, tuple):
+                if isinstance(dim, (list, tuple)):
                     shape = list(dim)
                     _temp = 1
                     for x in dim:
...
@@ -45,11 +45,14 @@ class LocalSGDOptimizer(MetaOptimizerBase):
         if self.role_maker._worker_num() <= 1:
             return False
-        return (
-            isinstance(self.inner_opt, paddle.optimizer.momentum.Momentum)
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.Momentum)
-            or isinstance(self.inner_opt, paddle.optimizer.sgd.SGD)
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.SGD)
-        )
+        return isinstance(
+            self.inner_opt,
+            (
+                paddle.optimizer.momentum.Momentum,
+                paddle.fluid.optimizer.Momentum,
+                paddle.optimizer.sgd.SGD,
+                paddle.fluid.optimizer.SGD,
+            ),
+        )
 
     def _disable_strategy(self, dist_strategy):
@@ -228,11 +231,14 @@ class AdaptiveLocalSGDOptimizer(MetaOptimizerBase):
         if self.role_maker._worker_num() <= 1:
             return False
-        return (
-            isinstance(self.inner_opt, paddle.optimizer.Momentum)
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.Momentum)
-            or isinstance(self.inner_opt, paddle.optimizer.sgd.SGD)
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.SGD)
-        )
+        return isinstance(
+            self.inner_opt,
+            (
+                paddle.optimizer.Momentum,
+                paddle.fluid.optimizer.Momentum,
+                paddle.optimizer.sgd.SGD,
+                paddle.fluid.optimizer.SGD,
+            ),
+        )
 
     def _disable_strategy(self, dist_strategy):
...
@@ -795,9 +795,7 @@ class TrtLayerAutoScanTest(AutoScanTest):
                 if isinstance(threshold, float):
                     atol = threshold
                     rtol = 1e-8
-                elif isinstance(threshold, list) or isinstance(
-                    threshold, tuple
-                ):
+                elif isinstance(threshold, (list, tuple)):
                     atol = threshold[0]
                     rtol = threshold[1]
                 else:
...
@@ -110,7 +110,7 @@ class TestBilinearInterpOneDNNOp(OpTest):
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 scale_h = float(self.scale)
                 scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
...
@@ -104,7 +104,7 @@ class TestNearestInterpV2MKLDNNOp(OpTest):
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 scale_h = float(self.scale)
                 scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
...
@@ -37,17 +37,15 @@ def bicubic_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
-        if not isinstance(SizeTensor, list) and not isinstance(
-            SizeTensor, tuple
-        ):
+        if not isinstance(SizeTensor, (list, tuple)):
             SizeTensor = [SizeTensor]
     return paddle._C_ops.bicubic_interp(
         x,
@@ -197,7 +195,7 @@ class TestBicubicInterpOp(OpTest):
         in_w = self.input_shape[2]
 
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -236,7 +234,7 @@ class TestBicubicInterpOp(OpTest):
             'data_layout': self.data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
...
@@ -37,12 +37,12 @@ def bilinear_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
         if not isinstance(SizeTensor, list) and not isinstance(
@@ -169,7 +169,7 @@ class TestBilinearInterpOp(OpTest):
         scale_h = 0
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -210,7 +210,7 @@ class TestBilinearInterpOp(OpTest):
             'data_layout': self.data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -363,7 +363,7 @@ class TestBilinearInterpOpUint8(OpTest):
         ).astype("uint8")
 
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -400,7 +400,7 @@ class TestBilinearInterpOpUint8(OpTest):
             'align_mode': self.align_mode,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -537,7 +537,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float32")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -564,7 +564,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
...
@@ -114,9 +114,7 @@ def operator_equal(a, b):
         raise ValueError("In operator_equal not equal\n")
 
     for k, v in a.__dict__.items():
-        if isinstance(v, fluid.framework.Program) or isinstance(
-            v, fluid.framework.Block
-        ):
+        if isinstance(v, (fluid.framework.Program, fluid.framework.Block)):
             continue
 
         elif isinstance(v, core.OpDesc):
@@ -137,13 +135,10 @@ def operator_equal(a, b):
 def block_equal(a, b):
     for k, v in a.__dict__.items():
-        if (
-            isinstance(v, core.ProgramDesc)
-            or isinstance(v, fluid.framework.Program)
-            or isinstance(v, core.BlockDesc)
-        ):
+        if isinstance(
+            v, (core.ProgramDesc, fluid.framework.Program, core.BlockDesc)
+        ):
             continue
 
         elif k == "ops":
             assert len(a.ops) == len(b.ops)
             for i in range(0, len(a.ops)):
...
@@ -38,12 +38,12 @@ def linear_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
         if not isinstance(SizeTensor, list) and not isinstance(
@@ -138,7 +138,7 @@ class TestLinearInterpOp(OpTest):
         in_w = self.input_shape[1]
 
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = float(self.scale)
             if isinstance(self.scale, list):
                 self.scale = float(self.scale[0])
@@ -170,7 +170,7 @@ class TestLinearInterpOp(OpTest):
             'data_layout': self.data_layout,
         }
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = [float(self.scale)]
             self.attrs['scale'] = self.scale
         self.outputs = {'Out': output_np}
@@ -262,7 +262,7 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
         in_w = self.input_shape[1]
 
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = float(self.scale)
             if isinstance(self.scale, list):
                 self.scale = float(self.scale[0])
@@ -302,7 +302,7 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
             'data_layout': self.data_layout,
         }
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
                 self.scale = [self.scale[0], self.scale[0]]
@@ -343,7 +343,7 @@ class TestResizeLinearOpUint8(OpTest):
         input_np = np.random.random(self.input_shape).astype("uint8")
 
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = float(self.scale)
             if isinstance(self.scale, list):
                 self.scale = float(self.scale[0])
@@ -371,7 +371,7 @@ class TestResizeLinearOpUint8(OpTest):
             'align_mode': self.align_mode,
         }
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
                 self.scale = [self.scale[0], self.scale[0]]
...
@@ -39,12 +39,12 @@ def nearest_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
         if not isinstance(SizeTensor, list) and not isinstance(
@@ -233,7 +233,7 @@ class TestNearestInterpOp(OpTest):
         scale_h = 0
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -305,7 +305,7 @@ class TestNearestInterpOp(OpTest):
             'data_layout': self.data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -438,7 +438,7 @@ class TestNearestInterpOpUint8(OpTest):
         ).astype("uint8")
 
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -472,7 +472,7 @@ class TestNearestInterpOpUint8(OpTest):
             'align_corners': self.align_corners,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -583,7 +583,7 @@ class TestNearestInterpOp_attr_tensor(OpTest):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float64")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -610,7 +610,7 @@ class TestNearestInterpOp_attr_tensor(OpTest):
         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
...
@@ -39,12 +39,12 @@ def trilinear_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
         if not isinstance(SizeTensor, list) and not isinstance(
@@ -219,7 +219,7 @@ class TestTrilinearInterpOp(OpTest):
         in_w = self.input_shape[3]
 
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -270,7 +270,7 @@ class TestTrilinearInterpOp(OpTest):
             'data_layout': data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -434,7 +434,7 @@ class TestTrilinearInterpOpUint8(OpTest):
         ).astype("uint8")
 
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -477,7 +477,7 @@ class TestTrilinearInterpOpUint8(OpTest):
             'align_mode': self.align_mode,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -611,7 +611,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float32")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -642,7 +642,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
...
@@ -71,7 +71,7 @@ def create_op(scope, op_type, inputs, outputs, attrs, cache_list=None):
 
 def set_input(scope, op, inputs, place):
     def __set_input__(var_name, var):
-        if isinstance(var, tuple) or isinstance(var, np.ndarray):
+        if isinstance(var, (tuple, np.ndarray)):
            tensor = scope.find_var(var_name).get_tensor()
            if isinstance(var, tuple):
                tensor.set_recursive_sequence_lengths(var[1])
...
@@ -141,7 +141,7 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper):
         scale_h = 0
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -182,7 +182,7 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper):
             'data_layout': self.data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -389,7 +389,7 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float32")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -416,7 +416,7 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper):
         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
...
@@ -212,7 +212,7 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper):
         scale_h = 0
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
                     self.scale = [self.scale]
@@ -450,7 +450,7 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float32")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -477,7 +477,7 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper):
         self.attrs['out_h'] = self.out_h
         self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
...
@@ -81,11 +81,7 @@ class ProgressBar:
             for i, (k, val) in enumerate(values):
                 if k == "loss":
-                    val = (
-                        val
-                        if isinstance(val, list) or isinstance(val, np.ndarray)
-                        else [val]
-                    )
+                    val = val if isinstance(val, (list, np.ndarray)) else [val]
                     if isinstance(val[0], np.uint16):
                         values[i] = ("loss", list(convert_uint16_to_float(val)))
...
@@ -192,7 +192,7 @@ def load_var(var_name, shape_list, dtype, save_path):
 
 def reader(batch_size, fn, dim):
     data = []
-    if isinstance(dim, list) or isinstance(dim, tuple):
+    if isinstance(dim, (list, tuple)):
         shape = list(dim)
         _temp = 1
         for x in dim:
...
@@ -915,9 +915,7 @@ def save(layer, path, input_spec=None, **configs):
         )
 
     if not (
-        isinstance(layer, Layer)
-        or inspect.isfunction(layer)
-        or isinstance(layer, StaticFunction)
+        isinstance(layer, (Layer, StaticFunction)) or inspect.isfunction(layer)
     ):
         raise TypeError(
             "The input of paddle.jit.save should be 'Layer' or 'Function', but received input type is %s."
...
@@ -179,9 +179,7 @@ class StaticAnalysisVisitor:
         self.ancestor_wrappers.append(cur_wrapper)
         for child in gast.iter_child_nodes(node):
-            if isinstance(child, gast.FunctionDef) or isinstance(
-                child, gast.AsyncFunctionDef
-            ):
+            if isinstance(child, (gast.FunctionDef, gast.AsyncFunctionDef)):
                 # TODO: current version is function name mapping to its type
                 # consider complex case involving parameters
                 self.var_env.enter_scope(
...
@@ -399,9 +399,7 @@ def interpolate(
     if size is None and scale_factor is None:
         raise ValueError("One of size and scale_factor must not be None.")
-    if (isinstance(size, list) or isinstance(size, tuple)) and len(
-        size
-    ) != x.ndim - 2:
+    if isinstance(size, (tuple, list)) and (len(size) != x.ndim - 2):
         raise ValueError(
             'The x and size should satisfy rank(x) - 2 == len(size).'
         )
@@ -427,11 +425,7 @@ def interpolate(
         )
 
     if resample == 'AREA':
-        if (
-            isinstance(size, list)
-            or isinstance(size, tuple)
-            or isinstance(size, Variable)
-        ):
+        if isinstance(size, (list, tuple, Variable)):
             if len(size) == 0:
                 raise ValueError("output size can not be empty")
         if size is None:
@@ -464,7 +458,7 @@ def interpolate(
         )
 
     def _is_list_or_turple_(data):
-        return isinstance(data, list) or isinstance(data, tuple)
+        return isinstance(data, (list, tuple))
 
     if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW':
         data_layout = 'NCHW'
@@ -581,18 +575,14 @@ def interpolate(
         if isinstance(scale, Variable):
             scale.stop_gradient = True
             inputs["Scale"] = scale
-        elif (
-            isinstance(scale, float)
-            or isinstance(scale, int)
-            or isinstance(scale, numpy.ndarray)
-        ):
+        elif isinstance(scale, (float, int, numpy.ndarray)):
             if scale <= 0:
                 raise ValueError("Attr(scale) should be greater than zero.")
             scale_list = []
             for i in range(len(x.shape) - 2):
                 scale_list.append(scale)
             attrs['scale'] = list(map(float, scale_list))
-        elif isinstance(scale, list) or isinstance(scale, tuple):
+        elif isinstance(scale, (list, tuple)):
             if len(scale) != len(x.shape) - 2:
                 raise ValueError(
                     "scale_shape length should be {} for "
@@ -2275,7 +2265,7 @@ def fold(
     assert len(x.shape) == 3, "input should be the format of [N, C, L]"
 
     def _is_list_or_turple_(data):
-        return isinstance(data, list) or isinstance(data, tuple)
+        return isinstance(data, (list, tuple))
 
     if isinstance(output_sizes, int):
         output_sizes = [output_sizes, output_sizes]
...
@@ -50,9 +50,7 @@ class ConstantInitializer(Initializer):
         """
         block = self._check_block(block)
 
-        assert isinstance(var, framework.Variable) or isinstance(
-            var, framework.EagerParamBase
-        )
+        assert isinstance(var, (framework.Variable, framework.EagerParamBase))
         assert isinstance(block, framework.Block)
 
         if in_dygraph_mode():
...
@@ -1860,7 +1860,7 @@ class Layer:
                 raise ValueError(
                     "{} is not found in the provided dict.".format(key)
                 )
-            if isinstance(state, dict) or isinstance(state, list):
+            if isinstance(state, (dict, list)):
                 if len(state) != len(param):
                     missing_keys.append(key)
                     raise ValueError(
...
@@ -593,7 +593,7 @@ class RNNCellBase(Layer):
 
         def _is_shape_sequence(seq):
             """For shape, list/tuple of integer is the finest-grained objection"""
-            if isinstance(seq, list) or isinstance(seq, tuple):
+            if isinstance(seq, (list, tuple)):
                 if reduce(
                     lambda flag, x: isinstance(x, int) and flag, seq, True
                 ):
...
@@ -1087,11 +1087,7 @@ class MAOutputScaleLayer(Layer):
     def forward(self, *inputs, **kwargs):
         out = self._layer(*inputs, **kwargs)
         # TODO (jc): support the ops of several outputs
-        if (
-            isinstance(out, list)
-            or isinstance(out, tuple)
-            or isinstance(out, dict)
-        ):
+        if isinstance(out, (list, tuple, dict)):
             return out
         else:
             return self._ma_output_scale(out)
@@ -1129,7 +1125,7 @@ class FakeQuantMAOutputScaleLayer(Layer):
     def forward(self, *inputs, **kwargs):
         out = self._layer(*inputs, **kwargs)
         # TODO (jc): support the ops of several outputs
-        if (isinstance(out, list) or isinstance(out, tuple)) and len(out) > 1:
+        if (isinstance(out, (list, tuple))) and len(out) > 1:
             return out
         else:
             return self._fake_quant_output(out)
...
@@ -791,11 +791,7 @@ class LinearWarmup(LRScheduler):
         last_epoch=-1,
         verbose=False,
     ):
-        type_check = (
-            isinstance(learning_rate, float)
-            or isinstance(learning_rate, int)
-            or isinstance(learning_rate, LRScheduler)
-        )
+        type_check = isinstance(learning_rate, (float, int, LRScheduler))
        if not type_check:
            raise TypeError(
                "the type of learning_rate should be [int, float or LRScheduler], the current type is {}".format(
...
@@ -961,13 +961,11 @@ def conv2d(
     # padding
     def _update_padding(padding, data_format):
-        def is_list_or_tuple(ele):
-            if isinstance(ele, list) or isinstance(ele, tuple):
-                return True
-            return False
-
-        if is_list_or_tuple(padding) and len(padding) == 4:
-            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
+        if isinstance(padding, (list, tuple)) and len(padding) == 4:
+            if isinstance(padding[0], (list, tuple)) and (
+                data_format == "NCHW"
+            ):
                 if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -975,7 +973,9 @@ def conv2d(
                     )
                 padding = padding[2:4]
                 padding = [ele for a_list in padding for ele in a_list]
-            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
+            elif isinstance(padding[0], (list, tuple)) and (
+                data_format == "NHWC"
+            ):
                 if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1257,13 +1257,11 @@ def conv3d(
     dilation = paddle.utils.convert_to_list(dilation, 3, 'dilation')
 
     def _update_padding(padding, data_format):
-        def is_list_or_tuple(ele):
-            if isinstance(ele, list) or isinstance(ele, tuple):
-                return True
-            return False
-
-        if is_list_or_tuple(padding) and len(padding) == 5:
-            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
+        if isinstance(padding, (list, tuple)) and len(padding) == 5:
+            if isinstance(padding[0], (list, tuple)) and (
+                data_format == "NCDHW"
+            ):
                 if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1271,7 +1269,9 @@ def conv3d(
                     )
                 padding = padding[2:5]
                 padding = [ele for a_list in padding for ele in a_list]
-            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
+            elif isinstance(padding[0], (list, tuple)) and (
+                data_format == "NDHWC"
+            ):
                 if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1282,7 +1282,7 @@ def conv3d(
             padding = paddle.utils.convert_to_list(padding, 6, 'padding')
             if paddle.utils._is_symmetric_padding(padding, 3):
                 padding = [padding[0], padding[2], padding[4]]
-        elif is_list_or_tuple(padding) and len(padding) == 6:
+        elif isinstance(padding, (list, tuple)) and len(padding) == 6:
             padding = paddle.utils.convert_to_list(padding, 6, 'padding')
             if paddle.utils._is_symmetric_padding(padding, 3):
                 padding = [padding[0], padding[2], padding[4]]
@@ -1580,13 +1580,11 @@ def conv2d_transpose(
         raise ValueError("use_cudnn should be True or False")
 
     def _update_padding(padding, data_format):
-        def is_list_or_tuple(ele):
-            if isinstance(ele, list) or isinstance(ele, tuple):
-                return True
-            return False
-
-        if is_list_or_tuple(padding) and len(padding) == 4:
-            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
+        if isinstance(padding, (list, tuple)) and len(padding) == 4:
+            if isinstance(padding[0], (list, tuple)) and (
+                data_format == "NCHW"
+            ):
                 if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1594,7 +1592,9 @@ def conv2d_transpose(
                     )
                 padding = padding[2:4]
                 padding = [ele for a_list in padding for ele in a_list]
-            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
+            elif isinstance(padding[0], (list, tuple)) and (
+                data_format == "NHWC"
+            ):
                 if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1951,13 +1951,11 @@ def conv3d_transpose(
         raise ValueError("use_cudnn should be True or False")
 
     def _update_padding(padding, data_format):
-        def is_list_or_tuple(ele):
-            if isinstance(ele, list) or isinstance(ele, tuple):
-                return True
-            return False
-
-        if is_list_or_tuple(padding) and len(padding) == 5:
-            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
+        if isinstance(padding, (list, tuple)) and len(padding) == 5:
+            if isinstance(padding[0], (list, tuple)) and (
+                data_format == "NCDHW"
+            ):
                 if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1965,7 +1963,9 @@ def conv3d_transpose(
                     )
                 padding = padding[2:5]
                 padding = [ele for a_list in padding for ele in a_list]
-            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
+            elif isinstance(padding[0], (list, tuple)) and (
+                data_format == "NDHWC"
+            ):
                 if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1975,7 +1975,7 @@ def conv3d_transpose(
                 padding = [ele for a_list in padding for ele in a_list]
             padding = paddle.utils.convert_to_list(padding, 6, 'padding')
-        elif is_list_or_tuple(padding) and len(padding) == 6:
+        elif isinstance(padding, (list, tuple)) and len(padding) == 6:
             padding = paddle.utils.convert_to_list(padding, 6, 'padding')
         else:
...
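In the conv2d/conv3d/conv2d_transpose/conv3d_transpose hunks above, the refactor also deletes the nested is_list_or_tuple helper and inlines the merged isinstance call at each use site; besides satisfying PLR1701, this stops redefining a trivial predicate on every _update_padding call. A rough sketch of the two shapes (hypothetical names, not the real Paddle signatures):

    # Before: a one-off predicate redefined inside the enclosing function.
    def update_padding_old(padding):
        def is_list_or_tuple(ele):
            if isinstance(ele, list) or isinstance(ele, tuple):
                return True
            return False

        return is_list_or_tuple(padding) and len(padding) == 4

    # After: the tuple form of isinstance inlined directly.
    def update_padding_new(padding):
        return isinstance(padding, (list, tuple)) and len(padding) == 4

    assert update_padding_old([0, 0, 0, 0]) == update_padding_new([0, 0, 0, 0])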
@@ -1438,12 +1438,7 @@ class QuantizationFreezePass:
         return "%s.dequantized" % (var_name)
 
     def _is_float(self, v):
-        return (
-            isinstance(v, float)
-            or isinstance(v, np.float16)
-            or isinstance(v, np.float32)
-            or isinstance(v, np.float64)
-        )
+        return isinstance(v, (float, np.float16, np.float32, np.float64))
 
 
 class ConvertToInt8Pass:
...
@@ -60,10 +60,7 @@ class TestFuseLinearBn(unittest.TestCase):
         quant_h = ptq.quantize(model_h, fuse=True, fuse_list=f_l)
         for name, layer in quant_model.named_sublayers():
             if name in f_l:
-                assert not (
-                    isinstance(layer, nn.BatchNorm1D)
-                    or isinstance(layer, nn.BatchNorm2D)
-                )
+                assert not (isinstance(layer, (nn.BatchNorm1D, nn.BatchNorm2D)))
         out = model(inputs)
         out_h = model_h(inputs)
         out_quant = quant_model(inputs)
@@ -294,10 +291,7 @@ class TestImperativePTQfuse(TestImperativePTQ):
         quant_model = self.ptq.quantize(model, fuse=True, fuse_list=f_l)
         for name, layer in quant_model.named_sublayers():
             if name in f_l:
-                assert not (
-                    isinstance(layer, nn.BatchNorm1D)
-                    or isinstance(layer, nn.BatchNorm2D)
-                )
+                assert not (isinstance(layer, (nn.BatchNorm1D, nn.BatchNorm2D)))
         before_acc_top1 = self.model_test(
             quant_model, self.batch_num, self.batch_size
         )
...
@@ -238,11 +238,7 @@ def generate_layer_fn(op_type):
         outputs = {}
         out = kwargs.pop(_convert_(o_name), [])
         if out:
-            out_var = (
-                out[0]
-                if (isinstance(out, list) or isinstance(out, tuple))
-                else out
-            )
+            out_var = out[0] if isinstance(out, (list, tuple)) else out
         else:
             out_var = helper.create_variable_for_type_inference(dtype=dtype)
         outputs[o_name] = [out_var]
...
@@ -1651,7 +1651,7 @@ def add_n(inputs, name=None):
     else:
         helper = LayerHelper('add_n', **locals())
         check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
-        if isinstance(inputs, list) or isinstance(inputs, tuple):
+        if isinstance(inputs, (list, tuple)):
            if len(inputs) > 0:
                for input in inputs:
                    check_variable_and_dtype(
...
@@ -495,7 +495,7 @@ def prior_box(
     """
 
     def _is_list_or_tuple_(data):
-        return isinstance(data, list) or isinstance(data, tuple)
+        return isinstance(data, (list, tuple))
 
     if not _is_list_or_tuple_(min_sizes):
         min_sizes = [min_sizes]
...
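The merge is equally valid under negation, as in the SizeTensor and BatchNorm assertions above: by De Morgan's laws, "not isinstance(x, A) and not isinstance(x, B)" is the same as "not isinstance(x, (A, B))", and "not (isinstance(x, A) or isinstance(x, B))" reduces to the same form. A quick equivalence check (illustrative values only):

    for x in (3, 3.0, "3", [3], (3,)):
        chained = not isinstance(x, list) and not isinstance(x, tuple)
        merged = not isinstance(x, (list, tuple))
        assert chained == merged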