Unverified · Commit c1838da6 authored by K Kim, committed by GitHub

[CodeStyle][PLR1701] unify multiple isinstance expressions as one (#52150)

Parent c05feb90
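The rewrite is behavior-preserving: `isinstance` accepts a tuple of types, so a chain of `isinstance` calls joined with `or` collapses into a single call. A minimal sketch of the pattern applied throughout this diff (the `describe` functions below are illustrative, not taken from the commit):

```python
def describe(value):
    # Before: repeated isinstance calls joined with `or`.
    if isinstance(value, int) or isinstance(value, float):
        return "number"
    return "other"


def describe_merged(value):
    # After: one isinstance call with a tuple of types; behaviour is identical.
    if isinstance(value, (int, float)):
        return "number"
    return "other"


# The negated form merges the same way, via De Morgan's law:
# `not isinstance(x, list) and not isinstance(x, tuple)`
# becomes `not isinstance(x, (list, tuple))`.
assert describe(3.5) == describe_merged(3.5) == "number"
assert describe("3.5") == describe_merged("3.5") == "other"
```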
@@ -250,7 +250,7 @@ def compute_fbank_matrix(
     if norm == 'slaney':
         enorm = 2.0 / (mel_f[2 : n_mels + 2] - mel_f[:n_mels])
         weights *= enorm.unsqueeze(1)
-    elif isinstance(norm, int) or isinstance(norm, float):
+    elif isinstance(norm, (int, float)):
         weights = paddle.nn.functional.normalize(weights, p=norm, axis=-1)
     return weights
......
@@ -323,7 +323,7 @@ class DistributedOperatorHelper:
         output = self._serial_op(*args, **kwargs)
         new_op_size = len(cur_block.ops)
-        if isinstance(output, tuple) or isinstance(output, list):
+        if isinstance(output, (tuple, list)):
             new_output = list(output)
         elif isinstance(output, Variable):
             new_output = [output]
......
@@ -1948,9 +1948,7 @@ class Resharder:
                 )
                 idx = idx_list[0]
-            elif isinstance(op_desc, SliceOpDesc) or isinstance(
-                op_desc, AllGatherConcatOpDesc
-            ):
+            elif isinstance(op_desc, (SliceOpDesc, AllGatherConcatOpDesc)):
                 target_tensor = None
                 if isinstance(op_desc, SliceOpDesc):
                     assert (
......
@@ -425,7 +425,7 @@ class UtilBase:
         def feed_gen(batch_size, feeded_vars_dims, feeded_vars_filelist):
             def reader(batch_size, fn, dim):
                 data = []
-                if isinstance(dim, list) or isinstance(dim, tuple):
+                if isinstance(dim, (list, tuple)):
                     shape = list(dim)
                     _temp = 1
                     for x in dim:
......
@@ -45,11 +45,14 @@ class LocalSGDOptimizer(MetaOptimizerBase):
         if self.role_maker._worker_num() <= 1:
             return False

-        return (
-            isinstance(self.inner_opt, paddle.optimizer.momentum.Momentum)
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.Momentum)
-            or isinstance(self.inner_opt, paddle.optimizer.sgd.SGD)
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.SGD)
+        return isinstance(
+            self.inner_opt,
+            (
+                paddle.optimizer.momentum.Momentum,
+                paddle.fluid.optimizer.Momentum,
+                paddle.optimizer.sgd.SGD,
+                paddle.fluid.optimizer.SGD,
+            ),
         )

     def _disable_strategy(self, dist_strategy):
@@ -228,11 +231,14 @@ class AdaptiveLocalSGDOptimizer(MetaOptimizerBase):
         if self.role_maker._worker_num() <= 1:
             return False

-        return (
-            isinstance(self.inner_opt, paddle.optimizer.Momentum)
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.Momentum)
-            or isinstance(self.inner_opt, paddle.optimizer.sgd.SGD)
-            or isinstance(self.inner_opt, paddle.fluid.optimizer.SGD)
+        return isinstance(
+            self.inner_opt,
+            (
+                paddle.optimizer.Momentum,
+                paddle.fluid.optimizer.Momentum,
+                paddle.optimizer.sgd.SGD,
+                paddle.fluid.optimizer.SGD,
+            ),
         )

     def _disable_strategy(self, dist_strategy):
......
@@ -795,9 +795,7 @@ class TrtLayerAutoScanTest(AutoScanTest):
                 if isinstance(threshold, float):
                     atol = threshold
                     rtol = 1e-8
-                elif isinstance(threshold, list) or isinstance(
-                    threshold, tuple
-                ):
+                elif isinstance(threshold, (list, tuple)):
                     atol = threshold[0]
                     rtol = threshold[1]
                 else:
......
@@ -110,7 +110,7 @@ class TestBilinearInterpOneDNNOp(OpTest):
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 scale_h = float(self.scale)
                 scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
......
@@ -104,7 +104,7 @@ class TestNearestInterpV2MKLDNNOp(OpTest):
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 scale_h = float(self.scale)
                 scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
......
@@ -37,17 +37,15 @@ def bicubic_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
-        if not isinstance(SizeTensor, list) and not isinstance(
-            SizeTensor, tuple
-        ):
+        if not isinstance(SizeTensor, (list, tuple)):
             SizeTensor = [SizeTensor]
     return paddle._C_ops.bicubic_interp(
         x,
@@ -197,7 +195,7 @@ class TestBicubicInterpOp(OpTest):
         in_w = self.input_shape[2]
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -236,7 +234,7 @@ class TestBicubicInterpOp(OpTest):
             'data_layout': self.data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
......
@@ -37,12 +37,12 @@ def bilinear_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
         if not isinstance(SizeTensor, list) and not isinstance(
@@ -169,7 +169,7 @@ class TestBilinearInterpOp(OpTest):
         scale_h = 0
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -210,7 +210,7 @@ class TestBilinearInterpOp(OpTest):
             'data_layout': self.data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -363,7 +363,7 @@ class TestBilinearInterpOpUint8(OpTest):
         ).astype("uint8")
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -400,7 +400,7 @@ class TestBilinearInterpOpUint8(OpTest):
             'align_mode': self.align_mode,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -537,7 +537,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float32")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -564,7 +564,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
             self.attrs['out_h'] = self.out_h
             self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
......
@@ -114,9 +114,7 @@ def operator_equal(a, b):
             raise ValueError("In operator_equal not equal\n")

     for k, v in a.__dict__.items():
-        if isinstance(v, fluid.framework.Program) or isinstance(
-            v, fluid.framework.Block
-        ):
+        if isinstance(v, (fluid.framework.Program, fluid.framework.Block)):
             continue
         elif isinstance(v, core.OpDesc):
@@ -137,13 +135,10 @@ def operator_equal(a, b):

 def block_equal(a, b):
     for k, v in a.__dict__.items():
-        if (
-            isinstance(v, core.ProgramDesc)
-            or isinstance(v, fluid.framework.Program)
-            or isinstance(v, core.BlockDesc)
+        if isinstance(
+            v, (core.ProgramDesc, fluid.framework.Program, core.BlockDesc)
         ):
             continue
         elif k == "ops":
             assert len(a.ops) == len(b.ops)
             for i in range(0, len(a.ops)):
......
@@ -38,12 +38,12 @@ def linear_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
         if not isinstance(SizeTensor, list) and not isinstance(
@@ -138,7 +138,7 @@ class TestLinearInterpOp(OpTest):
         in_w = self.input_shape[1]
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = float(self.scale)
             if isinstance(self.scale, list):
                 self.scale = float(self.scale[0])
@@ -170,7 +170,7 @@ class TestLinearInterpOp(OpTest):
             'data_layout': self.data_layout,
         }
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = [float(self.scale)]
             self.attrs['scale'] = self.scale
         self.outputs = {'Out': output_np}
@@ -262,7 +262,7 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
         in_w = self.input_shape[1]
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = float(self.scale)
             if isinstance(self.scale, list):
                 self.scale = float(self.scale[0])
@@ -302,7 +302,7 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
             'data_layout': self.data_layout,
         }
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
                 self.scale = [self.scale[0], self.scale[0]]
@@ -343,7 +343,7 @@ class TestResizeLinearOpUint8(OpTest):
         input_np = np.random.random(self.input_shape).astype("uint8")
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = float(self.scale)
             if isinstance(self.scale, list):
                 self.scale = float(self.scale[0])
@@ -371,7 +371,7 @@ class TestResizeLinearOpUint8(OpTest):
             'align_mode': self.align_mode,
        }
         if self.scale > 0:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
                 self.scale = [self.scale[0], self.scale[0]]
......
@@ -39,12 +39,12 @@ def nearest_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
         if not isinstance(SizeTensor, list) and not isinstance(
@@ -233,7 +233,7 @@ class TestNearestInterpOp(OpTest):
         scale_h = 0
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -305,7 +305,7 @@ class TestNearestInterpOp(OpTest):
             'data_layout': self.data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -438,7 +438,7 @@ class TestNearestInterpOpUint8(OpTest):
         ).astype("uint8")
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -472,7 +472,7 @@ class TestNearestInterpOpUint8(OpTest):
             'align_corners': self.align_corners,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -583,7 +583,7 @@ class TestNearestInterpOp_attr_tensor(OpTest):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float64")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -610,7 +610,7 @@ class TestNearestInterpOp_attr_tensor(OpTest):
             self.attrs['out_h'] = self.out_h
             self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
......
@@ -39,12 +39,12 @@ def trilinear_interp_test(
     align_corners=True,
     align_mode=0,
 ):
-    if isinstance(scale, float) or isinstance(scale, int):
+    if isinstance(scale, (float, int)):
         scale_list = []
         for _ in range(len(x.shape) - 2):
             scale_list.append(scale)
         scale = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         scale = list(map(float, scale))
     if SizeTensor is not None:
         if not isinstance(SizeTensor, list) and not isinstance(
@@ -219,7 +219,7 @@ class TestTrilinearInterpOp(OpTest):
         in_w = self.input_shape[3]
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -270,7 +270,7 @@ class TestTrilinearInterpOp(OpTest):
             'data_layout': data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -434,7 +434,7 @@ class TestTrilinearInterpOpUint8(OpTest):
         ).astype("uint8")
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -477,7 +477,7 @@ class TestTrilinearInterpOpUint8(OpTest):
             'align_mode': self.align_mode,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -611,7 +611,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float32")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -642,7 +642,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
             self.attrs['out_h'] = self.out_h
             self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
......
@@ -71,7 +71,7 @@ def create_op(scope, op_type, inputs, outputs, attrs, cache_list=None):

 def set_input(scope, op, inputs, place):
     def __set_input__(var_name, var):
-        if isinstance(var, tuple) or isinstance(var, np.ndarray):
+        if isinstance(var, (tuple, np.ndarray)):
             tensor = scope.find_var(var_name).get_tensor()
             if isinstance(var, tuple):
                 tensor.set_recursive_sequence_lengths(var[1])
......
@@ -141,7 +141,7 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper):
         scale_h = 0
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -182,7 +182,7 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper):
             'data_layout': self.data_layout,
         }
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0.0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -389,7 +389,7 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float32")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -416,7 +416,7 @@ class XPUTestBilinearInterpV2Op(XPUOpTestWrapper):
             self.attrs['out_h'] = self.out_h
             self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
......
@@ -212,7 +212,7 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper):
         scale_h = 0
         scale_w = 0
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_d = scale_h = scale_w = float(self.scale)
                     self.scale = [self.scale]
@@ -450,7 +450,7 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper):
         if self.scale_by_1Dtensor:
             self.inputs['Scale'] = np.array([self.scale]).astype("float32")
         elif self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     scale_h = scale_w = float(self.scale)
             if isinstance(self.scale, list) and len(self.scale) == 1:
@@ -477,7 +477,7 @@ class XPUNearestInterpOpWrapper(XPUOpTestWrapper):
             self.attrs['out_h'] = self.out_h
             self.attrs['out_w'] = self.out_w
         if self.scale:
-            if isinstance(self.scale, float) or isinstance(self.scale, int):
+            if isinstance(self.scale, (float, int)):
                 if self.scale > 0:
                     self.scale = [self.scale]
             if isinstance(self.scale, list) and len(self.scale) == 1:
......
@@ -81,11 +81,7 @@ class ProgressBar:
             for i, (k, val) in enumerate(values):
                 if k == "loss":
-                    val = (
-                        val
-                        if isinstance(val, list) or isinstance(val, np.ndarray)
-                        else [val]
-                    )
+                    val = val if isinstance(val, (list, np.ndarray)) else [val]
                     if isinstance(val[0], np.uint16):
                         values[i] = ("loss", list(convert_uint16_to_float(val)))
......
@@ -192,7 +192,7 @@ def load_var(var_name, shape_list, dtype, save_path):

 def reader(batch_size, fn, dim):
     data = []
-    if isinstance(dim, list) or isinstance(dim, tuple):
+    if isinstance(dim, (list, tuple)):
         shape = list(dim)
         _temp = 1
         for x in dim:
......
@@ -915,9 +915,7 @@ def save(layer, path, input_spec=None, **configs):
     )

     if not (
-        isinstance(layer, Layer)
-        or inspect.isfunction(layer)
-        or isinstance(layer, StaticFunction)
+        isinstance(layer, (Layer, StaticFunction)) or inspect.isfunction(layer)
     ):
         raise TypeError(
             "The input of paddle.jit.save should be 'Layer' or 'Function', but received input type is %s."
......
@@ -179,9 +179,7 @@ class StaticAnalysisVisitor:
             self.ancestor_wrappers.append(cur_wrapper)
             for child in gast.iter_child_nodes(node):
-                if isinstance(child, gast.FunctionDef) or isinstance(
-                    child, gast.AsyncFunctionDef
-                ):
+                if isinstance(child, (gast.FunctionDef, gast.AsyncFunctionDef)):
                     # TODO: current version is function name mapping to its type
                     # consider complex case involving parameters
                     self.var_env.enter_scope(
......
@@ -399,9 +399,7 @@ def interpolate(
     if size is None and scale_factor is None:
         raise ValueError("One of size and scale_factor must not be None.")
-    if (isinstance(size, list) or isinstance(size, tuple)) and len(
-        size
-    ) != x.ndim - 2:
+    if isinstance(size, (tuple, list)) and (len(size) != x.ndim - 2):
         raise ValueError(
             'The x and size should satisfy rank(x) - 2 == len(size).'
         )
@@ -427,11 +425,7 @@ def interpolate(
         )
     if resample == 'AREA':
-        if (
-            isinstance(size, list)
-            or isinstance(size, tuple)
-            or isinstance(size, Variable)
-        ):
+        if isinstance(size, (list, tuple, Variable)):
             if len(size) == 0:
                 raise ValueError("output size can not be empty")
         if size is None:
@@ -464,7 +458,7 @@ def interpolate(
     )

     def _is_list_or_turple_(data):
-        return isinstance(data, list) or isinstance(data, tuple)
+        return isinstance(data, (list, tuple))

     if data_format == 'NCHW' or data_format == 'NCDHW' or data_format == 'NCW':
         data_layout = 'NCHW'
@@ -581,18 +575,14 @@ def interpolate(
     if isinstance(scale, Variable):
         scale.stop_gradient = True
         inputs["Scale"] = scale
-    elif (
-        isinstance(scale, float)
-        or isinstance(scale, int)
-        or isinstance(scale, numpy.ndarray)
-    ):
+    elif isinstance(scale, (float, int, numpy.ndarray)):
         if scale <= 0:
             raise ValueError("Attr(scale) should be greater than zero.")
         scale_list = []
         for i in range(len(x.shape) - 2):
             scale_list.append(scale)
         attrs['scale'] = list(map(float, scale_list))
-    elif isinstance(scale, list) or isinstance(scale, tuple):
+    elif isinstance(scale, (list, tuple)):
         if len(scale) != len(x.shape) - 2:
             raise ValueError(
                 "scale_shape length should be {} for "
@@ -2275,7 +2265,7 @@ def fold(
     assert len(x.shape) == 3, "input should be the format of [N, C, L]"

     def _is_list_or_turple_(data):
-        return isinstance(data, list) or isinstance(data, tuple)
+        return isinstance(data, (list, tuple))

     if isinstance(output_sizes, int):
         output_sizes = [output_sizes, output_sizes]
......
@@ -50,9 +50,7 @@ class ConstantInitializer(Initializer):
         """
         block = self._check_block(block)

-        assert isinstance(var, framework.Variable) or isinstance(
-            var, framework.EagerParamBase
-        )
+        assert isinstance(var, (framework.Variable, framework.EagerParamBase))
         assert isinstance(block, framework.Block)

         if in_dygraph_mode():
......
@@ -1860,7 +1860,7 @@ class Layer:
                     raise ValueError(
                         "{} is not found in the provided dict.".format(key)
                     )
-                if isinstance(state, dict) or isinstance(state, list):
+                if isinstance(state, (dict, list)):
                     if len(state) != len(param):
                         missing_keys.append(key)
                         raise ValueError(
......
@@ -593,7 +593,7 @@ class RNNCellBase(Layer):
         def _is_shape_sequence(seq):
             """For shape, list/tuple of integer is the finest-grained objection"""
-            if isinstance(seq, list) or isinstance(seq, tuple):
+            if isinstance(seq, (list, tuple)):
                 if reduce(
                     lambda flag, x: isinstance(x, int) and flag, seq, True
                 ):
......
@@ -1087,11 +1087,7 @@ class MAOutputScaleLayer(Layer):
     def forward(self, *inputs, **kwargs):
         out = self._layer(*inputs, **kwargs)
         # TODO (jc): support the ops of several outputs
-        if (
-            isinstance(out, list)
-            or isinstance(out, tuple)
-            or isinstance(out, dict)
-        ):
+        if isinstance(out, (list, tuple, dict)):
             return out
         else:
             return self._ma_output_scale(out)
@@ -1129,7 +1125,7 @@ class FakeQuantMAOutputScaleLayer(Layer):
     def forward(self, *inputs, **kwargs):
         out = self._layer(*inputs, **kwargs)
         # TODO (jc): support the ops of several outputs
-        if (isinstance(out, list) or isinstance(out, tuple)) and len(out) > 1:
+        if (isinstance(out, (list, tuple))) and len(out) > 1:
             return out
         else:
             return self._fake_quant_output(out)
......
@@ -791,11 +791,7 @@ class LinearWarmup(LRScheduler):
         last_epoch=-1,
         verbose=False,
     ):
-        type_check = (
-            isinstance(learning_rate, float)
-            or isinstance(learning_rate, int)
-            or isinstance(learning_rate, LRScheduler)
-        )
+        type_check = isinstance(learning_rate, (float, int, LRScheduler))
         if not type_check:
             raise TypeError(
                 "the type of learning_rate should be [int, float or LRScheduler], the current type is {}".format(
......
@@ -961,13 +961,11 @@ def conv2d(
     # padding
     def _update_padding(padding, data_format):
-        def is_list_or_tuple(ele):
-            if isinstance(ele, list) or isinstance(ele, tuple):
-                return True
-            return False
-
-        if is_list_or_tuple(padding) and len(padding) == 4:
-            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
+        if isinstance(padding, (list, tuple)) and len(padding) == 4:
+            if isinstance(padding[0], (list, tuple)) and (
+                data_format == "NCHW"
+            ):
                 if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -975,7 +973,9 @@ def conv2d(
                     )
                 padding = padding[2:4]
                 padding = [ele for a_list in padding for ele in a_list]
-            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
+            elif isinstance(padding[0], (list, tuple)) and (
+                data_format == "NHWC"
+            ):
                 if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1257,13 +1257,11 @@ def conv3d(
     dilation = paddle.utils.convert_to_list(dilation, 3, 'dilation')

     def _update_padding(padding, data_format):
-        def is_list_or_tuple(ele):
-            if isinstance(ele, list) or isinstance(ele, tuple):
-                return True
-            return False
-
-        if is_list_or_tuple(padding) and len(padding) == 5:
-            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
+        if isinstance(padding, (list, tuple)) and len(padding) == 5:
+            if isinstance(padding[0], (list, tuple)) and (
+                data_format == "NCDHW"
+            ):
                 if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1271,7 +1269,9 @@ def conv3d(
                     )
                 padding = padding[2:5]
                 padding = [ele for a_list in padding for ele in a_list]
-            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
+            elif isinstance(padding[0], (list, tuple)) and (
+                data_format == "NDHWC"
+            ):
                 if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1282,7 +1282,7 @@ def conv3d(
                 padding = paddle.utils.convert_to_list(padding, 6, 'padding')
                 if paddle.utils._is_symmetric_padding(padding, 3):
                     padding = [padding[0], padding[2], padding[4]]
-        elif is_list_or_tuple(padding) and len(padding) == 6:
+        elif isinstance(padding, (list, tuple)) and len(padding) == 6:
             padding = paddle.utils.convert_to_list(padding, 6, 'padding')
             if paddle.utils._is_symmetric_padding(padding, 3):
                 padding = [padding[0], padding[2], padding[4]]
@@ -1580,13 +1580,11 @@ def conv2d_transpose(
         raise ValueError("use_cudnn should be True or False")

     def _update_padding(padding, data_format):
-        def is_list_or_tuple(ele):
-            if isinstance(ele, list) or isinstance(ele, tuple):
-                return True
-            return False
-
-        if is_list_or_tuple(padding) and len(padding) == 4:
-            if is_list_or_tuple(padding[0]) and (data_format == "NCHW"):
+        if isinstance(padding, (list, tuple)) and len(padding) == 4:
+            if isinstance(padding[0], (list, tuple)) and (
+                data_format == "NCHW"
+            ):
                 if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1594,7 +1592,9 @@ def conv2d_transpose(
                     )
                 padding = padding[2:4]
                 padding = [ele for a_list in padding for ele in a_list]
-            elif is_list_or_tuple(padding[0]) and (data_format == "NHWC"):
+            elif isinstance(padding[0], (list, tuple)) and (
+                data_format == "NHWC"
+            ):
                 if not (padding[0] == [0, 0] and padding[3] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1951,13 +1951,11 @@ def conv3d_transpose(
         raise ValueError("use_cudnn should be True or False")

     def _update_padding(padding, data_format):
-        def is_list_or_tuple(ele):
-            if isinstance(ele, list) or isinstance(ele, tuple):
-                return True
-            return False
-
-        if is_list_or_tuple(padding) and len(padding) == 5:
-            if is_list_or_tuple(padding[0]) and (data_format == "NCDHW"):
+        if isinstance(padding, (list, tuple)) and len(padding) == 5:
+            if isinstance(padding[0], (list, tuple)) and (
+                data_format == "NCDHW"
+            ):
                 if not (padding[0] == [0, 0] and padding[1] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1965,7 +1963,9 @@ def conv3d_transpose(
                     )
                 padding = padding[2:5]
                 padding = [ele for a_list in padding for ele in a_list]
-            elif is_list_or_tuple(padding[0]) and (data_format == "NDHWC"):
+            elif isinstance(padding[0], (list, tuple)) and (
+                data_format == "NDHWC"
+            ):
                 if not (padding[0] == [0, 0] and padding[4] == [0, 0]):
                     raise ValueError(
                         "Non-zero padding(%s) in the batch or channel dimensions "
@@ -1975,7 +1975,7 @@ def conv3d_transpose(
                 padding = [ele for a_list in padding for ele in a_list]
                 padding = paddle.utils.convert_to_list(padding, 6, 'padding')
-        elif is_list_or_tuple(padding) and len(padding) == 6:
+        elif isinstance(padding, (list, tuple)) and len(padding) == 6:
             padding = paddle.utils.convert_to_list(padding, 6, 'padding')
         else:
......
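The conv2d/conv3d/conv2d_transpose/conv3d_transpose hunks above go a step further and delete the local `is_list_or_tuple` helper entirely: once `isinstance` takes the tuple of types directly, the wrapper adds nothing. A small sketch of the equivalence (the `padding` value is a made-up example):

```python
# The helper removed in the hunks above, reproduced for comparison:
def is_list_or_tuple(ele):
    if isinstance(ele, list) or isinstance(ele, tuple):
        return True
    return False


# A made-up nested padding value, shaped like the conv2d NCHW case:
padding = [[0, 0], [0, 0], [1, 2], [3, 4]]

# The direct tuple-typed check agrees with the helper, so call sites inline it.
assert is_list_or_tuple(padding) == isinstance(padding, (list, tuple))
assert isinstance(padding[0], (list, tuple))  # the nested check used for NCHW/NHWC
```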
@@ -1438,12 +1438,7 @@ class QuantizationFreezePass:
         return "%s.dequantized" % (var_name)

     def _is_float(self, v):
-        return (
-            isinstance(v, float)
-            or isinstance(v, np.float16)
-            or isinstance(v, np.float32)
-            or isinstance(v, np.float64)
-        )
+        return isinstance(v, (float, np.float16, np.float32, np.float64))


 class ConvertToInt8Pass:
......
@@ -60,10 +60,7 @@ class TestFuseLinearBn(unittest.TestCase):
         quant_h = ptq.quantize(model_h, fuse=True, fuse_list=f_l)
         for name, layer in quant_model.named_sublayers():
             if name in f_l:
-                assert not (
-                    isinstance(layer, nn.BatchNorm1D)
-                    or isinstance(layer, nn.BatchNorm2D)
-                )
+                assert not (isinstance(layer, (nn.BatchNorm1D, nn.BatchNorm2D)))
         out = model(inputs)
         out_h = model_h(inputs)
         out_quant = quant_model(inputs)
@@ -294,10 +291,7 @@ class TestImperativePTQfuse(TestImperativePTQ):
         quant_model = self.ptq.quantize(model, fuse=True, fuse_list=f_l)
         for name, layer in quant_model.named_sublayers():
             if name in f_l:
-                assert not (
-                    isinstance(layer, nn.BatchNorm1D)
-                    or isinstance(layer, nn.BatchNorm2D)
-                )
+                assert not (isinstance(layer, (nn.BatchNorm1D, nn.BatchNorm2D)))
         before_acc_top1 = self.model_test(
             quant_model, self.batch_num, self.batch_size
         )
......
@@ -238,11 +238,7 @@ def generate_layer_fn(op_type):
     outputs = {}
     out = kwargs.pop(_convert_(o_name), [])
     if out:
-        out_var = (
-            out[0]
-            if (isinstance(out, list) or isinstance(out, tuple))
-            else out
-        )
+        out_var = out[0] if isinstance(out, (list, tuple)) else out
     else:
         out_var = helper.create_variable_for_type_inference(dtype=dtype)
     outputs[o_name] = [out_var]
......
@@ -1651,7 +1651,7 @@ def add_n(inputs, name=None):
     else:
         helper = LayerHelper('add_n', **locals())
         check_type(inputs, 'inputs', (Variable, tuple, list), 'add_n')
-        if isinstance(inputs, list) or isinstance(inputs, tuple):
+        if isinstance(inputs, (list, tuple)):
             if len(inputs) > 0:
                 for input in inputs:
                     check_variable_and_dtype(
......
@@ -495,7 +495,7 @@ def prior_box(
     """

     def _is_list_or_tuple_(data):
-        return isinstance(data, list) or isinstance(data, tuple)
+        return isinstance(data, (list, tuple))

     if not _is_list_or_tuple_(min_sizes):
         min_sizes = [min_sizes]