Unverified commit c0697296, authored by 张春乔, committed by GitHub

[CodeStyle][UP034] remove (()) cases (#52060)

* add up34

* modify var name in loop

* revert changes in test_slice

* Revert "modify var name in loop"

This reverts commit 6d748e371afb417054ed0c6b36fd11e87959a90d.

* temporarily ignore test_slice.py

* add comment

* empty commit, re-trigger all ci

* fix inc

---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
Parent 8082ba8a
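For context: pyupgrade's UP034 rule ("extraneous parentheses", enabled in Ruff as "UP034" in the pyproject.toml hunk below) removes a grouping level that has no effect on evaluation. A minimal sketch of the pattern, not taken from this commit:

    # Flagged by UP034: the extra parentheses around the sole argument are a no-op.
    print(("hello"))
    total = int(((1 + 2) * 3))

    # After the autofix, meaning is unchanged:
    print("hello")
    total = int((1 + 2) * 3)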
@@ -26,7 +26,7 @@ def main(forward_op_yaml_paths, backward_op_yaml_paths):
         with open(op_yaml_path, "rt", encoding="utf-8") as f:
             op_list = yaml.safe_load(f)
             if op_list is not None:
-                ops.update(to_named_dict((op_list)))
+                ops.update(to_named_dict(op_list))
     cross_validate(ops)
...
@@ -80,7 +80,7 @@ select = [
     # "UP031",
     # "UP032",
     "UP033",
-    # "UP034",
+    "UP034",
     "UP035",
     "UP036",
@@ -168,6 +168,11 @@ unfixable = [
 ]

 [tool.ruff.per-file-ignores]
+# Ignore unused imports in __init__.py
 "__init__.py" = ["F401"]
+# Temporarily ignore test_slice.py to avoid PR-CI-CINN failure, please fix!
+"python/paddle/fluid/tests/unittests/dygraph_to_static/test_slice.py" = ["UP034"]
+# Ignore version check in setup.py
 "setup.py" = ["UP036"]
+# Ignore unnecessary lambda in dy2st unittest test_lambda
 "python/paddle/fluid/tests/unittests/dygraph_to_static/test_lambda.py" = ["PLC3002"]
...
@@ -270,7 +270,7 @@ def profiler(args):
             with open(result_path, 'w') as fp:
                 json.dump(result_dict, fp)
-            print("profile done! avg speed : {} step / s.".format((avg_tput)))
+            print("profile done! avg speed : {} step / s.".format(avg_tput))
     except paddle.framework.core.EOFException:
         data_loader._inner_dataloader.reset()
...
@@ -219,7 +219,7 @@ class AscendParserBase:
         tensor = core.GETensor(tensor_desc)

         data = (
-            (value * np.ones((shape)))
+            (value * np.ones(shape))
             .reshape(shape)
             .astype(self.ascend_helper.dtype2np(dtype))
         )
@@ -282,7 +282,7 @@ class AscendParserBase:
         )
         tensor = core.GETensor(tensor_desc)
-        data = np.ones((2)).astype("int32").reshape([2])
+        data = np.ones(2).astype("int32").reshape([2])
         data[0] = 64
         buf = data.tobytes()
         data_8 = np.frombuffer(buf, dtype=np.uint8)
...
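The `np.ones((2))` → `np.ones(2)` rewrites here (and throughout this commit) are purely cosmetic: in Python, `(2)` is just the integer 2, and only a trailing comma makes a one-element tuple. A quick illustration, as a hedged sketch rather than part of the diff:

    import numpy as np

    assert (2) == 2                          # parentheses alone do not create a tuple
    assert np.ones((2)).shape == (2,)        # same as np.ones(2): a length-2 vector
    assert np.ones((2,)).shape == (2,)       # the comma is what makes a tuple
    assert np.ones((2, 3)).shape == (2, 3)   # multi-dim shapes still need a real tuple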
@@ -241,7 +241,7 @@ class Normal(distribution.Distribution):
             )
             return paddle.add(
                 0.5 + zero_tmp,
-                0.5 * math.log(2 * math.pi) + paddle.log((self.scale + zero_tmp)),
+                0.5 * math.log(2 * math.pi) + paddle.log(self.scale + zero_tmp),
                 name=name,
             )
...
@@ -1185,7 +1185,7 @@ class TestStickBreakingTransform(unittest.TestCase):
     def test_codomain(self):
         self.assertTrue(isinstance(self._t._codomain, variable.Variable))

-    @param.param_func(((np.random.random((10)),),))
+    @param.param_func(((np.random.random(10),),))
     def test_forward(self, input):
         np.testing.assert_allclose(
             self._t.inverse(self._t.forward(paddle.to_tensor(input))),
@@ -1202,7 +1202,7 @@ class TestStickBreakingTransform(unittest.TestCase):
     def test_inverse_shape(self, shape, expected_shape):
         self.assertEqual(self._t.inverse_shape(shape), expected_shape)

-    @param.param_func(((np.random.random((10)),),))
+    @param.param_func(((np.random.random(10),),))
     def test_forward_log_det_jacobian(self, x):
         self.assertEqual(
             self._t.forward_log_det_jacobian(paddle.to_tensor(x)).shape, [1]
...
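Note that only the parentheses around `10` are removed in the decorators above; the surrounding `((...,),)` nesting is load-bearing, since `param.param_func` receives a tuple of per-case argument tuples. A hedged sketch of the shapes involved:

    import numpy as np

    params = ((np.random.random(10),),)   # one test case with one argument
    assert len(params) == 1               # outer tuple: the list of cases
    assert len(params[0]) == 1            # inner tuple: arguments of that case
    assert params[0][0].shape == (10,)    # size 10 either way: (10) is just the int 10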
@@ -456,7 +456,7 @@ def _dygraph_fn():
     x = np.random.random((1, 3)).astype('float32')
     with fluid.dygraph.guard():
         fluid.dygraph.to_variable(x)
-        np.random.random((1))
+        np.random.random(1)

 class TestDygraphApiRecognition(unittest.TestCase):
...
@@ -204,7 +204,7 @@ def test_optim_break_in_while(x):
 class TestContinueInFor(unittest.TestCase):
     def setUp(self):
-        self.input = np.zeros((1)).astype('int64')
+        self.input = np.zeros(1).astype('int64')
         self.place = (
             fluid.CUDAPlace(0)
             if fluid.is_compiled_with_cuda()
...
@@ -138,7 +138,7 @@ class TestConvertWithCache(unittest.TestCase):
 @to_static
 def sum_even_until_limit(max_len, limit):
-    ret_sum = fluid.dygraph.to_variable(np.zeros((1)).astype('int32'))
+    ret_sum = fluid.dygraph.to_variable(np.zeros(1).astype('int32'))
     for i in range(max_len):
         if i % 2 > 0:
             continue
@@ -150,8 +150,8 @@ def sum_even_until_limit(max_len, limit):
 def sum_under_while(limit):
-    i = fluid.dygraph.to_variable(np.zeros((1)).astype('int32'))
-    ret_sum = fluid.dygraph.to_variable(np.zeros((1)).astype('int32'))
+    i = fluid.dygraph.to_variable(np.zeros(1).astype('int32'))
+    ret_sum = fluid.dygraph.to_variable(np.zeros(1).astype('int32'))
     while i <= limit:
         ret_sum += i
         i += 1
...
@@ -171,7 +171,7 @@ def test_dic_pop_2(x):
 class TestDictPop(unittest.TestCase):
     def setUp(self):
-        self.input = np.random.random((3)).astype('int32')
+        self.input = np.random.random(3).astype('int32')
         self.place = (
             paddle.CUDAPlace(0)
             if paddle.is_compiled_with_cuda()
...
@@ -219,7 +219,7 @@ class TestListWithoutControlFlow(unittest.TestCase):
         self.init_dygraph_func()

     def init_data(self):
-        self.input = np.random.random((3)).astype('int32')
+        self.input = np.random.random(3).astype('int32')

     def init_dygraph_func(self):
         self.all_dygraph_funcs = [
@@ -275,7 +275,7 @@ class TestListInIf(TestListWithoutControlFlow):
 class TestListInWhileLoop(TestListWithoutControlFlow):
     def init_data(self):
-        self.input = np.random.random((3)).astype('int32')
+        self.input = np.random.random(3).astype('int32')
         self.iter_num = 3

     def init_dygraph_func(self):
...
@@ -265,7 +265,7 @@ def test_return_nested(x):
 class TestReturnBase(unittest.TestCase):
     def setUp(self):
-        self.input = np.ones((1)).astype('int32')
+        self.input = np.ones(1).astype('int32')
         self.place = (
             fluid.CUDAPlace(0)
             if fluid.is_compiled_with_cuda()
...
@@ -108,7 +108,7 @@ def train_static(args, batch_generator):
     # the best cross-entropy value with label smoothing
     loss_normalizer = -(
         (1.0 - args.label_smooth_eps)
-        * np.log((1.0 - args.label_smooth_eps))
+        * np.log(1.0 - args.label_smooth_eps)
         + args.label_smooth_eps
         * np.log(
             args.label_smooth_eps / (args.trg_vocab_size - 1) + 1e-20
@@ -221,8 +221,7 @@ def train_dygraph(args, batch_generator):
     )
     # the best cross-entropy value with label smoothing
     loss_normalizer = -(
-        (1.0 - args.label_smooth_eps)
-        * np.log((1.0 - args.label_smooth_eps))
+        (1.0 - args.label_smooth_eps) * np.log(1.0 - args.label_smooth_eps)
         + args.label_smooth_eps
         * np.log(args.label_smooth_eps / (args.trg_vocab_size - 1) + 1e-20)
     )
...
@@ -123,10 +123,8 @@ class TestBase(IPUOpTest):
             pad_batch = self.batch_size - dy_batch
             for k, v in feed.items():
                 pad_size = tuple(
-                    (
-                        (0, 0 if i != 0 else pad_batch)
-                        for i in range(len(v.shape))
-                    )
+                    (0, 0 if i != 0 else pad_batch)
+                    for i in range(len(v.shape))
                 )
                 feed[k] = np.pad(v, pad_size, 'constant', constant_values=0)
...
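The IPU hunk above is the generator-expression form of the same fix: when a generator is the sole argument of a call, its own parentheses may be dropped, so `tuple((expr for ...))` and `tuple(expr for ...)` are identical. A small illustration with assumed shapes, not taken from the test:

    import numpy as np

    v = np.zeros((4, 5, 6))
    pad_batch = 3
    # Pad only the leading (batch) axis, mirroring the rewritten code.
    pad_size = tuple(
        (0, 0 if i != 0 else pad_batch) for i in range(len(v.shape))
    )
    assert pad_size == ((0, 3), (0, 0), (0, 0))
    assert np.pad(v, pad_size, 'constant', constant_values=0).shape == (7, 5, 6)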
@@ -62,24 +62,20 @@ class TestConvElementwiseAdd2ActPass(PassAutoScanTest):
         if (
             int(
-                (
-                    (
-                        input_shape[2]
-                        - (dilations[0] * (filter_shape[2] - 1) + 1)
-                    )
-                    / strides[0]
-                    + 1
-                )
+                (
+                    input_shape[2]
+                    - (dilations[0] * (filter_shape[2] - 1) + 1)
+                )
+                / strides[0]
+                + 1
             )
             <= 0
             or int(
-                (
-                    (
-                        input_shape[3]
-                        - (dilations[1] * (filter_shape[3] - 1) + 1)
-                    )
-                    / strides[1]
-                    + 1
-                )
+                (
+                    input_shape[3]
+                    - (dilations[1] * (filter_shape[3] - 1) + 1)
+                )
+                / strides[1]
+                + 1
             )
             <= 0
         ):
@@ -88,28 +84,24 @@ class TestConvElementwiseAdd2ActPass(PassAutoScanTest):
         if (
             int(
-                (
-                    (
-                        input_shape[2]
-                        + paddings[0]
-                        + paddings[1]
-                        - (dilations[0] * (filter_shape[2] - 1) + 1)
-                    )
-                    / strides[0]
-                    + 1
-                )
+                (
+                    input_shape[2]
+                    + paddings[0]
+                    + paddings[1]
+                    - (dilations[0] * (filter_shape[2] - 1) + 1)
+                )
+                / strides[0]
+                + 1
             )
             <= 0
             or int(
-                (
-                    (
-                        input_shape[3]
-                        + paddings[2]
-                        + paddings[3]
-                        - (dilations[1] * (filter_shape[3] - 1) + 1)
-                    )
-                    / strides[1]
-                    + 1
-                )
+                (
+                    input_shape[3]
+                    + paddings[2]
+                    + paddings[3]
+                    - (dilations[1] * (filter_shape[3] - 1) + 1)
+                )
+                / strides[1]
+                + 1
             )
             <= 0
         ):
@@ -206,27 +198,23 @@ class TestConvElementwiseAdd2ActPass(PassAutoScanTest):
             f_shape[0],
             int(
-                (
-                    (
-                        x_shape[2]
-                        + padding[0]
-                        + padding[1]
-                        - (dilations[0] * (f_shape[2] - 1) + 1)
-                    )
-                    / strides[0]
-                    + 1
-                )
+                (
+                    x_shape[2]
+                    + padding[0]
+                    + padding[1]
+                    - (dilations[0] * (f_shape[2] - 1) + 1)
+                )
+                / strides[0]
+                + 1
             ),
             int(
-                (
-                    (
-                        x_shape[3]
-                        + padding[2]
-                        + padding[3]
-                        - (dilations[1] * (f_shape[3] - 1) + 1)
-                    )
-                    / strides[1]
-                    + 1
-                )
+                (
+                    x_shape[3]
+                    + padding[2]
+                    + padding[3]
+                    - (dilations[1] * (f_shape[3] - 1) + 1)
+                )
+                / strides[1]
+                + 1
             ),
         ]
@@ -241,18 +229,14 @@ class TestConvElementwiseAdd2ActPass(PassAutoScanTest):
             x_shape[0],
             f_shape[0],
             int(
-                (
-                    (x_shape[2] - (dilations[0] * (f_shape[2] - 1) + 1))
-                    / strides[0]
-                    + 1
-                )
+                (x_shape[2] - (dilations[0] * (f_shape[2] - 1) + 1))
+                / strides[0]
+                + 1
             ),
             int(
-                (
-                    (x_shape[3] - (dilations[1] * (f_shape[3] - 1) + 1))
-                    / strides[1]
-                    + 1
-                )
+                (x_shape[3] - (dilations[1] * (f_shape[3] - 1) + 1))
+                / strides[1]
+                + 1
             ),
         ]
         bias_index = 1
...
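For readers of the hunks above: the expression being de-parenthesized is the standard convolution output-size formula, out = (in + pad_begin + pad_end - (dilation * (kernel - 1) + 1)) / stride + 1, truncated by `int()`. A standalone sketch (helper name ours, not from the test):

    def conv_out_size(in_size, kernel, stride, dilation, pad_begin=0, pad_end=0):
        """Spatial output size of a dilated convolution, as used in the checks above."""
        effective_kernel = dilation * (kernel - 1) + 1
        return int((in_size + pad_begin + pad_end - effective_kernel) / stride + 1)

    # A non-positive result means the kernel no longer fits, which is what the
    # test guards against before building the program.
    assert conv_out_size(32, kernel=3, stride=2, dilation=1, pad_begin=1, pad_end=1) == 16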
@@ -42,7 +42,7 @@ class TestMkldnnPreluOp(MkldnnAutoScanTest):
             elif kwargs["mode"] == "channel":
                 if len(kwargs['in_shape']) <= 1:
                     # not valid case, just return 0
-                    return np.zeros((1)).astype(np.float32)
+                    return np.zeros(1).astype(np.float32)
                 if kwargs['data_format'] == 'NCHW':
                     return np.random.random(kwargs['in_shape'][1]).astype(
                         np.float32
@@ -54,7 +54,7 @@ class TestMkldnnPreluOp(MkldnnAutoScanTest):
             else:
                 if len(kwargs['in_shape']) <= 1:
                     # not valid case, just return 0
-                    return np.zeros((1)).astype(np.float32)
+                    return np.zeros(1).astype(np.float32)
                 return np.random.random(kwargs['in_shape']).astype(np.float32)

         prelu_op = OpConfig(
...
@@ -35,7 +35,7 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
         def generate_input1(dims, batch):
             if dims == 1:
-                return np.zeros((batch)).astype(np.float32)
+                return np.zeros(batch).astype(np.float32)
             elif dims == 2:
                 return np.ones((batch, 4)).astype(np.float32)
             elif dims == 3:
@@ -45,7 +45,7 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
         def generate_input2(dims, batch):
             if dims == 1:
-                return np.zeros((batch)).astype(np.float32)
+                return np.zeros(batch).astype(np.float32)
             elif dims == 2:
                 return np.ones((batch, 4)).astype(np.float32)
             elif dims == 3:
@@ -55,7 +55,7 @@ class TrtConvertActivationTest(TrtLayerAutoScanTest):
         def generate_input3(dims, batch):
             if dims == 1:
-                return np.zeros((batch)).astype(np.float32)
+                return np.zeros(batch).astype(np.float32)
             elif dims == 2:
                 return np.ones((batch, 4)).astype(np.float32)
             elif dims == 3:
...
@@ -90,7 +90,7 @@ class TestExpandV2ExpandShapesTensor1OneDNNOp(TestExpandV2OneDNNOp):
         self.expand_shapes_tensor = []
         for index, ele in enumerate(self.expand_shape):
             self.expand_shapes_tensor.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )

     def set_additional_inputs(self):
...
@@ -64,7 +64,7 @@ class TestFCINT8OneDNNOp(OpTest):
         self.out_float = np.dot(self.x_float, self.y_float)
         if self.use_bias:
-            self.bias = np.random.random((10)).astype("float32") * 10
+            self.bias = np.random.random(10).astype("float32") * 10
             self.out_float += self.bias

         self.out_scale, self.out = self.quantize(self.out_float)
...
@@ -78,7 +78,7 @@ class TestFillZerosLike4DShapeTensorListPriorityOneDNNOp(
         self.shape_tensor_list = []
         for index, elem in enumerate(shape):
             self.shape_tensor_list.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * elem)
+                ("x" + str(index), np.ones(1).astype('int32') * elem)
             )
         self.inputs = {'ShapeTensorList': self.shape_tensor_list}
...
@@ -119,17 +119,17 @@ class TestDnnlMatMulOpMixedDimsXWiderTransposeX(TestDnnlMatMulWithGradOp):
 class TestDnnlMatMulOpVectorMultiply(TestDnnlMatMulWithGradOp):
     def generate_data(self):
-        self.x = np.random.random((5)).astype("float32")
-        self.y = np.random.random((5)).astype("float32")
+        self.x = np.random.random(5).astype("float32")
+        self.y = np.random.random(5).astype("float32")
         self.out = np.matmul(self.x, self.y)

 class TestDnnlMatMulOpVectorMultiplyTranspose(TestDnnlMatMulWithGradOp):
     def generate_data(self):
-        self.x = np.random.random((5)).astype("float32")
+        self.x = np.random.random(5).astype("float32")
         x_resized = np.copy(self.x)
         x_resized = np.expand_dims(x_resized, 1)
-        self.y = np.random.random((6)).astype("float32")
+        self.y = np.random.random(6).astype("float32")
         y_resized = np.copy(self.y)
         y_resized = np.expand_dims(y_resized, 0)
         self.out = np.matmul(x_resized, y_resized)
...
@@ -103,14 +103,14 @@ class TestPReluModeChannelAlpha1DOneDNNOp(TestPReluModeChannelOneDNNOp):
     def init_attrs(self):
         self.mode = "channel"
         self.x = np.random.random((1, 100, 1)).astype("float32")
-        self.alpha = np.random.random((100)).astype("float32")
+        self.alpha = np.random.random(100).astype("float32")

 class TestPReluModeAllAlpha1DOneDNNOp(TestPReluModeAllOneDNNOp):
     def init_attrs(self):
         self.mode = "channel"
         self.x = np.random.random((1, 1, 100)).astype("float32")
-        self.alpha = np.random.random((1)).astype("float32")
+        self.alpha = np.random.random(1).astype("float32")

 # BF16 TESTS
...
@@ -119,7 +119,7 @@ class TestReshape2OneDNNOpDimInfer1_attr_ShapeTensor(TestReshape2OneDNNOp):
         shape_tensor = []
         for index, ele in enumerate(self.new_shape):
             shape_tensor.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs["ShapeTensor"] = shape_tensor
@@ -138,7 +138,7 @@ class TestReshape2OneDNNOpDimInfer1_attr_ShapeTensorAndShape(
         shape_tensor = []
         for index, ele in enumerate(self.new_shape):
             shape_tensor.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs["Shape"] = np.array((1, 2, 3, 4), dtype="int32")
...
@@ -160,7 +160,7 @@ class TestSliceOneDNNOp_decs_dim_starts_ListTensor(
     def set_inputs(self):
         starts_tensor = []
         for index, ele in enumerate(self.starts):
-            starts_tensor.append(("x1", np.ones((1)).astype('int32') * 2))
+            starts_tensor.append(("x1", np.ones(1).astype('int32') * 2))
         self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}

     def config(self):
...
@@ -96,7 +96,7 @@ class TestSplitSectionsTensorBF16OneDNNOp(TestSplitSectionsBF16OneDNNOp):
         self.sections_tensor_list = []
         for index, ele in enumerate(self.sections):
             self.sections_tensor_list.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.sections = [-1, -1, -1]
         indices_or_sections = [2, 3]  # sections
...
@@ -114,7 +114,7 @@ class TestSplitSectionsTensorOneDNNOp(TestSplitSectionsOneDNNOp):
         self.sections_tensor_list = []
         for index, ele in enumerate(self.sections):
             self.sections_tensor_list.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.sections = [-1, -1, -1]
         indices_or_sections = [2, 3]  # sections
...
@@ -70,7 +70,7 @@ class TestSequencePadOp(OpTest):
             start_idx = end_idx

         out_data = np.array(padded_sequences)
-        length = np.array(self.x_len_lod[0]).reshape((-1))
+        length = np.array(self.x_len_lod[0]).reshape(-1)
         self.outputs = {'Out': out_data, 'Length': length}

     def setUp(self):
@@ -90,7 +90,7 @@ class TestSequencePadOp2(TestSequencePadOp):
     def set_attr(self):
         self.x_shape = [12, 10]
         self.x_len_lod = [[2, 3, 4, 3]]
-        self.pad_value = np.random.random((10))
+        self.pad_value = np.random.random(10)
         self.padded_length = -1
         self.dtype = 'float64'
@@ -108,7 +108,7 @@ class TestSequencePadOp4(TestSequencePadOp):
     def set_attr(self):
         self.x_shape = [12, 10]
         self.x_len_lod = [[2, 3, 4, 3]]
-        self.pad_value = np.random.random((10))
+        self.pad_value = np.random.random(10)
         self.padded_length = 7
         self.dtype = 'float64'
...
@@ -98,7 +98,7 @@ class TestSequenceUnpadOpError(unittest.TestCase):
         def test_length_variable():
             x1 = paddle.static.data(name='x1', shape=[10, 5], dtype='float32')
-            len1 = np.random.random((10)).astype("int64")
+            len1 = np.random.random(10).astype("int64")
             paddle.static.nn.sequence_lod.sequence_pad(x=x1, length=len1)

         self.assertRaises(TypeError, test_length_variable)
...
@@ -2444,7 +2444,7 @@ class TestSoftRelu(TestActivation):
         t = np.copy(x)
         t[t < -threshold] = -threshold
         t[t > threshold] = threshold
-        out = np.log((np.exp(t) + 1))
+        out = np.log(np.exp(t) + 1)
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
         self.attrs = {'threshold': threshold}
...
@@ -794,14 +794,14 @@ class TestAdamWOpLayerwiseLR(TestAdamWOp):
             out = linear1(x)
             out = linear2(out)

-            fc1_w_mon1 = np.zeros((linear1.weight.shape)).astype("float32")
-            fc1_w_mon2 = np.zeros((linear1.weight.shape)).astype("float32")
-            fc1_b_mon1 = np.zeros((linear1.bias.shape)).astype("float32")
-            fc1_b_mon2 = np.zeros((linear1.bias.shape)).astype("float32")
-            fc2_w_mon1 = np.zeros((linear2.weight.shape)).astype("float32")
-            fc2_w_mon2 = np.zeros((linear2.weight.shape)).astype("float32")
-            fc2_b_mon1 = np.zeros((linear2.bias.shape)).astype("float32")
-            fc2_b_mon2 = np.zeros((linear2.bias.shape)).astype("float32")
+            fc1_w_mon1 = np.zeros(linear1.weight.shape).astype("float32")
+            fc1_w_mon2 = np.zeros(linear1.weight.shape).astype("float32")
+            fc1_b_mon1 = np.zeros(linear1.bias.shape).astype("float32")
+            fc1_b_mon2 = np.zeros(linear1.bias.shape).astype("float32")
+            fc2_w_mon1 = np.zeros(linear2.weight.shape).astype("float32")
+            fc2_w_mon2 = np.zeros(linear2.weight.shape).astype("float32")
+            fc2_b_mon1 = np.zeros(linear2.bias.shape).astype("float32")
+            fc2_b_mon2 = np.zeros(linear2.bias.shape).astype("float32")

             cost = paddle.nn.functional.square_error_cost(
                 input=out, label=y
...
@@ -230,7 +230,7 @@ class TestAddMMOp4(OpTest):
         self.dtype = np.float64
         self.init_dtype_type()
         self.inputs = {
-            'Input': np.random.random((100)).astype(self.dtype),
+            'Input': np.random.random(100).astype(self.dtype),
             'X': np.random.random((20, 10)).astype(self.dtype),
             'Y': np.random.random((10, 100)).astype(self.dtype),
         }
@@ -296,7 +296,7 @@ class TestAddMMAPI(unittest.TestCase):
         self.assertRaises(ValueError, test_error1)

         def test_error2():
-            data_x_wrong = np.ones((2)).astype(np.float32)
+            data_x_wrong = np.ones(2).astype(np.float32)
             x = paddle.to_tensor(data_x_wrong)
             y = paddle.to_tensor(data_y)
             input = paddle.to_tensor(data_input)
@@ -318,7 +318,7 @@
         self.assertRaises(ValueError, test_error3)

         def test_error4():
-            data_input_wrong = np.ones((5)).astype(np.float32)
+            data_input_wrong = np.ones(5).astype(np.float32)
             x = paddle.to_tensor(data_x)
             y = paddle.to_tensor(data_y)
             input = paddle.to_tensor(data_input_wrong)
@@ -358,7 +358,7 @@
     def test_api_normal_2(self):
         data_x = np.ones((3, 10)).astype(np.float32)
         data_y = np.ones((10, 3)).astype(np.float32)
-        data_input = np.ones((3)).astype(np.float32)
+        data_input = np.ones(3).astype(np.float32)
         data_alpha = 0.1
         data_beta = 1.0
@@ -383,7 +383,7 @@
     def test_api_normal_3(self):
         data_x = np.ones((3, 10)).astype(np.float32)
         data_y = np.ones((10, 3)).astype(np.float32)
-        data_input = np.ones((1)).astype(np.float32)
+        data_input = np.ones(1).astype(np.float32)
         data_alpha = 0.1
         data_beta = 1.0
...
@@ -32,7 +32,7 @@ class TestCheckFiniteAndUnscaleOp(OpTest):
         self.python_out_sig = ["out0", "FoundInfinite"]
         self.init_dtype()
         x = np.random.random((1024, 1024)).astype(self.dtype)
-        scale = np.random.random((1)).astype(self.dtype)
+        scale = np.random.random(1).astype(self.dtype)
         self.inputs = {'X': [('x0', x)], 'Scale': scale}
         self.outputs = {
@@ -55,7 +55,7 @@ class TestCheckFiniteAndUnscaleOpWithNan(OpTest):
         self.python_out_sig = ["out0", "FoundInfinite"]
         x = np.random.random((1024, 1024)).astype(self.dtype)
         x[128][128] = np.nan
-        scale = np.random.random((1)).astype(self.dtype)
+        scale = np.random.random(1).astype(self.dtype)
         self.inputs = {'X': [('x0', x)], 'Scale': scale}
         self.outputs = {
@@ -80,7 +80,7 @@ class TestCheckFiniteAndUnscaleOpWithInf(OpTest):
         self.python_out_sig = ["out0", "FoundInfinite"]
         x = np.random.random((1024, 1024)).astype(self.dtype)
         x[128][128] = np.inf
-        scale = np.random.random((1)).astype(self.dtype)
+        scale = np.random.random(1).astype(self.dtype)
         self.inputs = {'X': [('x0', x)], 'Scale': scale}
         self.outputs = {
...
@@ -24,7 +24,7 @@ def output_hist(out):
     hist, _ = np.histogram(out, bins=2)
     hist = hist.astype("float32")
     hist /= float(out.size)
-    prob = 0.5 * np.ones((2))
+    prob = 0.5 * np.ones(2)
     return hist, prob
...
@@ -446,7 +446,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         size_tensor = []
         for index, ele in enumerate(self.out_size):
             size_tensor.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs['SizeTensor'] = size_tensor
...
@@ -798,7 +798,7 @@ class TestBilinearInterpOp_attr_tensor(OpTest):
         size_tensor = []
         for index, ele in enumerate(self.out_size):
             size_tensor.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs['SizeTensor'] = size_tensor
...
@@ -293,7 +293,7 @@ class TestBoxCoderOpWithVariance(OpTest):
         self.python_api = wrapper_box_coder
         lod = [[1, 1, 1, 1, 1]]
         prior_box = np.random.random((30, 4)).astype('float32')
-        prior_box_var = np.random.random((4)).astype('float32')
+        prior_box_var = np.random.random(4).astype('float32')
         target_box = np.random.random((30, 81, 4)).astype('float32')
         code_type = "DecodeCenterSize"
         box_normalized = False
@@ -325,7 +325,7 @@ class TestBoxCoderOpWithVarianceDygraphAPI(unittest.TestCase):
     def setUp(self):
         self.lod = [[1, 1, 1, 1, 1]]
         self.prior_box = np.random.random((30, 4)).astype('float32')
-        self.prior_box_var = np.random.random((4)).astype('float32')
+        self.prior_box_var = np.random.random(4).astype('float32')
         self.target_box = np.random.random((30, 81, 4)).astype('float32')
         self.code_type = "DecodeCenterSize"
         self.box_normalized = False
...
@@ -106,7 +106,7 @@ class TestAllocContinuousSpace(OpTest):
         coalesce_tensor_var = np.concatenate([input for input in inputs])
         if set_constant:
-            coalesce_tensor_var = np.ones((len(coalesce_tensor_var))) * constant
+            coalesce_tensor_var = np.ones(len(coalesce_tensor_var)) * constant
             outputs = [
                 (out[0], np.ones(out[1].shape).astype(self.dtype) * constant)
                 for out in outputs
...
@@ -332,7 +332,7 @@ def create_paddle_case(op_type, callback):
             op = eval("paddle.%s" % (self.op_type))
             out = op(x, y)
             exe = paddle.static.Executor(self.place)
-            input_x = np.arange(0, 5).reshape((5)).astype(np.int32)
+            input_x = np.arange(0, 5).reshape(5).astype(np.int32)
             input_y = np.array([5, 3, 2]).reshape((3, 1)).astype(np.int32)
             real_result = callback(input_x, input_y)
             (res,) = exe.run(
...
@@ -110,8 +110,8 @@ class TestComplexMatMulLayer(unittest.TestCase):
                 x = np.random.random((2, 1, 100)).astype(
                     dtype
                 ) + 1j * np.random.random((2, 1, 100)).astype(dtype)
-                y = np.random.random((100)).astype(dtype) + 1j * np.random.random(
-                    (100)
+                y = np.random.random(100).astype(dtype) + 1j * np.random.random(
+                    100
                 ).astype(dtype)
                 np_result = np.matmul(x, y)
...
@@ -147,7 +147,7 @@ class TestCropTensorOpTensorAttr(OpTest):
         shape_tensor = []
         for index, ele in enumerate(self.crop_shape):
             shape_tensor.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs = {
             'X': np.random.random(self.x_shape).astype("float64"),
@@ -159,7 +159,7 @@ class TestCropTensorOpTensorAttr(OpTest):
         offsets_tensor = []
         for index, ele in enumerate(self.offsets):
             offsets_tensor.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs = {
             'X': np.random.random(self.x_shape).astype("float64"),
...
@@ -860,7 +860,7 @@ class CrossEntropyLoss(unittest.TestCase):
         N = 100
         C = 200
         input_np = np.random.random([N, C]).astype(self.dtype)
-        label_np = -np.ones((N)).astype(np.int64)
+        label_np = -np.ones(N).astype(np.int64)

         paddle.enable_static()
         prog = fluid.Program()
         startup_prog = fluid.Program()
...
@@ -186,7 +186,7 @@ class TestSumOp6(TestSumOp1):
 class TestSumOp7(TestSumOp1):
     def set_attrs_input_output(self):
-        self.x = np.random.random((100)).astype(self.dtype_)
+        self.x = np.random.random(100).astype(self.dtype_)
         self.out = self.x.cumsum(axis=0)
...
@@ -73,7 +73,7 @@ class TestCumulativeTrapezoidWithOutDxX(TestCumulativeTrapezoidAPI):
 class TestCumulativeTrapezoidBroadcast(TestCumulativeTrapezoidAPI):
     def set_args(self):
         self.y = np.random.random((3, 3, 4)).astype('float32')
-        self.x = np.random.random((3)).astype('float32')
+        self.x = np.random.random(3).astype('float32')
         self.dx = None
         self.axis = 1
...
@@ -96,7 +96,7 @@ class TestDropoutOpInput1d(OpTest):
         self.attrs = {'dropout_prob': 0.0, 'fix_seed': True, 'is_test': False}
         self.outputs = {
             'Out': self.inputs['X'],
-            'Mask': np.ones((2000)).astype('uint8'),
+            'Mask': np.ones(2000).astype('uint8'),
         }
         # Because prim op compare res with dygraph
         # when p = 0 dropout api return x,in dygraph mode x_grad = out_grad,
...
@@ -348,7 +348,7 @@ class TestEigWrongDimsError(unittest.TestCase):
     def test_error(self):
         paddle.device.set_device("cpu")
         paddle.disable_static()
-        a = np.random.random((3)).astype('float32')
+        a = np.random.random(3).astype('float32')
         x = paddle.to_tensor(a)
         self.assertRaises(ValueError, paddle.linalg.eig, x)
...
@@ -106,7 +106,7 @@ class TestElementWiseAddOp(unittest.TestCase):
     def test_check_forward_backward_with_scale_and_bias(self):
         np.random.seed(123)
         self.x = np.random.random((4, 32, 220, 220)).astype(np.float32)
-        self.y = np.random.random((32)).astype(np.float32)
+        self.y = np.random.random(32).astype(np.float32)
         self.out = self.x + self.y.reshape(1, 32, 1, 1)
         self.axis = 1
         self.check_forward_backward()
...
@@ -160,7 +160,7 @@ class TestEmptyOp_ShapeTensorList(OpTest):
         shape_tensor_list = []
         for index, ele in enumerate(self.shape):
             shape_tensor_list.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs = {"ShapeTensorList": shape_tensor_list}
...
@@ -87,7 +87,7 @@ class TestExpandOpRank1_tensor_attr(OpTest):
         expand_times_tensor = []
         for index, ele in enumerate(self.expand_times):
             expand_times_tensor.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs = {
...
@@ -89,7 +89,7 @@ class TestExpandV2OpRank1_tensor_attr(OpTest):
         expand_shapes_tensor = []
         for index, ele in enumerate(self.expand_shape):
             expand_shapes_tensor.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs = {
...
@@ -51,7 +51,7 @@ class MatrixGenerate:
         if bias_dims == 2:
             self.bias = np.random.random((1, oc)).astype("float32")
         else:
-            self.bias = np.random.random((oc)).astype("float32")
+            self.bias = np.random.random(oc).astype("float32")

 class TestFCOp(OpTest):
...
@@ -155,7 +155,7 @@ class TestFillConstantOp1_ShapeTensorList(OpTest):
         shape_tensor_list = []
         for index, ele in enumerate(self.shape):
             shape_tensor_list.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs = {"ShapeTensorList": shape_tensor_list}
@@ -180,7 +180,7 @@ class TestFillConstantOp2_ShapeTensorList(OpTest):
         shape_tensor_list = []
         for index, ele in enumerate(self.shape):
             shape_tensor_list.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs = {"ShapeTensorList": shape_tensor_list}
...
@@ -68,7 +68,7 @@ class TestFullOp(unittest.TestCase):
         paddle.disable_static()
         input = paddle.arange(6, 10, dtype='float32')
         out = paddle.full_like(input, fill_value=888.88, dtype='float32')
-        out_numpy = np.random.random((4)).astype("float32")
+        out_numpy = np.random.random(4).astype("float32")
         out_numpy.fill(888.88)
         self.assertTrue((out.numpy() == out_numpy).all(), True)
         paddle.enable_static()
@@ -77,7 +77,7 @@ class TestFullOp(unittest.TestCase):
         paddle.disable_static()
         input = paddle.arange(6, 10, dtype='float32')
         out = paddle.full_like(input, fill_value=float('inf'))
-        out_numpy = np.random.random((4)).astype("float32")
+        out_numpy = np.random.random(4).astype("float32")
         out_numpy.fill(float('inf'))
         self.assertTrue((out.numpy() == out_numpy).all(), True)
         paddle.enable_static()
...
@@ -275,16 +275,16 @@ class APITestStaticFusedFFN(unittest.TestCase):
             linear1_weight_data = np.random.random(
                 (d_model, dim_feedforward)
             ).astype(dtype)
-            linear1_bias_data = np.zeros((dim_feedforward)).astype(dtype)
+            linear1_bias_data = np.zeros(dim_feedforward).astype(dtype)
             linear2_weight_data = np.random.random(
                 (dim_feedforward, d_model)
             ).astype(dtype)
-            linear2_bias_data = np.zeros((d_model)).astype(dtype)
+            linear2_bias_data = np.zeros(d_model).astype(dtype)

-            ln1_scale_data = np.ones((d_model)).astype(layer_norm_dtype)
-            ln1_bias_data = np.zeros((d_model)).astype(layer_norm_dtype)
-            ln2_scale_data = np.ones((d_model)).astype(layer_norm_dtype)
-            ln2_bias_data = np.zeros((d_model)).astype(layer_norm_dtype)
+            ln1_scale_data = np.ones(d_model).astype(layer_norm_dtype)
+            ln1_bias_data = np.zeros(d_model).astype(layer_norm_dtype)
+            ln2_scale_data = np.ones(d_model).astype(layer_norm_dtype)
+            ln2_bias_data = np.zeros(d_model).astype(layer_norm_dtype)

             res_list = [fused_out, ln_out]
             real_res = []
...
@@ -100,7 +100,7 @@ class TestFusedGateAttentionOp(OpTest):
         self.gating_b = _random((self.num_heads, self.head_dim))
         self.output_w = _random((self.num_heads, self.head_dim, self.out_dim))
-        self.output_b = _random((self.out_dim))
+        self.output_b = _random(self.out_dim)

         self.dout = _random(
             (self.batch_size, self.msa_len, self.res_len, self.q_dim)
...
@@ -123,7 +123,7 @@ class TestGaussianRandomOp_ShapeTensorList(TestGaussianRandomOp):
         shape_tensor_list = []
         for index, ele in enumerate(self.shape):
             shape_tensor_list.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.attrs = {
...
@@ -136,7 +136,7 @@ class TestGRUOp(OpTest):
             (self.num_layers * self.direction_num, batch_size, self.hidden_size)
         ).astype(self.dtype)

-        state_out = np.ndarray((300)).astype("uint8")
+        state_out = np.ndarray(300).astype("uint8")

         self.inputs = {
             'Input': input,
@@ -162,7 +162,7 @@ class TestGRUOp(OpTest):
         self.outputs = {
             'Out': output,
             'State': [('last_hidden', last_hidden)],
-            'Reserve': np.ndarray((400)).astype("uint8"),
+            'Reserve': np.ndarray(400).astype("uint8"),
             'DropoutState': state_out,
         }
...
@@ -186,7 +186,7 @@ class TestGumbelSoftmaxOpSampleDistribution(OpTest):
         # Construct statistics z for samples and
         # z is approximately N(0,1) for unbiased count
         expected = self.probs * self.shape[0]
-        z = (self.counts - expected) / np.sqrt((expected * (1 - self.probs)))
+        z = (self.counts - expected) / np.sqrt(expected * (1 - self.probs))
         # A (lazy) approximate 99% two-sided test:
         # occurs with prob alpha~>=0.01 if unbiased
         self.assertLess(np.max(np.abs(z)).item(), 2.58)
...
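As the comments in the hunk above say, the statistic is a normal approximation: if each of n samples lands in a bucket with probability p, the count is Binomial(n, p) with mean n·p and variance n·p·(1-p), so z = (count - n·p) / sqrt(n·p·(1-p)) is approximately N(0, 1), and |z| < 2.58 is roughly a 99% two-sided acceptance region. A hedged numeric sketch, independent of the test:

    import numpy as np

    rng = np.random.default_rng(0)
    n, p = 10000, 0.25
    counts = rng.binomial(n, p, size=100)            # simulated bucket counts
    z = (counts - n * p) / np.sqrt(n * p * (1 - p))
    # With 100 draws, most |z| values stay well below the 2.58 cutoff.
    print(np.max(np.abs(z)))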
@@ -64,7 +64,7 @@ class TestDygraphFramework(unittest.TestCase):
                 "backward should not be usable in static graph mode"
             )
         except AssertionError as e:
-            self.assertTrue((e is not None))
+            self.assertTrue(e is not None)

     def test_dygraph_to_string(self):
         np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
...
@@ -38,7 +38,7 @@ def output_hist(out):
     hist, _ = np.histogram(out, range=(-1, 1))
     hist = hist.astype("float32")
     hist /= float(out.size)
-    prob = 0.1 * np.ones((10))
+    prob = 0.1 * np.ones(10)
     return hist, prob
@@ -632,7 +632,7 @@ class TestNumpyArrayInitializer(unittest.TestCase):
         program = framework.Program()
         block = program.global_block()
-        np_array = numpy.random.random((10000)).astype(dtype)
+        np_array = numpy.random.random(10000).astype(dtype)
         for _ in range(2):
             block.create_parameter(
                 dtype=np_array.dtype,
...
@@ -644,7 +644,7 @@ class TestAssign(unittest.TestCase):
         program = framework.Program()
         block = program.global_block()
-        np_array = numpy.random.random((10000)).astype(dtype)
+        np_array = numpy.random.random(10000).astype(dtype)
         for _ in range(2):
             block.create_parameter(
                 dtype=np_array.dtype,
...
@@ -1102,7 +1102,7 @@ class TestJitSaveLoadEmptyLayer(unittest.TestCase):
     def test_save_load_empty_layer(self):
         layer = EmptyLayer()
-        x = paddle.to_tensor(np.random.random((10)).astype('float32'))
+        x = paddle.to_tensor(np.random.random(10).astype('float32'))
         out = layer(x)
         paddle.jit.save(layer, self.model_path)
         load_layer = paddle.jit.load(self.model_path)
@@ -1124,8 +1124,8 @@ class TestJitSaveLoadNoParamLayer(unittest.TestCase):
     def test_save_load_no_param_layer(self):
         layer = NoParamLayer()
-        x = paddle.to_tensor(np.random.random((5)).astype('float32'))
-        y = paddle.to_tensor(np.random.random((5)).astype('float32'))
+        x = paddle.to_tensor(np.random.random(5).astype('float32'))
+        y = paddle.to_tensor(np.random.random(5).astype('float32'))
         out = layer(x, y)
         paddle.jit.save(layer, self.model_path)
         load_layer = paddle.jit.load(self.model_path)
@@ -1433,7 +1433,7 @@ class TestJitSaveLoadFinetuneLoad(unittest.TestCase):
         result_11 = layer_finetune(inps1)
         self.assertTrue(float((result_00 - result_10).abs().max()) < 1e-5)
-        self.assertTrue(float(((result_01 - result_11)).abs().max()) < 1e-5)
+        self.assertTrue(float((result_01 - result_11).abs().max()) < 1e-5)

         # NOTE(weixin): When there are multiple test functions in an
...
@@ -27,7 +27,7 @@ class LinearTestCase(unittest.TestCase):
         self.dtype = 'float32'
         self.input = np.ones((3, 1, 2)).astype(self.dtype)
         self.weight = np.ones((2, 2)).astype(self.dtype)
-        self.bias = np.ones((2)).astype(self.dtype)
+        self.bias = np.ones(2).astype(self.dtype)
         self.place = (
             paddle.CUDAPlace(0)
             if core.is_compiled_with_cuda()
...
@@ -227,7 +227,7 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
         size_tensor = []
         for index, ele in enumerate(self.out_size):
             size_tensor.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs['SizeTensor'] = size_tensor
...
@@ -295,7 +295,7 @@ class TestLinearInterpOpSizeTensor(TestLinearInterpOp):
         size_tensor = []
         for index, ele in enumerate(self.out_size):
             size_tensor.append(
-                ("x" + str(index), np.ones((1)).astype('int32') * ele)
+                ("x" + str(index), np.ones(1).astype('int32') * ele)
             )
         self.inputs['SizeTensor'] = size_tensor
...
@@ -474,7 +474,7 @@ class TestCUDNNLstmOp(OpTest):
         init_c = np.zeros((self.num_layers, batch_size, hidden_size)).astype(
             self.dtype
         )
-        state_out = np.ndarray((300)).astype("uint8")
+        state_out = np.ndarray(300).astype("uint8")

         if core.is_compiled_with_rocm():
             for i in range(len(flat_w)):
@@ -508,7 +508,7 @@ class TestCUDNNLstmOp(OpTest):
             'Out': output,
             "LastH": last_hidden,
             'LastC': last_cell,
-            'Reserve': np.ndarray((400)).astype("uint8"),
+            'Reserve': np.ndarray(400).astype("uint8"),
             'StateOut': state_out,
         }
...
@@ -268,7 +268,7 @@ class TestMeshGrid_ZeroDim(TestMeshgridOp):
         self.shape = self.get_x_shape()
         ins = []
         outs = []
-        ins.append(np.random.random(([])).astype(self.dtype))
+        ins.append(np.random.random([]).astype(self.dtype))
         ins.append(np.random.random([2]).astype(self.dtype))
         ins.append(np.random.random([3]).astype(self.dtype))
         for i in range(len(self.shape)):
...
@@ -104,7 +104,7 @@ class TestMultiDotOp4Mat(TestMultiDotOp):
 class TestMultiDotOpFirst1D(TestMultiDotOp):
     def get_inputs_and_outputs(self):
-        self.A = np.random.random((4)).astype(self.dtype)
+        self.A = np.random.random(4).astype(self.dtype)
         self.B = np.random.random((4, 3)).astype(self.dtype)
         self.inputs = {'X': [('x0', self.A), ('x1', self.B)]}
         self.outputs = {'Out': multi_dot([self.A, self.B])}
@@ -112,7 +112,7 @@ class TestMultiDotOpFirst1D(TestMultiDotOp):
 class TestMultiDotOp3MatFirst1D(TestMultiDotOp3Mat):
     def get_inputs_and_outputs(self):
-        self.A = np.random.random((4)).astype(self.dtype)
+        self.A = np.random.random(4).astype(self.dtype)
         self.B = np.random.random((4, 3)).astype(self.dtype)
         self.C = np.random.random((3, 3)).astype(self.dtype)
         self.inputs = {'X': [('x0', self.A), ('x1', self.B), ('x2', self.C)]}
@@ -121,7 +121,7 @@ class TestMultiDotOp3MatFirst1D(TestMultiDotOp3Mat):
 class TestMultiDotOp4MatFirst1D(TestMultiDotOp4Mat):
     def get_inputs_and_outputs(self):
-        self.A = np.random.random((4)).astype(self.dtype)
+        self.A = np.random.random(4).astype(self.dtype)
         self.B = np.random.random((4, 3)).astype(self.dtype)
         self.C = np.random.random((3, 4)).astype(self.dtype)
         self.D = np.random.random((4, 5)).astype(self.dtype)
@@ -139,7 +139,7 @@ class TestMultiDotOp4MatFirst1D(TestMultiDotOp4Mat):
 class TestMultiDotOpLast1D(TestMultiDotOp):
     def get_inputs_and_outputs(self):
         self.A = np.random.random((3, 6)).astype(self.dtype)
-        self.B = np.random.random((6)).astype(self.dtype)
+        self.B = np.random.random(6).astype(self.dtype)
         self.inputs = {'X': [('x0', self.A), ('x1', self.B)]}
         self.outputs = {'Out': multi_dot([self.A, self.B])}
@@ -148,7 +148,7 @@ class TestMultiDotOp3MatLast1D(TestMultiDotOp3Mat):
     def get_inputs_and_outputs(self):
         self.A = np.random.random((2, 4)).astype(self.dtype)
         self.B = np.random.random((4, 3)).astype(self.dtype)
-        self.C = np.random.random((3)).astype(self.dtype)
+        self.C = np.random.random(3).astype(self.dtype)
         self.inputs = {'X': [('x0', self.A), ('x1', self.B), ('x2', self.C)]}
         self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
@@ -163,7 +163,7 @@ class TestMultiDotOp4MatLast1D(TestMultiDotOp4Mat):
         self.A = np.random.random((2, 3)).astype(self.dtype)
         self.B = np.random.random((3, 2)).astype(self.dtype)
         self.C = np.random.random((2, 3)).astype(self.dtype)
-        self.D = np.random.random((3)).astype(self.dtype)
+        self.D = np.random.random(3).astype(self.dtype)
         self.inputs = {
             'X': [
                 ('x0', self.A),
@@ -178,7 +178,7 @@ class TestMultiDotOp4MatLast1D(TestMultiDotOp4Mat):
 class TestMultiDotOpFirstAndLast1D(TestMultiDotOp):
     def get_inputs_and_outputs(self):
         self.A = np.random.random((4,)).astype(self.dtype)
self.B = np.random.random((4)).astype(self.dtype) self.B = np.random.random(4).astype(self.dtype)
self.inputs = {'X': [('x0', self.A), ('x1', self.B)]} self.inputs = {'X': [('x0', self.A), ('x1', self.B)]}
self.outputs = {'Out': multi_dot([self.A, self.B])} self.outputs = {'Out': multi_dot([self.A, self.B])}
...@@ -187,7 +187,7 @@ class TestMultiDotOp3MatFirstAndLast1D(TestMultiDotOp3Mat): ...@@ -187,7 +187,7 @@ class TestMultiDotOp3MatFirstAndLast1D(TestMultiDotOp3Mat):
def get_inputs_and_outputs(self): def get_inputs_and_outputs(self):
self.A = np.random.random((6,)).astype(self.dtype) self.A = np.random.random((6,)).astype(self.dtype)
self.B = np.random.random((6, 4)).astype(self.dtype) self.B = np.random.random((6, 4)).astype(self.dtype)
self.C = np.random.random((4)).astype(self.dtype) self.C = np.random.random(4).astype(self.dtype)
self.inputs = {'X': [('x0', self.A), ('x1', self.B), ('x2', self.C)]} self.inputs = {'X': [('x0', self.A), ('x1', self.B), ('x2', self.C)]}
self.outputs = {'Out': multi_dot([self.A, self.B, self.C])} self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
...@@ -197,7 +197,7 @@ class TestMultiDotOp4MatFirstAndLast1D(TestMultiDotOp4Mat): ...@@ -197,7 +197,7 @@ class TestMultiDotOp4MatFirstAndLast1D(TestMultiDotOp4Mat):
self.A = np.random.random((3,)).astype(self.dtype) self.A = np.random.random((3,)).astype(self.dtype)
self.B = np.random.random((3, 4)).astype(self.dtype) self.B = np.random.random((3, 4)).astype(self.dtype)
self.C = np.random.random((4, 2)).astype(self.dtype) self.C = np.random.random((4, 2)).astype(self.dtype)
self.D = np.random.random((2)).astype(self.dtype) self.D = np.random.random(2).astype(self.dtype)
self.inputs = { self.inputs = {
'X': [ 'X': [
('x0', self.A), ('x0', self.A),
......
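The multi_dot hunks show the boundary of the rule nicely: np.random.random((4)) is rewritten because (4) is just the scalar 4, while np.random.random((4,)) a few lines up is left alone, since the trailing comma makes a genuine 1-tuple. A sketch of the distinction:

import numpy as np

assert (4) == 4   # parentheses around a scalar
assert (4,) != 4  # the trailing comma is what makes the tuple

a = np.random.random(4)     # rewritten form
b = np.random.random((4,))  # untouched form
assert a.shape == b.shape == (4,)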
...@@ -37,7 +37,7 @@ class TestMVOp(OpTest): ...@@ -37,7 +37,7 @@ class TestMVOp(OpTest):
def init_config(self): def init_config(self):
self.x = np.random.random((2, 100)).astype("float64") self.x = np.random.random((2, 100)).astype("float64")
self.vec = np.random.random((100)).astype("float64") self.vec = np.random.random(100).astype("float64")
class TestMVAPI(unittest.TestCase): class TestMVAPI(unittest.TestCase):
...@@ -46,7 +46,7 @@ class TestMVAPI(unittest.TestCase): ...@@ -46,7 +46,7 @@ class TestMVAPI(unittest.TestCase):
self.x_data = np.random.random((5, 100)).astype("float64") self.x_data = np.random.random((5, 100)).astype("float64")
self.x = paddle.to_tensor(self.x_data) self.x = paddle.to_tensor(self.x_data)
self.vec_data = np.random.random((100)).astype("float64") self.vec_data = np.random.random(100).astype("float64")
self.vec = paddle.to_tensor(self.vec_data) self.vec = paddle.to_tensor(self.vec_data)
z = paddle.mv(self.x, self.vec) z = paddle.mv(self.x, self.vec)
np_z = z.numpy() np_z = z.numpy()
......
...@@ -189,7 +189,7 @@ class TestNanmedian(unittest.TestCase): ...@@ -189,7 +189,7 @@ class TestNanmedian(unittest.TestCase):
x_np[2, 3:] = np.nan x_np[2, 3:] = np.nan
x_np_sorted = np.sort(x_np) x_np_sorted = np.sort(x_np)
nan_counts = np.count_nonzero(np.isnan(x_np).astype(np.int32), axis=1) nan_counts = np.count_nonzero(np.isnan(x_np).astype(np.int32), axis=1)
np_grad = np.zeros((shape)) np_grad = np.zeros(shape)
for i in range(shape[0]): for i in range(shape[0]):
valid_cnts = shape[1] - nan_counts[i] valid_cnts = shape[1] - nan_counts[i]
if valid_cnts == 0: if valid_cnts == 0:
......
...@@ -374,7 +374,7 @@ class TestNearestInterpOp_attr_tensor(OpTest): ...@@ -374,7 +374,7 @@ class TestNearestInterpOp_attr_tensor(OpTest):
size_tensor = [] size_tensor = []
for index, ele in enumerate(self.out_size): for index, ele in enumerate(self.out_size):
size_tensor.append( size_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones(1).astype('int32') * ele)
) )
self.inputs['SizeTensor'] = size_tensor self.inputs['SizeTensor'] = size_tensor
......
...@@ -874,7 +874,7 @@ class TestNearestInterpOp_attr_tensor(OpTest): ...@@ -874,7 +874,7 @@ class TestNearestInterpOp_attr_tensor(OpTest):
size_tensor = [] size_tensor = []
for index, ele in enumerate(self.out_size): for index, ele in enumerate(self.out_size):
size_tensor.append( size_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones(1).astype('int32') * ele)
) )
self.inputs['SizeTensor'] = size_tensor self.inputs['SizeTensor'] = size_tensor
......
...@@ -128,7 +128,7 @@ class TestNpairLossOpError(unittest.TestCase): ...@@ -128,7 +128,7 @@ class TestNpairLossOpError(unittest.TestCase):
with program_guard(Program(), Program()): with program_guard(Program(), Program()):
anchor_np = np.random.random((2, 4)).astype("float32") anchor_np = np.random.random((2, 4)).astype("float32")
positive_np = np.random.random((2, 4)).astype("float32") positive_np = np.random.random((2, 4)).astype("float32")
labels_np = np.random.random((2)).astype("float32") labels_np = np.random.random(2).astype("float32")
anchor_data = paddle.static.data( anchor_data = paddle.static.data(
name='anchor', shape=[2, 4], dtype='float32' name='anchor', shape=[2, 4], dtype='float32'
) )
......
...@@ -27,7 +27,7 @@ class TestNumelOp(OpTest): ...@@ -27,7 +27,7 @@ class TestNumelOp(OpTest):
self.op_type = "size" self.op_type = "size"
self.python_api = paddle.numel self.python_api = paddle.numel
self.init() self.init()
x = np.random.random((self.shape)).astype(self.dtype) x = np.random.random(self.shape).astype(self.dtype)
self.inputs = { self.inputs = {
'Input': x, 'Input': x,
} }
...@@ -82,7 +82,7 @@ class TestNumelOpBF16(OpTest): ...@@ -82,7 +82,7 @@ class TestNumelOpBF16(OpTest):
self.python_api = paddle.numel self.python_api = paddle.numel
self.dtype = np.uint16 self.dtype = np.uint16
self.init() self.init()
x = np.random.random((self.shape)).astype(np.float32) x = np.random.random(self.shape).astype(np.float32)
self.inputs = {'Input': convert_float_to_uint16(x)} self.inputs = {'Input': convert_float_to_uint16(x)}
self.outputs = {'Out': np.array([np.size(x)])} self.outputs = {'Out': np.array([np.size(x)])}
......
...@@ -778,7 +778,7 @@ class TestCase4_AsyPadding(TestCase4): ...@@ -778,7 +778,7 @@ class TestCase4_AsyPadding(TestCase4):
self.shape = [2, 3, 7, 7] self.shape = [2, 3, 7, 7]
class TestCase5_AsyPadding((TestCase5)): class TestCase5_AsyPadding(TestCase5):
def init_test_case(self): def init_test_case(self):
self.ksize = [3, 3] self.ksize = [3, 3]
self.strides = [1, 1] self.strides = [1, 1]
......
...@@ -55,7 +55,7 @@ class TestPyLayer(unittest.TestCase): ...@@ -55,7 +55,7 @@ class TestPyLayer(unittest.TestCase):
z2.mean().backward() z2.mean().backward()
self.assertTrue( self.assertTrue(
np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10 np.max(np.abs(input1.grad.numpy() - input2.grad.numpy())) < 1e-10
) )
def test_simple_pylayer_return_none_with_no_grad(self): def test_simple_pylayer_return_none_with_no_grad(self):
...@@ -91,7 +91,7 @@ class TestPyLayer(unittest.TestCase): ...@@ -91,7 +91,7 @@ class TestPyLayer(unittest.TestCase):
z2.mean().backward() z2.mean().backward()
self.assertTrue( self.assertTrue(
np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10 np.max(np.abs(input1.grad.numpy() - input2.grad.numpy())) < 1e-10
) )
def test_simple_pylayer_single_output(self): def test_simple_pylayer_single_output(self):
...@@ -119,7 +119,7 @@ class TestPyLayer(unittest.TestCase): ...@@ -119,7 +119,7 @@ class TestPyLayer(unittest.TestCase):
z2.mean().backward() z2.mean().backward()
self.assertTrue( self.assertTrue(
np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10 np.max(np.abs(input1.grad.numpy() - input2.grad.numpy())) < 1e-10
) )
def test_simple_pylayer_multi_output(self): def test_simple_pylayer_multi_output(self):
...@@ -149,7 +149,7 @@ class TestPyLayer(unittest.TestCase): ...@@ -149,7 +149,7 @@ class TestPyLayer(unittest.TestCase):
z2.mean().backward() z2.mean().backward()
self.assertTrue( self.assertTrue(
np.max(np.abs((input1.grad.numpy() - input2.grad.numpy()))) < 1e-10 np.max(np.abs(input1.grad.numpy() - input2.grad.numpy())) < 1e-10
) )
def test_pylayer_num_output_match(self): def test_pylayer_num_output_match(self):
......
...@@ -29,7 +29,7 @@ def output_hist(out): ...@@ -29,7 +29,7 @@ def output_hist(out):
hist, _ = np.histogram(out, range=(-10, 10)) hist, _ = np.histogram(out, range=(-10, 10))
hist = hist.astype("float32") hist = hist.astype("float32")
hist /= float(out.size) hist /= float(out.size)
prob = 0.1 * np.ones((10)) prob = 0.1 * np.ones(10)
return hist, prob return hist, prob
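For context on the output_hist helper: it normalizes a 10-bin histogram and compares it against a flat 0.1-per-bin reference, so 0.1 * np.ones(10) is the expected probability mass of a uniform sample. A runnable sketch of the check, with an illustrative sample size:

import numpy as np

def output_hist(out):
    hist, _ = np.histogram(out, range=(-10, 10))  # 10 equal-width bins
    hist = hist.astype("float32")
    hist /= float(out.size)                       # normalize to proportions
    prob = 0.1 * np.ones(10)                      # uniform reference
    return hist, prob

hist, prob = output_hist(np.random.uniform(-10, 10, size=100_000))
assert np.allclose(hist, prob, atol=0.01)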
...@@ -74,7 +74,7 @@ class TestRandintOp_attr_tensorlist(OpTest): ...@@ -74,7 +74,7 @@ class TestRandintOp_attr_tensorlist(OpTest):
shape_tensor = [] shape_tensor = []
for index, ele in enumerate(self.new_shape): for index, ele in enumerate(self.new_shape):
shape_tensor.append( shape_tensor.append(
("x" + str(index), np.ones((1)).astype("int64") * ele) ("x" + str(index), np.ones(1).astype("int64") * ele)
) )
self.inputs = {'ShapeTensorList': shape_tensor} self.inputs = {'ShapeTensorList': shape_tensor}
self.init_attrs() self.init_attrs()
...@@ -160,7 +160,7 @@ class TestRandintImperative(unittest.TestCase): ...@@ -160,7 +160,7 @@ class TestRandintImperative(unittest.TestCase):
x3 = paddle.tensor.random.randint(n) x3 = paddle.tensor.random.randint(n)
for i in [x1, x2, x3]: for i in [x1, x2, x3]:
for j in i.numpy().tolist(): for j in i.numpy().tolist():
self.assertTrue((j >= 0 and j < n)) self.assertTrue(j >= 0 and j < n)
paddle.enable_static() paddle.enable_static()
......
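assertTrue takes any truthy expression, so the outer parentheses around j >= 0 and j < n carried no meaning. A chained comparison would be the most idiomatic spelling, though the commit deliberately limits itself to dropping the redundant parentheses:

j, n = 3, 10
assert (j >= 0 and j < n)  # before
assert j >= 0 and j < n    # after
assert 0 <= j < n          # equivalent chained comparison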
...@@ -63,7 +63,7 @@ class TestRandpermOp(OpTest): ...@@ -63,7 +63,7 @@ class TestRandpermOp(OpTest):
self.dtype = "int64" self.dtype = "int64"
self.inputs = {} self.inputs = {}
self.outputs = {"Out": np.zeros((self.n)).astype(self.dtype)} self.outputs = {"Out": np.zeros(self.n).astype(self.dtype)}
self.init_attrs() self.init_attrs()
self.attrs = { self.attrs = {
"n": self.n, "n": self.n,
......
...@@ -24,7 +24,7 @@ from paddle.fluid import core ...@@ -24,7 +24,7 @@ from paddle.fluid import core
def gen_input_help(input, rank_offset, max_rank, max_size): def gen_input_help(input, rank_offset, max_rank, max_size):
input_row, input_col = input.shape input_row, input_col = input.shape
max_ins = np.max((max_size, input_row)) max_ins = np.max((max_size, input_row))
input_help = np.zeros((max_ins * max_rank * input_col)) input_help = np.zeros(max_ins * max_rank * input_col)
ins_rank = np.zeros((max_ins, 1)) ins_rank = np.zeros((max_ins, 1))
ins_rank.fill(-1) ins_rank.fill(-1)
......
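Note the line left untouched just above: np.max((max_size, input_row)) keeps its inner parentheses because they construct a 2-tuple argument; stripping them would pass input_row as the axis parameter and fail. A sketch of why these parentheses are load-bearing:

import numpy as np

max_size, input_row = 7, 5

assert np.max((max_size, input_row)) == 7  # max over a 2-tuple

# np.max(max_size, input_row) would instead mean np.max(a=7, axis=5)
# and raise an AxisError, so UP034 correctly does not touch it.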
...@@ -174,7 +174,7 @@ class TestReshapeOp_attr_ShapeTensor(OpTest): ...@@ -174,7 +174,7 @@ class TestReshapeOp_attr_ShapeTensor(OpTest):
shape_tensor = [] shape_tensor = []
for index, ele in enumerate(self.new_shape): for index, ele in enumerate(self.new_shape):
shape_tensor.append( shape_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones(1).astype('int32') * ele)
) )
self.inputs = { self.inputs = {
......
...@@ -120,7 +120,7 @@ class TestRnnError(unittest.TestCase): ...@@ -120,7 +120,7 @@ class TestRnnError(unittest.TestCase):
self.assertRaises(TypeError, test_initial_states_type) self.assertRaises(TypeError, test_initial_states_type)
def test_sequence_length_type(): def test_sequence_length_type():
np_sequence_length = np.random.random((batch_size)).astype( np_sequence_length = np.random.random(batch_size).astype(
"float32" "float32"
) )
dynamic_rnn( dynamic_rnn(
......
...@@ -137,7 +137,7 @@ class TestRNNOp(OpTest): ...@@ -137,7 +137,7 @@ class TestRNNOp(OpTest):
init_c = np.zeros( init_c = np.zeros(
(self.num_layers * self.direction_num, batch_size, hidden_size) (self.num_layers * self.direction_num, batch_size, hidden_size)
).astype(self.dtype) ).astype(self.dtype)
state_out = np.ndarray((300)).astype("uint8") state_out = np.ndarray(300).astype("uint8")
self.inputs = { self.inputs = {
'Input': input, 'Input': input,
...@@ -163,7 +163,7 @@ class TestRNNOp(OpTest): ...@@ -163,7 +163,7 @@ class TestRNNOp(OpTest):
self.outputs = { self.outputs = {
'Out': output, 'Out': output,
"State": [('last_hidden', last_hidden), ('last_cell', last_cell)], "State": [('last_hidden', last_hidden), ('last_cell', last_cell)],
'Reserve': np.ndarray((400)).astype("uint8"), 'Reserve': np.ndarray(400).astype("uint8"),
'DropoutState': state_out, 'DropoutState': state_out,
} }
......
...@@ -28,7 +28,7 @@ class TestSeedOpFixSeed(OpTest): ...@@ -28,7 +28,7 @@ class TestSeedOpFixSeed(OpTest):
self.op_type = "seed" self.op_type = "seed"
self.inputs = {} self.inputs = {}
self.attrs = {"seed": 123} self.attrs = {"seed": 123}
self.outputs = {"Out": np.asarray((123)).astype('int')} self.outputs = {"Out": np.asarray(123).astype('int')}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output()
...@@ -39,7 +39,7 @@ class TestSeedOpDiffSeed(OpTest): ...@@ -39,7 +39,7 @@ class TestSeedOpDiffSeed(OpTest):
self.op_type = "seed" self.op_type = "seed"
self.inputs = {} self.inputs = {}
self.attrs = {"seed": 0} self.attrs = {"seed": 0}
self.outputs = {"Out": np.asarray((123)).astype('int')} self.outputs = {"Out": np.asarray(123).astype('int')}
def test_check_output(self): def test_check_output(self):
self.check_output(no_check_set=["Out"]) self.check_output(no_check_set=["Out"])
......
...@@ -36,8 +36,8 @@ def sigmoid_focal_loss_forward( ...@@ -36,8 +36,8 @@ def sigmoid_focal_loss_forward(
a = int(idx / num_classes) a = int(idx / num_classes)
d = int(idx % num_classes) d = int(idx % num_classes)
label = label_data[a] label = label_data[a]
c_pos = float((int(label) == int(d + 1))) c_pos = float(int(label) == int(d + 1))
c_neg = float(((int(label) != -1) & (int(label) != (d + 1)))) c_neg = float((int(label) != -1) & (int(label) != (d + 1)))
fg_num = max(fg_num_data, 1) fg_num = max(fg_num_data, 1)
z_neg = (1.0 - alpha) / fg_num z_neg = (1.0 - alpha) / fg_num
z_pos = alpha / fg_num z_pos = alpha / fg_num
......
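In the focal-loss reference, float() over a comparison yields the 0.0/1.0 indicator values the math needs; only the doubled parentheses were dropped. A tiny worked instance with illustrative values:

label, d = 3, 2

c_pos = float(int(label) == int(d + 1))                      # 1.0: label matches class d+1
c_neg = float((int(label) != -1) & (int(label) != (d + 1)))  # 0.0: not a negative example

assert c_pos == 1.0 and c_neg == 0.0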
...@@ -128,7 +128,7 @@ class TestSimpleRNNOp(OpTest): ...@@ -128,7 +128,7 @@ class TestSimpleRNNOp(OpTest):
(self.num_layers * self.direction_num, batch_size, hidden_size) (self.num_layers * self.direction_num, batch_size, hidden_size)
).astype(self.dtype) ).astype(self.dtype)
state_out = np.ndarray((300)).astype("uint8") state_out = np.ndarray(300).astype("uint8")
self.inputs = { self.inputs = {
'Input': input, 'Input': input,
...@@ -154,7 +154,7 @@ class TestSimpleRNNOp(OpTest): ...@@ -154,7 +154,7 @@ class TestSimpleRNNOp(OpTest):
self.outputs = { self.outputs = {
'Out': output, 'Out': output,
'State': [('last_hidden', last_hidden)], 'State': [('last_hidden', last_hidden)],
'Reserve': np.ndarray((400)).astype("uint8"), 'Reserve': np.ndarray(400).astype("uint8"),
'DropoutState': state_out, 'DropoutState': state_out,
} }
......
...@@ -172,7 +172,7 @@ class TestSliceOp_starts_ListTensor(OpTest): ...@@ -172,7 +172,7 @@ class TestSliceOp_starts_ListTensor(OpTest):
starts_tensor = [] starts_tensor = []
for index, ele in enumerate(self.starts): for index, ele in enumerate(self.starts):
starts_tensor.append( starts_tensor.append(
("x" + str(index), np.ones((1)).astype('int64') * ele) ("x" + str(index), np.ones(1).astype('int64') * ele)
) )
self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}
...@@ -212,7 +212,7 @@ class TestSliceOp_decs_dim_starts_ListTensor(OpTest): ...@@ -212,7 +212,7 @@ class TestSliceOp_decs_dim_starts_ListTensor(OpTest):
starts_tensor = [] starts_tensor = []
for index, ele in enumerate(self.starts): for index, ele in enumerate(self.starts):
starts_tensor.append( starts_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones(1).astype('int32') * ele)
) )
self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}
...@@ -379,7 +379,7 @@ class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest): ...@@ -379,7 +379,7 @@ class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
ends_tensor = [] ends_tensor = []
for index, ele in enumerate(self.ends): for index, ele in enumerate(self.ends):
ends_tensor.append( ends_tensor.append(
("y" + str(index), np.ones((1)).astype('int32') * ele) ("y" + str(index), np.ones(1).astype('int32') * ele)
) )
self.inputs = { self.inputs = {
......
...@@ -154,7 +154,7 @@ class TestSplitOp_SectionsTensor(OpTest): ...@@ -154,7 +154,7 @@ class TestSplitOp_SectionsTensor(OpTest):
sections_tensor = [] sections_tensor = []
for index, ele in enumerate(self.sections): for index, ele in enumerate(self.sections):
sections_tensor.append( sections_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones(1).astype('int32') * ele)
) )
self.inputs['SectionsTensorList'] = sections_tensor self.inputs['SectionsTensorList'] = sections_tensor
......
...@@ -324,7 +324,7 @@ class TestStridedSliceOp_starts_ListTensor(OpTest): ...@@ -324,7 +324,7 @@ class TestStridedSliceOp_starts_ListTensor(OpTest):
starts_tensor = [] starts_tensor = []
for index, ele in enumerate(self.starts): for index, ele in enumerate(self.starts):
starts_tensor.append( starts_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones(1).astype('int32') * ele)
) )
self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor} self.inputs = {'Input': self.input, 'StartsTensorList': starts_tensor}
...@@ -366,7 +366,7 @@ class TestStridedSliceOp_ends_ListTensor(OpTest): ...@@ -366,7 +366,7 @@ class TestStridedSliceOp_ends_ListTensor(OpTest):
ends_tensor = [] ends_tensor = []
for index, ele in enumerate(self.ends): for index, ele in enumerate(self.ends):
ends_tensor.append( ends_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones(1).astype('int32') * ele)
) )
self.inputs = {'Input': self.input, 'EndsTensorList': ends_tensor} self.inputs = {'Input': self.input, 'EndsTensorList': ends_tensor}
...@@ -477,7 +477,7 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest): ...@@ -477,7 +477,7 @@ class TestStridedSliceOp_listTensor_Tensor(OpTest):
ends_tensor = [] ends_tensor = []
for index, ele in enumerate(self.ends): for index, ele in enumerate(self.ends):
ends_tensor.append( ends_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones(1).astype('int32') * ele)
) )
self.op_type = "strided_slice" self.op_type = "strided_slice"
self.python_api = paddle.strided_slice self.python_api = paddle.strided_slice
......
...@@ -111,7 +111,7 @@ class TestTileOpRank1_tensor_attr(OpTest): ...@@ -111,7 +111,7 @@ class TestTileOpRank1_tensor_attr(OpTest):
repeat_times_tensor = [] repeat_times_tensor = []
for index, ele in enumerate(self.repeat_times): for index, ele in enumerate(self.repeat_times):
repeat_times_tensor.append( repeat_times_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones(1).astype('int32') * ele)
) )
self.inputs = { self.inputs = {
......
...@@ -148,7 +148,7 @@ class TestTrapezoidWithOutDxX(TestTrapezoidAPI): ...@@ -148,7 +148,7 @@ class TestTrapezoidWithOutDxX(TestTrapezoidAPI):
class TestTrapezoidBroadcast(TestTrapezoidAPI): class TestTrapezoidBroadcast(TestTrapezoidAPI):
def set_args(self): def set_args(self):
self.y = np.random.random((3, 3, 4)).astype('float32') self.y = np.random.random((3, 3, 4)).astype('float32')
self.x = np.random.random((3)).astype('float32') self.x = np.random.random(3).astype('float32')
self.dx = None self.dx = None
self.axis = 1 self.axis = 1
......
...@@ -528,7 +528,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): ...@@ -528,7 +528,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
size_tensor = [] size_tensor = []
for index, ele in enumerate(self.out_size): for index, ele in enumerate(self.out_size):
size_tensor.append( size_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones(1).astype('int32') * ele)
) )
self.inputs['SizeTensor'] = size_tensor self.inputs['SizeTensor'] = size_tensor
......
...@@ -888,7 +888,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest): ...@@ -888,7 +888,7 @@ class TestTrilinearInterpOp_attr_tensor(OpTest):
size_tensor = [] size_tensor = []
for index, ele in enumerate(self.out_size): for index, ele in enumerate(self.out_size):
size_tensor.append( size_tensor.append(
("x" + str(index), np.ones((1)).astype('int32') * ele) ("x" + str(index), np.ones(1).astype('int32') * ele)
) )
self.inputs['SizeTensor'] = size_tensor self.inputs['SizeTensor'] = size_tensor
......
...@@ -70,7 +70,7 @@ class TestUniformRandomOpBF16AttrTensorList(TestUniformRandomOpBF16): ...@@ -70,7 +70,7 @@ class TestUniformRandomOpBF16AttrTensorList(TestUniformRandomOpBF16):
shape_tensor = [] shape_tensor = []
for index, ele in enumerate(self.new_shape): for index, ele in enumerate(self.new_shape):
shape_tensor.append( shape_tensor.append(
("x" + str(index), np.ones((1)).astype("int64") * ele) ("x" + str(index), np.ones(1).astype("int64") * ele)
) )
self.inputs = {'ShapeTensorList': shape_tensor} self.inputs = {'ShapeTensorList': shape_tensor}
self.init_attrs() self.init_attrs()
......
...@@ -33,7 +33,7 @@ def output_hist(out): ...@@ -33,7 +33,7 @@ def output_hist(out):
hist, _ = np.histogram(out, range=(-5, 10)) hist, _ = np.histogram(out, range=(-5, 10))
hist = hist.astype("float32") hist = hist.astype("float32")
hist /= float(out.size) hist /= float(out.size)
prob = 0.1 * np.ones((10)) prob = 0.1 * np.ones(10)
return hist, prob return hist, prob
...@@ -46,7 +46,7 @@ def output_hist_diag(out): ...@@ -46,7 +46,7 @@ def output_hist_diag(out):
hist, _ = np.histogram(out, range=(-5, 10)) hist, _ = np.histogram(out, range=(-5, 10))
hist = hist.astype("float32") hist = hist.astype("float32")
hist /= float(out.size) hist /= float(out.size)
prob = 0.1 * np.ones((10)) prob = 0.1 * np.ones(10)
return hist, prob return hist, prob
...@@ -58,7 +58,7 @@ class TestUniformRandomOp_attr_tensorlist(OpTest): ...@@ -58,7 +58,7 @@ class TestUniformRandomOp_attr_tensorlist(OpTest):
shape_tensor = [] shape_tensor = []
for index, ele in enumerate(self.new_shape): for index, ele in enumerate(self.new_shape):
shape_tensor.append( shape_tensor.append(
("x" + str(index), np.ones((1)).astype("int64") * ele) ("x" + str(index), np.ones(1).astype("int64") * ele)
) )
self.inputs = {'ShapeTensorList': shape_tensor} self.inputs = {'ShapeTensorList': shape_tensor}
self.init_attrs() self.init_attrs()
...@@ -90,7 +90,7 @@ class TestUniformRandomOp_attr_tensorlist_int32(OpTest): ...@@ -90,7 +90,7 @@ class TestUniformRandomOp_attr_tensorlist_int32(OpTest):
shape_tensor = [] shape_tensor = []
for index, ele in enumerate(self.new_shape): for index, ele in enumerate(self.new_shape):
shape_tensor.append( shape_tensor.append(
("x" + str(index), np.ones((1)).astype("int32") * ele) ("x" + str(index), np.ones(1).astype("int32") * ele)
) )
self.inputs = {'ShapeTensorList': shape_tensor} self.inputs = {'ShapeTensorList': shape_tensor}
self.init_attrs() self.init_attrs()
...@@ -471,7 +471,7 @@ class TestUniformRandomDygraphMode(unittest.TestCase): ...@@ -471,7 +471,7 @@ class TestUniformRandomDygraphMode(unittest.TestCase):
x = paddle.uniform([10], dtype="float32", min=0.0, max=1.0) x = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)
x_np = x.numpy() x_np = x.numpy()
for i in range(10): for i in range(10):
self.assertTrue((x_np[i] > 0 and x_np[i] < 1.0)) self.assertTrue(x_np[i] > 0 and x_np[i] < 1.0)
class TestUniformRandomBatchSizeLikeOpError(unittest.TestCase): class TestUniformRandomBatchSizeLikeOpError(unittest.TestCase):
...@@ -562,7 +562,7 @@ class TestUniformDygraphMode(unittest.TestCase): ...@@ -562,7 +562,7 @@ class TestUniformDygraphMode(unittest.TestCase):
) )
x_np = x.numpy() x_np = x.numpy()
for i in range(10): for i in range(10):
self.assertTrue((x_np[i] > 0 and x_np[i] < 1.0)) self.assertTrue(x_np[i] > 0 and x_np[i] < 1.0)
class TestUniformDtype(unittest.TestCase): class TestUniformDtype(unittest.TestCase):
......
...@@ -120,7 +120,7 @@ class TestUnsqueezeOp_AxesTensorList(OpTest): ...@@ -120,7 +120,7 @@ class TestUnsqueezeOp_AxesTensorList(OpTest):
axes_tensor_list = [] axes_tensor_list = []
for index, ele in enumerate(self.axes): for index, ele in enumerate(self.axes):
axes_tensor_list.append( axes_tensor_list.append(
("axes" + str(index), np.ones((1)).astype('int32') * ele) ("axes" + str(index), np.ones(1).astype('int32') * ele)
) )
self.inputs = { self.inputs = {
......
...@@ -173,7 +173,7 @@ class TestVariable(unittest.TestCase): ...@@ -173,7 +173,7 @@ class TestVariable(unittest.TestCase):
y_1 = y[:, 0] y_1 = y[:, 0]
feeder = fluid.DataFeeder(place=place, feed_list=[x]) feeder = fluid.DataFeeder(place=place, feed_list=[x])
data = [] data = []
data.append((np.random.randint(10, size=[13]).astype('float32'))) data.append(np.random.randint(10, size=[13]).astype('float32'))
exe.run(fluid.default_startup_program()) exe.run(fluid.default_startup_program())
local_out = exe.run( local_out = exe.run(
......
...@@ -21,13 +21,13 @@ import paddle ...@@ -21,13 +21,13 @@ import paddle
def sigmoid(x): def sigmoid(x):
return 1.0 / (1.0 + np.exp(((-1.0) * x))) return 1.0 / (1.0 + np.exp((-1.0) * x))
def YoloBox(x, img_size, attrs): def YoloBox(x, img_size, attrs):
(n, c, h, w) = x.shape (n, c, h, w) = x.shape
anchors = attrs['anchors'] anchors = attrs['anchors']
an_num = int((len(anchors) // 2)) an_num = int(len(anchors) // 2)
class_num = attrs['class_num'] class_num = attrs['class_num']
conf_thresh = attrs['conf_thresh'] conf_thresh = attrs['conf_thresh']
downsample = attrs['downsample_ratio'] downsample = attrs['downsample_ratio']
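The sigmoid helper keeps one set of parentheses around -1.0 after the fix; UP034 removes only the fully redundant doubled pair, not parentheses around a unary expression. A quick sanity check of the rewritten form (np.exp(-x) would be a further manual simplification):

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp((-1.0) * x))  # post-fix form

assert np.isclose(sigmoid(0.0), 0.5)
assert np.isclose(sigmoid(1.0), 1.0 / (1.0 + np.exp(-1.0)))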
...@@ -145,7 +145,7 @@ class TestYoloBoxOp(OpTest): ...@@ -145,7 +145,7 @@ class TestYoloBoxOp(OpTest):
def initTestCase(self): def initTestCase(self):
self.anchors = [10, 13, 16, 30, 33, 23] self.anchors = [10, 13, 16, 30, 33, 23]
an_num = int((len(self.anchors) // 2)) an_num = int(len(self.anchors) // 2)
self.batch_size = 32 self.batch_size = 32
self.class_num = 2 self.class_num = 2
self.conf_thresh = 0.5 self.conf_thresh = 0.5
...@@ -166,7 +166,7 @@ class TestYoloBoxOp(OpTest): ...@@ -166,7 +166,7 @@ class TestYoloBoxOp(OpTest):
class TestYoloBoxOpNoClipBbox(TestYoloBoxOp): class TestYoloBoxOpNoClipBbox(TestYoloBoxOp):
def initTestCase(self): def initTestCase(self):
self.anchors = [10, 13, 16, 30, 33, 23] self.anchors = [10, 13, 16, 30, 33, 23]
an_num = int((len(self.anchors) // 2)) an_num = int(len(self.anchors) // 2)
self.batch_size = 32 self.batch_size = 32
self.class_num = 2 self.class_num = 2
self.conf_thresh = 0.5 self.conf_thresh = 0.5
...@@ -187,7 +187,7 @@ class TestYoloBoxOpNoClipBbox(TestYoloBoxOp): ...@@ -187,7 +187,7 @@ class TestYoloBoxOpNoClipBbox(TestYoloBoxOp):
class TestYoloBoxOpScaleXY(TestYoloBoxOp): class TestYoloBoxOpScaleXY(TestYoloBoxOp):
def initTestCase(self): def initTestCase(self):
self.anchors = [10, 13, 16, 30, 33, 23] self.anchors = [10, 13, 16, 30, 33, 23]
an_num = int((len(self.anchors) // 2)) an_num = int(len(self.anchors) // 2)
self.batch_size = 32 self.batch_size = 32
self.class_num = 2 self.class_num = 2
self.conf_thresh = 0.5 self.conf_thresh = 0.5
...@@ -208,7 +208,7 @@ class TestYoloBoxOpScaleXY(TestYoloBoxOp): ...@@ -208,7 +208,7 @@ class TestYoloBoxOpScaleXY(TestYoloBoxOp):
class TestYoloBoxOpIoUAware(TestYoloBoxOp): class TestYoloBoxOpIoUAware(TestYoloBoxOp):
def initTestCase(self): def initTestCase(self):
self.anchors = [10, 13, 16, 30, 33, 23] self.anchors = [10, 13, 16, 30, 33, 23]
an_num = int((len(self.anchors) // 2)) an_num = int(len(self.anchors) // 2)
self.batch_size = 32 self.batch_size = 32
self.class_num = 2 self.class_num = 2
self.conf_thresh = 0.5 self.conf_thresh = 0.5
...@@ -295,7 +295,7 @@ class TestYoloBoxStatic(unittest.TestCase): ...@@ -295,7 +295,7 @@ class TestYoloBoxStatic(unittest.TestCase):
class TestYoloBoxOpHW(TestYoloBoxOp): class TestYoloBoxOpHW(TestYoloBoxOp):
def initTestCase(self): def initTestCase(self):
self.anchors = [10, 13, 16, 30, 33, 23] self.anchors = [10, 13, 16, 30, 33, 23]
an_num = int((len(self.anchors) // 2)) an_num = int(len(self.anchors) // 2)
self.batch_size = 32 self.batch_size = 32
self.class_num = 2 self.class_num = 2
self.conf_thresh = 0.5 self.conf_thresh = 0.5
......
...@@ -81,7 +81,7 @@ def YOLOv3Loss(x, gtbox, gtlabel, gtscore, attrs): ...@@ -81,7 +81,7 @@ def YOLOv3Loss(x, gtbox, gtlabel, gtscore, attrs):
bias_x_y = -0.5 * (scale_x_y - 1.0) bias_x_y = -0.5 * (scale_x_y - 1.0)
input_size = downsample_ratio * h input_size = downsample_ratio * h
x = x.reshape((n, mask_num, 5 + class_num, h, w)).transpose((0, 1, 3, 4, 2)) x = x.reshape((n, mask_num, 5 + class_num, h, w)).transpose((0, 1, 3, 4, 2))
loss = np.zeros((n)).astype('float64') loss = np.zeros(n).astype('float64')
smooth_weight = min(1.0 / class_num, 1.0 / 40) smooth_weight = min(1.0 / class_num, 1.0 / 40)
label_pos = 1.0 - smooth_weight if use_label_smooth else 1.0 label_pos = 1.0 - smooth_weight if use_label_smooth else 1.0
......
...@@ -1011,7 +1011,7 @@ class XPUTestSoftReluOP(XPUOpTestWrapper): ...@@ -1011,7 +1011,7 @@ class XPUTestSoftReluOP(XPUOpTestWrapper):
t = np.copy(x) t = np.copy(x)
t[t < -threshold] = -threshold t[t < -threshold] = -threshold
t[t > threshold] = threshold t[t > threshold] = threshold
out = np.log((np.exp(t) + 1)) out = np.log(np.exp(t) + 1)
self.inputs = {'X': x} self.inputs = {'X': x}
self.outputs = {'Out': out} self.outputs = {'Out': out}
......
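The soft-relu reference computes a softplus on an input clipped to [-threshold, threshold], so np.log(np.exp(t) + 1) cannot overflow here; for unclipped inputs, np.logaddexp(t, 0) would be the numerically stable equivalent. A sketch of the identity, assuming an illustrative threshold:

import numpy as np

threshold = 20.0
t = np.clip(np.random.uniform(-25, 25, size=8), -threshold, threshold)

out = np.log(np.exp(t) + 1)    # softplus, as written in the test
stable = np.logaddexp(t, 0.0)  # log(exp(t) + exp(0)), overflow-safe

assert np.allclose(out, stable)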
...@@ -484,30 +484,22 @@ class XPUTestAdamwOp2(XPUOpTestWrapper): ...@@ -484,30 +484,22 @@ class XPUTestAdamwOp2(XPUOpTestWrapper):
out = linear1(x) out = linear1(x)
out = linear2(out) out = linear2(out)
fc1_w_mon1 = np.zeros((linear1.weight.shape)).astype( fc1_w_mon1 = np.zeros(linear1.weight.shape).astype(
"float32" "float32"
) )
fc1_w_mon2 = np.zeros((linear1.weight.shape)).astype( fc1_w_mon2 = np.zeros(linear1.weight.shape).astype(
"float32" "float32"
) )
fc1_b_mon1 = np.zeros((linear1.bias.shape)).astype( fc1_b_mon1 = np.zeros(linear1.bias.shape).astype("float32")
"float32"
)
fc1_b_mon2 = np.zeros((linear1.bias.shape)).astype( fc1_b_mon2 = np.zeros(linear1.bias.shape).astype("float32")
"float32"
)
fc2_w_mon1 = np.zeros((linear2.weight.shape)).astype( fc2_w_mon1 = np.zeros(linear2.weight.shape).astype(
"float32" "float32"
) )
fc2_w_mon2 = np.zeros((linear2.weight.shape)).astype( fc2_w_mon2 = np.zeros(linear2.weight.shape).astype(
"float32" "float32"
) )
fc2_b_mon1 = np.zeros((linear2.bias.shape)).astype( fc2_b_mon1 = np.zeros(linear2.bias.shape).astype("float32")
"float32"
)
fc2_b_mon2 = np.zeros((linear2.bias.shape)).astype( fc2_b_mon2 = np.zeros(linear2.bias.shape).astype("float32")
"float32"
)
cost = paddle.nn.functional.square_error_cost( cost = paddle.nn.functional.square_error_cost(
input=out, label=y input=out, label=y
......
...@@ -40,7 +40,7 @@ class XPUTestCheckFiniteAndUnscaleOp(XPUOpTestWrapper): ...@@ -40,7 +40,7 @@ class XPUTestCheckFiniteAndUnscaleOp(XPUOpTestWrapper):
self.op_type = "check_finite_and_unscale" self.op_type = "check_finite_and_unscale"
self.init_dtype() self.init_dtype()
x = np.random.random((8, 8)).astype(self.dtype) x = np.random.random((8, 8)).astype(self.dtype)
scale = np.random.random((1)).astype(np.float32) scale = np.random.random(1).astype(np.float32)
self.inputs = {'X': [('x0', x)], 'Scale': scale} self.inputs = {'X': [('x0', x)], 'Scale': scale}
self.outputs = { self.outputs = {
'FoundInfinite': np.array([0]), 'FoundInfinite': np.array([0]),
...@@ -64,7 +64,7 @@ class XPUTestCheckFiniteAndUnscaleOp(XPUOpTestWrapper): ...@@ -64,7 +64,7 @@ class XPUTestCheckFiniteAndUnscaleOp(XPUOpTestWrapper):
idx2 = np.random.randint(255) idx2 = np.random.randint(255)
x[idx1][idx2] = np.nan x[idx1][idx2] = np.nan
x[idx2][idx1] = np.nan x[idx2][idx1] = np.nan
scale = np.random.random((1)).astype(np.float32) scale = np.random.random(1).astype(np.float32)
self.inputs = {'X': [('x0', x)], 'Scale': scale} self.inputs = {'X': [('x0', x)], 'Scale': scale}
self.outputs = { self.outputs = {
...@@ -91,7 +91,7 @@ class XPUTestCheckFiniteAndUnscaleOp(XPUOpTestWrapper): ...@@ -91,7 +91,7 @@ class XPUTestCheckFiniteAndUnscaleOp(XPUOpTestWrapper):
idx2 = np.random.randint(255) idx2 = np.random.randint(255)
x[idx1][idx2] = np.nan x[idx1][idx2] = np.nan
x[idx2][idx1] = np.nan x[idx2][idx1] = np.nan
scale = np.random.random((1)).astype(np.float32) scale = np.random.random(1).astype(np.float32)
myscale = np.array([0.05]).astype(self.dtype) myscale = np.array([0.05]).astype(self.dtype)
self.inputs = {'X': [('x0', x)], 'Scale': scale} self.inputs = {'X': [('x0', x)], 'Scale': scale}
self.outputs = { self.outputs = {
...@@ -118,7 +118,7 @@ class XPUTestCheckFiniteAndUnscaleOp(XPUOpTestWrapper): ...@@ -118,7 +118,7 @@ class XPUTestCheckFiniteAndUnscaleOp(XPUOpTestWrapper):
idx2 = np.random.randint(255) idx2 = np.random.randint(255)
x[idx1][idx2] = np.inf x[idx1][idx2] = np.inf
x[idx2][idx1] = np.nan x[idx2][idx1] = np.nan
scale = np.random.random((1)).astype(np.float32) scale = np.random.random(1).astype(np.float32)
myscale = np.array([0.05]).astype(self.dtype) myscale = np.array([0.05]).astype(self.dtype)
self.inputs = {'X': [('x0', x)], 'Scale': scale} self.inputs = {'X': [('x0', x)], 'Scale': scale}
self.outputs = { self.outputs = {
......
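Taken together, every hunk in this commit is the same mechanical transform: ruff's UP034 ("extraneous parentheses") strips one fully redundant layer of parentheses around an expression without changing evaluation. A representative before/after pair, assuming a ruff build with UP034 enabled as in this commit's configuration:

import numpy as np

scale = np.random.random((1)).astype(np.float32)  # flagged by UP034
scale = np.random.random(1).astype(np.float32)    # after ruff --fix

assert scale.shape == (1,)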
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.