Unverified commit 9c395d38, authored by heyanru, committed by GitHub

[Fluid Clean] remove unfold, deformable_roi_pooling, shard_index, hard_swish, mish, uniform_random, unbind (#48451)
Parent commit: f88713e1
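For downstream code that still calls the removed layers, the substitutions applied throughout this diff map onto public Paddle 2.x APIs as follows. This is a minimal sketch (variable names are illustrative, not from the patch):

import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 3, 8, 8])

u = paddle.uniform([2, 10], min=0.0, max=1.0)   # was fluid.layers.uniform_random
hs = F.hardswish(x)                             # was fluid.layers.hard_swish
m = F.mish(x)                                   # was fluid.layers.mish
cols = F.unfold(x, [3, 3], 1, 1, 1)             # was fluid.layers.unfold
parts = paddle.unbind(x, axis=0)                # was fluid.layers.unbind
label = paddle.to_tensor([[7]], dtype='int64')
shard = paddle.shard_index(label, index_num=20, nshards=2, shard_id=0)  # was fluid.layers.shard_index
# fluid.layers.deformable_roi_pooling is deleted outright; this PR adds no replacement call.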
@@ -23,7 +23,7 @@ from paddle.fluid.framework import (
     _non_static_mode,
     in_dygraph_mode,
 )
-from paddle.fluid.layers import nn, tensor
+from paddle.fluid.layers import tensor
 from paddle.tensor import random
@@ -187,7 +187,7 @@ class Uniform(distribution.Distribution):
             return output
         else:
             output_shape = shape + batch_shape
-            output = nn.uniform_random(
+            output = paddle.uniform(
                 output_shape, dtype=self.dtype, min=0.0, max=1.0, seed=seed
             ) * (
                 tensor.zeros(output_shape, dtype=self.dtype)
...
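The call that replaces nn.uniform_random above, paddle.uniform, draws from the half-open range [min, max). A minimal dygraph sketch, assuming Paddle 2.x defaults:

import paddle

paddle.seed(2023)  # illustrative seed, not from the patch
out = paddle.uniform([2, 10], dtype='float32', min=0.0, max=1.0)
assert out.shape == [2, 10]
assert float(out.min()) >= 0.0 and float(out.max()) < 1.0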
[This file's diff is collapsed and not shown.]
@@ -1097,7 +1097,6 @@ set_tests_properties(test_bilinear_interp_v2_op PROPERTIES TIMEOUT 120)
 set_tests_properties(test_svd_op PROPERTIES TIMEOUT 80)
 set_tests_properties(test_einsum_op PROPERTIES TIMEOUT 120)
 set_tests_properties(test_qr_op PROPERTIES TIMEOUT 60)
-set_tests_properties(test_deformable_psroi_pooling PROPERTIES TIMEOUT 120)
 set_tests_properties(test_trilinear_interp_v2_op PROPERTIES TIMEOUT 120)
 set_tests_properties(test_imperative_static_runner_mnist PROPERTIES TIMEOUT 120)
 set_tests_properties(test_masked_select_op PROPERTIES TIMEOUT 120)
...
@@ -102,7 +102,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_HardSwish(
 ):
     def set_params(self):
         self.operand = paddle.add
-        self.act = fluid.layers.hard_swish
+        self.act = paddle.nn.functional.hardswish
 class ElementwiseActivationMkldnnFusePassTest_Add_SQRT(
@@ -202,7 +202,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_HardSwish(
 ):
     def set_params(self):
         self.operand = paddle.subtract
-        self.act = fluid.layers.hard_swish
+        self.act = paddle.nn.functional.hardswish
 class ElementwiseActivationMkldnnFusePassTest_Sub_ABS(
@@ -294,7 +294,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_HardSwish(
 ):
     def set_params(self):
         self.operand = paddle.multiply
-        self.act = fluid.layers.hard_swish
+        self.act = paddle.nn.functional.hardswish
 class ElementwiseActivationMkldnnFusePassTest_Mul_SQRT(
...
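The new fuse-pass target, paddle.nn.functional.hardswish, computes hard_swish(x) = x * clip(x + 3, 0, 6) / 6 with fixed constants. A quick sketch:

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-4.0, 0.0, 4.0])
out = F.hardswish(x)  # x * clip(x + 3, 0, 6) / 6 -> [0., 0., 4.]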
@@ -88,7 +88,7 @@ class TensorRTSubgraphPassSigmoidTest(TensorRTSubgraphPassActivationTest):
 class TensorRTSubgraphPassHardSwishTest(TensorRTSubgraphPassActivationTest):
     def append_act(self, x):
-        return fluid.layers.hard_swish(x)
+        return paddle.nn.functional.hardswish(x)
 class TensorRTSubgraphPassHardSigmoidTest(TensorRTSubgraphPassActivationTest):
@@ -100,7 +100,7 @@ class TensorRTSubgraphPassHardSwishPluginTest(
     TensorRTSubgraphPassActivationTest
 ):
     def append_act(self, x):
-        return fluid.layers.hard_swish(x, threshold=4.0, scale=8.0)
+        return paddle.nn.functional.hardswish(x)
 class TensorRTSubgraphPassClipTest(TensorRTSubgraphPassActivationTest):
@@ -166,7 +166,7 @@ class TensorRTSubgraphPassMishTest(TensorRTSubgraphPassActivationTest):
     )
     def append_act(self, x):
-        return fluid.layers.mish(x)
+        return paddle.nn.functional.mish(x)
 class TensorRTSubgraphPassMishFp16SerializeTest(
@@ -179,7 +179,7 @@ class TensorRTSubgraphPassMishFp16SerializeTest(
     )
     def append_act(self, x):
-        return fluid.layers.mish(x)
+        return paddle.nn.functional.mish(x)
 class TensorRTSubgraphPassDynamicMishFp16SerializeTest(
@@ -200,7 +200,7 @@ class TensorRTSubgraphPassDynamicMishFp16SerializeTest(
     )
     def append_act(self, x):
-        return fluid.layers.mish(x)
+        return paddle.nn.functional.mish(x)
 class TensorRTSubgraphPassPreluAllTest(TensorRTSubgraphPassActivationTest):
...
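Note that the plugin test drops the old threshold=4.0, scale=8.0 arguments: the 2.x hardswish exposes no such knobs. Its sibling here, paddle.nn.functional.mish, computes x * tanh(softplus(x)); a minimal sketch:

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-1.0, 0.0, 1.0])
out = F.mish(x)  # x * tanh(softplus(x)), where softplus(x) = log(1 + exp(x))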
@@ -2073,7 +2073,7 @@ class TestHardswishAPI(unittest.TestCase):
     def test_fluid_api(self):
         with fluid.program_guard(fluid.Program()):
             x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = fluid.layers.hard_swish(x)
+            out = paddle.nn.functional.hardswish(x)
             exe = fluid.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
         out_ref = ref_hardswish(self.x_np)
@@ -2081,7 +2081,7 @@ class TestHardswishAPI(unittest.TestCase):
         paddle.disable_static(self.place)
         x = paddle.to_tensor(self.x_np)
-        out = paddle.fluid.layers.hard_swish(x)
+        out = paddle.nn.functional.hardswish(x)
         np.testing.assert_allclose(out_ref, out.numpy(), rtol=1e-05)
         paddle.enable_static()
@@ -3414,7 +3414,7 @@ def ref_mish(x, threshold=20.0):
 class TestMish(TestActivation):
     def setUp(self):
         self.op_type = "mish"
-        self.python_api = paddle.fluid.layers.nn.mish
+        self.python_api = paddle.nn.functional.mish
         self.init_dtype()
         self.init_shape()
@@ -3480,7 +3480,7 @@ class TestMishAPI(unittest.TestCase):
         paddle.enable_static()
         with fluid.program_guard(fluid.Program()):
             x = fluid.data('X', self.x_np.shape, self.x_np.dtype)
-            out = fluid.layers.mish(x)
+            out = paddle.nn.functional.mish(x)
             exe = fluid.Executor(self.place)
             res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
             out_ref = ref_mish(self.x_np)
...
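For context, the ref_hardswish/ref_mish helpers these tests compare against implement the definitions below. This is a hedged reconstruction of the reference math, not the test file's exact code:

import numpy as np

def ref_hardswish(x, threshold=6.0, scale=6.0, offset=3.0):
    # x * clip(x + offset, 0, threshold) / scale
    return (x * np.minimum(np.maximum(x + offset, 0.0), threshold) / scale).astype(x.dtype)

def ref_mish(x, threshold=20.0):
    # softplus switches to the identity above `threshold` to avoid overflow
    softplus = np.where(x <= threshold, np.log1p(np.exp(np.minimum(x, threshold))), x)
    return x * np.tanh(softplus)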
@@ -41,15 +41,9 @@ class TestGeneratorSeed(unittest.TestCase):
         gen.manual_seed(111111111)
         st = paddle.get_cuda_rng_state()
-        x = fluid.layers.uniform_random(
-            [2, 10], dtype="float32", min=0.0, max=1.0
-        )
-        x_again = fluid.layers.uniform_random(
-            [2, 10], dtype="float32", min=0.0, max=1.0
-        )
-        x_third = fluid.layers.uniform_random(
-            [2, 10], dtype="float32", min=0.0, max=1.0
-        )
+        x = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
+        x_again = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
+        x_third = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
         print("x: {}".format(x.numpy()))
         print("x_again: {}".format(x_again.numpy()))
         x = x + x_again + x_third
@@ -57,15 +51,9 @@ class TestGeneratorSeed(unittest.TestCase):
         paddle.set_cuda_rng_state(st)
-        x1 = fluid.layers.uniform_random(
-            [2, 10], dtype="float32", min=0.0, max=1.0
-        )
-        x1_again = fluid.layers.uniform_random(
-            [2, 10], dtype="float32", min=0.0, max=1.0
-        )
-        x1_third = fluid.layers.uniform_random(
-            [2, 10], dtype="float32", min=0.0, max=1.0
-        )
+        x1 = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
+        x1_again = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
+        x1_third = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
         x1 = x1 + x1_again + x1_third
         y1 = fluid.layers.dropout(x1, 0.5)
         y_np = y.numpy()
@@ -128,7 +116,7 @@ class TestGeneratorSeed(unittest.TestCase):
         with fluid.program_guard(train_program, startup_program):
             # example 1:
             # attr shape is a list which doesn't contain tensor Variable.
-            x = fluid.layers.uniform_random(shape=[2, 10])
+            x = paddle.uniform(shape=[2, 10])
             result_1 = fluid.layers.fc(
                 input=x,
                 size=10,
...
@@ -408,9 +408,7 @@ class TestDygraphGradientClip(unittest.TestCase):
     def test_gradient_clip(self):
         with fluid.dygraph.guard():
             linear = paddle.nn.Linear(5, 5)
-            inputs = fluid.layers.uniform_random(
-                [16, 5], min=-10, max=10
-            ).astype('float32')
+            inputs = paddle.uniform([16, 5], min=-10, max=10).astype('float32')
             out = linear(fluid.dygraph.to_variable(inputs))
             loss = paddle.mean(out)
             loss.backward()
@@ -552,9 +550,9 @@ class TestDygraphGradientClipFP16(unittest.TestCase):
                 models=model, optimizers=sgd_optimizer, level='O2'
             )
             scaler = paddle.amp.GradScaler(init_loss_scaling=1024)
-            inputs = fluid.layers.uniform_random(
-                [1, 5], min=-10, max=10
-            ).astype('float32')
+            inputs = paddle.uniform([1, 5], min=-10, max=10).astype(
+                'float32'
+            )
             with paddle.amp.auto_cast(level='O2'):
                 out = model(fluid.dygraph.to_variable(inputs))
                 loss = paddle.mean(out)
@@ -600,9 +598,7 @@ class TestDygraphGradientClipFP16(unittest.TestCase):
 class TestDygraphGradientClipFP64(unittest.TestCase):
     def test_gradient_clip(self):
         with fluid.dygraph.guard():
-            inputs = fluid.layers.uniform_random(
-                [16, 5], min=-10, max=10
-            ).astype('float32')
+            inputs = paddle.uniform([16, 5], min=-10, max=10).astype('float32')
             linear = paddle.nn.Linear(5, 5)
             out = linear(fluid.dygraph.to_variable(inputs))
             loss = paddle.mean(out)
...
@@ -1843,7 +1843,7 @@ class TestLayer(LayerTest):
     def test_shard_index(self):
         with self.static_graph():
             x = fluid.layers.data(name="label", shape=[4, 1], dtype='int64')
-            shard_label = fluid.layers.shard_index(
+            shard_label = paddle.shard_index(
                 input=x, index_num=20, nshards=2, shard_id=0
             )
@@ -2342,7 +2342,7 @@ class TestBook(LayerTest):
             fluid.default_main_program(), fluid.default_startup_program()
         ):
             input = self._get_data(name="input", shape=[16], dtype="float32")
-            out = layers.mish(input, name='mish')
+            out = paddle.nn.functional.mish(input, name='mish')
             return out
     def make_cross_entropy(self):
@@ -2794,7 +2794,7 @@ class TestBook(LayerTest):
     def test_unfold(self):
         with self.static_graph():
             x = layers.data(name='x', shape=[3, 20, 20], dtype='float32')
-            out = layers.unfold(x, [3, 3], 1, 1, 1)
+            out = paddle.nn.functional.unfold(x, [3, 3], 1, 1, 1)
             return out
     def test_partial_concat(self):
@@ -2809,40 +2809,6 @@ class TestBook(LayerTest):
             )
             return concat1, concat2
-    def test_deform_roi_pooling(self):
-        with program_guard(
-            fluid.default_main_program(), fluid.default_startup_program()
-        ):
-            input = layers.data(
-                name='input',
-                shape=[2, 3, 32, 32],
-                dtype='float32',
-                append_batch_size=False,
-            )
-            rois = layers.data(
-                name="rois", shape=[4], dtype='float32', lod_level=1
-            )
-            trans = layers.data(
-                name="trans",
-                shape=[2, 3, 32, 32],
-                dtype='float32',
-                append_batch_size=False,
-            )
-            out = layers.deformable_roi_pooling(
-                input=input,
-                rois=rois,
-                trans=trans,
-                no_trans=False,
-                spatial_scale=1.0,
-                group_size=(1, 1),
-                pooled_height=8,
-                pooled_width=8,
-                part_size=(8, 8),
-                sample_per_part=4,
-                trans_std=0.1,
-            )
-            return out
     def test_addmm(self):
         with program_guard(
             fluid.default_main_program(), fluid.default_startup_program()
...
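Both substitutions in this file keep the fluid call signatures. paddle.shard_index maps a global label id to a shard-local id (labels owned by another shard become ignore_value, -1 by default), and paddle.nn.functional.unfold is the usual im2col. A sketch, assuming Paddle 2.x:

import paddle
import paddle.nn.functional as F

label = paddle.to_tensor([[4], [9], [16]], dtype='int64')
# index_num=20 over nshards=2 gives shard_size=10; shard 0 owns ids [0, 10)
shard = paddle.shard_index(label, index_num=20, nshards=2, shard_id=0)
# -> [[4], [9], [-1]]: 16 belongs to shard 1, so it is masked to -1

x = paddle.randn([1, 3, 20, 20])  # NCHW, as in test_unfold above
cols = F.unfold(x, kernel_sizes=[3, 3], strides=1, paddings=1, dilations=1)
print(cols.shape)  # [1, 27, 400]: C*3*3 values per patch, 20*20 patch positions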
@@ -35,23 +35,17 @@ class TestGeneratorSeed(unittest.TestCase):
         fluid.enable_dygraph()
         gen = paddle.seed(12312321111)
-        x = fluid.layers.uniform_random([10], dtype="float32", min=0.0, max=1.0)
+        x = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)
         st1 = gen.get_state()
-        x1 = fluid.layers.uniform_random(
-            [10], dtype="float32", min=0.0, max=1.0
-        )
+        x1 = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)
         gen.set_state(st1)
         print(gen.get_state())
-        x2 = fluid.layers.uniform_random(
-            [10], dtype="float32", min=0.0, max=1.0
-        )
+        x2 = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)
         paddle.seed(12312321111)
-        x3 = fluid.layers.uniform_random(
-            [10], dtype="float32", min=0.0, max=1.0
-        )
+        x3 = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)
         x_np = x.numpy()
         x1_np = x1.numpy()
@@ -72,8 +66,8 @@ class TestGeneratorSeed(unittest.TestCase):
         with fluid.program_guard(train_program, startup_program):
             # example 1:
             # attr shape is a list which doesn't contain tensor Variable.
-            result_1 = fluid.layers.uniform_random(shape=[3, 4])
-            result_2 = fluid.layers.uniform_random(shape=[3, 4])
+            result_1 = paddle.uniform(shape=[3, 4])
+            result_2 = paddle.uniform(shape=[3, 4])
             exe = fluid.Executor(fluid.CPUPlace())
             exe.run(startup_program)
@@ -102,15 +96,11 @@ class TestGeneratorSeed(unittest.TestCase):
         gen = paddle.seed(111111111)
         st = gen.get_state()
         # x = np.arange(1,101).reshape(2,50).astype("float32")
-        x = fluid.layers.uniform_random(
-            [2, 10], dtype="float32", min=0.0, max=1.0
-        )
+        x = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
         y = fluid.layers.dropout(x, 0.5)
         gen.manual_seed(111111111)
         # gen.set_state(st)
-        x1 = fluid.layers.uniform_random(
-            [2, 10], dtype="float32", min=0.0, max=1.0
-        )
+        x1 = paddle.uniform([2, 10], dtype="float32", min=0.0, max=1.0)
         y1 = fluid.layers.dropout(x1, 0.5)
         y_np = y.numpy()
         y1_np = y1.numpy()
@@ -129,7 +119,7 @@ class TestGeneratorSeed(unittest.TestCase):
         with fluid.program_guard(train_program, startup_program):
             # example 1:
             # attr shape is a list which doesn't contain tensor Variable.
-            x_1 = fluid.layers.uniform_random(shape=[2, 10])
+            x_1 = paddle.uniform(shape=[2, 10])
             y_1 = fluid.layers.dropout(x_1, 0.5)
             exe = fluid.Executor(fluid.CPUPlace())
             exe.run(startup_program)
@@ -235,8 +225,8 @@ class TestGeneratorSeed(unittest.TestCase):
         with fluid.program_guard(train_program, startup_program):
             # example 1:
             # attr shape is a list which doesn't contain tensor Variable.
-            result_1 = fluid.layers.uniform_random(shape=[3, 4])
-            result_2 = fluid.layers.uniform_random(shape=[3, 4])
+            result_1 = paddle.uniform(shape=[3, 4])
+            result_2 = paddle.uniform(shape=[3, 4])
             exe = fluid.Executor(fluid.CPUPlace())
             exe.run(startup_program)
@@ -384,7 +374,7 @@ class TestGeneratorSeed(unittest.TestCase):
         with fluid.program_guard(train_program, startup_program):
             # example 1:
             # attr shape is a list which doesn't contain tensor Variable.
-            x = fluid.layers.uniform_random(shape=[2, 10])
+            x = paddle.uniform(shape=[2, 10])
             result_1 = fluid.layers.fc(
                 input=x,
                 size=10,
...
@@ -263,7 +263,7 @@ class TestRegularizer(unittest.TestCase):
             regularizer=paddle.regularizer.L1Decay()
         )
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            x = fluid.layers.uniform_random([2, 2, 3])
+            x = paddle.uniform([2, 2, 3])
             out = fluid.layers.fc(x, 5, param_attr=fc_param_attr)
             loss = paddle.sum(out)
             sgd = fluid.optimizer.SGD(learning_rate=0.1, regularization=l2)
...
@@ -173,7 +173,7 @@ class TestRegularizer(unittest.TestCase):
             regularizer=paddle.regularizer.L1Decay()
         )
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            x = fluid.layers.uniform_random([2, 2, 3])
+            x = paddle.uniform([2, 2, 3])
             out = fluid.layers.fc(x, 5, param_attr=fc_param_attr)
             loss = paddle.sum(out)
             sgd = fluid.optimizer.SGD(learning_rate=0.1, regularization=l2)
...
@@ -15,6 +15,7 @@
 import unittest
 import paddle
+from paddle.fluid.layers.utils import try_set_static_shape_tensor
 class StaticShapeInferrenceTest(unittest.TestCase):
@@ -24,7 +25,8 @@ class StaticShapeInferrenceTest(unittest.TestCase):
             name="x", shape=[-1, 2], dtype='float32'
         )
         shape = paddle.shape(data)  # shape should be [-1, 2]
-        x = paddle.fluid.layers.uniform_random(shape)
+        x = paddle.uniform(shape)
+        try_set_static_shape_tensor(x, shape)
         self.assertEqual(x.shape, data.shape)
         paddle.disable_static()
...
@@ -66,7 +66,7 @@ class TestLayersUnbind(unittest.TestCase):
     def test_layers_unbind(self):
         x_1 = fluid.data(shape=[2, 3], dtype='float32', name='x_1')
-        [out_0, out_1] = fluid.layers.unbind(input=x_1, axis=0)
+        [out_0, out_1] = paddle.unbind(input=x_1, axis=0)
         input_1 = np.random.random([2, 3]).astype("float32")
         axis = fluid.data(shape=[1], dtype='int32', name='axis')
         exe = fluid.Executor(place=fluid.CPUPlace())
...
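paddle.unbind keeps the removed layer's semantics: it splits the input along axis into separate tensors with that axis dropped. A minimal sketch:

import paddle

x = paddle.arange(6, dtype='float32').reshape([2, 3])
row0, row1 = paddle.unbind(x, axis=0)  # two tensors of shape [3]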
@@ -162,23 +162,6 @@ class TestUniformRandomOpBF16SelectedRowsWithDiagInit(
         np.testing.assert_allclose(hist, prob, rtol=0, atol=0.01)
-class TestUniformRandomOpBF16AttrTensorAPI(unittest.TestCase):
-    def test_attr_tensor_API(self):
-        startup_program = fluid.Program()
-        train_program = fluid.Program()
-        with fluid.program_guard(train_program, startup_program):
-            dim_tensor = fluid.layers.fill_constant([1], "int64", 3)
-            ret = fluid.layers.nn.uniform_random(
-                [1, dim_tensor, 2], dtype=np.uint16
-            )
-            place = fluid.CPUPlace()
-            exe = fluid.Executor(place)
-            exe.run(startup_program)
-            outs = exe.run(train_program, fetch_list=[ret])
 class TestUniformRandomOpAPISeed(unittest.TestCase):
     def test_attr_tensor_API(self):
         _seed = 10
@@ -189,12 +172,8 @@ class TestUniformRandomOpAPISeed(unittest.TestCase):
         _min = 5
         _max = 10
-        ret = fluid.layers.nn.uniform_random(
-            [2, 3, 2], min=_min, max=_max, seed=_seed
-        )
-        ret_2 = fluid.layers.nn.uniform_random(
-            [2, 3, 2], min=_min, max=_max, seed=_seed
-        )
+        ret = paddle.uniform([2, 3, 2], min=_min, max=_max, seed=_seed)
+        ret_2 = paddle.uniform([2, 3, 2], min=_min, max=_max, seed=_seed)
         res = paddle.equal(ret, ret_2)
         place = fluid.CPUPlace()
         exe = fluid.Executor(place)
...
@@ -199,26 +199,18 @@ class TestUniformRandomOpError(unittest.TestCase):
             x1 = fluid.create_lod_tensor(
                 np.zeros((4, 784)), [[1, 1, 1, 1]], fluid.CPUPlace()
            )
-            fluid.layers.uniform_random(x1)
+            paddle.uniform(x1)
         self.assertRaises(TypeError, test_Variable)
         def test_Variable2():
             x1 = np.zeros((4, 784))
-            fluid.layers.uniform_random(x1)
+            paddle.uniform(x1)
         self.assertRaises(TypeError, test_Variable2)
-        def test_dtype():
-            x2 = fluid.layers.data(
-                name='x2', shape=[4, 784], dtype='float32'
-            )
-            fluid.layers.uniform_random(x2, 'int32')
-        self.assertRaises(TypeError, test_dtype)
         def test_out_dtype():
-            out = fluid.layers.uniform_random(shape=[3, 4], dtype='float64')
+            out = paddle.uniform(shape=[3, 4], dtype='float64')
             self.assertEqual(out.dtype, fluid.core.VarDesc.VarType.FP64)
         test_out_dtype()
@@ -323,7 +315,7 @@ class TestUniformRandomOp_attr_tensor_API(unittest.TestCase):
         train_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
             dim_tensor = fluid.layers.fill_constant([1], "int64", 3)
-            ret = fluid.layers.nn.uniform_random([1, dim_tensor, 2])
+            ret = paddle.uniform([1, dim_tensor, 2])
             place = fluid.CPUPlace()
             if fluid.core.is_compiled_with_cuda():
@@ -339,7 +331,7 @@ class TestUniformRandomOp_attr_tensor_API(unittest.TestCase):
         with fluid.program_guard(train_program, startup_program):
             dim_1 = fluid.layers.fill_constant([1], "int64", 3)
             dim_2 = fluid.layers.fill_constant([1], "int32", 2)
-            ret = fluid.layers.nn.uniform_random([1, dim_1, dim_2])
+            ret = paddle.uniform([1, dim_1, dim_2])
             place = fluid.CPUPlace()
             if fluid.core.is_compiled_with_cuda():
@@ -354,7 +346,7 @@ class TestUniformRandomOp_attr_tensor_API(unittest.TestCase):
         train_program = fluid.Program()
         with fluid.program_guard(train_program, startup_program):
             shape = fluid.data(name='shape_tensor', shape=[2], dtype="int32")
-            ret = fluid.layers.nn.uniform_random(shape)
+            ret = paddle.uniform(shape)
             place = fluid.CPUPlace()
             if fluid.core.is_compiled_with_cuda():
@@ -377,12 +369,8 @@ class TestUniformRandomOp_API_seed(unittest.TestCase):
         _min = 5
         _max = 10
-        ret = fluid.layers.nn.uniform_random(
-            [2, 3, 2], min=_min, max=_max, seed=_seed
-        )
-        ret_2 = fluid.layers.nn.uniform_random(
-            [2, 3, 2], min=_min, max=_max, seed=_seed
-        )
+        ret = paddle.uniform([2, 3, 2], min=_min, max=_max, seed=_seed)
+        ret_2 = paddle.uniform([2, 3, 2], min=_min, max=_max, seed=_seed)
         res = paddle.equal(ret, ret_2)
         place = fluid.CPUPlace()
         if fluid.core.is_compiled_with_cuda():
@@ -464,9 +452,7 @@ class TestUniformRandomOpSelectedRowsShapeTensorList(unittest.TestCase):
 class TestUniformRandomDygraphMode(unittest.TestCase):
     def test_check_output(self):
         with fluid.dygraph.guard():
-            x = fluid.layers.uniform_random(
-                [10], dtype="float32", min=0.0, max=1.0
-            )
+            x = paddle.uniform([10], dtype="float32", min=0.0, max=1.0)
             x_np = x.numpy()
             for i in range(10):
                 self.assertTrue((x_np[i] > 0 and x_np[i] < 1.0))
@@ -698,9 +684,7 @@ class TestUniformMinMaxTensor(UnittestBase):
         min_v = paddle.to_tensor([0.1])
         max_v = paddle.to_tensor([0.9])
         y = paddle.uniform([2, 3, 10], min=min_v, max=max_v)
-        z = paddle.fluid.layers.uniform_random(
-            [2, 3, 10], min=min_v, max=max_v
-        )
+        z = paddle.uniform([2, 3, 10], min=min_v, max=max_v)
         out = feat + y + z
...