Unverified · Commit c2f15f05 · authored by wangzhen38 · committed by GitHub

[remove fluid] PRelu BilinearTensorProduct Conv2DTranspose SequenceConv RowConv (#48654)

* [remove fluid] PRelu BilinearTensorProduct

* [remove fluid] PRelu BilinearTensorProduct Conv2DTranspose SequenceConv RowConv

* [remove fluid] PRelu BilinearTensorProduct Conv2DTranspose SequenceConv RowConv

* [remove fluid] PRelu BilinearTensorProduct Conv2DTranspose SequenceConv RowConv

* [remove fluid] PRelu BilinearTensorProduct Conv2DTranspose SequenceConv RowConv

* [remove fluid] PRelu BilinearTensorProduct Conv2DTranspose SequenceConv RowConv

* [remove fluid] PRelu BilinearTensorProduct Conv2DTranspose SequenceConv RowConv

* [remove fluid] PRelu BilinearTensorProduct Conv2DTranspose SequenceConv RowConv
Parent 9a9e0aa0
This diff is collapsed.
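Migration note (illustrative, not part of the diff): the removed dygraph layers map onto existing `paddle.nn` layers where a 2.x counterpart exists — `fluid.dygraph.nn.BilinearTensorProduct` → `paddle.nn.Bilinear`, `fluid.dygraph.nn.Conv2DTranspose` → `paddle.nn.Conv2DTranspose`, `fluid.PRelu` → `paddle.nn.PReLU` — while `SequenceConv` and `RowConv` appear to be dropped without a dygraph replacement in this PR. A minimal sketch of the new-style calls, with example shapes and sizes that are not taken from any specific test:

```python
import paddle

# Replacement layers used throughout this PR (example sizes only).
bilinear = paddle.nn.Bilinear(5, 4, 1000)      # was fluid.dygraph.nn.BilinearTensorProduct
deconv = paddle.nn.Conv2DTranspose(3, 12, 12)  # was fluid.dygraph.nn.Conv2DTranspose
prelu = paddle.nn.PReLU()                      # was fluid.PRelu

x1, x2 = paddle.randn([2, 5]), paddle.randn([2, 4])
img = paddle.randn([2, 3, 32, 32])

out_bilinear = bilinear(x1, x2)   # shape [2, 1000]
out_deconv = prelu(deconv(img))   # activation is now a separate layer / functional call
```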
@@ -113,11 +113,11 @@ class TestDygraphBasicApi_ToVariable(unittest.TestCase):
 # 1. test Apis that inherit from layers.Layer
 def dyfunc_BilinearTensorProduct(layer1, layer2):
-    bilinearTensorProduct = fluid.dygraph.nn.BilinearTensorProduct(
-        input1_dim=5,
-        input2_dim=4,
-        output_dim=1000,
-        param_attr=fluid.ParamAttr(
+    bilinearTensorProduct = paddle.nn.Bilinear(
+        5,
+        4,
+        1000,
+        weight_attr=fluid.ParamAttr(
             initializer=fluid.initializer.Constant(value=0.99)
         ),
         bias_attr=fluid.ParamAttr(
@@ -165,12 +165,11 @@ def dyfunc_Conv3D(input):
 def dyfunc_Conv2DTranspose(input):
-    conv2dTranspose = fluid.dygraph.nn.Conv2DTranspose(
-        num_channels=3,
-        num_filters=12,
-        filter_size=12,
-        use_cudnn=False,
-        param_attr=fluid.ParamAttr(
+    conv2dTranspose = paddle.nn.Conv2DTranspose(
+        3,
+        12,
+        12,
+        weight_attr=fluid.ParamAttr(
             initializer=fluid.initializer.Constant(value=0.99)
         ),
         bias_attr=fluid.ParamAttr(
@@ -221,11 +220,12 @@ def dyfunc_Pool2D(input):
 def dyfunc_Prelu(input):
-    prelu0 = fluid.PRelu(
-        mode='all',
-        param_attr=fluid.ParamAttr(initializer=fluid.initializer.Constant(1.0)),
+    prelu0 = paddle.nn.PReLU(
+        weight_attr=fluid.ParamAttr(
+            initializer=fluid.initializer.Constant(1.0)
+        ),
     )
-    res = prelu0(input=input)
+    res = prelu0(input)
     return res
......
@@ -37,7 +37,7 @@ os.environ["CUDA_VISIBLE_DEVICES"] = "1"
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.dygraph import to_variable
-from paddle.fluid.dygraph.nn import BatchNorm, Conv2DTranspose
+from paddle.fluid.dygraph.nn import BatchNorm
 from paddle.jit import ProgramTranslator
 from paddle.jit.api import declarative
@@ -430,14 +430,13 @@ class DeConv2D(fluid.dygraph.Layer):
                 initializer=fluid.initializer.Constant(0.0)
             )
-        self._deconv = Conv2DTranspose(
+        self._deconv = paddle.nn.Conv2DTranspose(
             num_channels,
             num_filters,
-            filter_size=filter_size,
+            filter_size,
             stride=stride,
             padding=padding,
-            use_cudnn=use_cudnn,
-            param_attr=fluid.ParamAttr(
+            weight_attr=fluid.ParamAttr(
                 initializer=fluid.initializer.NormalInitializer(
                     loc=0.0, scale=stddev
                 )
......
@@ -24,9 +24,7 @@ import paddle.fluid as fluid
 class TestDygraphBilinearTensorProductAPIError(unittest.TestCase):
     def test_errors(self):
         with fluid.program_guard(fluid.Program(), fluid.Program()):
-            layer = fluid.dygraph.nn.BilinearTensorProduct(
-                input1_dim=5, input2_dim=4, output_dim=1000
-            )
+            layer = paddle.nn.Bilinear(5, 4, 1000)
             # the input must be Variable.
             x0 = fluid.create_lod_tensor(
                 np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace()
......
@@ -1084,86 +1084,5 @@ class TestTensorOutputSize4(TestTensorOutputSize1):
         return out
 
-
-class TestTensorOutputSize5(TestTensorOutputSize1):
-    def path_prefix(self):
-        return 'conv2d_transpose_tensor_output_size5'
-
-    def call_func(self, x):
-        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
-        output_size = [17, paddle.assign([17])]
-        conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose(
-            num_channels=3,
-            num_filters=6,
-            filter_size=3,
-            output_size=output_size,
-            stride=2,
-        )
-        out = conv2d_trans(x)
-        return out
-
-
-class TestTensorOutputSize6(TestTensorOutputSize1):
-    def path_prefix(self):
-        return 'conv2d_transpose_tensor_output_size6'
-
-    def var_prefix(self):
-        return "Var["
-
-    def call_func(self, x):
-        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
-        output_size = paddle.assign([17, 17])
-        conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose(
-            num_channels=3,
-            num_filters=6,
-            filter_size=3,
-            output_size=output_size,
-            stride=2,
-        )
-        out = conv2d_trans(x)
-        return out
-
-
-class TestTensorOutputSize7(TestTensorOutputSize1):
-    def path_prefix(self):
-        return 'conv2d_transpose_tensor_output_size7'
-
-    def var_prefix(self):
-        return ""
-
-    def call_func(self, x):
-        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
-        output_size = 17
-        conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose(
-            num_channels=3,
-            num_filters=6,
-            filter_size=3,
-            output_size=output_size,
-            stride=2,
-        )
-        out = conv2d_trans(x)
-        return out
-
-
-class TestTensorOutputSize8(TestTensorOutputSize1):
-    def path_prefix(self):
-        return 'conv2d_transpose_tensor_output_size8'
-
-    def var_prefix(self):
-        return ""
-
-    def call_func(self, x):
-        w_var = paddle.randn((3, 6, 3, 3), dtype='float32')
-        output_size = [17, 17]
-        conv2d_trans = paddle.fluid.dygraph.Conv2DTranspose(
-            num_channels=3,
-            num_filters=6,
-            filter_size=3,
-            output_size=output_size,
-            stride=2,
-        )
-        out = conv2d_trans(x)
-        return out
-
-
 if __name__ == '__main__':
     unittest.main()
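The deleted `TestTensorOutputSize5`–`8` cases exercised `output_size` on the old `fluid.dygraph.Conv2DTranspose` constructor. For reference, a hedged sketch of how the same thing is expressed with `paddle.nn.Conv2DTranspose` (my assumption about the 2.x usage, not taken from this diff): the target spatial size is supplied at call time rather than in the constructor.

```python
import paddle

# Sketch only: output_size moves from the constructor to the forward call.
x = paddle.randn([2, 3, 8, 8])
conv2d_trans = paddle.nn.Conv2DTranspose(3, 6, 3, stride=2)

out = conv2d_trans(x, output_size=[17, 17])
print(out.shape)  # expected: [2, 6, 17, 17]
```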
@@ -12,10 +12,12 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import unittest
+
+import numpy as np
+
 import paddle
 import paddle.fluid as fluid
-import numpy as np
-import unittest
 
 def infinite_reader():
......
@@ -21,7 +21,7 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.framework as framework
-from paddle.fluid.dygraph.nn import BatchNorm, Embedding, GroupNorm, PRelu
+from paddle.fluid.dygraph.nn import BatchNorm, Embedding, GroupNorm
 from paddle.nn import Linear
@@ -212,9 +212,6 @@ class TestDygraphLoadStatic(unittest.TestCase):
         self.layer_norm_1 = paddle.nn.LayerNorm([10])
         self.layer_norm_2 = paddle.nn.LayerNorm(10)
-        self.prelu1 = PRelu("channel", channel=5)
-        self.prelu2 = PRelu("channel", channel=5)
-
         self.group_norm1 = GroupNorm(8, 4)
         self.gourp_norm2 = GroupNorm(8, 4)
......
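The `TestDygraphLoadStatic` change above deletes the fluid `PRelu("channel", channel=5)` members without a drop-in replacement in that file. A hedged sketch of what I take to be the closest `paddle.nn.PReLU` equivalent (the mode/channel mapping is my assumption, not stated in this diff):

```python
import paddle

# Assumed mapping: mode='all' -> one shared slope; mode='channel' with
# channel=5 -> one learnable slope per channel (num_parameters=5).
prelu_all = paddle.nn.PReLU(num_parameters=1, init=0.25)
prelu_channel = paddle.nn.PReLU(num_parameters=5, init=0.25)

x = paddle.randn([2, 5, 8, 8])   # NCHW input with 5 channels
y = prelu_channel(x)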
@@ -185,10 +185,10 @@ class Deconv2DLayer(fluid.dygraph.Layer):
     ):
         super().__init__()
-        self._deconv = fluid.dygraph.Conv2DTranspose(
-            num_channels=num_channels,
-            num_filters=num_filters,
-            filter_size=filter_size,
+        self._deconv = paddle.nn.Conv2DTranspose(
+            num_channels,
+            num_filters,
+            filter_size,
             stride=stride,
             padding=padding,
             bias_attr=None if use_bias else False,
......
@@ -33,8 +33,6 @@ from paddle.fluid.framework import (
     default_main_program,
     program_guard,
 )
-from paddle.fluid.initializer import Constant
-from paddle.fluid.param_attr import ParamAttr
 from paddle.tensor import random
@@ -383,54 +381,6 @@ class TestLayer(LayerTest):
         np.testing.assert_allclose(n, min_eager_ret_value, rtol=1e-05)
         np.testing.assert_allclose(n2, max_eager_ret_value, rtol=1e-05)
-
-    def test_sequence_conv(self):
-        inp_np = np.arange(12).reshape([3, 4]).astype('float32')
-        if core.is_compiled_with_cuda():
-            place = core.CUDAPlace(0)
-        else:
-            place = core.CPUPlace()
-        with self.static_graph():
-            seq = layers.data(
-                name='seq_in',
-                shape=[3, 4],
-                dtype='float32',
-                lod_level=1,
-                append_batch_size=False,
-            )
-            out = layers.sequence_conv(seq, 2, act='sigmoid')
-            static_rlt = self.get_static_graph_result(
-                feed={
-                    "seq_in": fluid.create_lod_tensor(
-                        data=inp_np, recursive_seq_lens=[[1, 1, 1]], place=place
-                    )
-                },
-                fetch_list=[out],
-                with_lod=True,
-            )[0]
-
-        with self.static_graph():
-            seq = layers.data(
-                name='seq_in',
-                shape=[3, 4],
-                dtype='float32',
-                lod_level=1,
-                append_batch_size=False,
-            )
-            seq_conv = nn.SequenceConv('seq_conv', num_filters=2, act='sigmoid')
-            out = seq_conv(seq)
-            static_rlt2 = self.get_static_graph_result(
-                feed={
-                    "seq_in": fluid.create_lod_tensor(
-                        data=inp_np, recursive_seq_lens=[[1, 1, 1]], place=place
-                    )
-                },
-                fetch_list=[out],
-                with_lod=True,
-            )[0]
-        np.testing.assert_array_equal(
-            np.array(static_rlt), np.array(static_rlt2)
-        )
     def test_conv2d_transpose(self):
         inp_np = np.arange(0, 24).reshape([2, 3, 2, 2]).astype('float32')
         with self.static_graph():
@@ -447,37 +397,37 @@ class TestLayer(LayerTest):
             )[0]
         with self.static_graph():
             img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
-            conv2d_transpose = nn.Conv2DTranspose(
-                num_channels=3,
-                num_filters=10,
-                filter_size=27,
-                act='sigmoid',
+            conv2d_transpose = paddle.nn.Conv2DTranspose(
+                3,
+                10,
+                27,
                 bias_attr=fluid.initializer.ConstantInitializer(value=1),
             )
             out = conv2d_transpose(img)
+            out = paddle.nn.functional.sigmoid(out)
             static_rlt2 = self.get_static_graph_result(
                 feed={'pixel': inp_np}, fetch_list=[out]
             )[0]
         with self.dynamic_graph():
             with _test_eager_guard():
-                conv2d_transpose = nn.Conv2DTranspose(
-                    num_channels=3,
-                    num_filters=10,
-                    filter_size=27,
-                    act='sigmoid',
+                conv2d_transpose = paddle.nn.Conv2DTranspose(
+                    3,
+                    10,
+                    27,
                     bias_attr=fluid.initializer.ConstantInitializer(value=1),
                 )
                 dy_eager_rlt = conv2d_transpose(base.to_variable(inp_np))
+                dy_eager_rlt = paddle.nn.functional.sigmoid(dy_eager_rlt)
                 dy_eager_rlt_value = dy_eager_rlt.numpy()
-            conv2d_transpose = nn.Conv2DTranspose(
-                num_channels=3,
-                num_filters=10,
-                filter_size=27,
-                act='sigmoid',
+            conv2d_transpose = paddle.nn.Conv2DTranspose(
+                3,
+                10,
+                27,
                 bias_attr=fluid.initializer.ConstantInitializer(value=1),
             )
             dy_rlt = conv2d_transpose(base.to_variable(inp_np))
+            dy_rlt = paddle.nn.functional.sigmoid(dy_rlt)
             dy_rlt_value = dy_rlt.numpy()
         np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
         np.testing.assert_allclose(dy_rlt_value, static_rlt2, rtol=1e-05)
@@ -492,14 +442,12 @@ class TestLayer(LayerTest):
                         custom_weight
                     )
                 )
-                conv2d1 = nn.Conv2DTranspose(
-                    num_channels=3, num_filters=3, filter_size=[2, 2]
-                )
-                conv2d2 = nn.Conv2DTranspose(
-                    num_channels=3,
-                    num_filters=3,
-                    filter_size=[2, 2],
-                    param_attr=weight_attr,
+                conv2d1 = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
+                conv2d2 = paddle.nn.Conv2DTranspose(
+                    3,
+                    3,
+                    [2, 2],
+                    weight_attr=weight_attr,
                 )
                 dy_ret1 = conv2d1(base.to_variable(images))
                 dy_ret2 = conv2d2(base.to_variable(images))
@@ -537,14 +485,12 @@ class TestLayer(LayerTest):
                     custom_weight
                 )
             )
-            conv2d1 = nn.Conv2DTranspose(
-                num_channels=3, num_filters=3, filter_size=[2, 2]
-            )
-            conv2d2 = nn.Conv2DTranspose(
-                num_channels=3,
-                num_filters=3,
-                filter_size=[2, 2],
-                param_attr=weight_attr,
+            conv2d1 = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
+            conv2d2 = paddle.nn.Conv2DTranspose(
+                3,
+                3,
+                [2, 2],
+                weight_attr=weight_attr,
             )
             dy_ret1 = conv2d1(base.to_variable(images))
             dy_ret2 = conv2d2(base.to_variable(images))
@@ -578,9 +524,7 @@ class TestLayer(LayerTest):
             # the input of Conv2DTranspose must be Variable.
             def test_Variable():
                 images = np.ones([2, 3, 5, 5], dtype='float32')
-                conv2d = nn.Conv2DTranspose(
-                    num_channels=3, num_filters=3, filter_size=[2, 2]
-                )
+                conv2d = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
                 conv2d_ret1 = conv2d(images)
             self.assertRaises(TypeError, test_Variable)
@@ -591,9 +535,7 @@ class TestLayer(LayerTest):
                 images = layers.data(
                     name='pixel', shape=[3, 5, 5], dtype='int32'
                 )
-                conv2d = nn.Conv2DTranspose(
-                    num_channels=3, num_filters=3, filter_size=[2, 2]
-                )
+                conv2d = paddle.nn.Conv2DTranspose(3, 3, [2, 2])
                 conv2d_ret2 = conv2d(images)
             self.assertRaises(TypeError, test_type)
@@ -628,53 +570,55 @@ class TestLayer(LayerTest):
             data_y = layers.data(
                 name='y', shape=[1, 3], dtype="float32", append_batch_size=False
             )
-            btp = nn.BilinearTensorProduct(
+            btp = paddle.nn.Bilinear(
                 3,
                 3,
                 6,
                 bias_attr=fluid.initializer.ConstantInitializer(value=1),
-                act='sigmoid',
             )
             out = btp(data_x, data_y)
+            out = paddle.nn.functional.sigmoid(out)
             static_rlt2 = self.get_static_graph_result(
                 feed={'x': inp_np_x, 'y': inp_np_y}, fetch_list=[out]
             )[0]
         with self.dynamic_graph():
             with _test_eager_guard():
-                btp = nn.BilinearTensorProduct(
+                btp = paddle.nn.Bilinear(
                     3,
                     3,
                     6,
                     bias_attr=fluid.initializer.ConstantInitializer(value=1),
-                    act='sigmoid',
                 )
                 dy_eager_rlt = btp(
                     base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                 )
+                dy_eager_rlt = paddle.nn.functional.sigmoid(dy_eager_rlt)
                 dy_eager_rlt_value = dy_eager_rlt.numpy()
-            btp = nn.BilinearTensorProduct(
+            btp = paddle.nn.Bilinear(
                 3,
                 3,
                 6,
                 bias_attr=fluid.initializer.ConstantInitializer(value=1),
-                act='sigmoid',
             )
             dy_rlt = btp(base.to_variable(inp_np_x), base.to_variable(inp_np_y))
+            dy_rlt = paddle.nn.functional.sigmoid(dy_rlt)
             dy_rlt_value = dy_rlt.numpy()
         with self.dynamic_graph():
             with _test_eager_guard():
-                btp2 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
+                btp2 = paddle.nn.Bilinear(3, 3, 6)
                 dy_eager_rlt2 = btp2(
                     base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                 )
+                dy_eager_rlt2 = paddle.nn.functional.sigmoid(dy_eager_rlt2)
                 dy_eager_rlt2_value = dy_eager_rlt2.numpy()
-            btp2 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
+            btp2 = paddle.nn.Bilinear(3, 3, 6)
             dy_rlt2 = btp2(
                 base.to_variable(inp_np_x), base.to_variable(inp_np_y)
             )
+            dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
             dy_rlt2_value = dy_rlt2.numpy()
         with self.static_graph():
@@ -706,16 +650,16 @@ class TestLayer(LayerTest):
                         custom_weight
                     )
                 )
-                btp1 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
-                btp2 = nn.BilinearTensorProduct(
-                    3, 3, 6, act='sigmoid', param_attr=weight_attr
-                )
+                btp1 = paddle.nn.Bilinear(3, 3, 6)
+                btp2 = paddle.nn.Bilinear(3, 3, 6, weight_attr=weight_attr)
                 dy_rlt1 = btp1(
                     base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                 )
+                dy_rlt1 = paddle.nn.functional.sigmoid(dy_rlt1)
                 dy_rlt2 = btp2(
                     base.to_variable(inp_np_x), base.to_variable(inp_np_y)
                 )
+                dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
                 self.assertFalse(
                     np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
                 )
@@ -744,16 +688,16 @@ class TestLayer(LayerTest):
                     custom_weight
                 )
             )
-            btp1 = nn.BilinearTensorProduct(3, 3, 6, act='sigmoid')
-            btp2 = nn.BilinearTensorProduct(
-                3, 3, 6, act='sigmoid', param_attr=weight_attr
-            )
+            btp1 = paddle.nn.Bilinear(3, 3, 6)
+            btp2 = paddle.nn.Bilinear(3, 3, 6, weight_attr=weight_attr)
             dy_rlt1 = btp1(
                 base.to_variable(inp_np_x), base.to_variable(inp_np_y)
             )
+            dy_rlt1 = paddle.nn.functional.sigmoid(dy_rlt1)
             dy_rlt2 = btp2(
                 base.to_variable(inp_np_x), base.to_variable(inp_np_y)
             )
+            dy_rlt2 = paddle.nn.functional.sigmoid(dy_rlt2)
             self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
             btp2.weight.set_value(btp1.weight.numpy())
             btp2.bias.set_value(btp1.bias)
@@ -772,133 +716,6 @@ class TestLayer(LayerTest):
             )
             np.testing.assert_array_equal(btp1.bias.numpy(), btp2.bias.numpy())
 
-    def prelu_test(self, mode):
-        inp_np = np.ones([5, 200, 100, 100]).astype('float32')
-        with self.static_graph():
-            data_t = layers.data(
-                name="input",
-                shape=[5, 200, 100, 100],
-                dtype="float32",
-                append_batch_size=False,
-            )
-            out = paddle.static.nn.prelu(
-                data_t, mode, param_attr=ParamAttr(initializer=Constant(1.0))
-            )
-            static_rlt = self.get_static_graph_result(
-                feed={"input": inp_np}, fetch_list=[out]
-            )[0]
-
-        with self.static_graph():
-            data_t = layers.data(
-                name="input",
-                shape=[5, 200, 100, 100],
-                dtype="float32",
-                append_batch_size=False,
-            )
-            prelu = nn.PRelu(
-                mode=mode,
-                channel=inp_np.shape[1],
-                input_shape=data_t.shape,
-                param_attr=ParamAttr(initializer=Constant(1.0)),
-            )
-            out = prelu(data_t)
-            static_rlt2 = self.get_static_graph_result(
-                feed={"input": inp_np}, fetch_list=[out]
-            )[0]
-
-        with self.dynamic_graph():
-            with _test_eager_guard():
-                prelu = nn.PRelu(
-                    mode=mode,
-                    channel=inp_np.shape[1],
-                    input_shape=inp_np.shape,
-                    param_attr=ParamAttr(initializer=Constant(1.0)),
-                )
-                dy_eager_rlt = prelu(base.to_variable(inp_np))
-                dy_eager_rlt_value = dy_eager_rlt.numpy()
-
-            prelu = nn.PRelu(
-                mode=mode,
-                channel=inp_np.shape[1],
-                input_shape=inp_np.shape,
-                param_attr=ParamAttr(initializer=Constant(1.0)),
-            )
-            dy_rlt = prelu(base.to_variable(inp_np))
-            dy_rlt_value = dy_rlt.numpy()
-
-        np.testing.assert_allclose(static_rlt2, static_rlt, rtol=1e-05)
-        np.testing.assert_allclose(dy_rlt_value, static_rlt, rtol=1e-05)
-        np.testing.assert_allclose(dy_eager_rlt_value, static_rlt, rtol=1e-05)
-
-        with self.dynamic_graph():
-            with _test_eager_guard():
-                inp_np = np.random.randn(5, 200, 100, 100).astype("float32")
-                inp = base.to_variable(inp_np)
-                prelu1 = nn.PRelu(
-                    mode=mode,
-                    channel=inp_np.shape[1],
-                    input_shape=inp_np.shape,
-                    param_attr=ParamAttr(initializer=Constant(2.0)),
-                )
-                prelu2 = nn.PRelu(
-                    mode=mode,
-                    channel=inp_np.shape[1],
-                    input_shape=inp_np.shape,
-                    param_attr=ParamAttr(initializer=Constant(1.0)),
-                )
-                dy_rlt1 = prelu1(inp)
-                dy_rlt2 = prelu2(inp)
-                self.assertFalse(
-                    np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy())
-                )
-                self.assertFalse(
-                    np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
-                )
-                prelu2.weight.set_value(prelu1.weight.numpy())
-                dy_rlt1 = prelu1(inp)
-                dy_rlt2 = prelu2(inp)
-                np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
-
-                prelu2.weight = prelu1.weight
-                np.testing.assert_array_equal(
-                    prelu1.weight.numpy(), prelu2.weight.numpy()
-                )
-
-            inp_np = np.random.randn(5, 200, 100, 100).astype("float32")
-            inp = base.to_variable(inp_np)
-            prelu1 = nn.PRelu(
-                mode=mode,
-                channel=inp_np.shape[1],
-                input_shape=inp_np.shape,
-                param_attr=ParamAttr(initializer=Constant(2.0)),
-            )
-            prelu2 = nn.PRelu(
-                mode=mode,
-                channel=inp_np.shape[1],
-                input_shape=inp_np.shape,
-                param_attr=ParamAttr(initializer=Constant(1.0)),
-            )
-            dy_rlt1 = prelu1(inp)
-            dy_rlt2 = prelu2(inp)
-            self.assertFalse(
-                np.array_equal(prelu1.weight.numpy(), prelu2.weight.numpy())
-            )
-            self.assertFalse(np.array_equal(dy_rlt1.numpy(), dy_rlt2.numpy()))
-            prelu2.weight.set_value(prelu1.weight.numpy())
-            dy_rlt1 = prelu1(inp)
-            dy_rlt2 = prelu2(inp)
-            np.testing.assert_array_equal(dy_rlt1.numpy(), dy_rlt2.numpy())
-
-            prelu2.weight = prelu1.weight
-            np.testing.assert_array_equal(
-                prelu1.weight.numpy(), prelu2.weight.numpy()
-            )
-
-    def test_prelu(self):
-        self.prelu_test("channel")
-        self.prelu_test("element")
-        self.prelu_test("all")
-
     def test_embeding(self):
         inp_word = np.array([[[1]]]).astype('int64')
         dict_size = 20
@@ -1207,56 +1024,6 @@ class TestLayer(LayerTest):
                 conv3d1.bias.numpy(), conv3d2.bias.numpy()
             )
 
-    def test_row_conv(self):
-        input = np.arange(15).reshape([3, 5]).astype('float32')
-        if core.is_compiled_with_cuda():
-            place = core.CUDAPlace(0)
-        else:
-            place = core.CPUPlace()
-
-        with self.static_graph():
-            x = layers.data(
-                name='X',
-                shape=[3, 5],
-                dtype='float32',
-                lod_level=1,
-                append_batch_size=False,
-            )
-            ret = layers.row_conv(input=x, future_context_size=2)
-            static_ret = self.get_static_graph_result(
-                feed={
-                    'X': fluid.create_lod_tensor(
-                        data=input, recursive_seq_lens=[[1, 1, 1]], place=place
-                    )
-                },
-                fetch_list=[ret],
-                with_lod=True,
-            )[0]
-
-        with self.static_graph():
-            x = layers.data(
-                name='X',
-                shape=[3, 5],
-                dtype='float32',
-                lod_level=1,
-                append_batch_size=False,
-            )
-            rowConv = nn.RowConv('RowConv', future_context_size=2)
-            ret = rowConv(x)
-            static_ret2 = self.get_static_graph_result(
-                feed={
-                    'X': fluid.create_lod_tensor(
-                        data=input, recursive_seq_lens=[[1, 1, 1]], place=place
-                    )
-                },
-                fetch_list=[ret],
-                with_lod=True,
-            )[0]
-
-        # TODO: dygraph can't support LODTensor
-
-        np.testing.assert_allclose(static_ret, static_ret2, rtol=1e-05)
-
     def func_group_norm(self):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
......
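A pattern repeated throughout the `TestLayer` changes above: the fluid layers fused an activation via `act='sigmoid'`, whereas the `paddle.nn` layers take no `act` argument, so the tests now apply the activation explicitly with `paddle.nn.functional.sigmoid`. A minimal sketch of the same pattern outside the test harness (example shapes only):

```python
import paddle
import paddle.nn.functional as F

x1 = paddle.randn([4, 3])
x2 = paddle.randn([4, 3])

btp = paddle.nn.Bilinear(3, 3, 6)
out = F.sigmoid(btp(x1, x2))  # replaces act='sigmoid' on the old BilinearTensorProduct
```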
@@ -16,7 +16,6 @@ import sys
 import time
 import unittest
-import paddle
 import numpy as np
 from test_multiprocess_dataloader_static import (
     BATCH_SIZE,
......
@@ -19,9 +19,6 @@ import numpy as np
 sys.path.append("..")
-import paddle
-import paddle.nn.functional as F
-
 from op_test import OpTest
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
@@ -31,6 +28,7 @@ from xpu.get_test_cover_info import (
 )
 import paddle
+import paddle.nn.functional as F
 paddle.enable_static()
......
@@ -12,18 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-import paddle
-import paddle.fluid as fluid
-import numpy as np
 import sys
 import unittest
+
+import numpy as np
+
+import paddle
+import paddle.fluid as fluid
 
 sys.path.append("..")
 from op_test_xpu import XPUOpTest
 from xpu.get_test_cover_info import (
+    XPUOpTestWrapper,
     create_test_class,
     get_xpu_op_support_types,
-    XPUOpTestWrapper,
 )
 
 paddle.enable_static()
......
@@ -15,16 +15,12 @@
 All layers just related to metric.
 """
-from paddle.fluid.layer_helper import LayerHelper
+from paddle import _legacy_C_ops
+from paddle.fluid.data_feeder import check_variable_and_dtype
+from paddle.fluid.framework import Variable, _non_static_mode, _varbase_creator
 from paddle.fluid.initializer import Constant
-from paddle.fluid.framework import (
-    Variable,
-    _non_static_mode,
-    _varbase_creator,
-)
+from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.layers import tensor
-from paddle.fluid.data_feeder import check_variable_and_dtype
-from paddle import _legacy_C_ops
 
 __all__ = ['accuracy', 'auc']
......