Unverified commit db83b53a, authored by 姜永久, committed by GitHub

update erf gumbel_softmax ..ops (#50077)

* update erf gumbel_softmax ..ops

* lint

* reset sequence_conv

* reset exponential & interp nearest
Parent 9c3a35b9
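
Every file in this commit applies the same three-step migration: import OpTest from eager_op_test instead of the legacy op_test module, declare the operator's Python-level API via self.python_api in setUp(), and drop the now redundant check_eager=True argument from check_output()/check_grad(). A minimal sketch of the pattern follows; the class name TestErfOpSketch is hypothetical, while the erf op, the paddle.erf mapping, and the check calls are taken from the hunks below.

# Minimal sketch of the migration pattern used throughout this commit.
# TestErfOpSketch is a hypothetical name; op_type, python_api, and the
# check_* calls mirror the changes shown in the diff below.
import numpy as np
from scipy.special import erf

import paddle
from eager_op_test import OpTest  # was: from op_test import OpTest


class TestErfOpSketch(OpTest):
    def setUp(self):
        self.op_type = "erf"
        self.python_api = paddle.erf  # maps the operator to its Python API
        x = np.random.uniform(-1, 1, size=[11, 17]).astype("float64")
        self.inputs = {'X': x}
        self.outputs = {'Out': erf(x)}

    def test_check_output(self):
        self.check_output()  # was: self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')  # was: ..., check_eager=True
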
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest
 from scipy.special import erf

 import paddle
@@ -26,6 +26,7 @@ import paddle.fluid.dygraph as dg
 class TestErfOp(OpTest):
     def setUp(self):
         self.op_type = "erf"
+        self.python_api = paddle.erf
         self.dtype = self._init_dtype()
         self.x_shape = [11, 17]
         x = np.random.uniform(-1, 1, size=self.x_shape).astype(self.dtype)
......
@@ -17,7 +17,7 @@ import unittest
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
@@ -42,10 +42,10 @@ class TestExpandV2OpRank1(OpTest):
         self.expand_times = [1]

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')


 class TestExpandV2OpRank2_DimExpanding(TestExpandV2OpRank1):
@@ -80,6 +80,7 @@ class TestExpandV2OpRank4(TestExpandV2OpRank1):
 class TestExpandV2OpRank1_tensor_attr(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
+        self.python_api = paddle.expand
         self.init_data()
         expand_shapes_tensor = []
         for index, ele in enumerate(self.expand_shape):
@@ -120,6 +121,7 @@ class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr):
 class TestExpandV2OpRank1_tensor(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
+        self.python_api = paddle.expand
         self.init_data()
         self.inputs = {
@@ -146,6 +148,7 @@ class TestExpandV2OpRank1_tensor(OpTest):
 class TestExpandV2OpInteger(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
+        self.python_api = paddle.expand
         self.inputs = {
             'X': np.random.randint(10, size=(2, 4, 5)).astype("int32")
         }
@@ -161,6 +164,7 @@ class TestExpandV2OpInteger(OpTest):
 class TestExpandV2OpBoolean(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
+        self.python_api = paddle.expand
         self.inputs = {'X': np.random.randint(2, size=(2, 4, 5)).astype("bool")}
         self.attrs = {'shape': [2, 4, 5]}
         output = np.tile(self.inputs['X'], (1, 1, 1))
@@ -174,6 +178,7 @@ class TestExpandV2OpBoolean(OpTest):
 class TestExpandV2OpInt64_t(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
+        self.python_api = paddle.expand
         self.inputs = {
             'X': np.random.randint(10, size=(2, 4, 5)).astype("int64")
         }
......
@@ -13,10 +13,11 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
+import paddle.nn.functional as F

 paddle.enable_static()
@@ -36,6 +37,7 @@ class TestGumbelSoftmaxOp(OpTest):
     def setUp(self):
         self.op_type = "gumbel_softmax"
+        self.python_api = F.gumbel_softmax
         self.init_attrs()
         np.random.seed(0)
         x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
@@ -53,6 +55,7 @@ class TestGumbelSoftmaxOp(OpTest):
 class TestGumbelSoftmax_ZeroDim(OpTest):
     def setUp(self):
         self.op_type = "gumbel_softmax"
+        self.python_api = F.gumbel_softmax
         self.dtype = "float64"
         x = np.random.uniform(0.1, 1, []).astype(self.dtype)
         out = np.array(1.0).astype(self.dtype)
@@ -123,6 +126,7 @@ class TestGumbelSoftmaxOpSampleDistribution(OpTest):
     def setUp(self):
         self.op_type = "gumbel_softmax"
+        self.python_api = F.gumbel_softmax
         self.init_attrs()
         single_x = np.array([0.2, 0.3, 0.5])
         batch_x = np.ones(self.shape) * single_x
......
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
@@ -23,6 +23,7 @@ import paddle
 class TestEmpty(OpTest):
     def setUp(self):
         self.op_type = "is_empty"
+        self.python_api = paddle.is_empty
         self.inputs = {'X': np.array([1, 2, 3])}
         self.outputs = {'Out': np.array([False])}
@@ -33,6 +34,7 @@ class TestEmpty(OpTest):
 class TestNotEmpty(TestEmpty):
     def setUp(self):
         self.op_type = "is_empty"
+        self.python_api = paddle.is_empty
         self.inputs = {'X': np.array([])}
         self.outputs = {'Out': np.array([True])}
......
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
@@ -24,6 +24,7 @@ import paddle.fluid as fluid
 class TestMultiplexOp(OpTest):
     def setUp(self):
         self.op_type = "multiplex"
+        self.python_api = paddle.tensor.multiplex
         rows = 4
         index = np.arange(0, rows).astype('int32')
         np.random.shuffle(index)
......
@@ -15,7 +15,7 @@
 import unittest

 import numpy as np
-from op_test import OpTest
+from eager_op_test import OpTest

 import paddle
 import paddle.fluid as fluid
@@ -24,6 +24,10 @@ from paddle.fluid.framework import Program, program_guard
 from paddle.fluid.layer_helper import LayerHelper


+def transpose_layout(x, src_layout, dst_layout):
+    return x.transpose([0, 2, 3, 1])
+
+
 # default kNCHW
 class TestTransferLayoutOpkNCHWTokNHWC(OpTest):
     def setUp(self):
@@ -31,6 +35,7 @@ class TestTransferLayoutOpkNCHWTokNHWC(OpTest):
         self.inputs = {'X': ipt.astype('float32')}
         self.outputs = {'Out': ipt.transpose([0, 2, 3, 1])}
         self.attrs = {'src_layout': 0, 'dst_layout': 1}  # kNHWC
+        self.python_api = transpose_layout
         self.op_type = 'transfer_layout'

     def test_check_output(self):
......
@@ -17,12 +17,12 @@ import unittest
 import gradient_checker
 import numpy as np
 from decorator_helper import prog_scope
+from eager_op_test import OpTest, convert_float_to_uint16

 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard
-from paddle.fluid.tests.unittests.op_test import OpTest, convert_float_to_uint16

 paddle.enable_static()
@@ -47,10 +47,10 @@ class TestTransposeOp(OpTest):
         self.use_mkldnn = False

     def test_check_output(self):
-        self.check_output(no_check_set=['XShape'], check_eager=True)
+        self.check_output(no_check_set=['XShape'])

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')

     def initTestCase(self):
         self.shape = (3, 40)
@@ -150,11 +150,11 @@ class TestAutoTuneTransposeOp(OpTest):
         self.use_mkldnn = False

     def test_check_output(self):
-        self.check_output(no_check_set=['XShape'], check_eager=True)
+        self.check_output(no_check_set=['XShape'])
         fluid.core.disable_autotune()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out')


 class TestTransposeBF16Op(OpTest):
@@ -162,6 +162,7 @@ class TestTransposeBF16Op(OpTest):
         self.init_op_type()
         self.initTestCase()
         self.dtype = np.uint16
+        self.python_api = paddle.transpose
         x = np.random.random(self.shape).astype("float32")
         self.inputs = {'X': convert_float_to_uint16(x)}
@@ -580,6 +581,7 @@ class TestTransposeAPI_ZeroDim(unittest.TestCase):
         x = paddle.rand([])
         x.stop_gradient = False
         out = paddle.transpose(x, [])
+        if hasattr(out, 'retain_grads'):
+            out.retain_grads()
         out.backward()
......