Unverified · Commit b810961d authored by H hong, committed by GitHub

Add dist norm yamls (#41424) (#41615)

* add dist erfinv gumbel softmax

* fix test gumbel softmax op bug

* try to fix gumbel softmax error

* add label smooth blacklist
Parent 365975fd
@@ -37,6 +37,7 @@ def dist(x, y, p):
 class TestDistOp(OpTest):
     def setUp(self):
         self.op_type = 'dist'
+        self.python_api = paddle.dist
         self.attrs = {}
         self.init_case()
         self.init_data_type()
@@ -106,10 +107,14 @@ class TestDistOp(OpTest):
         return x_grad, y_grad

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(["X", "Y"], "Out", user_defined_grads=self.gradient)
+        self.check_grad(
+            ["X", "Y"],
+            "Out",
+            user_defined_grads=self.gradient,
+            check_eager=True)


 class TestDistOpCase1(TestDistOp):
@@ -174,4 +179,5 @@ class TestDistAPI(unittest.TestCase):

 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
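With `python_api = paddle.dist` registered above, `check_eager=True` additionally runs the test under the new eager dygraph mode by calling the public Python API directly. A minimal hand-rolled sketch of that eager check (illustrative shapes and values, not taken from the test):

```python
import numpy as np
import paddle

# Eager-mode spot check of what check_eager=True automates for this op:
# paddle.dist(x, y, p) is the p-norm of (x - y), matching the dist() helper above.
x_np = np.random.rand(3, 4).astype("float64")
y_np = np.random.rand(3, 4).astype("float64")

out = paddle.dist(paddle.to_tensor(x_np), paddle.to_tensor(y_np), p=2)
expected = np.linalg.norm(x_np - y_np)  # 2-norm of the flattened difference
np.testing.assert_allclose(out.numpy(), expected, rtol=1e-6)
```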
@@ -28,6 +28,7 @@ np.random.seed(0)
 class TestErfinv(OpTest):
     def setUp(self):
         self.op_type = "erfinv"
+        self.python_api = paddle.erfinv
         self.init_dtype()
         self.shape = [11, 17]
         self.x = np.random.uniform(-1, 1, size=self.shape).astype(self.dtype)
@@ -42,7 +43,7 @@ class TestErfinv(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
         self.check_grad(
......
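For context, `paddle.erfinv` is the inverse error function, so `erf(erfinv(x)) == x` on [-1, 1]. A quick eager-mode round-trip check of the API the test now registers (scipy is used here only as a reference implementation; it is an assumption of this sketch, not part of the test):

```python
import numpy as np
from scipy import special  # reference erf implementation (assumed available)
import paddle

x = paddle.to_tensor([0.0, 0.5, -1.0])
y = paddle.erfinv(x)  # the docstring below reports out: [0, 0.4769, -inf]
# Round-trip: erf(erfinv(x)) should recover x (note erf(-inf) == -1 exactly).
np.testing.assert_allclose(special.erf(y.numpy()), x.numpy(), rtol=1e-4)
```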
@@ -27,6 +27,7 @@ class TestExpandV2OpRank1(OpTest):
     def setUp(self):
         self.op_type = "expand_v2"
         self.init_data()
+        self.python_api = paddle.expand
         self.inputs = {'X': np.random.random(self.ori_shape).astype("float64")}
         self.attrs = {'shape': self.shape}
......
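As with the other ops in this PR, registering `python_api = paddle.expand` lets OpTest call the public API under eager mode; the op's `shape` attr maps onto the `shape` argument. A minimal usage sketch (illustrative shapes):

```python
import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0])  # shape [3]
y = paddle.expand(x, shape=[2, 3])     # broadcast along a new leading dim
print(y.shape)                         # [2, 3]
```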
@@ -17,6 +17,7 @@ import paddle.fluid.core as core
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
+from paddle.fluid.framework import _test_eager_guard

 paddle.enable_static()
@@ -177,12 +178,17 @@ class TestGumbelSoftmaxAPI(unittest.TestCase):
         self.assertEqual(out_np.sum(), self.count_expected)

         # test dygraph api
-        paddle.disable_static()
-        x = paddle.to_tensor(self.x)
-        y = paddle.nn.functional.gumbel_softmax(x, hard=True)
-        out_np = np.array(y)
-        self.assertEqual(out_np.sum(), self.count_expected)
-        paddle.enable_static()
+        with paddle.fluid.dygraph.base.guard():
+            x = paddle.to_tensor(self.x)
+            y = paddle.nn.functional.gumbel_softmax(x, hard=True)
+            out_np = np.array(y)
+            self.assertEqual(out_np.sum(), self.count_expected)
+
+            with _test_eager_guard():
+                x = paddle.to_tensor(self.x)
+                y = paddle.nn.functional.gumbel_softmax(x, hard=True)
+                out_np = np.array(y)
+                self.assertEqual(out_np.sum(), self.count_expected)


 class TestGumbelSoftmaxOpError(unittest.TestCase):
......
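The `out_np.sum() == self.count_expected` assertions above hold because `hard=True` makes gumbel_softmax return one-hot rows, so each row contributes exactly 1.0 to the total. A standalone illustration (the `[10, 6]` shape is assumed here, not taken from the test):

```python
import paddle
import paddle.nn.functional as F

paddle.disable_static()
x = paddle.rand([10, 6])            # 10 rows of 6 logits
y = F.gumbel_softmax(x, hard=True)  # each row is sampled as a one-hot vector
assert float(y.sum()) == 10.0       # one 1.0 per row => sum equals the row count
```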
@@ -1522,6 +1522,9 @@ def gumbel_softmax(x, temperature=1.0, hard=False, axis=-1, name=None):
             # [0.00000000, 0.00000000, 0.00000000, 0.00001258, 0.99998736, 0.00000000]]
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_gumbel_softmax(x, temperature, hard, axis)
+
     if in_dynamic_mode():
         return _C_ops.gumbel_softmax(x, 'temperature', temperature, 'hard',
                                      hard, 'axis', axis)
......
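The branch order above matters: in this transition period of Paddle, `in_dygraph_mode()` is true only under the new eager mode, while `in_dynamic_mode()` also covers legacy dygraph, so the eager `final_state_` path must be checked first. Either branch preserves the softmax invariant, which a quick eager-mode call can confirm (illustrative shapes and seed, a sketch rather than a test from the PR):

```python
import numpy as np
import paddle

paddle.disable_static()  # recent builds route this through the eager branch above
paddle.seed(0)
x = paddle.rand([4, 6])
y = paddle.nn.functional.gumbel_softmax(x, temperature=0.5)
# Soft samples still lie on the probability simplex: every row sums to 1.
np.testing.assert_allclose(y.sum(axis=-1).numpy(), np.ones(4), rtol=1e-6)
```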
@@ -551,6 +551,9 @@ def dist(x, y, p=2, name=None):
             out = paddle.dist(x, y, float("-inf"))
             print(out) # out = [0.]
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_dist(x, y, p)
+
     check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist')
     check_variable_and_dtype(y, 'dtype', ['float32', 'float64'], 'dist')
     check_type(p, 'p', (float, int), 'dist')
......
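The docstring fragment above exercises the p = -inf case: `paddle.dist` computes the p-norm of the elementwise difference, and p = -inf selects the minimum absolute difference. A small eager-mode check (values chosen for illustration, not from the docstring):

```python
import paddle

x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]])
y = paddle.to_tensor([[1.0, 1.0], [3.0, 5.0]])
# |x - y| = [[0, 1], [0, 1]]
print(paddle.dist(x, y, float("-inf")))  # min(|x - y|) -> [0.]
print(paddle.dist(x, y, float("inf")))   # max(|x - y|) -> [1.]
print(paddle.dist(x, y, 2.0))            # sqrt(0 + 1 + 0 + 1) -> [1.41421354]
```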
@@ -3634,6 +3634,9 @@ def erfinv(x, name=None):
             # out: [0, 0.4769, -inf]
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_erfinv(x)
+
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv')
     if paddle.in_dynamic_mode():
......
@@ -772,7 +772,7 @@
     func : GumbelSoftmaxInferMeta
   kernel :
     func : gumbel_softmax
-  # backward : gumbel_softmax_grad
+  backward : gumbel_softmax_grad

 # hard_shrink
 - api : hard_shrink
......
-# - backward_api : gumbel_softmax_grad
-#   forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out)
-#   args : (Tensor out, Tensor out_grad, int axis)
-#   output : Tensor(x_grad)
-#   infer_meta :
-#     func : GumbelSoftmaxGradInferMeta
-#     param : [out, out_grad, axis]
-#   kernel :
-#     func : gumbel_softmax_grad
-
 - backward_api : abs_grad
   forward : abs (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
@@ -517,6 +507,27 @@
   kernel :
     func : gelu_grad

+- backward_api : graph_send_recv_grad
+  forward : graph_send_recv (Tensor x, Tensor src_index, Tensor dst_index, str pool_type = "SUM", int64_t out_size = 0) -> Tensor(out), Tensor(dst_count)
+  args : (Tensor x, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str pool_type = "SUM")
+  output : Tensor(x_grad)
+  infer_meta :
+    func : GeneralUnaryGradInferMeta
+    param : [x]
+  kernel :
+    func : graph_send_recv_grad
+  optional: out, dst_count
+
+- backward_api : gumbel_softmax_grad
+  forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out)
+  args : (Tensor out, Tensor out_grad, int axis)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : GumbelSoftmaxGradInferMeta
+    param : [out, out_grad, axis]
+  kernel :
+    func : gumbel_softmax_grad
+
 - backward_api : hard_shrink_grad
   forward : hard_shrink (Tensor x, float threshold) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, float threshold)