Unverified commit ac14920a, authored by: H hong, committed by: GitHub

add scatter_nd_add, label_smooth, huber_loss yaml (#41462)

Parent: 55e26637
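This commit wires huber_loss, label_smooth, and scatter_nd_add into the new eager ("final state") dispatch: the Python wrappers below call the `_C_ops.final_state_*` entry points when `in_dygraph_mode()` is true, and the yaml entries generate those entry points. A minimal sketch of the dispatch pattern, with `my_op`/`final_state_my_op` as hypothetical placeholder names (only the branching structure mirrors the diff):

from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph

def my_op(x, y):
    # New eager mode: call the code-generated final-state entry point.
    if in_dygraph_mode():
        return _C_ops.final_state_my_op(x, y)
    # Old imperative mode: fall back to the legacy C++ op.
    if _in_legacy_dygraph():
        return _C_ops.my_op(x, y)
    # Otherwise continue with the static-graph LayerHelper path.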
@@ -1610,6 +1610,10 @@ def huber_loss(input, label, delta):
            HuberLoss, = exe.run(feed={'input':input_data ,'label':label_data}, fetch_list=[loss.name])
            print(HuberLoss) #[[1.5], [0.5], [0.5], [0. ]], dtype=float32
    """
+    if in_dygraph_mode():
+        out, residual = _C_ops.final_state_huber_loss(input, label, delta)
+        return out
+
    helper = LayerHelper('huber_loss', **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                             'huber_loss')
......
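For reference, huber_loss is an elementwise piecewise loss on the residual `label - input`; the kernel returns that residual as a second output so the backward pass can reuse it, which is why the eager branch above unpacks two values but returns only `out`. A numpy sketch of the forward math, consistent with the `huber_loss_forward` helper named in the test hunk further down (an illustration, not the kernel itself):

import numpy as np

def huber_loss_ref(input, label, delta):
    residual = label - input
    small = np.abs(residual) <= delta
    out = np.where(
        small,
        0.5 * residual * residual,                 # quadratic region
        delta * (np.abs(residual) - 0.5 * delta))  # linear region
    return out, residual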
@@ -7095,6 +7095,10 @@ def label_smooth(label,
            smooth_label = layers.label_smooth(
                label=one_hot_label, epsilon=0.1, dtype="float32")
    """
+    if in_dygraph_mode():
+        return _C_ops.final_state_label_smooth(label, prior_dist,
+                                               float(epsilon))
+
    if epsilon > 1. or epsilon < 0.:
        raise ValueError("The value of epsilon must be between 0 and 1.")
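The smoothing itself is a convex combination of the label and a prior distribution; when `prior_dist` is None a uniform prior over the classes is used. A numpy sketch of the documented semantics:

import numpy as np

def label_smooth_ref(label, prior_dist=None, epsilon=0.1):
    num_classes = label.shape[-1]
    if prior_dist is None:
        # default: uniform distribution over the classes
        prior_dist = np.full((1, num_classes), 1.0 / num_classes)
    return (1.0 - epsilon) * label + epsilon * prior_dist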
@@ -8839,8 +8843,7 @@ def scatter_nd_add(ref, index, updates, name=None):
    """
    if in_dygraph_mode():
-        op = getattr(_C_ops, 'scatter_nd_add')
-        return op(ref, index, updates)
+        return _C_ops.final_state_scatter_nd_add(ref, index, updates)
    else:
        if _in_legacy_dygraph():
            op = getattr(_C_ops, 'scatter_nd_add')
......
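scatter_nd_add applies `updates` additively at the coordinates given by `index`, with duplicate coordinates accumulating. A numpy sketch of the semantics (illustrative only, not the kernel):

import numpy as np

def scatter_nd_add_ref(ref, index, updates):
    out = ref.copy()
    k = index.shape[-1]  # each index row addresses a rank-k prefix of ref
    flat_index = index.reshape(-1, k)
    flat_updates = updates.reshape((flat_index.shape[0],) + ref.shape[k:])
    for i, coord in enumerate(flat_index):
        out[tuple(coord)] += flat_updates[i]  # duplicates accumulate
    return out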
@@ -18,6 +18,7 @@ import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
+import paddle
from paddle.fluid import compiler, Program, program_guard
@@ -32,6 +33,8 @@ def huber_loss_forward(val, delta):
class TestHuberLossOp(OpTest):
    def setUp(self):
        self.op_type = 'huber_loss'
+        self.python_api = paddle.fluid.layers.huber_loss
+        self.python_out_sig = ["Out"]
        self.delta = 1.0
        self.init_input()
        shape = self.set_shape()
@@ -52,10 +55,10 @@ class TestHuberLossOp(OpTest):
        return (100, 1)

    def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

    def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_eager=True)

    def test_check_grad_ingore_x(self):
        self.check_grad(
@@ -103,4 +106,5 @@ class TestHuberLossOpError(unittest.TestCase):
if __name__ == '__main__':
+    paddle.enable_static()
    unittest.main()
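With the eager branch in place, the wrapper can also be exercised directly in dygraph. A small usage sketch, assuming a build where the final-state kernel is registered (expected values follow the piecewise formula above with delta = 1.0):

import paddle
import paddle.fluid as fluid

paddle.disable_static()
x = paddle.to_tensor([[1.0], [2.0]])
y = paddle.to_tensor([[0.0], [0.5]])
loss = fluid.layers.huber_loss(x, y, delta=1.0)
# residuals are -1.0 and -1.5 -> losses 0.5 and 1.0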
@@ -39,10 +39,10 @@ class TestLabelSmoothOp(OpTest):
        self.outputs = {'Out': smoothed_label}

    def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

    def test_check_grad(self):
-        self.check_grad(["X"], "Out", check_eager=False)
+        self.check_grad(["X"], "Out", check_eager=True)


class TestLabelSmoothOpWithPriorDist(TestLabelSmoothOp):
......
@@ -77,10 +77,10 @@ class TestScatterNdAddSimpleOp(OpTest):
        self.outputs = {'Out': expect_np}

    def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

    def test_check_grad(self):
-        self.check_grad(['X', 'Updates'], 'Out', check_eager=False)
+        self.check_grad(['X', 'Updates'], 'Out', check_eager=True)


class TestScatterNdAddWithEmptyIndex(OpTest):
@@ -101,10 +101,10 @@ class TestScatterNdAddWithEmptyIndex(OpTest):
        self.outputs = {'Out': expect_np}

    def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

    def test_check_grad(self):
-        self.check_grad(['X', 'Updates'], 'Out', check_eager=False)
+        self.check_grad(['X', 'Updates'], 'Out', check_eager=True)


class TestScatterNdAddWithHighRankSame(OpTest):
@@ -128,10 +128,10 @@ class TestScatterNdAddWithHighRankSame(OpTest):
        self.outputs = {'Out': expect_np}

    def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

    def test_check_grad(self):
-        self.check_grad(['X', 'Updates'], 'Out', check_eager=False)
+        self.check_grad(['X', 'Updates'], 'Out', check_eager=True)


class TestScatterNdAddWithHighRankDiff(OpTest):
@@ -154,10 +154,10 @@ class TestScatterNdAddWithHighRankDiff(OpTest):
        self.outputs = {'Out': expect_np}

    def test_check_output(self):
-        self.check_output(check_eager=False)
+        self.check_output(check_eager=True)

    def test_check_grad(self):
-        self.check_grad(['X', 'Updates'], 'Out', check_eager=False)
+        self.check_grad(['X', 'Updates'], 'Out', check_eager=True)


#Test Python API
......
@@ -1623,6 +1623,10 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
            #[[[0.03333334 0.93333334 0.03333334]
            #  [0.93333334 0.03333334 0.93333334]]]
    """
+    if in_dygraph_mode():
+        return _C_ops.final_state_label_smooth(label, prior_dist,
+                                               float(epsilon))
+
    if epsilon > 1. or epsilon < 0.:
        raise ValueError("The value of epsilon must be between 0 and 1.")
......
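A quick usage sketch of the public API in eager mode; with three classes and a uniform prior the result matches the docstring above, i.e. (1 - 0.1) * label + 0.1 / 3:

import paddle
import paddle.nn.functional as F

paddle.disable_static()
label = paddle.to_tensor([[0.0, 1.0, 0.0]])
smoothed = F.label_smooth(label, epsilon=0.1)
# -> [[0.0333..., 0.9333..., 0.0333...]]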
@@ -830,7 +830,7 @@
    func : HuberLossInferMeta
  kernel :
    func : huber_loss
-  # backward : huber_loss_grad
+  backward : huber_loss_grad

- api : imag
  args : (Tensor x)
@@ -934,6 +934,19 @@
    func : kthvalue
  backward : kthvalue_grad

+# label_smooth
+- api : label_smooth
+  args : (Tensor label, Tensor prior_dist, float epsilon)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [label]
+  kernel :
+    func : label_smooth
+    data_type : label
+  optional : prior_dist
+  backward : label_smooth_grad
+
# leaky_relu
- api : leaky_relu
  args : (Tensor x, float alpha)
......
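Note on the new entry above: following this file's conventions, `optional : prior_dist` marks that tensor input as nullable (Python may pass None for it), and `data_type : label` tells the code generator to pick the kernel dtype from `label` rather than from the optional input.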
@@ -100,7 +100,7 @@
    func : asinh_grad

- backward_api : atan2_grad
-  forward : cross (Tensor x, Tensor y) -> Tensor(out)
+  forward : atan2 (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
@@ -193,7 +193,7 @@
    func : cholesky_grad

- backward_api : cholesky_solve_grad
-  forward : cholesky (Tensor x, Tensor y, bool upper) -> Tensor(out)
+  forward : cholesky_solve (Tensor x, Tensor y, bool upper) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
@@ -414,7 +414,7 @@
    data_type : out_grad

- backward_api : erfinv_grad
-  forward : erf (Tensor x) -> Tensor(out)
+  forward : erfinv (Tensor x) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
@@ -568,6 +568,16 @@
  kernel :
    func : hard_sigmoid_grad

+- backward_api : huber_loss_grad
+  forward : huber_loss (Tensor input, Tensor label, float delta) -> Tensor(out), Tensor(residual)
+  args : (Tensor residual, Tensor out_grad, float delta)
+  output : Tensor(input_grad), Tensor(label_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param : [residual, residual]
+  kernel :
+    func : huber_loss_grad
+
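The backward entry consumes the saved `residual` instead of replaying the forward inputs: the derivative of the Huber loss w.r.t. the residual is the residual clipped to [-delta, delta], and since residual = label - input the two gradients differ only in sign. A numpy sketch of that logic (an illustration, not the registered kernel):

import numpy as np

def huber_loss_grad_ref(residual, out_grad, delta):
    # residual in the quadratic region, +-delta in the linear region
    dr = np.clip(residual, -delta, delta) * out_grad
    return -dr, dr  # (input_grad, label_grad)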
- backward_api : imag_grad
  forward : imag (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
@@ -639,7 +649,7 @@
    func : leaky_relu_grad

- backward_api : lerp_grad
-  forward : transpose (Tensor x, Tensor y, Tensor weight) -> Tensor(out)
+  forward : lerp (Tensor x, Tensor y, Tensor weight) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor weight, Tensor out, Tensor out_grad)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
@@ -898,7 +908,7 @@
    func : mode_grad

- backward_api : modulo_grad
-  forward : add (Tensor x, Tensor y) -> Tensor(out)
+  forward : modulo (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
@@ -1141,14 +1151,14 @@
  no_need_buffer : updates

- backward_api : scatter_nd_add_grad
-  forward : scatter (Tensor x, Tensor index, Tensor updates) -> Tensor(out)
+  forward : scatter_nd_add (Tensor x, Tensor index, Tensor updates) -> Tensor(out)
  args : (Tensor index, Tensor updates, Tensor out_grad)
  output : Tensor(x_grad), Tensor(updates_grad)
  infer_meta :
    func : ScatterNdAddGradInferMeta
    param : [index, updates, out_grad]
  kernel :
-    func : scatter_nd_grad
+    func : scatter_nd_add_grad
  no_need_buffer : updates

- backward_api : segment_pool_grad
......
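The kernel rename in the entry above also matches the math: because the op is pure addition, the gradient w.r.t. `x` is `out_grad` unchanged, and the gradient w.r.t. `updates` is a gather of `out_grad` at the same indices. A numpy sketch (illustrative only):

import numpy as np

def scatter_nd_add_grad_ref(index, updates, out_grad):
    x_grad = out_grad  # addition passes the gradient through
    flat_index = index.reshape(-1, index.shape[-1])
    # gather out_grad at each coordinate, then restore updates' shape
    gathered = np.stack([out_grad[tuple(c)] for c in flat_index])
    return x_grad, gathered.reshape(updates.shape)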
{
-  "phi_apis":["conj", "dropout", "expand_as", "flatten", "nll_loss", "psroi_pool", "roi_align", "roi_pool"],
+  "phi_apis":["conj", "dropout", "expand_as", "flatten", "nll_loss", "psroi_pool", "roi_align", "roi_pool", "label_smooth"],
  "phi_kernels":["equal_all"]
}