Unverified · Commit fa250aa1, authored by: H hong, committed by: GitHub

Add expand as sigmoid api (#41311)

* update expand and sigmoid with cross entropy

* skip expand as infrt check

* fix sigmoid cross entropy bug

* remove no grad set white list

* remove no grad set

* fix bug

* fix sigmoid error

* fix bug
Parent 1b031987
@@ -1463,6 +1463,10 @@ def sigmoid_cross_entropy_with_logits(x,
                                            ignore_index=-1, normalize=True)
             print(loss)
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_sigmoid_cross_entropy_with_logits(
+            x, label, normalize, int(ignore_index))
+
     check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                              'sigmoid_cross_entropy_with_logits')

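For context, a minimal dygraph usage sketch (not part of this patch) of the layer whose fast path is added above; the shapes and random values are illustrative only:

    import numpy as np
    import paddle
    import paddle.fluid as fluid

    paddle.disable_static()
    x = paddle.to_tensor(np.random.uniform(-1, 1, [4, 10]).astype('float32'))
    label = paddle.to_tensor(np.random.randint(0, 2, [4, 10]).astype('float32'))
    # With this patch, the dygraph call below dispatches to the
    # final_state_sigmoid_cross_entropy_with_logits kernel when eager mode is on.
    loss = fluid.layers.sigmoid_cross_entropy_with_logits(
        x, label, ignore_index=-1, normalize=True)
    print(loss.numpy())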
@@ -17,6 +17,7 @@ import paddle.fluid as fluid
 import numpy as np
 import unittest
 from op_test import OpTest
+from paddle.fluid.framework import _test_eager_guard


 def call_bce_layer(logit, label, weight=None, reduction='mean',

@@ -81,7 +82,7 @@ def test_dygraph(place,
                  reduction='mean',
                  pos_weight_np=None,
                  functional=False):
-    paddle.disable_static()
+    with paddle.fluid.dygraph.base.guard():
         logit = paddle.to_tensor(logit_np)
         label = paddle.to_tensor(label_np)
         weight = None

@@ -96,7 +97,6 @@ def test_dygraph(place,
         else:
             dy_res = call_bce_layer(logit, label, weight, reduction, pos_weight)
         dy_result = dy_res.numpy()
-    paddle.enable_static()
         return dy_result

@@ -154,9 +154,19 @@ class TestBCEWithLogitsLoss(unittest.TestCase):
                 label_np,
                 reduction=reduction,
                 functional=True)
+            with _test_eager_guard():
+                eager_functional = test_dygraph(
+                    place,
+                    logit_np,
+                    label_np,
+                    reduction=reduction,
+                    functional=True)
+
             self.assertTrue(np.allclose(static_functional, expected))
             self.assertTrue(np.allclose(static_functional, dy_functional))
             self.assertTrue(np.allclose(dy_functional, expected))
+            self.assertTrue(np.allclose(eager_functional, expected))

     def test_BCEWithLogitsLoss_weight(self):
         logit_np = np.random.uniform(

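For reference, a minimal sketch (not part of the diff) of paddle.nn.functional.binary_cross_entropy_with_logits, which these tests exercise in their functional branch; shapes and values are illustrative:

    import numpy as np
    import paddle
    import paddle.nn.functional as F

    logit = paddle.to_tensor(
        np.random.uniform(0.1, 0.8, [3, 5]).astype('float64'))
    label = paddle.to_tensor(
        np.random.randint(0, 2, [3, 5]).astype('float64'))
    # Roughly the call made by the tests when functional=True.
    out = F.binary_cross_entropy_with_logits(logit, label, reduction='mean')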
@@ -21,78 +21,63 @@ import paddle
 import paddle.fluid as fluid


-class TestExpandAsOpRank1(OpTest):
+class TestExpandAsBasic(OpTest):
     def setUp(self):
         self.op_type = "expand_as_v2"
         self.python_api = paddle.expand_as
         x = np.random.rand(100).astype("float64")
         target_tensor = np.random.rand(2, 100).astype("float64")
-        self.inputs = {'X': x}
+        self.inputs = {'X': x, "Y": target_tensor}
         self.attrs = {'target_shape': target_tensor.shape}
         bcast_dims = [2, 1]
         output = np.tile(self.inputs['X'], bcast_dims)
         self.outputs = {'Out': output}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


-class TestExpandAsOpRank2(OpTest):
+class TestExpandAsOpRank2(TestExpandAsBasic):
     def setUp(self):
         self.op_type = "expand_as_v2"
+        self.python_api = paddle.expand_as
         x = np.random.rand(10, 12).astype("float64")
         target_tensor = np.random.rand(10, 12).astype("float64")
-        self.inputs = {'X': x}
+        self.inputs = {'X': x, "Y": target_tensor}
         self.attrs = {'target_shape': target_tensor.shape}
         bcast_dims = [1, 1]
         output = np.tile(self.inputs['X'], bcast_dims)
         self.outputs = {'Out': output}

-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')


-class TestExpandAsOpRank3(OpTest):
+class TestExpandAsOpRank3(TestExpandAsBasic):
     def setUp(self):
         self.op_type = "expand_as_v2"
+        self.python_api = paddle.expand_as
         x = np.random.rand(2, 3, 20).astype("float64")
         target_tensor = np.random.rand(2, 3, 20).astype("float64")
-        self.inputs = {'X': x}
+        self.inputs = {'X': x, "Y": target_tensor}
         self.attrs = {'target_shape': target_tensor.shape}
         bcast_dims = [1, 1, 1]
         output = np.tile(self.inputs['X'], bcast_dims)
         self.outputs = {'Out': output}

-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')


-class TestExpandAsOpRank4(OpTest):
+class TestExpandAsOpRank4(TestExpandAsBasic):
     def setUp(self):
         self.op_type = "expand_as_v2"
+        self.python_api = paddle.expand_as
         x = np.random.rand(1, 1, 7, 16).astype("float64")
         target_tensor = np.random.rand(4, 6, 7, 16).astype("float64")
-        self.inputs = {'X': x}
+        self.inputs = {'X': x, "Y": target_tensor}
         self.attrs = {'target_shape': target_tensor.shape}
         bcast_dims = [4, 6, 1, 1]
         output = np.tile(self.inputs['X'], bcast_dims)
         self.outputs = {'Out': output}

-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out')


 class TestExpandAsV2Error(unittest.TestCase):
     def test_errors(self):

@@ -130,4 +115,5 @@ class TestExpandAsV2API(unittest.TestCase):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()

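As a sanity check outside OpTest, the expectation these tests build with np.tile can be reproduced directly; a minimal sketch (not part of the diff), assuming dygraph mode (Paddle's default outside these unit tests):

    import numpy as np
    import paddle

    np_x = np.random.rand(100).astype("float64")
    np_y = np.random.rand(2, 100).astype("float64")

    out = paddle.expand_as(paddle.to_tensor(np_x), paddle.to_tensor(np_y))
    expected = np.tile(np_x, [2, 1])   # same bcast_dims as TestExpandAsBasic
    assert np.allclose(out.numpy(), expected)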
@@ -22,6 +22,12 @@ import paddle.fluid.core as core
 import unittest
 from paddle.fluid import compiler, Program, program_guard
 import paddle.fluid as fluid
+import paddle
+
+
+def test_fluid_sigmoid(x, label, normalize=False, ignore_index=-100):
+    return paddle.fluid.layers.sigmoid_cross_entropy_with_logits(
+        x, label, int(ignore_index), normalize=normalize)


 class TestSigmoidCrossEntropyWithLogitsOp1(OpTest):

@@ -30,6 +36,7 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = test_fluid_sigmoid
         batch_size = 64
         num_classes = 20
         self.inputs = {

@@ -49,10 +56,10 @@ class TestSigmoidCrossEntropyWithLogitsOp1(OpTest):
         self.outputs = {'Out': -term1 - term2}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSigmoidCrossEntropyWithLogitsOp2(OpTest):

@@ -61,6 +68,7 @@ class TestSigmoidCrossEntropyWithLogitsOp2(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = test_fluid_sigmoid
         batch_size = 64
         num_classes = 20
         ignore_index = -1

@@ -83,10 +91,10 @@ class TestSigmoidCrossEntropyWithLogitsOp2(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSigmoidCrossEntropyWithLogitsOp3(OpTest):

@@ -95,6 +103,7 @@ class TestSigmoidCrossEntropyWithLogitsOp3(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = test_fluid_sigmoid
         batch_size = 64
         num_classes = 20
         self.inputs = {

@@ -114,15 +123,16 @@ class TestSigmoidCrossEntropyWithLogitsOp3(OpTest):
         self.outputs = {'Out': -term1 - term2}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSigmoidCrossEntropyWithNorm(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = test_fluid_sigmoid
         batch_size = 64
         num_classes = 20
         ignore_index = -1

@@ -145,10 +155,10 @@ class TestSigmoidCrossEntropyWithNorm(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSigmoidCrossEntropyWithLogitsOp5(OpTest):

@@ -157,6 +167,7 @@ class TestSigmoidCrossEntropyWithLogitsOp5(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = test_fluid_sigmoid
         batch_size = [10, 10]
         num_classes = 20
         self.inputs = {

@@ -176,15 +187,16 @@ class TestSigmoidCrossEntropyWithLogitsOp5(OpTest):
         self.outputs = {'Out': -term1 - term2}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSigmoidCrossEntropyWithNorm2(OpTest):
     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = test_fluid_sigmoid
         batch_size = [10, 10]
         num_classes = 20
         ignore_index = -1

@@ -207,25 +219,26 @@ class TestSigmoidCrossEntropyWithNorm2(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSigmoidCrossEntropyWithLogitsOp6(OpTest):
     """Test sigmoid_cross_entropy_with_logit_op with binary label
     """

     def setUp(self):
         self.op_type = "sigmoid_cross_entropy_with_logits"
+        self.python_api = test_fluid_sigmoid
         batch_size = [10, 10]
         num_classes = 20
         self.inputs = {
             'X': logit(
                 np.random.uniform(0, 1, tuple(batch_size + [num_classes]))
                 .astype("float64")),
-            'Label': np.random.randint(0, 2, tuple(batch_size + [num_classes]))
+            'Label':
+            np.random.randint(0, 2, tuple(batch_size + [num_classes]))
             .astype("float64")
         }

@@ -238,22 +251,23 @@ class TestSigmoidCrossEntropyWithLogitsOp6(OpTest):
         self.outputs = {'Out': -term1 - term2}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSigmoidCrossEntropyWithLogitsOpError(unittest.TestCase):
     def test_errors(self):
         with program_guard(Program(), Program()):

             def test_Variable():
                 # the input of sigmoid_cross_entropy_with_logits must be Variable.
                 x1 = fluid.create_lod_tensor(
-                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
+                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]],
+                    fluid.CPUPlace())
                 lab1 = fluid.create_lod_tensor(
-                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]], fluid.CPUPlace())
+                    np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]],
+                    fluid.CPUPlace())
                 fluid.layers.sigmoid_cross_entropy_with_logits(x1, lab1)

             self.assertRaises(TypeError, test_Variable)

@@ -271,4 +285,5 @@ class TestSigmoidCrossEntropyWithLogitsOpError(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()

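The expected outputs referenced above (term1, term2) follow the elementwise sigmoid cross-entropy definition; a NumPy sketch of that reference computation, assuming the term1/term2 definitions used in the full test file (expit is scipy's sigmoid):

    import numpy as np
    from scipy.special import logit, expit

    batch_size, num_classes = 64, 20
    x = logit(np.random.uniform(0, 1, (batch_size, num_classes)).astype("float64"))
    label = np.random.uniform(0, 1, (batch_size, num_classes)).astype("float64")

    sigmoid_x = expit(x)
    term1 = label * np.log(sigmoid_x)
    term2 = (1 - label) * np.log(1 - sigmoid_x)
    out = -term1 - term2   # matches self.outputs = {'Out': -term1 - term2}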
@@ -18,6 +18,7 @@ import numpy as np
 import unittest
 from op_test import OpTest
 from test_sigmoid_focal_loss_op import sigmoid_focal_loss_forward
+from paddle.fluid.framework import _test_eager_guard


 def call_sfl_functional(logit,

@@ -140,6 +141,10 @@ class TestSigmoidFocalLoss(unittest.TestCase):
                     dy_result = test_dygraph(place, logit_np, label_np,
                                              normalizer_np, alpha,
                                              gamma, reduction)
+                    with _test_eager_guard():
+                        eager_result = test_dygraph(
+                            place, logit_np, label_np, normalizer_np,
+                            alpha, gamma, reduction)
                     expected = calc_sigmoid_focal_loss(
                         logit_np, label_np, normalizer_np, alpha, gamma,
                         reduction)

@@ -148,6 +153,7 @@ class TestSigmoidFocalLoss(unittest.TestCase):
                     self.assertTrue(
                         np.allclose(static_result, dy_result))
                     self.assertTrue(np.allclose(dy_result, expected))
+                    self.assertTrue(np.allclose(eager_result, expected))

     def test_SigmoidFocalLoss_error(self):
         paddle.disable_static()

@@ -259,11 +259,15 @@ def binary_cross_entropy_with_logits(logit,
             "should be 'sum', 'mean' or 'none', but received %s, which is not allowed."
             % reduction)

-    if in_dynamic_mode():
+    if _non_static_mode():
         one = _varbase_creator(dtype=logit.dtype)
         _C_ops.fill_constant(one, 'value',
                              float(1.0), 'force_cpu', False, 'dtype', one.dtype,
                              'str_value', '1.0', 'shape', [1])
-        out = _C_ops.sigmoid_cross_entropy_with_logits(logit, label)
+        if in_dygraph_mode():
+            out = _C_ops.final_state_sigmoid_cross_entropy_with_logits(
+                logit, label, False, -100)
+        else:
+            out = _C_ops.sigmoid_cross_entropy_with_logits(logit, label)
         if pos_weight is not None:
             log_weight = _C_ops.elementwise_add(

@@ -2024,11 +2028,15 @@ def sigmoid_focal_loss(logit,
             "Expected one dimension of normalizer in sigmoid_focal_loss but got {}.".
             format(normalizer_dims))

-    if in_dynamic_mode():
+    if _non_static_mode():
         one = _varbase_creator(dtype=logit.dtype)
         _C_ops.fill_constant(one, 'value',
                              float(1.0), 'force_cpu', False, 'dtype', one.dtype,
                              'str_value', '1.0', 'shape', logit.shape)
-        loss = _C_ops.sigmoid_cross_entropy_with_logits(logit, label)
+        if in_dygraph_mode():
+            loss = _C_ops.final_state_sigmoid_cross_entropy_with_logits(
+                logit, label, False, -100)
+        else:
+            loss = _C_ops.sigmoid_cross_entropy_with_logits(logit, label)
         pred = _C_ops.sigmoid(logit)
         p_t = _C_ops.elementwise_add(

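For reference, a minimal dygraph usage sketch (not part of the diff) of paddle.nn.functional.sigmoid_focal_loss, one of the two public APIs whose internal dispatch is rerouted above; the values are adapted from the API's documented example:

    import paddle
    import paddle.nn.functional as F

    logit = paddle.to_tensor([[0.97, 0.91, 0.03]], dtype='float32')
    label = paddle.to_tensor([[1.0, 0.0, 0.0]], dtype='float32')

    # normalizer: number of foreground (positive) labels
    one = paddle.to_tensor([1.0], dtype='float32')
    fg_num = paddle.sum(paddle.cast(paddle.greater_equal(label, one), 'float32'))

    # reduction defaults to 'sum'; alpha/gamma keep their defaults (0.25, 2.0)
    loss = F.sigmoid_focal_loss(logit, label, normalizer=fg_num)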
@@ -1837,6 +1837,9 @@ def expand_as(x, y, name=None):
             np_out = out.numpy()
             # [[1, 2, 3], [1, 2, 3]]
     """
+    if in_dygraph_mode():
+        return _C_ops.final_state_expand_as(x, None, y.shape)
+
     if _non_static_mode():
         return _C_ops.expand_as_v2(x, 'target_shape', y.shape)

@@ -566,6 +566,17 @@
   func : erfinv
   backward : erfinv_grad

+# expand_as
+- api : expand_as
+  args : (Tensor x, Tensor y, int[] target_shape)
+  output : Tensor
+  infer_meta :
+    func : ExpandAsInferMeta
+  kernel :
+    func : expand_as
+  optional : y
+  backward : expand_as_grad
+
 - api : expm1
   args : (Tensor x)
   output : Tensor

@@ -373,6 +373,16 @@
   kernel :
     func : erfinv_grad

+- backward_api : expand_as_grad
+  forward : expand_as (Tensor x, Tensor y, int[] target_shape) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, int[] target_shape)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : expand_as_grad
+
 - backward_api : expm1_grad
   forward : expm1 (Tensor x) -> Tensor(out)
   args : (Tensor out, Tensor out_grad)

 {
-    "phi_apis":["conj", "nll_loss", "dropout", "flatten"],
+    "phi_apis":["conj", "nll_loss", "flatten", "expand_as", "dropout"],
     "phi_kernels":["equal_all"]
 }