Unverified · Commit 36f97cdc · Author: xiongkun · Committer: GitHub

[Yaml] add yaml for 5 ops [ elementwise_pow, expm1, floor_divide, logsumexp, mish ] (#41288)

* add yaml for ele_max ele_min

* add yaml for: mish / logsumexp / expm1 / elementwise_pow / elementwise_floordiv
Parent 1d8246b0
@@ -46,7 +46,7 @@ void LogsumexpGradKernel(const Context& dev_ctx,
                          const DenseTensor& in,
                          const DenseTensor& out,
                          const DenseTensor& out_grad,
-                         const std::vector<int>& axis,
+                         const std::vector<int64_t>& axis,
                          bool keepdim,
                          bool reduce_all,
                          DenseTensor* in_grad) {
@@ -67,22 +67,27 @@ void LogsumexpGradKernel(const Context& dev_ctx,
   } else {
     int rank = in.dims().size();
     LogsumexpGradFunctor functor;
+    std::vector<int32_t> axis32;
+    axis32.reserve(axis.size());
+    std::for_each(axis.begin(), axis.end(), [&axis32](const int64_t& t) {
+      axis32.push_back(t);
+    });
     switch (rank) {
       case 1:
         phi::funcs::ReduceGradFunctor<Context, T, 1, LogsumexpGradFunctor>(
-            dev_ctx, in, out, out_grad, in_grad, functor, axis);
+            dev_ctx, in, out, out_grad, in_grad, functor, axis32);
         break;
       case 2:
         phi::funcs::ReduceGradFunctor<Context, T, 2, LogsumexpGradFunctor>(
-            dev_ctx, in, out, out_grad, in_grad, functor, axis);
+            dev_ctx, in, out, out_grad, in_grad, functor, axis32);
         break;
       case 3:
         phi::funcs::ReduceGradFunctor<Context, T, 3, LogsumexpGradFunctor>(
-            dev_ctx, in, out, out_grad, in_grad, functor, axis);
+            dev_ctx, in, out, out_grad, in_grad, functor, axis32);
         break;
       case 4:
         phi::funcs::ReduceGradFunctor<Context, T, 4, LogsumexpGradFunctor>(
-            dev_ctx, in, out, out_grad, in_grad, functor, axis);
+            dev_ctx, in, out, out_grad, in_grad, functor, axis32);
         break;
     }
   }
......
@@ -23,7 +23,7 @@ void LogsumexpGradKernel(const Context& ctx,
                          const DenseTensor& in,
                          const DenseTensor& out,
                          const DenseTensor& out_grad,
-                         const std::vector<int>& axis,
+                         const std::vector<int64_t>& axis,
                          bool keepdim,
                          bool reduce_all,
                          DenseTensor* in_grad);
......
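The two kernel hunks above widen the axis argument of LogsumexpGradKernel from std::vector<int> to std::vector<int64_t>, converting back to int32 only where ReduceGradFunctor still expects it. A minimal sketch of the eager-mode Python path that exercises this kernel (assuming a build of this branch; shapes are illustrative):

    import paddle

    x = paddle.rand([2, 3, 4], dtype='float64')
    x.stop_gradient = False
    y = paddle.logsumexp(x, axis=[-1])   # forward: logsumexp kernel
    y.sum().backward()                   # backward: logsumexp_grad kernel
    print(x.grad.shape)                  # [2, 3, 4]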
@@ -15349,7 +15349,9 @@ def mish(x, threshold=20, name=None):
             out, = exe.run(feed={'x':x_data}, fetch_list=[y.name])
             print(out)  # [[0.66666667, 1.66666667, 3., 4.]]
     """
-    if _non_static_mode():
+    if in_dygraph_mode():
+        return _C_ops.final_state_mish(x, threshold)
+    if _in_legacy_dygraph():
         return _C_ops.mish(x, 'threshold', threshold)
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'mish')
......
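The hunk above replaces the single _non_static_mode() branch in paddle.fluid.layers.nn.mish with the three-way dispatch used throughout this commit: the new eager mode calls the generated final_state_mish op, legacy dygraph keeps the old C op, and the static-graph path is unchanged. A hedged usage sketch (the expected values mirror the F.mish docstring further below):

    import paddle

    paddle.disable_static()                            # eager mode -> final_state_mish
    x = paddle.to_tensor([-5., 0., 5.])
    out = paddle.fluid.layers.nn.mish(x, threshold=20)
    print(out.numpy())                                 # approx. [-0.03357624, 0., 4.99955208]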
@@ -83,6 +83,7 @@ class TestActivation(OpTest):
 class TestExpm1(TestActivation):
     def setUp(self):
         self.op_type = "expm1"
+        self.python_api = paddle.expm1
         self.init_dtype()
 
         np.random.seed(2049)
@@ -93,7 +94,10 @@ class TestExpm1(TestActivation):
         self.outputs = {'Out': out}
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
+
+    def test_check_output(self):
+        self.check_output(check_eager=True)
 
 class TestExpm1API(unittest.TestCase):
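The TestExpm1 changes above show the pattern repeated by every test touched in this commit: setUp gains a python_api pointing at the public Python entry point, and check_output/check_grad gain check_eager=True so OpTest also runs the case through the eager, final-state path. Condensed into one sketch (assuming op_test is importable from the test directory, as in these files):

    import numpy as np
    import paddle
    from op_test import OpTest  # test utility used by the files in this diff


    class TestExpm1Sketch(OpTest):                 # condensed restatement, not part of the commit
        def setUp(self):
            self.op_type = "expm1"                 # legacy op name
            self.python_api = paddle.expm1         # public API used for the eager check
            x = np.random.uniform(0.1, 1, [11, 17]).astype("float64")
            self.inputs = {'X': x}
            self.outputs = {'Out': np.expm1(x)}

        def test_check_output(self):
            self.check_output(check_eager=True)    # also verifies the final-state kernel

        def test_check_grad(self):
            self.check_grad(['X'], 'Out', check_eager=True)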
@@ -3002,6 +3006,7 @@ def ref_mish(x, threshold=20.):
 class TestMish(TestActivation):
     def setUp(self):
         self.op_type = "mish"
+        self.python_api = paddle.fluid.layers.nn.mish
         self.init_dtype()
 
         np.random.seed(1024)
@@ -3010,10 +3015,13 @@ class TestMish(TestActivation):
         self.inputs = {'X': x}
         self.outputs = {'Out': out}
 
+    def test_check_output(self):
+        self.check_output(check_eager=True)
+
     def test_check_grad(self):
         if self.dtype == np.float16:
             return
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 class TestMishAPI(unittest.TestCase):
......
@@ -29,6 +29,7 @@ class TestElementwiseModOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_floordiv"
+        self.python_api = paddle.floor_divide
         self.dtype = np.int32
         self.axis = -1
         self.init_dtype()
@@ -44,7 +45,7 @@ class TestElementwiseModOp(OpTest):
         self.outputs = {'Out': self.out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def init_input_output(self):
         self.x = np.random.uniform(0, 10000, [10, 10]).astype(self.dtype)
......
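paddle.floor_divide is the public API that the new python_api line points at; a short usage example for reference (values computed by hand):

    import paddle

    x = paddle.to_tensor([7, 8, 9], dtype='int32')
    y = paddle.to_tensor([2, 3, 4], dtype='int32')
    print(paddle.floor_divide(x, y).numpy())   # expected: [3 2 2]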
@@ -17,11 +17,13 @@ import unittest
 import numpy as np
 from op_test import OpTest, skip_check_grad_ci
 import paddle.fluid as fluid
+import paddle
 
 class TestElementwisePowOp(OpTest):
     def setUp(self):
         self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
         self.inputs = {
             'X': np.random.uniform(1, 2, [20, 5]).astype("float64"),
             'Y': np.random.uniform(1, 2, [20, 5]).astype("float64")
@@ -29,15 +31,22 @@ class TestElementwisePowOp(OpTest):
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
 
     def test_check_output(self):
-        self.check_output()
+        if hasattr(self, 'attrs'):
+            self.check_output(check_eager=False)
+        else:
+            self.check_output(check_eager=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        if hasattr(self, 'attrs'):
+            self.check_grad(['X', 'Y'], 'Out', check_eager=False)
+        else:
+            self.check_grad(['X', 'Y'], 'Out', check_eager=True)
 
 class TestElementwisePowOp_big_shape_1(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
         self.inputs = {
             'X': np.random.uniform(1, 2, [10, 10]).astype("float64"),
             'Y': np.random.uniform(0.1, 1, [10, 10]).astype("float64")
@@ -48,6 +57,7 @@ class TestElementwisePowOp_big_shape_1(TestElementwisePowOp):
 class TestElementwisePowOp_big_shape_2(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
         self.inputs = {
             'X': np.random.uniform(1, 2, [10, 10]).astype("float64"),
             'Y': np.random.uniform(0.2, 2, [10, 10]).astype("float64")
@@ -60,6 +70,7 @@ class TestElementwisePowOp_big_shape_2(TestElementwisePowOp):
 class TestElementwisePowOp_scalar(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [3, 3, 4]).astype(np.float64),
             'Y': np.random.uniform(0.1, 1, [1]).astype(np.float64)
@@ -70,6 +81,7 @@ class TestElementwisePowOp_scalar(TestElementwisePowOp):
 class TestElementwisePowOp_tensor(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [100]).astype("float64"),
             'Y': np.random.uniform(1, 3, [100]).astype("float64")
@@ -80,6 +92,7 @@ class TestElementwisePowOp_tensor(TestElementwisePowOp):
 class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [2, 1, 100]).astype("float64"),
             'Y': np.random.uniform(0.1, 1, [100]).astype("float64")
@@ -90,6 +103,7 @@ class TestElementwisePowOp_broadcast_0(TestElementwisePowOp):
 class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [2, 100, 1]).astype("float64"),
             'Y': np.random.uniform(0.1, 1, [100]).astype("float64")
@@ -103,6 +117,7 @@ class TestElementwisePowOp_broadcast_1(TestElementwisePowOp):
 class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [100, 3, 1]).astype("float64"),
             'Y': np.random.uniform(0.1, 1, [100]).astype("float64")
@@ -117,6 +132,7 @@ class TestElementwisePowOp_broadcast_2(TestElementwisePowOp):
 class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [2, 20, 5, 1]).astype("float64"),
             'Y': np.random.uniform(0.1, 1, [20, 5]).astype("float64")
@@ -131,6 +147,7 @@ class TestElementwisePowOp_broadcast_3(TestElementwisePowOp):
 class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
     def setUp(self):
         self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
         self.inputs = {
             'X': np.random.uniform(0.1, 1, [2, 10, 3, 5]).astype("float64"),
             'Y': np.random.uniform(0.1, 1, [2, 10, 1, 5]).astype("float64")
@@ -141,11 +158,15 @@ class TestElementwisePowOp_broadcast_4(TestElementwisePowOp):
 class TestElementwisePowOpInt(OpTest):
     def setUp(self):
         self.op_type = "elementwise_pow"
+        self.python_api = paddle.pow
         self.inputs = {'X': np.asarray([1, 3, 6]), 'Y': np.asarray([1, 1, 1])}
         self.outputs = {'Out': np.power(self.inputs['X'], self.inputs['Y'])}
 
     def test_check_output(self):
-        self.check_output()
+        if hasattr(self, 'attrs'):
+            self.check_output(check_eager=False)
+        else:
+            self.check_output(check_eager=True)
 
 class TestElementwisePowGradOpInt(unittest.TestCase):
......
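The broadcast cases set an axis attr, and the final-state elementwise_pow defined later in this commit takes only (Tensor x, Tensor y), which is presumably why check_eager is disabled whenever self.attrs exists; the attr-free cases go through paddle.pow in eager mode. A small usage sketch of that public API:

    import paddle

    x = paddle.to_tensor([1., 2., 3.])
    y = paddle.to_tensor([2., 2., 2.])
    print(paddle.pow(x, y).numpy())   # tensor exponent -> elementwise_pow; [1., 4., 9.]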
@@ -29,9 +29,16 @@ def ref_logsumexp(x, axis=None, keepdim=False, reduce_all=False):
     return out
 
+def logsumexp_wrapper(x, axis=None, keepdim=False, allreduce=False):
+    if allreduce:
+        return paddle.logsumexp(x, None, keepdim)
+    return paddle.logsumexp(x, axis, keepdim)
+
 class TestLogsumexp(OpTest):
     def setUp(self):
         self.op_type = 'logsumexp'
+        self.python_api = logsumexp_wrapper
         self.shape = [2, 3, 4, 5]
         self.dtype = 'float64'
         self.axis = [-1]
@@ -61,13 +68,14 @@ class TestLogsumexp(OpTest):
         pass
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
         self.check_grad(
             ['X'], ['Out'],
             user_defined_grads=self.user_defined_grads,
-            user_defined_grad_outputs=self.user_defined_grad_outputs)
+            user_defined_grad_outputs=self.user_defined_grad_outputs,
+            check_eager=True)
 
     def calc_grad(self):
         dy = np.ones(1, dtype=self.dtype)
......
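logsumexp_wrapper adapts the op attributes to the public paddle.logsumexp signature so OpTest can replay each case through the eager API; when reduce_all is set it passes axis=None, which paddle.logsumexp treats as reducing over every dimension. A quick check of that equivalence (a sketch, not part of the commit):

    import numpy as np
    import paddle

    x = paddle.to_tensor(np.random.rand(2, 3).astype('float64'))
    full = paddle.logsumexp(x, axis=None)      # reduce over all dims
    same = paddle.logsumexp(x, axis=[0, 1])    # explicit full-axis list
    print(np.allclose(full.numpy(), same.numpy()))   # expected: True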
@@ -1220,7 +1220,9 @@ def mish(x, name=None):
             x = paddle.to_tensor([-5., 0., 5.])
             out = F.mish(x)  # [-0.03357624, 0., 4.99955208]
     """
-    if in_dynamic_mode():
+    if in_dygraph_mode():
+        return _C_ops.final_state_mish(x, 20)
+    if _in_legacy_dygraph():
         return _C_ops.mish(x)
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mish')
......
@@ -1565,7 +1565,11 @@ def logsumexp(x, axis=None, keepdim=False, name=None):
     if axis is None or len(axis) == 0:
         axis = [0]
 
-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
+        if reduce_all:
+            axis = range(len(x.shape))
+        return _C_ops.final_state_logsumexp(x, axis, keepdim, reduce_all)
+    if _in_legacy_dygraph():
         return _C_ops.logsumexp(x, 'axis', axis, 'keepdim', keepdim, 'reduce_all', reduce_all)
     check_variable_and_dtype(x, 'x',
......
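Unlike the legacy op, the final-state logsumexp kernel receives the axis list directly, so when reduce_all is true the Python layer first expands axis to cover every dimension. The user-visible behaviour is unchanged; a brief illustration:

    import paddle

    x = paddle.rand([2, 3, 4])
    full = paddle.logsumexp(x)                        # axis=None -> reduce over all dims
    part = paddle.logsumexp(x, axis=1)                # result shape [2, 4]
    keep = paddle.logsumexp(x, axis=1, keepdim=True)  # result shape [2, 1, 4]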
@@ -422,6 +422,15 @@
     func : eigh
   backward : eigh_grad
 
+- api : elementwise_pow
+  args : (Tensor x, Tensor y)
+  output : Tensor(out)
+  infer_meta :
+    func : ElementwiseInferMeta
+  kernel :
+    func : elementwise_pow
+  backward : elementwise_pow_grad
+
 # elu
 - api : elu
   args : (Tensor x, float alpha)
@@ -485,6 +494,16 @@
     func : erfinv
   backward : erfinv_grad
 
+- api : expm1
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : expm1
+  backward : expm1_grad
+
 - api : flatten
   args : (Tensor x, int start_axis, int stop_axis)
   output : Tensor
@@ -511,6 +530,14 @@
     func : floor
   backward : floor_grad
 
+- api : floor_divide
+  args : (Tensor x, Tensor y)
+  output : Tensor(out)
+  infer_meta :
+    func : ElementwiseInferMeta
+  kernel :
+    func : floor_divide
+
 - api : fmax
   args : (Tensor x, Tensor y, int axis)
   output : Tensor(out)
@@ -878,6 +905,15 @@
     func : logsigmoid
   backward : logsigmoid_grad
 
+- api : logsumexp
+  args : (Tensor x, int64_t[] axis, bool keepdim, bool reduce_all)
+  output : Tensor(out)
+  infer_meta :
+    func : LogsumexpInferMeta
+  kernel :
+    func : logsumexp
+  backward : logsumexp_grad
+
 # masked_select
 - api : masked_select
   args : (Tensor x, Tensor mask)
@@ -954,6 +990,16 @@
     func : minimum
   backward : minimum_grad
 
+- api : mish
+  args : (Tensor x, float lambda)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : mish
+  backward : mish_grad
+
 - api : mode
   args : (Tensor x, int axis, bool keepdim)
   output : Tensor(out), Tensor(indices)
......
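Each entry above feeds the PHI code generator, which emits the C++ API and the eager Python binding that the earlier hunks call as _C_ops.final_state_<api>. As a summary, these are the public APIs that are assumed to route to the new final-state kernels in eager mode after this commit:

    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([0.5, 1.0, 2.0])
    y = paddle.to_tensor([2.0, 3.0, 0.5])

    paddle.expm1(x)                                   # expm1
    paddle.pow(x, y)                                  # elementwise_pow (tensor exponent)
    paddle.floor_divide(paddle.to_tensor([7, 9]), paddle.to_tensor([2, 4]))  # floor_divide
    paddle.logsumexp(x, axis=[0])                     # logsumexp
    F.mish(x)                                         # mish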
@@ -271,6 +271,16 @@
   kernel :
     func : eigh_grad
 
+- backward_api : elementwise_pow_grad
+  forward : elementwise_pow(Tensor x, Tensor y) -> Tensor(out)
+  args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
+  output : Tensor(x_grad), Tensor(y_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param : [x, y]
+  kernel :
+    func : elementwise_pow_grad
+
 - backward_api : elu_grad
   forward : elu (Tensor x, float alpha) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, float alpha)
@@ -302,6 +312,16 @@
   kernel :
     func : erfinv_grad
 
+- backward_api : expm1_grad
+  forward : expm1 (Tensor x) -> Tensor(out)
+  args : (Tensor out, Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out]
+  kernel :
+    func : expm1_grad
+
 - backward_api : floor_grad
   forward : floor(Tensor x) -> Tensor(out)
   args : (Tensor out_grad)
@@ -514,6 +534,16 @@
   kernel :
     func : logsigmoid_grad
 
+- backward_api : logsumexp_grad
+  forward : logsumexp(Tensor x, int64_t[] axis, bool keepdim, bool reduce_all) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keepdim, bool reduce_all)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : logsumexp_grad
+
 - backward_api : masked_select_grad
   forward : masked_select (Tensor x, Tensor mask) -> Tensor(out)
   args : (Tensor x, Tensor mask, Tensor out_grad)
@@ -607,6 +637,16 @@
   kernel :
     func : minimum_grad
 
+- backward_api : mish_grad
+  forward : mish (Tensor x, float threshold) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float threshold)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : mish_grad
+
 - backward_api : mode_grad
   forward : mode(Tensor x, int axis, bool keepdim) -> Tensor(out), Tensor(indices)
   args : (Tensor x, Tensor indices, Tensor out_grad, int axis, bool keepdim)
......
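The backward entries above are what check_eager=True exercises in the updated tests: a gradient requested in eager mode dispatches to the listed *_grad kernels with exactly the declared arguments (expm1_grad, for example, needs only out and out_grad). A minimal, hedged sketch of triggering one of them from user code:

    import paddle

    x = paddle.to_tensor([0.5, 1.0, 2.0], stop_gradient=False)
    y = paddle.expm1(x)                    # forward: expm1
    (grad,) = paddle.grad(y.sum(), [x])    # backward: expm1_grad
    print(grad.numpy())                    # d/dx expm1(x) = exp(x) = expm1(x) + 1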