Unverified commit 68643a9e, authored by Zhanlue Yang, committed by GitHub

[DoubleGrad] Enabled test_autograd_functional_dynamic.py under eager mode (#41668) (#41895)

* [DoubleGrad] Enabled double grad test cases in eager_mode for test_imperative_double_grad

* Fixed elementwise issue

* Addressed CI failures

* [DoubleGrad] Enabled test_imperative_triple_grad test cases under eager_mode

* [DoubleGrad] Enabled test_autograd_functional_dynamic.py under eager mode

* Enabled more test cases

* Fixed performance issues

* Fixed minor issue
Parent commit: e568268b
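For context, the eager-mode double-grad behavior enabled here can be reproduced with a short script (a minimal sketch using the public paddle.grad API and the _test_eager_guard test helper; the function below is illustrative, not code from this PR):

import paddle
from paddle.fluid.framework import _test_eager_guard

def second_order_of_sigmoid():
    x = paddle.to_tensor([0.5], stop_gradient=False)
    y = paddle.nn.functional.sigmoid(x)
    # create_graph=True keeps the first-order graph so it can be differentiated again
    (dy_dx,) = paddle.grad(y, x, create_graph=True)
    # second-order grad; under eager mode this is expected to hit sigmoid_double_grad
    (d2y_dx2,) = paddle.grad(dy_dx, x)
    return d2y_dx2

with _test_eager_guard():  # run under the eager (final-state) dygraph engine
    print(second_order_of_sigmoid())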
......@@ -22,9 +22,16 @@ import os
### Global Variables ###
########################
ops_to_fill_zero_for_empty_grads = set([
"split_grad", "rnn_grad", "matmul_double_grad", "matmul_triple_grad",
"sigmoid_double_grad", "sigmoid_triple_grad", "add_double_grad",
"add_triple_grad"
"split_grad",
"rnn_grad",
"matmul_double_grad",
"matmul_triple_grad",
"sigmoid_double_grad",
"sigmoid_triple_grad",
"add_double_grad",
"add_triple_grad",
"multiply_double_grad",
"multiply_triple_grad",
])
# For API dispatch used at python-level
......
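To illustrate what this whitelist means (the helper below is hypothetical and only sketches the semantics; the real logic lives in the generated eager code): grad kernels listed here expect all of their grad inputs to be defined, so an empty incoming grad is replaced with zeros of the matching shape before dispatch.

import paddle

def fill_empty_grad(op_name, incoming_grad, reference_tensor):
    # hypothetical helper; uses the ops_to_fill_zero_for_empty_grads set defined above
    if incoming_grad is None and op_name in ops_to_fill_zero_for_empty_grads:
        return paddle.zeros_like(reference_tensor)
    return incoming_grad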
......@@ -107,6 +107,8 @@ class AutogradMeta : public AbstractAutogradMeta {
GradNodeBase* GradNode() const { return grad_node_.get(); }
void ResetGradNode() { grad_node_.reset(); }
void SetSingleOutRankWithSlot(size_t slot_id, size_t rank) {
out_slot_id_ = slot_id;
out_rank_ = rank;
......
......@@ -53,7 +53,7 @@ class GeneralGrad {
auto* target_node = auto_grad_meta->GetMutableGradNode().get();
if (orig_to_copied_node_mapping_.count(target_node)) {
target_node = orig_to_copied_node_mapping_[target_node];
target_node = orig_to_copied_node_mapping_[target_node].get();
} else {
VLOG(6) << "Unable to find target node in "
"orig_to_copied_node_mapping_, likely indicating an "
......@@ -261,7 +261,7 @@ class GeneralGrad {
auto* target_node = auto_grad_meta->GetMutableGradNode().get();
if (orig_to_copied_node_mapping_.count(target_node)) {
target_node = orig_to_copied_node_mapping_[target_node];
target_node = orig_to_copied_node_mapping_[target_node].get();
} else {
VLOG(6) << "Unable to find target node in "
"orig_to_copied_node_mapping_, likely indicating an unused "
......@@ -349,12 +349,12 @@ class GeneralGrad {
GradNodeBase* CopyGradNode(const std::shared_ptr<GradNodeBase>& orig_node) {
if (orig_to_copied_node_mapping_.count(orig_node.get())) {
return orig_to_copied_node_mapping_[orig_node.get()];
return orig_to_copied_node_mapping_[orig_node.get()].get();
}
std::shared_ptr<GradNodeBase> copied_node = orig_node->Copy();
// Save node and update mapping
orig_to_copied_node_mapping_[orig_node.get()] = copied_node.get();
orig_to_copied_node_mapping_[orig_node.get()] = copied_node;
copied_grad_nodes_.push_back(copied_node);
return copied_node.get();
......@@ -379,7 +379,7 @@ class GeneralGrad {
paddle::platform::errors::Fatal(
"Cannot reconstruct backward graph,"
"unable to find copied target for certain grad node."));
GradNodeBase* copied_node = orig_to_copied_node_mapping_[orig_node];
GradNodeBase* copied_node = orig_to_copied_node_mapping_[orig_node].get();
const std::vector<std::vector<Edge>>& orig_edges = orig_node->GetEdges();
std::vector<std::vector<Edge>>& copied_edges =
......@@ -397,13 +397,12 @@ class GeneralGrad {
std::shared_ptr<GradNodeBase> copied_next_node;
if (orig_to_copied_node_mapping_.count(orig_next_node.get())) {
copied_next_node =
orig_to_copied_node_mapping_[orig_next_node.get()]
->shared_from_this();
orig_to_copied_node_mapping_[orig_next_node.get()];
} else {
copied_next_node = orig_next_node->Copy();
orig_to_copied_node_mapping_[orig_next_node.get()] =
copied_next_node.get();
copied_next_node;
copied_grad_nodes_.push_back(copied_next_node);
}
......@@ -436,7 +435,8 @@ class GeneralGrad {
std::unordered_map<GradNodeBase*, paddle::experimental::Tensor> results_map;
std::vector<std::shared_ptr<GradNodeBase>> copied_grad_nodes_;
std::unordered_map<GradNodeBase*, GradNodeBase*> orig_to_copied_node_mapping_;
std::unordered_map<GradNodeBase*, std::shared_ptr<GradNodeBase>>
orig_to_copied_node_mapping_;
DISABLE_COPY_AND_ASSIGN(GeneralGrad);
};
......@@ -534,6 +534,7 @@ std::vector<paddle::experimental::Tensor> RunBackward(
// GeneralGrad
bool is_general_grad = !inputs.empty();
if (is_general_grad) GeneralGrad::Instance().Clear();
/* --- Initialization --- */
// 1. Init queue with starting nodes
......@@ -746,6 +747,7 @@ std::vector<paddle::experimental::Tensor> RunBackward(
VLOG(6) << "We get grad_output_tensor with slot: " << i
<< ", rank: " << j << " as uninitialized or undefined tensor";
}
VLOG(6) << "Get Edge and grad_output_tensor with slot: " << i
<< ", rank: " << j
<< " 's name is: " << grad_output_tensor.name();
......
......@@ -87,7 +87,7 @@ class GradSlotMeta {
std::shared_ptr<phi::DenseTensorMeta> meta_ = nullptr;
};
class GradNodeBase : public std::enable_shared_from_this<GradNodeBase> {
class GradNodeBase {
public:
GradNodeBase() { VLOG(6) << "Construct GradNodeBase"; }
GradNodeBase(size_t bwd_in_slot_num, size_t bwd_out_slot_num);
......
......@@ -79,9 +79,9 @@ class TensorWrapper {
auto* tensor_autograd_meta = EagerUtils::nullable_autograd_meta(tensor);
if (tensor_autograd_meta) {
auto autograd_meta = std::make_shared<AutogradMeta>(
Edge(nullptr, EagerUtils::OutRankInfo(tensor)));
autograd_meta->SetStopGradient(tensor_autograd_meta->StopGradient());
auto autograd_meta =
std::make_shared<AutogradMeta>(*tensor_autograd_meta);
autograd_meta->ResetGradNode();
intermidiate_tensor_.set_autograd_meta(autograd_meta);
weak_grad_node_ = tensor_autograd_meta->GetMutableGradNode();
}
......@@ -98,8 +98,11 @@ class TensorWrapper {
check_inplace_version();
// if it's full_reserved just return the full copy of tensor
if (full_reserved_) {
return intermidiate_tensor_;
} else {
paddle::experimental::Tensor recovered_tensor = intermidiate_tensor_;
if (!full_reserved_) {
std::shared_ptr<GradNodeBase> new_grad_node = weak_grad_node_.lock();
if (new_grad_node) {
VLOG(3) << "Recovered TensorWrapper with GradNode "
......@@ -109,18 +112,16 @@ class TensorWrapper {
}
auto* intermediate_autograd_meta =
EagerUtils::unsafe_autograd_meta(intermidiate_tensor_);
auto p_ab_autograd_meta = std::make_shared<AutogradMeta>(
Edge(new_grad_node, intermediate_autograd_meta->OutRankInfo()));
p_ab_autograd_meta->SetStopGradient(
intermediate_autograd_meta->StopGradient());
recovered_tensor.set_autograd_meta(
std::static_pointer_cast<paddle::experimental::AbstractAutogradMeta>(
p_ab_autograd_meta));
auto p_ab_autograd_meta =
std::make_shared<AutogradMeta>(*intermediate_autograd_meta);
if (new_grad_node) {
p_ab_autograd_meta->SetGradNode(new_grad_node);
}
recovered_tensor.set_autograd_meta(p_ab_autograd_meta);
return recovered_tensor;
}
}
void check_inplace_version() {
if (no_need_buffer_) {
......
......@@ -100,6 +100,8 @@ void GatherNdGradInferMeta(const MetaTensor& x,
const MetaTensor& out_grad,
MetaTensor* x_grad);
void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);
void GeneralBinaryGradInferMeta(const MetaTensor& x,
const MetaTensor& y,
MetaTensor* dx,
......@@ -132,8 +134,6 @@ void GeneralQuinaryGradInferMeta(const MetaTensor& x,
MetaTensor* dk,
MetaTensor* dl);
void GeneralUnaryGradInferMeta(const MetaTensor& x, MetaTensor* dx);
void GumbelSoftmaxGradInferMeta(const MetaTensor& out,
const MetaTensor& dout,
int axis,
......
......@@ -943,8 +943,10 @@ def batch_jacobian(func, inputs, create_graph=False, allow_unused=False):
# [0., 1., 0., 1., 0., 1., 0., 1.]]))
'''
inputs = _as_tensors(inputs)
outputs = _as_tensors(func(*inputs))
batch_size = inputs[0].shape[0]
for input in inputs:
assert input.shape[
......@@ -961,12 +963,14 @@ def batch_jacobian(func, inputs, create_graph=False, allow_unused=False):
for i, flat_output in enumerate(flat_outputs):
jac_i = list([] for _ in range(fin_size))
for k in range(flat_output.shape[1]):
row_k = paddle.grad(
flat_output[:, k],
inputs,
create_graph=create_graph,
retain_graph=True,
allow_unused=allow_unused)
for j in range(fin_size):
jac_i[j].append(
paddle.reshape(
......
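For reference, the batch_jacobian function being updated above can be called like this (a hedged usage sketch; the exact output layout follows the docstring excerpt above and may differ across versions):

import paddle

def func(x):
    return x * x  # elementwise, so the batch dimension is preserved

x = paddle.rand([4, 3])
x.stop_gradient = False
jac = paddle.autograd.batch_jacobian(func, x)  # per-sample Jacobians flattened along the batch axis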
......@@ -205,7 +205,14 @@ class TestVJP(TestAutogradFunctional):
self.check_results(ref_result, aliased_result)
def test_all_cases(self):
if _in_legacy_dygraph():
with _test_eager_guard():
self.func_vjp_i1o1()
self.func_vjp_i2o1()
self.func_vjp_i2o2()
self.func_vjp_i2o2_omitting_v()
self.func_vjp_nested()
self.func_vjp_aliased_input()
self.func_vjp_i1o1()
self.func_vjp_i2o1()
self.func_vjp_i2o2()
......@@ -227,7 +234,8 @@ class TestVJPException(unittest.TestCase):
paddle.to_tensor(self.v))
def test_all_cases(self):
if _in_legacy_dygraph():
with _test_eager_guard():
self.func_vjp()
self.func_vjp()
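The same pattern is applied to every test_all_cases in this file: run each case once inside _test_eager_guard (eager, final-state engine) and once more afterwards in the mode the process started in (legacy dygraph for these tests). A standalone sketch with a placeholder test name:

import unittest
import paddle
from paddle.fluid.framework import _test_eager_guard

class ExampleDualModeTest(unittest.TestCase):  # placeholder name, not part of the PR
    def func_check(self):
        x = paddle.to_tensor([1.0], stop_gradient=False)
        (g,) = paddle.grad(paddle.tanh(x), x)
        self.assertEqual(g.shape, [1])

    def test_all_cases(self):
        with _test_eager_guard():  # eager mode pass
            self.func_check()
        self.func_check()          # legacy dygraph pass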
......@@ -303,7 +311,11 @@ class TestJVP(TestAutogradFunctional):
self.check_results(results_omitting_v, results_with_v)
def test_all_cases(self):
if _in_legacy_dygraph():
with _test_eager_guard():
self.func_jvp_i1o1()
self.func_jvp_i2o1()
self.func_jvp_i2o2()
self.func_jvp_i2o2_omitting_v()
self.func_jvp_i1o1()
self.func_jvp_i2o1()
self.func_jvp_i2o2()
......@@ -328,12 +340,12 @@ class TestJacobianClassNoBatch(unittest.TestCase):
self._atol = config.TOLERANCE.get(str(self._dtype)).get(
"first_order_grad").get("atol")
self.xs = [paddle.to_tensor(x) for x in self.xs] if isinstance(
def func_jacobian(self):
xs = [paddle.to_tensor(x) for x in self.xs] if isinstance(
self.xs, typing.Sequence) else paddle.to_tensor(self.xs)
self._actual = paddle.autograd.Jacobian(self.func, self.xs, False)
self._expected = self._expected()
self._actual = paddle.autograd.Jacobian(self.func, xs, False)
self._expected = self._get_expected()
def func_jacobian(self):
Index = collections.namedtuple('Index', ('type', 'value'))
indexes = (Index('all', (slice(0, None, None), slice(0, None, None))),
Index('row', (0, slice(0, None, None))),
......@@ -349,13 +361,16 @@ class TestJacobianClassNoBatch(unittest.TestCase):
err_msg=f'Testcase {index.type} index not passed, value is {index.value}'
)
def _expected(self):
jac = utils._compute_numerical_jacobian(self.func, self.xs, self._eps,
def _get_expected(self):
xs = [paddle.to_tensor(x) for x in self.xs] if isinstance(
self.xs, typing.Sequence) else paddle.to_tensor(self.xs)
jac = utils._compute_numerical_jacobian(self.func, xs, self._eps,
self._dtype)
return utils._np_concat_matrix_sequence(jac, utils.MatrixFormat.NM)
def test_all_cases(self):
if _in_legacy_dygraph():
with _test_eager_guard():
self.func_jacobian()
self.func_jacobian()
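As background for these cases, paddle.autograd.Jacobian builds the Jacobian lazily and is indexed like a 2-D matrix; a small hedged example (shapes assume a square 3x3 input):

import paddle

def func(x):
    return paddle.matmul(x, x)

x = paddle.rand([3, 3])
x.stop_gradient = False
J = paddle.autograd.Jacobian(func, x)  # is_batched defaults to False
full = J[:, :]                         # materializes the flattened [9, 9] Jacobian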
......@@ -375,12 +390,12 @@ class TestJacobianClassBatchFirst(unittest.TestCase):
self._atol = config.TOLERANCE.get(str(self._dtype)).get(
"first_order_grad").get("atol")
self.xs = [paddle.to_tensor(x) for x in self.xs] if isinstance(
def func_jacobian(self):
xs = [paddle.to_tensor(x) for x in self.xs] if isinstance(
self.xs, typing.Sequence) else paddle.to_tensor(self.xs)
self._actual = paddle.autograd.Jacobian(self.func, self.xs, True)
self._expected = self._expected()
self._actual = paddle.autograd.Jacobian(self.func, xs, True)
self._expected = self._get_expected()
def func_jacobian(self):
Index = collections.namedtuple('Index', ('type', 'value'))
indexes = (
Index('all', (slice(0, None, None), slice(0, None, None),
......@@ -402,15 +417,18 @@ class TestJacobianClassBatchFirst(unittest.TestCase):
err_msg=f'Testcase {index.type} index not passed, value is {index.value}'
)
def _expected(self):
jac = utils._compute_numerical_batch_jacobian(
self.func, self.xs, self._eps, self._dtype, False)
def _get_expected(self):
xs = [paddle.to_tensor(x) for x in self.xs] if isinstance(
self.xs, typing.Sequence) else paddle.to_tensor(self.xs)
jac = utils._compute_numerical_batch_jacobian(self.func, xs, self._eps,
self._dtype, False)
jac = utils._np_concat_matrix_sequence(jac, utils.MatrixFormat.NBM)
return utils._np_transpose_matrix_format(jac, utils.MatrixFormat.NBM,
utils.MatrixFormat.BNM)
def test_all_cases(self):
if _in_legacy_dygraph():
with _test_eager_guard():
self.func_jacobian()
self.func_jacobian()
......@@ -492,7 +510,14 @@ class TestHessianClassNoBatch(unittest.TestCase):
paddle.autograd.Hessian(func, paddle.ones([3]))
def test_all_cases(self):
if _in_legacy_dygraph():
with _test_eager_guard():
self.setUpClass()
self.func_single_input()
self.func_multi_input()
self.func_allow_unused_true()
self.func_create_graph_true()
self.func_out_not_single()
self.setUpClass()
self.func_single_input()
self.func_multi_input()
self.func_allow_unused_true()
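paddle.autograd.Hessian, exercised above, expects a function with a single scalar-like output and differentiates it twice, which is why these cases depend on the double-grad kernels enabled by this PR; a hedged example:

import paddle

def func(x):
    return paddle.sum(paddle.nn.functional.sigmoid(x))

x = paddle.rand([3])
x.stop_gradient = False
H = paddle.autograd.Hessian(func, x)
full = H[:]  # dense [3, 3] matrix of second derivatives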
......@@ -599,7 +624,14 @@ class TestHessianClassBatchFirst(unittest.TestCase):
paddle.autograd.Hessian(func, paddle.ones((3, 3)), is_batched=True)
def test_all_cases(self):
if _in_legacy_dygraph():
with _test_eager_guard():
self.setUpClass()
self.func_single_input()
self.func_multi_input()
self.func_allow_unused()
self.func_stop_gradient()
self.func_out_not_single()
self.setUpClass()
self.func_single_input()
self.func_multi_input()
self.func_allow_unused()
......@@ -619,6 +651,7 @@ class TestHessian(unittest.TestCase):
"second_order_grad").get("rtol")
self.atol = config.TOLERANCE.get(self.dtype).get(
"second_order_grad").get("atol")
self.x = paddle.rand(shape=self.shape, dtype=self.dtype)
self.y = paddle.rand(shape=self.shape, dtype=self.dtype)
......@@ -694,9 +727,10 @@ class TestHessian(unittest.TestCase):
self.rtol, self.atol)
try:
paddle.grad(hessian, self.x)
except RuntimeError as e:
except Exception as e:
error_msg = cpt.get_exception_message(e)
assert error_msg.find("has no gradient") > 0
assert error_msg.find("has no gradient") > 0 or error_msg.find(
"does not appear") > 0
def func_create_graph_true(self):
def func(x):
......@@ -713,7 +747,15 @@ class TestHessian(unittest.TestCase):
assert triple_grad is not None
def test_all_cases(self):
if _in_legacy_dygraph():
with _test_eager_guard():
self.setUpClass()
self.func_single_input()
self.func_multi_input()
self.func_allow_unused_false()
self.func_allow_unused_true()
self.func_create_graph_false()
self.func_create_graph_true()
self.setUpClass()
self.func_single_input()
self.func_multi_input()
self.func_allow_unused_false()
......@@ -830,9 +872,10 @@ class TestBatchHessian(unittest.TestCase):
self.rtol, self.atol)
try:
paddle.grad(hessian, self.x)
except RuntimeError as e:
except Exception as e:
error_msg = cpt.get_exception_message(e)
assert error_msg.find("has no gradient") > 0
assert error_msg.find("has no gradient") > 0 or error_msg.find(
"does not appear") > 0
def func_create_graph_true(self):
def func(x):
......@@ -849,7 +892,15 @@ class TestBatchHessian(unittest.TestCase):
assert triple_grad is not None
def test_all_cases(self):
if _in_legacy_dygraph():
with _test_eager_guard():
self.setUpClass()
self.func_single_input()
self.func_multi_input()
self.func_allow_unused_false()
self.func_allow_unused_true()
self.func_create_graph_false()
self.func_create_graph_true()
self.setUpClass()
self.func_single_input()
self.func_multi_input()
self.func_allow_unused_false()
......@@ -985,7 +1036,14 @@ class TestVHP(unittest.TestCase):
assert triple_grad is not None
def test_all_cases(self):
if _in_legacy_dygraph():
with _test_eager_guard():
self.setUpClass()
self.func_v_default()
self.func_multi_input()
self.func_single_input()
self.func_allow_unused_true()
self.func_create_graph_true()
self.setUpClass()
self.func_v_default()
self.func_multi_input()
self.func_single_input()
......@@ -1100,9 +1158,10 @@ class TestJacobian(unittest.TestCase):
self.atol)
try:
paddle.grad(jacobian[0], [self.x, self.y])
except RuntimeError as e:
except Exception as e:
error_msg = cpt.get_exception_message(e)
assert error_msg.find("has no gradient") > 0
assert error_msg.find("has no gradient") > 0 or error_msg.find(
"does not appear") > 0
def func_create_graph_true(self):
def func(x, y):
......@@ -1123,7 +1182,17 @@ class TestJacobian(unittest.TestCase):
assert double_grad is not None
def test_all_cases(self):
if _in_legacy_dygraph():
with _test_eager_guard():
self.setUpClass()
self.func_multi_input_and_multi_output()
self.func_multi_input_and_single_output()
self.func_single_input_and_multi_output()
self.func_single_input_and_single_output()
self.func_allow_unused_false()
self.func_allow_unused_true()
self.func_create_graph_false()
self.func_create_graph_true()
self.setUpClass()
self.func_multi_input_and_multi_output()
self.func_multi_input_and_single_output()
self.func_single_input_and_multi_output()
......@@ -1269,9 +1338,10 @@ class TestJacobianBatch(unittest.TestCase):
self.atol)
try:
paddle.grad(jacobian[0], [self.x, self.y])
except RuntimeError as e:
except Exception as e:
error_msg = cpt.get_exception_message(e)
assert error_msg.find("has no gradient") > 0
assert error_msg.find("has no gradient") > 0 or error_msg.find(
"does not appear") > 0
def func_create_graph_true(self):
def func(x, y):
......@@ -1292,7 +1362,17 @@ class TestJacobianBatch(unittest.TestCase):
assert double_grad is not None
def test_all_cases(self):
if _in_legacy_dygraph():
with _test_eager_guard():
self.setUpClass()
self.func_batch_single_input_and_batch_single_output()
self.func_batch_single_input_and_batch_multi_output()
self.func_batch_multi_input_and_batch_single_output()
self.func_batch_multi_input_and_batch_multi_output()
self.func_allow_unused_false()
self.func_allow_unused_true()
self.func_create_graph_false()
self.func_create_graph_true()
self.setUpClass()
self.func_batch_single_input_and_batch_single_output()
self.func_batch_single_input_and_batch_multi_output()
self.func_batch_multi_input_and_batch_single_output()
......
......@@ -1103,7 +1103,15 @@ def t(input, name=None):
"Input(input) only support N-D (N<=2) tensor, but received "
"length of Input(input) is %s. Perhaps you can use paddle."
"tensor.transpose() instead." % len(input.shape))
if paddle.in_dynamic_mode():
if in_dygraph_mode():
if len(input.shape) == 1:
return input
# 2-D tensor
perm = [1, 0]
out = _C_ops.final_state_transpose(input, perm)
return out
if _in_legacy_dygraph():
if len(input.shape) == 1:
return input
# 2-D tensor
......
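The new in_dygraph_mode branch keeps paddle.t's contract unchanged: 1-D inputs are returned as-is and 2-D inputs are transposed, now through the final-state transpose kernel. Quick usage check:

import paddle

v = paddle.rand([5])
m = paddle.rand([2, 3])
print(paddle.t(v).shape)  # [5], 1-D tensors pass through unchanged
print(paddle.t(m).shape)  # [3, 2]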
......@@ -1060,6 +1060,7 @@
kernel :
func : multiply_double_grad
optional : grad_x_grad, grad_y_grad
backward : multiply_triple_grad
- backward_api : multiply_grad
forward : multiply (Tensor x, Tensor y) -> Tensor(out)
......@@ -1072,6 +1073,17 @@
func : multiply_grad
backward : multiply_double_grad
- backward_api : multiply_triple_grad
forward : multiply_double_grad (Tensor x, Tensor y, Tensor fwd_grad_out, Tensor fwd_grad_grad_x, Tensor fwd_grad_grad_y, int axis = -1) -> Tensor(grad_x), Tensor(grad_y), Tensor(grad_grad_out)
args : (Tensor x, Tensor y, Tensor fwd_grad_out, Tensor fwd_grad_grad_x, Tensor fwd_grad_grad_y, Tensor grad_x_grad, Tensor grad_y_grad, Tensor grad_grad_out_grad, int axis = -1)
output : Tensor(x_grad), Tensor(y_grad), Tensor(fwd_grad_out_grad), Tensor(fwd_grad_grad_x_grad), Tensor(fwd_grad_grad_y_grad)
infer_meta :
func : GeneralQuinaryGradInferMeta
param : [x, y, fwd_grad_out, x, y]
kernel :
func : multiply_triple_grad
optional : fwd_grad_grad_x, fwd_grad_grad_y, grad_grad_out_grad
- backward_api : mv_grad
forward : mv (Tensor x, Tensor vec) -> Tensor(out)
args : (Tensor x, Tensor vec, Tensor out_grad)
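With multiply_double_grad now declaring multiply_triple_grad as its backward, third-order differentiation through multiply should compose under eager mode. A minimal hedged check (expected values follow from f = x**3, so the third derivative is 6):

import paddle

x = paddle.to_tensor([2.0], stop_gradient=False)
f = paddle.multiply(paddle.multiply(x, x), x)  # f = x**3, built only from multiply ops
(g1,) = paddle.grad(f, x, create_graph=True)   # 3 * x**2
(g2,) = paddle.grad(g1, x, create_graph=True)  # 6 * x  (second order)
(g3,) = paddle.grad(g2, x)                     # 6      (third order)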
......@@ -1239,6 +1251,16 @@
func : relu_grad
backward: relu_double_grad
- backward_api : reshape_double_grad
forward : reshape_grad (Tensor xshape, Tensor grad_out) -> Tensor(grad_x)
args : (Tensor grad_out, Tensor grad_x_grad)
output : Tensor(grad_out_grad)
infer_meta :
func : UnchangedInferMeta
param : [grad_out]
kernel :
func : reshape_double_grad
- backward_api : reshape_grad
forward : reshape_with_xshape (Tensor x, IntArray shape) -> Tensor(out), Tensor(xshape)
args : (Tensor xshape, Tensor out_grad)
......@@ -1252,6 +1274,7 @@
data_type: out_grad
backend: out_grad
layout: out_grad
backward : reshape_double_grad
- backward_api : roi_align_grad
forward : roi_align (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned) -> Tensor(out)
......@@ -1540,6 +1563,13 @@
func : subtract_grad
no_need_buffer : x, y
- backward_api : sum_double_grad
forward : sum_grad (Tensor x, Tensor grad_out, int64_t[] dims, bool keep_dim, bool reduce_all=false) -> Tensor(grad_x)
args : (Tensor grad_x_grad, int64_t[] dims={}, bool keep_dim=false)
output : Tensor(grad_out_grad)
invoke : sum(grad_x_grad, dims, grad_x_grad.dtype(), keep_dim)
backward : sum_triple_grad
- backward_api : sum_grad
forward : sum (Tensor x, int64_t[] dims={}, DataType out_dtype=paddle::experimental::DataType::UNDEFINED, bool keep_dim=false) -> Tensor(out)
args : (Tensor x, Tensor out_grad, int64_t[] dims, bool keep_dim, bool reduce_all=false)
......@@ -1549,6 +1579,13 @@
param : [x]
kernel :
func : sum_grad
backward : sum_double_grad
- backward_api : sum_triple_grad
forward : sum_double_grad (Tensor grad_grad_x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(grad_grad_out)
args : (Tensor grad_grad_x, Tensor grad_grad_out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
output : Tensor(grad_grad_x_grad)
invoke : sum_grad(grad_grad_x, grad_grad_out_grad, dims, keep_dim, reduce_all)
no_need_buffer : x
- backward_api : swish_grad
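sum_grad now declares sum_double_grad (implemented via invoke : sum), so reductions no longer break second-order chains. A hedged example; taking the gradient of a first-order gradient whose graph contains a sum_grad node should route through the new entry:

import paddle

x = paddle.to_tensor([1.0, 2.0, 3.0], stop_gradient=False)
y = paddle.sum(x * x)                        # scalar output
(g,) = paddle.grad(y, x, create_graph=True)  # 2 * x
(gg,) = paddle.grad(g, x)                    # 2 everywhere (ones weighting by default)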
......@@ -1643,6 +1680,12 @@
func : trace_grad
no_need_buffer : x
- backward_api : transpose_double_grad
forward : transpose_grad (Tensor grad_out, int[] axis) -> Tensor(grad_x)
args : (Tensor grad_x_grad, int[] axis)
output : Tensor(grad_out_grad)
invoke : transpose(grad_x_grad, axis)
- backward_api : transpose_grad
forward : transpose (Tensor x, int[] axis) -> Tensor(out)
args : (Tensor out_grad, int[] axis)
......@@ -1652,6 +1695,7 @@
param : [out_grad, axis]
kernel :
func : transpose_grad
backward : transpose_double_grad
- backward_api : tril_triu_grad
forward : tril_triu(Tensor x, int diagonal, bool lower) -> Tensor(out)
......
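transpose_grad likewise gains transpose_double_grad, which simply transposes the incoming grad again, so transposes stay differentiable in higher-order graphs. Hedged example:

import paddle

x = paddle.rand([2, 3])
x.stop_gradient = False
y = paddle.sum(paddle.transpose(x, [1, 0]) ** 2)
(g,) = paddle.grad(y, x, create_graph=True)
(gg,) = paddle.grad(g, x)  # second order through the transpose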