Commit c01bcbf6 authored by phlrain

fix bugs

Parent 111ee988
@@ -260,7 +260,8 @@ def generate_activation_fn(op_type):
         if in_dygraph_mode():
             if _in_eager_mode():
                 op = getattr(_C_ops, "final_state_" + op_type)
-                return op(x)
+                if op:
+                    return op(x)
             op = getattr(_C_ops, op_type)
             return op(x)
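The hunk above makes the eager dispatch tolerate ops whose "final_state_" kernel is not usable yet: instead of calling it unconditionally, it falls through to the legacy kernel. A minimal sketch of the same getattr-with-fallback pattern (the _Ops class and its members are hypothetical stand-ins for paddle's _C_ops module):

class _Ops:
    """Hypothetical stand-in for paddle's _C_ops extension module."""

    def relu(self, x):  # legacy kernel, always registered
        return max(x, 0.0)

    final_state_relu = None  # eager kernel slot; may be None when unmigrated


def call_activation(ops, op_type, x):
    # Prefer the eager ("final_state_") kernel only when it is actually
    # usable; otherwise fall back to the legacy kernel, as the hunk does.
    op = getattr(ops, "final_state_" + op_type, None)
    if op:
        return op(x)
    return getattr(ops, op_type)(x)


print(call_activation(_Ops(), "relu", -1.5))  # 0.0, via the legacy path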
......
@@ -21,7 +21,7 @@ from paddle.utils import deprecated
 from . import nn
 from .layer_function_generator import templatedoc
 from ..layer_helper import LayerHelper
-from ..framework import Variable, in_dygraph_mode, static_only, in_dygraph_mode
+from ..framework import Variable, in_dygraph_mode, static_only, in_dygraph_mode, _in_eager_mode
 from .. import core
 from ..data_feeder import check_variable_and_dtype, check_type
 from ..param_attr import ParamAttr
......
@@ -87,9 +87,9 @@ def accuracy(input, label, k=1, correct=None, total=None):
         _k = k.numpy().item(0) if isinstance(k, Variable) else k
         topk_out, topk_indices = _C_ops.top_k_v2(input, 'k', _k, 'sorted',
                                                  False)
-        if _in_eager_mode():
-            _acc = _C_ops.final_state_accuracy(topk_out, topk_indices, label)
-            return _acc
+        # if _in_eager_mode():
+        #     _acc = _C_ops.final_state_accuracy(topk_out, topk_indices, label)
+        #     return _acc
         _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
                                      total)
         return _acc
......
@@ -1501,6 +1501,7 @@ class OpTest(unittest.TestCase):
                         .recursive_sequence_lengths(), expect[1],
                         "Output (" + out_name + ") has different lod at " +
                         str(place) + " in eager dygraph mode")
+        if check_eager:
             with fluid.dygraph.base.guard():
                 with _test_eager_guard():
                     self.assertListEqual(
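For context, an operator test opts into this extra eager-mode comparison through the check_eager flag consulted above. A sketch of such a test, assuming the usual OpTest conventions of this tree (the op, data, and flag spelling are illustrative):

import numpy as np
from op_test import OpTest


class TestReluOp(OpTest):
    def setUp(self):
        self.op_type = "relu"
        x = np.random.uniform(-1, 1, (4, 4)).astype("float64")
        self.inputs = {'X': x}
        self.outputs = {'Out': np.maximum(x, 0)}

    def test_check_output(self):
        # With check_eager=True, outputs (and LoD) are compared again
        # under _test_eager_guard(), exercising the branch added above.
        self.check_output(check_eager=True)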
......
@@ -285,4 +285,5 @@ class TestFilterByInstagOp7(OpTest):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -16,6 +16,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
+import paddle

 class TestHashOp(OpTest):
@@ -140,4 +141,5 @@ class TestHashOpError(unittest.TestCase):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
@@ -460,4 +460,5 @@ class API_TestDygraphSplit(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -798,9 +798,9 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
             total = _varbase_creator(dtype="int32")
         topk_out, topk_indices = paddle.topk(input, k=k)
-        if _in_eager_mode():
-            _acc = _C_ops.final_state_accuracy(topk_out, topk_indices, label)
-            return _acc
+        # if _in_eager_mode():
+        #     _acc = _C_ops.final_state_accuracy(topk_out, topk_indices, label)
+        #     return _acc
         _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
                                      total)
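While the eager kernel stays commented out here, the public API keeps returning a single accuracy tensor through the legacy _C_ops.accuracy path. A usage sketch (not part of this diff):

import paddle

pred = paddle.to_tensor([[0.1, 0.9], [0.8, 0.2]])  # two samples, two classes
label = paddle.to_tensor([[1], [0]])               # int64 class indices
acc = paddle.metric.accuracy(input=pred, label=label, k=1)
print(float(acc))  # 1.0 -- both top-1 predictions match the labels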
......
@@ -948,9 +948,8 @@ def split(x, num_or_sections, axis=0, name=None):
             print(out1.shape)  # [3, 3, 5]
             print(out2.shape)  # [3, 3, 5]
     """
-    if paddle.in_dygraph_mode():
-        if _in_eager_mode():
-            return _C_ops.final_state_split(x, num_or_sections, dim)
+    if paddle.in_dynamic_mode() and _in_eager_mode():
+        return _C_ops.final_state_split(x, num_or_sections, axis)
     return paddle.fluid.layers.split(
         input=x, num_or_sections=num_or_sections, dim=axis, name=name)
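The consolidated condition takes the eager split kernel only when both dynamic mode and eager mode are on; either way the public behavior matches the docstring above. A usage sketch (not part of this diff):

import paddle

x = paddle.rand([3, 9, 5])
out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)
print(out0.shape)  # [3, 3, 5]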
......
@@ -166,6 +166,7 @@
   kernel :
     func : relu
   inplace : (x -> out)
+  backward : relu_grad

 - api : scale
   args : (Tensor x, Scalar scale, float bias, bool bias_after_scale)
@@ -191,7 +192,8 @@
   infer_meta :
     func : SoftmaxInferMeta
   kernel :
-    func : sotfmax
+    func : softmax
+  backward : softmax_grad
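Besides correcting the "sotfmax" kernel-name typo, the entry now declares its backward. A quick eager-mode check (a sketch; it assumes the generated kernel is reachable through the public softmax API): the gradient of sum(softmax(x)) is identically zero, because every softmax row sums to one.

import paddle

x = paddle.to_tensor([[1.0, 2.0, 3.0]], stop_gradient=False)
y = paddle.nn.functional.softmax(x, axis=-1)
y.sum().backward()
print(x.grad)  # ~0 everywhere: rows of softmax always sum to 1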
 - api : split
   args : (Tensor x, ScalarArray num_or_sections, Scalar axis)
@@ -342,15 +344,15 @@
   backward : segment_pool_grad

-# accuracy
-- api : accuracy
-  args : (Tensor x, Tensor indices, Tensor label)
-  output : Tensor(accuracy), Tensor(correct), Tensor(total)
-  infer_meta :
-    func : AccuracyInferMeta
-  kernel :
-    func : accuracy
-    dtype : x
+# # accuracy
+# - api : accuracy
+#   args : (Tensor x, Tensor indices, Tensor label)
+#   output : Tensor(accuracy), Tensor(correct), Tensor(total)
+#   infer_meta :
+#     func : AccuracyInferMeta
+#   kernel :
+#     func : accuracy
+#     dtype : x

 # sin
 - api : sin
@@ -475,6 +477,126 @@
     func : sigmoid
   backward : sigmoid_grad

+# tan
+- api : tan
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : tan
+  backward : tan_grad
+
+# tanh_shrink
+- api : tanh_shrink
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : tanh_shrink
+  backward : tanh_shrink_grad
+
+# silu
+- api : silu
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : silu
+  backward : silu_grad
+
+# logsigmoid
+- api : logsigmoid
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : logsigmoid
+  backward : logsigmoid_grad
+
+# leaky_relu
+- api : leaky_relu
+  args : (Tensor x, float alpha)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : leaky_relu
+  backward : leaky_relu_grad
+
+# thresholded_relu
+- api : thresholded_relu
+  args : (Tensor x, float threshold)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : thresholded_relu
+  backward : thresholded_relu_grad
+
+# soft_shrink
+- api : soft_shrink
+  args : (Tensor x, float lambda)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : soft_shrink
+  backward : soft_shrink_grad
+
+# hard_shrink
+- api : hard_shrink
+  args : (Tensor x, float threshold)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : hard_shrink
+  backward : hard_shrink_grad
+
+# elu
+- api : elu
+  args : (Tensor x, float alpha)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : elu
+  backward : elu_grad
+
+# brelu
+- api : brelu
+  args : (Tensor x, float t_min, float t_max)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : brelu
+  backward : brelu_grad
+
+# hard_sigmoid
+- api : hard_sigmoid
+  args : (Tensor x, float slope, float offset)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : hard_sigmoid
+  backward : hard_sigmoid_grad
+
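A representative check for the newly registered activations (a sketch; leaky_relu stands in for the rest): the declared backward should yield alpha on the negative branch.

import paddle

x = paddle.to_tensor([-2.0, 3.0], stop_gradient=False)
y = paddle.nn.functional.leaky_relu(x, negative_slope=0.01)
y.sum().backward()
print(y.numpy())       # [-0.02  3.  ]
print(x.grad.numpy())  # [0.01 1.  ] -- alpha on the negative branch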
 # arg_min # int64 ???? dtype
 - api : argmin
   args : (Tensor x, int64 axis, bool keepdims, bool flatten, int dtype)
......
@@ -75,11 +75,11 @@
   kernel :
     func : diagonal_grad

-# - backward_api : split_grad
-#   forward : split (Tensor x, ScalarArray num_or_sections, Scalar axis) -> Tensor[](out)
-#   args : (Tensor[] out_grad, Scalar axis)
-#   output : Tensor(x_grad)
-#   invoke : concat( out_grad, axis)
+- backward_api : split_grad
+  forward : split (Tensor x, ScalarArray num_or_sections, Scalar axis) -> Tensor[](out)
+  args : (Tensor[] out_grad, Scalar axis)
+  output : Tensor(x_grad)
+  invoke : concat( out_grad, axis)
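Enabling this entry works because split needs no dedicated gradient kernel: the gradient of split is just the concatenation of the pieces' gradients along the split axis, hence `invoke : concat`. A NumPy sketch of the identity:

import numpy as np

# Gradients flowing back into the three outputs of split(x, 3, axis=1)
g0, g1, g2 = np.ones((3, 3)), 2 * np.ones((3, 3)), 3 * np.ones((3, 3))
x_grad = np.concatenate([g0, g1, g2], axis=1)
print(x_grad.shape)  # (3, 9) -- matches the shape of x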
# TODO(zhangyunfei) The config of double grad and triple grad will be supported in the future.
# - backward_api : matmul_triple_grad
@@ -165,11 +165,11 @@
 - backward_api : cos_grad
   forward : cos (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : cos_grad
@@ -185,91 +185,91 @@
 - backward_api : acos_grad
   forward : acos (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : acos_grad

 - backward_api : sin_grad
   forward : sin (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : sin_grad

 - backward_api : asin_grad
   forward : asin (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : asin_grad

 - backward_api : atan_grad
   forward : atan (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : atan_grad

 - backward_api : sinh_grad
   forward : sinh (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : sinh_grad

 - backward_api : cosh_grad
   forward : cosh (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : cosh_grad

 - backward_api : asinh_grad
   forward : asinh (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : asinh_grad

 - backward_api : acosh_grad
   forward : acosh (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : acosh_grad

 - backward_api : atanh_grad
   forward : atanh (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : atanh_grad
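These signature changes follow from the calculus: the derivatives of these functions are expressions in the forward input, e.g. d sin(x)/dx = cos(x) and d asin(x)/dx = 1/sqrt(1 - x^2), so the grad kernels must receive x rather than out = f(x). A finite-difference sanity check of the sin case:

import numpy as np

x, eps = 0.3, 1e-6
fd = (np.sin(x + eps) - np.sin(x - eps)) / (2 * eps)  # numeric d sin/dx
print(np.isclose(fd, np.cos(x)))  # True: the gradient is a function of x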
@@ -293,6 +293,122 @@
   kernel :
     func : sigmoid_grad

+- backward_api : tan_grad
+  forward : tan (Tensor x) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : tan_grad
+
+- backward_api : tanh_shrink_grad
+  forward : tanh_shrink (Tensor x) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : tanh_shrink_grad
+
+- backward_api : silu_grad
+  forward : silu (Tensor x) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : silu_grad
+
+- backward_api : logsigmoid_grad
+  forward : logsigmoid (Tensor x) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : logsigmoid_grad
+
+- backward_api : leaky_relu_grad
+  forward : leaky_relu (Tensor x, float alpha) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float alpha)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : leaky_relu_grad
+
+- backward_api : thresholded_relu_grad
+  forward : thresholded_relu (Tensor x, float threshold) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float threshold)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : thresholded_relu_grad
+
+- backward_api : soft_shrink_grad
+  forward : soft_shrink (Tensor x, float lambda) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float lambda)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : soft_shrink_grad
+
+- backward_api : hard_shrink_grad
+  forward : hard_shrink (Tensor x, float threshold) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float threshold)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : hard_shrink_grad
+
+- backward_api : elu_grad
+  forward : elu (Tensor x, float alpha) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, float alpha)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : elu_grad
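elu_grad is the one entry above that takes both x and out: on the negative branch elu(x) = alpha * (exp(x) - 1), so its derivative alpha * exp(x) can be recovered as out + alpha. A NumPy sketch:

import numpy as np

alpha, x = 1.0, -0.5
out = alpha * (np.exp(x) - 1.0)
print(np.isclose(out + alpha, alpha * np.exp(x)))  # True for x < 0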
+
+- backward_api : brelu_grad
+  forward : brelu (Tensor x, float t_min, float t_max) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float t_min, float t_max)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : brelu_grad
+
+- backward_api : hard_sigmoid_grad
+  forward : hard_sigmoid (Tensor x, float slope, float offset) -> Tensor(out)
+  args : (Tensor out, Tensor out_grad, float slope, float offset)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out]
+  kernel :
+    func : hard_sigmoid_grad
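Conversely, hard_sigmoid_grad can declare `param : [out]`: out = clip(slope * x + offset, 0, 1) is piecewise linear, so the derivative is the constant slope wherever 0 < out < 1 and zero at the clamps, which is decidable from out alone. A NumPy sketch:

import numpy as np

slope, offset = 0.2, 0.5
x = np.array([-5.0, 0.0, 5.0])
out = np.clip(slope * x + offset, 0.0, 1.0)
grad = np.where((out > 0) & (out < 1), slope, 0.0)
print(grad)  # [0.  0.2 0. ]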
+
 - backward_api : argsort_grad
   forward : argsort (Tensor x, int axis, bool descending) -> Tensor(out), Tensor(indices)
   args : (Tensor indices, Tensor x, Tensor out_grad, int axis, bool descending)
......