提交 47bf4397 编写于 作者: P phlrain

fix bug

上级 c52d615f
......@@ -259,8 +259,8 @@ def generate_activation_fn(op_type):
def func(x, name=None):
if in_dygraph_mode():
if _in_eager_mode():
op = getattr(_C_ops, "final_state_" + op_type)
if op:
if hasattr(_C_ops, "final_state_" + op_type):
op = getattr(_C_ops, "final_state_" + op_type)
return op(x)
op = getattr(_C_ops, op_type)
return op(x)
......
......@@ -1458,9 +1458,9 @@ def sigmoid_cross_entropy_with_logits(x,
ignore_index=-1, normalize=True)
print(loss)
"""
if in_dygraph_mode() and _in_eager_mode():
return _C_ops.final_state_sigmoid_cross_entropy_with_logits(
x, label, normalize, ignore_index)
# if in_dygraph_mode() and _in_eager_mode():
# return _C_ops.final_state_sigmoid_cross_entropy_with_logits(
# x, label, normalize, ignore_index)
check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
'sigmoid_cross_entropy_with_logits')
......
......@@ -13329,8 +13329,8 @@ def log_loss(input, label, epsilon=1e-4, name=None):
prob = paddle.randn((10,1))
cost = F.log_loss(input=prob, label=label)
"""
if in_dygraph_mode() and _in_eager_mode():
return _C_ops.final_state_log_loss(input, label, epsilon)
# if in_dygraph_mode() and _in_eager_mode():
# return _C_ops.final_state_log_loss(input, label, epsilon)
helper = LayerHelper('log_loss', **locals())
check_variable_and_dtype(input, 'input', ['float32'], 'log_loss')
......
......@@ -117,10 +117,10 @@ class TestSumOp1(OpTest):
self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)}
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output(check_eager=False)
def test_check_grad(self):
self.check_grad(['X'], 'Out', check_eager=True)
self.check_grad(['X'], 'Out', check_eager=False)
class TestSumOp2(OpTest):
......
......@@ -43,10 +43,10 @@ class TestEighOp(OpTest):
self.x_np = np.random.random(self.x_shape).astype(self.x_type)
def test_check_output(self):
self.check_output(no_check_set=['Eigenvectors'], check_eager=True)
self.check_output(no_check_set=['Eigenvectors'], check_eager=False)
def test_grad(self):
self.check_grad(["X"], ["Eigenvalues"], check_eager=True)
self.check_grad(["X"], ["Eigenvalues"], check_eager=False)
class TestEighUPLOCase(TestEighOp):
......
......@@ -42,7 +42,7 @@ class TestErfinv(OpTest):
self.dtype = np.float64
def test_check_output(self):
self.check_output(check_eager=True)
self.check_output(check_eager=False)
def test_check_grad(self):
self.check_grad(
......@@ -50,7 +50,7 @@ class TestErfinv(OpTest):
'Out',
user_defined_grads=[self.gradient],
user_defined_grad_outputs=self.grad_out,
check_eager=True)
check_eager=False)
class TestErfinvFP32(TestErfinv):
......
......@@ -235,4 +235,5 @@ class TestDygraphGAN(unittest.TestCase):
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
......@@ -876,4 +876,5 @@ class TestImperativeOptimizerList(unittest.TestCase):
if __name__ == '__main__':
paddle.enable_static()
unittest.main()
......@@ -160,7 +160,6 @@ class TestDygraphInplace(unittest.TestCase):
var_a.stop_gradient = False
var_b = var_a**2
var_c = self.non_inplace_api_processing(
var_b) # var_b is modified inplace before using it
......
......@@ -322,8 +322,8 @@ def subtract(x, y, name=None):
axis = -1
act = None
if paddle.in_dynamic_mode():
if _in_eager_mode():
return _C_ops.final_state_subtract( x, y)
# if _in_eager_mode():
# return _C_ops.final_state_subtract( x, y)
return _elementwise_op_in_dygraph(
x, y, axis=axis, act=act, op_name=op_type)
return _elementwise_op(LayerHelper(op_type, **locals()))
......@@ -2822,7 +2822,7 @@ def sign(x, name=None):
print(out) # [1.0, 0.0, -1.0, 1.0]
"""
if paddle.in_dynamic_mode():
if _in_eager_model():
if _in_eager_mode():
return _C_op.final_state_sign(x)
return _C_ops.sign(x)
......
......@@ -296,7 +296,7 @@
param : [index]
kernel :
func : put_along_axis
dtype : x
data_type : x
backward : put_along_axis_grad
......@@ -309,7 +309,7 @@
param : [index]
kernel :
func : take_along_axis
dtype : x
data_type : x
backward : take_along_axis_grad
# matrix_power
......@@ -597,7 +597,7 @@
backward : hard_sigmoid_grad
# arg_min # int64 ???? dtype
# arg_min # int64 dtype
- api : argmin
args : (Tensor x, int64 axis, bool keepdims, bool flatten, int dtype)
output : Tensor
......@@ -606,7 +606,7 @@
kernel :
func : arg_min
# arg_max # int64 ???? dtype
# arg_max # int64 dtype
- api : argmax
args : (Tensor x, int64 axis, bool keepdims, bool flatten, int dtype)
output : Tensor
......@@ -757,7 +757,7 @@
func : cumsum
# # depthwise_conv2d
# # dropout ?? optional, intermediate
# # dropout optional, intermediate
# - api : dropout
# args : (Tensor x, Tensor seed_tensor, float p, bool is_test, str mode, int seed, bool fix_seed)
# output : Tensor(out), Tensor(mask)
......@@ -849,7 +849,7 @@
# func : graph_send_recv
# backward : graph_send_recv_grad
# # histogram int64 ???
# # histogram int64
# - api : histogram
# args : (Tensor x, int64 bins, int min, int max)
# output : Tensor
......@@ -876,7 +876,7 @@
kernel :
func : is_empty
# # isinf selected rows??? involk
# # isinf selected rows involk
# - api : isinf
# args : (Tensor x)
# output : Tensor
......@@ -885,7 +885,7 @@
# kernel :
# func : isinf
# # isnan selected rows??? involk
# # isnan selected rows involk
# - api : isnan
# args : (Tensor x)
# output : Tensor
......@@ -894,7 +894,7 @@
# kernel :
# func : isnan
# # isfinite selected rows??? involk
# # isfinite selected rows involk
# - api : isfinite
# args : (Tensor x)
# output : Tensor
......@@ -903,7 +903,7 @@
# kernel :
# func : isfinite
# label_smooth ?? optional
# label_smooth optional
# - api : label_smooth
# args : (Tensor label, Tensor prior_dist, float epsilon)
# output : Tensor
......@@ -915,7 +915,7 @@
# backward : label_smooth_grad
# optional : prior_dist
# linspace ???? start stop number
# linspace start stop number
# - api : linspace
# args : (Tensor start, Tensor stop, Tensor number, DataType dtype=DataType::FLOAT32)
# output : Tensor
......@@ -1042,7 +1042,7 @@
# func : poisson
# backward : poisson_grad
# psroi_pool ?? optional
# psroi_pool optional
# - api : psroi_pool
# args : (Tensor x, Tensor rois, Tensor rois_num, int pooled_weight, int pooled_width, int output_channels, float spatial_scale )
# output : Tensor
......@@ -1103,7 +1103,7 @@
# # set_value None api
# # sgd # need invoke
# # shape ??? selcted rows
# # shape selcted rows
# shard_index
- api : shard_index
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册