Commit 47bf4397 authored by phlrain

fix bug

Parent c52d615f
@@ -259,8 +259,8 @@ def generate_activation_fn(op_type):
     def func(x, name=None):
         if in_dygraph_mode():
             if _in_eager_mode():
-                op = getattr(_C_ops, "final_state_" + op_type)
-                if op:
+                if hasattr(_C_ops, "final_state_" + op_type):
+                    op = getattr(_C_ops, "final_state_" + op_type)
                     return op(x)
             op = getattr(_C_ops, op_type)
             return op(x)
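Aside: the left-hand guard in this first hunk was subtly broken, because getattr without a default raises AttributeError when the attribute is missing, so the "if op:" fallback could never run. A minimal standalone sketch of the two patterns, using a stand-in namespace rather than Paddle's real _C_ops module:

import types

# Stand-in for the _C_ops extension module; only "final_state_relu" exists.
_C_ops = types.SimpleNamespace(final_state_relu=lambda x: x)
op_type = "tanh"

# Old pattern: getattr with no default raises immediately for missing ops,
# so the truthiness check below it was unreachable.
try:
    op = getattr(_C_ops, "final_state_" + op_type)
except AttributeError:
    print("old pattern raises before 'if op:' is evaluated")

# New pattern: probe with hasattr first, then fall through to the legacy op.
if hasattr(_C_ops, "final_state_" + op_type):
    op = getattr(_C_ops, "final_state_" + op_type)
    print("eager kernel found")
else:
    print("fall back to the legacy kernel")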
@@ -1458,9 +1458,9 @@ def sigmoid_cross_entropy_with_logits(x,
                                           ignore_index=-1, normalize=True)
             print(loss)
     """
-    if in_dygraph_mode() and _in_eager_mode():
-        return _C_ops.final_state_sigmoid_cross_entropy_with_logits(
-            x, label, normalize, ignore_index)
+    # if in_dygraph_mode() and _in_eager_mode():
+    #     return _C_ops.final_state_sigmoid_cross_entropy_with_logits(
+    #         x, label, normalize, ignore_index)
     check_variable_and_dtype(x, 'input', ['float16', 'float32', 'float64'],
                              'sigmoid_cross_entropy_with_logits')
@@ -13329,8 +13329,8 @@ def log_loss(input, label, epsilon=1e-4, name=None):
           prob = paddle.randn((10,1))
           cost = F.log_loss(input=prob, label=label)
     """
-    if in_dygraph_mode() and _in_eager_mode():
-        return _C_ops.final_state_log_loss(input, label, epsilon)
+    # if in_dygraph_mode() and _in_eager_mode():
+    #     return _C_ops.final_state_log_loss(input, label, epsilon)
     helper = LayerHelper('log_loss', **locals())
     check_variable_and_dtype(input, 'input', ['float32'], 'log_loss')
@@ -117,10 +117,10 @@ class TestSumOp1(OpTest):
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)}

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output(check_eager=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=True)
+        self.check_grad(['X'], 'Out', check_eager=False)


 class TestSumOp2(OpTest):
@@ -43,10 +43,10 @@ class TestEighOp(OpTest):
         self.x_np = np.random.random(self.x_shape).astype(self.x_type)

     def test_check_output(self):
-        self.check_output(no_check_set=['Eigenvectors'], check_eager=True)
+        self.check_output(no_check_set=['Eigenvectors'], check_eager=False)

     def test_grad(self):
-        self.check_grad(["X"], ["Eigenvalues"], check_eager=True)
+        self.check_grad(["X"], ["Eigenvalues"], check_eager=False)


 class TestEighUPLOCase(TestEighOp):
@@ -42,7 +42,7 @@ class TestErfinv(OpTest):
         self.dtype = np.float64

     def test_check_output(self):
-        self.check_output(check_eager=True)
+        self.check_output(check_eager=False)

     def test_check_grad(self):
         self.check_grad(
@@ -50,7 +50,7 @@ class TestErfinv(OpTest):
             'Out',
             user_defined_grads=[self.gradient],
             user_defined_grad_outputs=self.grad_out,
-            check_eager=True)
+            check_eager=False)


 class TestErfinvFP32(TestErfinv):
@@ -235,4 +235,5 @@ class TestDygraphGAN(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -876,4 +876,5 @@ class TestImperativeOptimizerList(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
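The two hunks above add the same one-line change: each test entry point now switches to static-graph mode before the suite runs. A minimal sketch of the resulting idiom (TestStaticEntry is a hypothetical test case, not part of this commit):

import unittest

import paddle


class TestStaticEntry(unittest.TestCase):  # hypothetical example case
    def test_mode(self):
        # Because enable_static() ran at the entry point, tests start in
        # static-graph mode unless they call paddle.disable_static().
        self.assertFalse(paddle.in_dynamic_mode())


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()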
@@ -160,7 +160,6 @@ class TestDygraphInplace(unittest.TestCase):
         var_a.stop_gradient = False
         var_b = var_a**2
         var_c = self.non_inplace_api_processing(
             var_b)  # var_b is modified inplace before using it
@@ -322,8 +322,8 @@ def subtract(x, y, name=None):
     axis = -1
     act = None
     if paddle.in_dynamic_mode():
-        if _in_eager_mode():
-            return _C_ops.final_state_subtract( x, y)
+        # if _in_eager_mode():
+        #     return _C_ops.final_state_subtract( x, y)
         return _elementwise_op_in_dygraph(
             x, y, axis=axis, act=act, op_name=op_type)
     return _elementwise_op(LayerHelper(op_type, **locals()))
@@ -2822,7 +2822,7 @@ def sign(x, name=None):
         print(out) # [1.0, 0.0, -1.0, 1.0]
     """
     if paddle.in_dynamic_mode():
-        if _in_eager_model():
-            return _C_op.final_state_sign(x)
+        if _in_eager_mode():
+            return _C_ops.final_state_sign(x)
         return _C_ops.sign(x)
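The sign hunk is a pure typo fix: _in_eager_model and _C_op on the left are resolved only at call time, so the misspellings survive import and fail the first time the dynamic-mode branch actually executes. A toy illustration of that failure mode (sign_guard is an invented function, not Paddle code):

def sign_guard(in_dynamic_mode):
    if in_dynamic_mode:
        # Misspelled name: Python only resolves it when this line runs,
        # so the module imports cleanly despite the bug.
        return _in_eager_model()
    return "static path"

print(sign_guard(False))      # fine: the broken branch is never evaluated
try:
    sign_guard(True)
except NameError as exc:
    print("fails only when the branch runs:", exc)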
@@ -296,7 +296,7 @@
     param : [index]
   kernel :
     func : put_along_axis
-    dtype : x
+    data_type : x
   backward : put_along_axis_grad
@@ -309,7 +309,7 @@
     param : [index]
   kernel :
     func : take_along_axis
-    dtype : x
+    data_type : x
   backward : take_along_axis_grad

 # matrix_power
@@ -597,7 +597,7 @@
   backward : hard_sigmoid_grad

-# arg_min # int64 ???? dtype
+# arg_min # int64 dtype
 - api : argmin
   args : (Tensor x, int64 axis, bool keepdims, bool flatten, int dtype)
   output : Tensor
@@ -606,7 +606,7 @@
   kernel :
     func : arg_min

-# arg_max # int64 ???? dtype
+# arg_max # int64 dtype
 - api : argmax
   args : (Tensor x, int64 axis, bool keepdims, bool flatten, int dtype)
   output : Tensor
@@ -757,7 +757,7 @@
     func : cumsum

 # # depthwise_conv2d
-# # dropout ?? optional, intermediate
+# # dropout optional, intermediate
 # - api : dropout
 #   args : (Tensor x, Tensor seed_tensor, float p, bool is_test, str mode, int seed, bool fix_seed)
 #   output : Tensor(out), Tensor(mask)
@@ -849,7 +849,7 @@
 #   func : graph_send_recv
 #   backward : graph_send_recv_grad

-# # histogram int64 ???
+# # histogram int64
 # - api : histogram
 #   args : (Tensor x, int64 bins, int min, int max)
 #   output : Tensor
@@ -876,7 +876,7 @@
   kernel :
     func : is_empty

-# # isinf selected rows??? involk
+# # isinf selected rows involk
 # - api : isinf
 #   args : (Tensor x)
 #   output : Tensor
@@ -885,7 +885,7 @@
 #   kernel :
 #     func : isinf

-# # isnan selected rows??? involk
+# # isnan selected rows involk
 # - api : isnan
 #   args : (Tensor x)
 #   output : Tensor
@@ -894,7 +894,7 @@
 #   kernel :
 #     func : isnan

-# # isfinite selected rows??? involk
+# # isfinite selected rows involk
 # - api : isfinite
 #   args : (Tensor x)
 #   output : Tensor
@@ -903,7 +903,7 @@
 #   kernel :
 #     func : isfinite

-# label_smooth ?? optional
+# label_smooth optional
 # - api : label_smooth
 #   args : (Tensor label, Tensor prior_dist, float epsilon)
 #   output : Tensor
@@ -915,7 +915,7 @@
 #   backward : label_smooth_grad
 #   optional : prior_dist

-# linspace ???? start stop number
+# linspace start stop number
 # - api : linspace
 #   args : (Tensor start, Tensor stop, Tensor number, DataType dtype=DataType::FLOAT32)
 #   output : Tensor
@@ -1042,7 +1042,7 @@
 #     func : poisson
 #   backward : poisson_grad

-# psroi_pool ?? optional
+# psroi_pool optional
 # - api : psroi_pool
 #   args : (Tensor x, Tensor rois, Tensor rois_num, int pooled_weight, int pooled_width, int output_channels, float spatial_scale )
 #   output : Tensor
@@ -1103,7 +1103,7 @@
 # # set_value None api
 # # sgd # need invoke
-# # shape ??? selcted rows
+# # shape selcted rows
 # shard_index
 - api : shard_index