Unverified · Commit 56c43ccd · Authored by: H hong · Committed by: GitHub

Merge some test bug (#40543)

* switch eager mode and change it

* set default is eager

* set default is eager

* fix error; test=develop

* fix some error; test=develop

* update

* upd

* update code; test=develop

* update

* fix some bug; test=develop

* fix bug; test=develop

* fix bug; test=develop

* fix bug; test=develop

* fix error; test=develop

* format; test=develop
Co-authored-by: JiabinYang <360788950@qq.com>
Parent 1b9aee4f
@@ -2032,7 +2032,15 @@ static std::string GenerateSingleOpBase(
   const char* ATTRS_TEMPLATE = " auto& %s = this->attr_map_;\n";
   std::string grad_attrs_str =
       paddle::string::Sprintf(ATTRS_TEMPLATE, attrs_name);
+  if (fwd_op_type == "cast") {
+    // switch in/out dtype
+    const char* CAST_GRAD =
+        " auto temp_type = %s[\"in_dtype\"];\n"
+        " %s[\"in_dtype\"] = %s[\"out_dtype\"];\n"
+        " %s[\"out_dtype\"] = temp_type;\n";
+    grad_attrs_str += paddle::string::Sprintf(CAST_GRAD, attrs_name, attrs_name,
+                                              attrs_name, attrs_name);
+  }
   // Handle dynamic grad attributes
   grad_attrs_str += HandleDynamicGradAttributes(fwd_op_type, attrs_name);
   generated_grad_function_body += grad_attrs_str;
...
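The swap mirrors what cast's grad op has to do: the forward op casts `in_dtype` → `out_dtype`, so the generated grad op must cast the incoming gradient back. A minimal, purely illustrative Python sketch of the attribute swap (not Paddle API):

```python
# Illustrative only: forward cast goes in_dtype -> out_dtype, so the
# generated grad op swaps the two attributes to cast the gradient back.
attrs = {"in_dtype": "float16", "out_dtype": "float32"}

grad_attrs = dict(attrs)
grad_attrs["in_dtype"], grad_attrs["out_dtype"] = (
    attrs["out_dtype"], attrs["in_dtype"])

assert grad_attrs == {"in_dtype": "float32", "out_dtype": "float16"}
```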
@@ -93,7 +93,7 @@ void GradTensorHolder::add(size_t slot_id, size_t rank,
     // Create new tensor->impl and fill it with 1.0
     if (t.defined()) {
       // Fill 1.0
-      buffer_[slot_id][rank] = paddle::experimental::ones_like(t);
+      buffer_[slot_id][rank] = paddle::experimental::ones_like(t, t.dtype());
     }
   }
 }
...
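Passing the dtype explicitly keeps the gradient seed aligned with the dtype of the tensor it seeds. The Python-level equivalent of this behavior (a hedged sketch, not this C++ code path):

```python
import paddle

t = paddle.to_tensor([1.0, 2.0], dtype='float16')
seed = paddle.ones_like(t, dtype=t.dtype)  # grad seed matches t's dtype
assert seed.dtype == t.dtype
```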
@@ -62,6 +62,8 @@ int TensorDtype2NumpyDtype(phi::DataType dtype) {
       return pybind11::detail::npy_api::NPY_INT32_;
     case phi::DataType::INT64:
       return pybind11::detail::npy_api::NPY_INT64_;
+    case phi::DataType::BFLOAT16:
+      return pybind11::detail::NPY_UINT16_;
     case phi::DataType::FLOAT16:
       return pybind11::detail::NPY_FLOAT16_;
     case phi::DataType::BFLOAT16:
...
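NumPy has no native bfloat16 dtype, so the binding exposes bfloat16 tensors as their raw 16-bit patterns (`NPY_UINT16_`). A small NumPy sketch of that representation (assumes a little-endian host):

```python
import numpy as np

# bfloat16 is the high 16 bits of a float32; numpy lacks a bfloat16
# dtype, so the value travels as a uint16 bit pattern.
f32 = np.float32(1.5)                                         # 0x3FC00000
high_half = np.frombuffer(f32.tobytes(), dtype=np.uint16)[1]  # 0x3FC0

# round-trip: zero-fill the low 16 bits and reinterpret as float32
restored = np.frombuffer(
    np.array([0, high_half], dtype=np.uint16).tobytes(), dtype=np.float32)[0]
assert restored == np.float32(1.5)
```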
@@ -612,7 +612,7 @@ def grad(outputs,
     if no_grad_vars is None:
         no_grad_vars = []
-    elif isinstance(no_grad_vars, core.VarBase):
+    elif isinstance(no_grad_vars, (core.VarBase, core.eager.Tensor)):
         no_grad_vars = [no_grad_vars]
     elif isinstance(no_grad_vars, core.eager.Tensor):
         no_grad_vars = [no_grad_vars]
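With this change, a single tensor of either flavor (legacy `VarBase` or eager `Tensor`) is accepted for `no_grad_vars` without wrapping it in a list. A hedged usage sketch:

```python
import paddle

x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
y = paddle.to_tensor([3.0, 4.0], stop_gradient=False)
z = x * y

# no_grad_vars may be a bare Tensor, not only a list/tuple of Tensors
dx = paddle.grad(outputs=z, inputs=x, no_grad_vars=y)[0]
print(dx)  # gradient of z w.r.t. x, i.e. the values of y
```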
@@ -718,13 +718,13 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
         y.shape  # [3L, 2L]
     """
-    support_type = (list, tuple, np.ndarray, core.VarBase, framework.Variable,
-                    core.Tensor, core.LoDTensor)
+    support_type = (list, tuple, np.ndarray, core.eager.Tensor, core.VarBase,
+                    framework.Variable, core.Tensor, core.LoDTensor)
     if not isinstance(value, support_type):
         raise TypeError(
             "The type of 'value' in fluid.dygraph.to_variable must be %s, but received %s."
             % (support_type, type(value)))
-    if isinstance(value, (core.VarBase, framework.Variable)):
+    if isinstance(value, (core.eager.Tensor, core.VarBase, framework.Variable)):
         return value
     elif isinstance(value, (core.Tensor, core.LoDTensor)):
         return core.VarBase(value)
...
@@ -28,6 +28,7 @@ from .math_op_patch import monkey_patch_math_varbase
 from .parallel import scale_loss
 from paddle.fluid.data_feeder import convert_dtype, _PADDLE_DTYPE_2_NUMPY_DTYPE
 import paddle.utils.deprecated as deprecated
+from paddle import _C_ops
 class TensorHookRemoveHelper(object):
@@ -782,7 +783,7 @@ def monkey_patch_varbase():
     @framework.dygraph_only
     def clone(self):
-        return _C_ops_.assign(self)
+        return _C_ops.assign(self)
     @framework.dygraph_only
     def value(self):
...
@@ -316,7 +316,8 @@ def _dygraph_not_support_(func):
 def _dygraph_only_(func):
     def __impl__(*args, **kwargs):
-        assert in_dygraph_mode(
+        assert (
+            in_dygraph_mode() or _in_eager_mode()
         ), "We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." % func.__name__
         return func(*args, **kwargs)
...
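The guard now passes if either the legacy dygraph mode or the new eager mode is active. A self-contained sketch of the decorator pattern (the two mode checks are stubbed here; the real ones live in `paddle.fluid.framework`):

```python
def in_dygraph_mode():   # stub for the legacy dygraph-mode check
    return True

def _in_eager_mode():    # stub for the new eager-mode check
    return False

def dygraph_only(func):
    def __impl__(*args, **kwargs):
        # pass if either dynamic-graph flavor is active
        assert in_dygraph_mode() or _in_eager_mode(), (
            "We only support '%s()' in dynamic graph mode." % func.__name__)
        return func(*args, **kwargs)
    return __impl__

@dygraph_only
def demo():
    return "ok"

print(demo())  # runs, since the stubbed mode check returns True
```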
@@ -886,6 +886,7 @@ class TestDistributeFpnProposals(LayerTest):
                 refer_level=4,
                 refer_scale=224,
                 rois_num=rois_num_dy)
+            print(type(multi_rois_dy))
             output_dy = multi_rois_dy + [restore_ind_dy] + rois_num_per_level_dy
             output_dy_np = []
             for output in output_dy:
@@ -973,4 +974,5 @@ class TestBoxDecoderAndAssign(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
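This `paddle.enable_static()` line recurs across most of the test files below: the suite now defaults to eager mode (per the commit message, "set default is eager"), so graph-mode tests re-enter static mode before `unittest.main()` runs them. The recurring pattern, in full:

```python
import unittest

import paddle

class MyStaticGraphTest(unittest.TestCase):  # hypothetical test case
    def test_something(self):
        pass

if __name__ == '__main__':
    # the process may start in dygraph/eager mode; switch to static
    # graph mode before running graph-mode tests
    paddle.enable_static()
    unittest.main()
```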
@@ -50,3 +50,7 @@ class TestExponentialFamilyException(unittest.TestCase):
     def test_entropy_exception(self):
         with self.assertRaises(NotImplementedError):
             paddle.distribution.ExponentialFamily.entropy(self.dist)
+
+
+if __name__ == '__main__':
+    unittest.main()
@@ -112,3 +112,7 @@ class TestKLExpfamilyExpFamily(unittest.TestCase):
             kl._kl_expfamily_expfamily(self.p, self.q),
             rtol=config.RTOL.get(config.DEFAULT_DTYPE),
             atol=config.ATOL.get(config.DEFAULT_DTYPE))
+
+
+if __name__ == '__main__':
+    unittest.main()
@@ -20,6 +20,7 @@ import sys
 sys.path.append("../")
 from op_test import OpTest
+import paddle
 from paddle import fluid
@@ -115,4 +116,5 @@ class TestSequenceConcatOpError(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -39,6 +39,7 @@ class TensorTypeTest(unittest.TestCase):
         tensorx = paddle.tensor.logic.Tensor(inx)
         typex_str = str(type(tensorx))
         expectx = "<class 'paddle.Tensor'>"
         self.assertEqual((typex_str == expectx), True)
...
@@ -1202,4 +1202,5 @@ class TestMultiTensorAdam(unittest.TestCase):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
@@ -451,4 +451,5 @@ class TestLayerTo(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -18,6 +18,7 @@ import numpy as np
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
 from paddle.fluid import ParamAttr, initializer
+import paddle
 class TestCreateParameterError(unittest.TestCase):
@@ -50,4 +51,5 @@ class TestCreateParameterError(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -20,6 +20,7 @@ import numpy as np
 from op_test import OpTest
 from test_softmax_op import stable_softmax
 import paddle.fluid as fluid
+import paddle
 def CTCAlign(input, lod, blank, merge_repeated, padding=0, input_length=None):
@@ -229,4 +230,5 @@ class BadInputTestCTCAlignr(unittest.TestCase):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
@@ -211,4 +211,5 @@ class TestDiffOpPreAppendAxis(TestDiffOp):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -190,4 +190,5 @@ class TestDygraphRemoveWeightNorm(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -209,4 +209,5 @@ class TestExponentialAPI(unittest.TestCase):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
@@ -189,3 +189,8 @@ class TestElementwiseFmin2Op(OpTest):
         """test_check_grad_ingore_y"""
         self.check_grad(
             ['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
+
+
+if __name__ == "__main__":
+    paddle.enable_static()
+    unittest.main()
@@ -1025,4 +1025,5 @@ class TestDiracInitializer3(TestDiracInitializer1):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -163,4 +163,5 @@ class TestMultiplyError(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -88,4 +88,5 @@ class TestWhenTrainWithNoGrad(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -210,6 +210,9 @@ class TestIscloseOpFloat64(TestIscloseOp):
         self.atol = np.array([0]).astype("float64")
         self.equal_nan = False
+    def test_check_output(self):
+        self.check_output()
+
 class TestIscloseOpLargeDimInput(TestIscloseOp):
     def set_args(self):
@@ -222,4 +225,5 @@ class TestIscloseOpLargeDimInput(TestIscloseOp):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
@@ -175,4 +175,5 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
@@ -555,4 +555,5 @@ class TestLRScheduler(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -19,6 +19,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
+import paddle
 def compute_mean_iou(predictions, labels, num_classes, in_wrongs, in_corrects,
@@ -129,4 +130,5 @@ class TestMeanIOUOpError(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -17,6 +17,7 @@ from __future__ import print_function
 import unittest
 import numpy as np
 from op_test import OpTest
+import paddle
 class TestMinusOp(OpTest):
@@ -36,4 +37,5 @@ class TestMinusOp(OpTest):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
@@ -588,4 +588,5 @@ class API_NormTest(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -315,7 +315,9 @@ class TestSaveLoadAny(unittest.TestCase):
             paddle.save(tensor, path)
             t_dygraph = paddle.load(path)
             np_dygraph = paddle.load(path, return_numpy=True)
-            self.assertTrue(isinstance(t_dygraph, paddle.fluid.core.VarBase))
+            self.assertTrue(
+                isinstance(t_dygraph, (paddle.fluid.core.VarBase,
+                                       paddle.fluid.core.eager.Tensor)))
             self.assertTrue(np.array_equal(tensor.numpy(), np_dygraph))
             self.assertTrue(np.array_equal(tensor.numpy(), t_dygraph.numpy()))
             paddle.enable_static()
@@ -685,27 +687,34 @@ class TestSaveLoadAny(unittest.TestCase):
                     np.array(v), np.array(load_tensor2['k2'][k])))
         self.assertTrue(load_tensor2['epoch'] == 123)
-        self.assertTrue(isinstance(load_tensor3[0], fluid.core.VarBase))
+        self.assertTrue(
+            isinstance(load_tensor3[0], (fluid.core.VarBase,
+                                         fluid.core.eager.Tensor)))
         self.assertTrue(np.array_equal(load_tensor3[0].numpy(), obj3[0]))
-        self.assertTrue(isinstance(load_tensor3[1], fluid.core.VarBase))
+        self.assertTrue(
+            isinstance(load_tensor3[1], (fluid.core.VarBase,
+                                         fluid.core.eager.Tensor)))
         self.assertTrue(np.array_equal(load_tensor3[1].numpy(), obj3[1]))
         for k, v in state_dict.items():
             self.assertTrue(
-                isinstance(load_tensor3[2]["state_dict"][k],
-                           fluid.core.VarBase))
+                isinstance(load_tensor3[2]["state_dict"][k], (
+                    fluid.core.VarBase, fluid.core.eager.Tensor)))
             self.assertTrue(
                 np.array_equal(load_tensor3[2]["state_dict"][k].numpy(),
                                np.array(v)))
         for k, v in state_dict.items():
             self.assertTrue(
-                isinstance(load_tensor3[2]["opt"][k], fluid.core.VarBase))
+                isinstance(load_tensor3[2]["opt"][k], (
+                    fluid.core.VarBase, fluid.core.eager.Tensor)))
             self.assertTrue(
                 np.array_equal(load_tensor3[2]["opt"][k].numpy(),
                                np.array(v)))
-        self.assertTrue(isinstance(load_tensor4[0], fluid.core.VarBase))
+        self.assertTrue(
+            isinstance(load_tensor4[0], (fluid.core.VarBase,
+                                         fluid.core.eager.Tensor)))
         self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0]))
         load_array1 = paddle.load(path1, return_numpy=True)
...
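The loosened assertions reflect that `paddle.load` now returns whichever tensor type the current mode produces (legacy `VarBase` or eager `Tensor`). Basic usage exercised by this test, as a hedged sketch:

```python
import numpy as np
import paddle

tensor = paddle.rand([3, 2])
path = 'tmp_tensor.pdtensor'  # hypothetical path

paddle.save(tensor, path)
t = paddle.load(path)                       # VarBase or eager Tensor
arr = paddle.load(path, return_numpy=True)  # plain numpy.ndarray

assert np.array_equal(tensor.numpy(), arr)
assert np.array_equal(tensor.numpy(), t.numpy())
```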
@@ -54,7 +54,7 @@ class TestRenormAPI(unittest.TestCase):
     def test_dygraph_api(self):
         self.input_data()
         # case axis none
-        with fluid.dygraph.guard():
+        with fluid.dygraph.guard(fluid.CPUPlace()):
             input = [[[2.0, 2, -2], [3, 0.3, 3]], [[2, -8, 2], [3.1, 3.7, 3]]]
             x = paddle.to_tensor(input, stop_gradient=False)
             y = paddle.renorm(x, 1.0, 2, 2.05)
@@ -94,4 +94,5 @@ class TestRenormAPI(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -23,6 +23,7 @@ from test_multiclass_nms_op import iou
 from test_multiclass_nms_op import nms
 import paddle.fluid as fluid
 from paddle.fluid import Program, program_guard
+import paddle
 def multiclass_nms(prediction, class_num, keep_top_k, nms_threshold):
@@ -518,4 +519,5 @@ class TestRetinanetDetectionOutOpError(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -178,4 +178,5 @@ class SmoothL1Loss(unittest.TestCase):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
@@ -22,7 +22,7 @@ import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
-# Situation 1: repeat_times is a list (without tensor)
+#Situation 1: repeat_times is a list (without tensor)
 class TestTileOpRank1(OpTest):
     def setUp(self):
         self.op_type = "tile"
@@ -248,4 +248,5 @@ class TestTileAPI(unittest.TestCase):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
@@ -1361,4 +1361,5 @@ class TestVarBaseCopyGradientFrom(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
@@ -68,8 +68,9 @@ def to_list(value):
 def to_numpy(var):
-    assert isinstance(var, (Variable, fluid.core.VarBase)), "not a variable"
-    if isinstance(var, fluid.core.VarBase):
+    assert isinstance(var, (Variable, fluid.core.VarBase,
+                            fluid.core.eager.Tensor)), "not a variable"
+    if isinstance(var, (fluid.core.VarBase, fluid.core.eager.Tensor)):
         return var.numpy()
     t = global_scope().find_var(var.name).get_tensor()
     return np.array(t)
...
@@ -282,7 +282,7 @@ class Accuracy(Metric):
         Return:
             Tensor: the accuracy of current step.
         """
-        if isinstance(correct, paddle.Tensor):
+        if isinstance(correct, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
             correct = correct.numpy()
         num_samples = np.prod(np.array(correct.shape[:-1]))
         accs = []
@@ -410,12 +410,12 @@ class Precision(Metric):
                 the shape should keep the same as preds.
                 The data type is 'int32' or 'int64'.
         """
-        if isinstance(preds, paddle.Tensor):
+        if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
             preds = preds.numpy()
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
-        if isinstance(labels, paddle.Tensor):
+        if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
             labels = labels.numpy()
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
@@ -543,12 +543,12 @@ class Recall(Metric):
                 the shape should keep the same as preds.
                 Shape: [batch_size, 1], Dtype: 'int32' or 'int64'.
         """
-        if isinstance(preds, paddle.Tensor):
+        if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
             preds = preds.numpy()
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
-        if isinstance(labels, paddle.Tensor):
+        if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
             labels = labels.numpy()
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
@@ -698,12 +698,12 @@ class Auc(Metric):
                 (batch_size, 1), labels[i] is either o or 1,
                 representing the label of the instance i.
         """
-        if isinstance(labels, paddle.Tensor):
+        if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
             labels = labels.numpy()
         elif not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
-        if isinstance(preds, paddle.Tensor):
+        if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
             preds = preds.numpy()
         elif not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
...
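The same coercion appears four times across these metrics: accept a legacy or eager Tensor and call `.numpy()`, accept an ndarray as-is, reject anything else. A hedged sketch of that pattern factored into a helper (the name `_as_numpy` is hypothetical):

```python
import numpy as np
import paddle

def _as_numpy(x, name):
    # hypothetical helper mirroring the repeated isinstance pattern above
    if isinstance(x, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
        return x.numpy()
    if isinstance(x, np.ndarray):
        return x
    raise ValueError("The '%s' must be a numpy ndarray or Tensor." % name)
```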
@@ -462,7 +462,7 @@ def is_tensor(x):
             print(check) #False
     """
-    return isinstance(x, Tensor)
+    return isinstance(x, (Tensor, paddle.fluid.core.eager.Tensor))
 def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
...
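`paddle.is_tensor` now recognizes eager Tensors as well. Typical usage:

```python
import paddle

x = paddle.rand([2, 3])
print(paddle.is_tensor(x))       # True, for VarBase or eager Tensor
print(paddle.is_tensor([1, 2]))  # False, a plain list is not a Tensor
```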
@@ -263,6 +263,9 @@ def fill_diagonal_tensor(x, y, offset=0, dim1=0, dim2=1, name=None):
 setattr(core.VarBase, 'fill_diagonal_tensor', fill_diagonal_tensor)
+if core._in_eager_mode():
+    setattr(core.eager.Tensor, 'fill_diagonal_tensor', fill_diagonal_tensor)
 @dygraph_only
 def tolist(x):
...
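With the extra `setattr`, the method is monkey-patched onto eager Tensors in addition to `VarBase`. Typical method-style usage in dygraph mode (a sketch based on the function's signature above):

```python
import paddle

x = paddle.ones((4, 3)) * 2
y = paddle.ones((3,))
# fills the main diagonal over dims (0, 1) of x with the values of y:
# entries (0,0), (1,1), (2,2) become 1.0, everything else stays 2.0
out = x.fill_diagonal_tensor(y)
print(out)
```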
@@ -1335,7 +1335,7 @@ def renorm(x, p, axis, max_norm):
             raise ValueError("the axis:{} should not be less than -1 * length of input_shape:{}".format(axis,-1 * len(input_shape)))
         axis = axis + len(input_shape)
     if paddle.in_dynamic_mode():
-        out = core.ops.renorm(x, 'p',p, 'axis',axis, 'max_norm', max_norm)
+        out = _C_ops.renorm(x, 'p',p, 'axis',axis, 'max_norm', max_norm)
         return out
     inputs = {'X': x}
...
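Only the dispatch target changes here (`core.ops` → `_C_ops`); the public API is untouched. The call as exercised by the renorm test above:

```python
import paddle

x = paddle.to_tensor([[[2.0, 2, -2], [3, 0.3, 3]],
                      [[2, -8, 2], [3.1, 3.7, 3]]])
# clamp the p=1 norm of each slice along axis 2 to at most 2.05
y = paddle.renorm(x, p=1.0, axis=2, max_norm=2.05)
print(y.shape)  # [2, 2, 3]
```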