Unverified · Commit 56c43ccd authored by hong, committed by GitHub

Merge some test bug (#40543)

* switch eager mode and change it

* set default is eager

* set default is eager

* fix error; test=develop

* fix some error; test=develop

* update

* upd

* update code; test=develop

* update

* fix some bug; test=develop

* fix bug; test=develop

* fix bug; test=develop

* fix bug; test=develop

* fix error; test=develop

* format; test=develop
Co-authored-by: JiabinYang <360788950@qq.com>
Parent 1b9aee4f
@@ -2032,7 +2032,15 @@ static std::string GenerateSingleOpBase(
const char* ATTRS_TEMPLATE = " auto& %s = this->attr_map_;\n";
std::string grad_attrs_str =
paddle::string::Sprintf(ATTRS_TEMPLATE, attrs_name);
+if (fwd_op_type == "cast") {
+// switch in/out dtype
+const char* CAST_GRAD =
+" auto temp_type = %s[\"in_dtype\"];\n"
+" %s[\"in_dtype\"] = %s[\"out_dtype\"];\n"
+" %s[\"out_dtype\"] = temp_type;\n";
+grad_attrs_str += paddle::string::Sprintf(CAST_GRAD, attrs_name, attrs_name,
+attrs_name, attrs_name);
+}
// Handle dynamic grad attributes
grad_attrs_str += HandleDynamicGradAttributes(fwd_op_type, attrs_name);
generated_grad_function_body += grad_attrs_str;
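
The generated grad node for cast reuses the forward op's attribute map, and the gradient of a cast is simply a cast back, so the hunk above swaps in_dtype and out_dtype before the grad op runs. A minimal Python sketch of the behavior this guarantees (the asserts are illustrative, not part of the commit):

    import paddle

    x = paddle.ones([2, 3], dtype='float32')
    x.stop_gradient = False
    y = paddle.cast(x, 'float64')      # forward: float32 -> float64
    (gx,) = paddle.grad(y.sum(), x)    # backward runs cast with swapped dtypes
    assert gx.dtype == paddle.float32  # gradient arrives in x's dtype
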
@@ -93,7 +93,7 @@ void GradTensorHolder::add(size_t slot_id, size_t rank,
// Create new tensor->impl and fill it with 1.0
if (t.defined()) {
// Fill 1.0
-buffer_[slot_id][rank] = paddle::experimental::ones_like(t);
+buffer_[slot_id][rank] = paddle::experimental::ones_like(t, t.dtype());
}
}
}
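
GradTensorHolder seeds the incoming gradient with ones; passing t.dtype() keeps the seed in the source tensor's dtype instead of the default. The Python ones_like exposes the same contract; a small sketch:

    import paddle

    t = paddle.to_tensor([1.0, 2.0], dtype='float16')
    seed = paddle.ones_like(t)                     # dtype follows t
    assert seed.dtype == paddle.float16
    seed64 = paddle.ones_like(t, dtype='float64')  # explicit override
    assert seed64.dtype == paddle.float64
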
@@ -62,6 +62,8 @@ int TensorDtype2NumpyDtype(phi::DataType dtype) {
return pybind11::detail::npy_api::NPY_INT32_;
case phi::DataType::INT64:
return pybind11::detail::npy_api::NPY_INT64_;
+case phi::DataType::BFLOAT16:
+return pybind11::detail::NPY_UINT16_;
case phi::DataType::FLOAT16:
return pybind11::detail::NPY_FLOAT16_;
case phi::DataType::BFLOAT16:
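
NumPy has no native bfloat16 type, so the new mapping exposes BFLOAT16 tensors as NPY_UINT16_, i.e. their raw 16-bit storage. A hedged sketch of the Python-side consequence, assuming the build supports casting to bfloat16 on the current device:

    import numpy as np
    import paddle

    x = paddle.cast(paddle.ones([2], dtype='float32'), 'bfloat16')
    arr = x.numpy()          # buffer viewed as uint16 bit patterns
    assert arr.dtype == np.uint16
    assert arr[0] == 0x3F80  # bfloat16 encoding of 1.0
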
@@ -612,7 +612,7 @@ def grad(outputs,
if no_grad_vars is None:
no_grad_vars = []
-elif isinstance(no_grad_vars, core.VarBase):
+elif isinstance(no_grad_vars, (core.VarBase, core.eager.Tensor)):
no_grad_vars = [no_grad_vars]
elif isinstance(no_grad_vars, core.eager.Tensor):
no_grad_vars = [no_grad_vars]
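
With the widened isinstance check, a bare tensor (VarBase or eager Tensor) passed as no_grad_vars is normalized to a one-element list. Usage sketch:

    import paddle

    x = paddle.to_tensor([2.0], stop_gradient=False)
    z = paddle.to_tensor([3.0], stop_gradient=False)
    y = x * z
    # z can be passed directly instead of [z]
    (gx,) = paddle.grad(outputs=y, inputs=x, no_grad_vars=z)
    print(gx)  # [3.] = dy/dx; no gradient is computed for z
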
@@ -718,13 +718,13 @@ def to_variable(value, name=None, zero_copy=None, dtype=None):
y.shape # [3L, 2L]
"""
-support_type = (list, tuple, np.ndarray, core.VarBase, framework.Variable,
-core.Tensor, core.LoDTensor)
+support_type = (list, tuple, np.ndarray, core.eager.Tensor, core.VarBase,
+framework.Variable, core.Tensor, core.LoDTensor)
if not isinstance(value, support_type):
raise TypeError(
"The type of 'value' in fluid.dygraph.to_variable must be %s, but received %s."
% (support_type, type(value)))
-if isinstance(value, (core.VarBase, framework.Variable)):
+if isinstance(value, (core.eager.Tensor, core.VarBase, framework.Variable)):
return value
elif isinstance(value, (core.Tensor, core.LoDTensor)):
return core.VarBase(value)
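
After this change to_variable passes eager Tensors straight through, just like VarBase and Variable. A quick sketch:

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        t = fluid.dygraph.to_variable(np.ones([2, 2]))  # ndarray -> tensor
        same = fluid.dygraph.to_variable(t)             # tensor in, same object out
        assert same is t
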
@@ -28,6 +28,7 @@ from .math_op_patch import monkey_patch_math_varbase
from .parallel import scale_loss
from paddle.fluid.data_feeder import convert_dtype, _PADDLE_DTYPE_2_NUMPY_DTYPE
import paddle.utils.deprecated as deprecated
+from paddle import _C_ops
class TensorHookRemoveHelper(object):
@@ -782,7 +783,7 @@ def monkey_patch_varbase():
@framework.dygraph_only
def clone(self):
-return _C_ops_.assign(self)
+return _C_ops.assign(self)
@framework.dygraph_only
def value(self):
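
clone is now routed through the module-level _C_ops import added above; the old _C_ops_ name was a typo that would raise NameError. A sketch of the documented clone semantics, a copy that stays on the autograd graph:

    import paddle

    x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
    y = x.clone()               # copy via assign, not a view
    (y * 2.0).sum().backward()
    print(x.grad)               # [2., 2.]: gradient flows back through the clone
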
@@ -316,7 +316,8 @@ def _dygraph_not_support_(func):
def _dygraph_only_(func):
def __impl__(*args, **kwargs):
-assert in_dygraph_mode(
+assert (
+in_dygraph_mode() or _in_eager_mode()
), "We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." % func.__name__
return func(*args, **kwargs)
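
The guard now admits classic dygraph as well as eager mode. A simplified, self-contained stand-in for the decorator, using the public paddle.in_dynamic_mode in place of the two internal flags:

    import functools
    import paddle

    def dygraph_only(func):
        # simplified sketch of the framework decorator above
        @functools.wraps(func)
        def __impl__(*args, **kwargs):
            assert paddle.in_dynamic_mode(), (
                "We only support '%s()' in dynamic graph mode, please call "
                "'paddle.disable_static()' to enter dynamic graph mode."
                % func.__name__)
            return func(*args, **kwargs)
        return __impl__
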
@@ -886,6 +886,7 @@ class TestDistributeFpnProposals(LayerTest):
refer_level=4,
refer_scale=224,
rois_num=rois_num_dy)
+print(type(multi_rois_dy))
output_dy = multi_rois_dy + [restore_ind_dy] + rois_num_per_level_dy
output_dy_np = []
for output in output_dy:
@@ -973,4 +974,5 @@ class TestBoxDecoderAndAssign(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -50,3 +50,7 @@ class TestExponentialFamilyException(unittest.TestCase):
def test_entropy_exception(self):
with self.assertRaises(NotImplementedError):
paddle.distribution.ExponentialFamily.entropy(self.dist)
+if __name__ == '__main__':
+unittest.main()
@@ -112,3 +112,7 @@ class TestKLExpfamilyExpFamily(unittest.TestCase):
kl._kl_expfamily_expfamily(self.p, self.q),
rtol=config.RTOL.get(config.DEFAULT_DTYPE),
atol=config.ATOL.get(config.DEFAULT_DTYPE))
+if __name__ == '__main__':
+unittest.main()
@@ -20,6 +20,7 @@ import sys
sys.path.append("../")
from op_test import OpTest
+import paddle
from paddle import fluid
@@ -115,4 +116,5 @@ class TestSequenceConcatOpError(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -39,6 +39,7 @@ class TensorTypeTest(unittest.TestCase):
tensorx = paddle.tensor.logic.Tensor(inx)
typex_str = str(type(tensorx))
expectx = "<class 'paddle.Tensor'>"
self.assertEqual((typex_str == expectx), True)
@@ -1202,4 +1202,5 @@ class TestMultiTensorAdam(unittest.TestCase):
if __name__ == "__main__":
+paddle.enable_static()
unittest.main()
@@ -451,4 +451,5 @@ class TestLayerTo(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -18,6 +18,7 @@ import numpy as np
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid import ParamAttr, initializer
+import paddle
class TestCreateParameterError(unittest.TestCase):
@@ -50,4 +51,5 @@ class TestCreateParameterError(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -20,6 +20,7 @@ import numpy as np
from op_test import OpTest
from test_softmax_op import stable_softmax
import paddle.fluid as fluid
+import paddle
def CTCAlign(input, lod, blank, merge_repeated, padding=0, input_length=None):
@@ -229,4 +230,5 @@ class BadInputTestCTCAlignr(unittest.TestCase):
if __name__ == "__main__":
+paddle.enable_static()
unittest.main()
@@ -211,4 +211,5 @@ class TestDiffOpPreAppendAxis(TestDiffOp):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -190,4 +190,5 @@ class TestDygraphRemoveWeightNorm(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -209,4 +209,5 @@ class TestExponentialAPI(unittest.TestCase):
if __name__ == "__main__":
+paddle.enable_static()
unittest.main()
@@ -189,3 +189,8 @@ class TestElementwiseFmin2Op(OpTest):
"""test_check_grad_ingore_y"""
self.check_grad(
['X'], 'Out', max_relative_error=0.005, no_grad_set=set('Y'))
if __name__ == "__main__":
paddle.enable_static()
unittest.main()
@@ -1025,4 +1025,5 @@ class TestDiracInitializer3(TestDiracInitializer1):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -163,4 +163,5 @@ class TestMultiplyError(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -88,4 +88,5 @@ class TestWhenTrainWithNoGrad(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -210,6 +210,9 @@ class TestIscloseOpFloat64(TestIscloseOp):
self.atol = np.array([0]).astype("float64")
self.equal_nan = False
+def test_check_output(self):
+self.check_output()
class TestIscloseOpLargeDimInput(TestIscloseOp):
def set_args(self):
@@ -222,4 +225,5 @@ class TestIscloseOpLargeDimInput(TestIscloseOp):
if __name__ == "__main__":
+paddle.enable_static()
unittest.main()
@@ -175,4 +175,5 @@ class TestNNFunctionalLogSoftmaxAPI(unittest.TestCase):
if __name__ == "__main__":
+paddle.enable_static()
unittest.main()
@@ -555,4 +555,5 @@ class TestLRScheduler(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -19,6 +19,7 @@ import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid as fluid
+import paddle
def compute_mean_iou(predictions, labels, num_classes, in_wrongs, in_corrects,
@@ -129,4 +130,5 @@ class TestMeanIOUOpError(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -17,6 +17,7 @@ from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
+import paddle
class TestMinusOp(OpTest):
@@ -36,4 +37,5 @@ class TestMinusOp(OpTest):
if __name__ == "__main__":
+paddle.enable_static()
unittest.main()
@@ -588,4 +588,5 @@ class API_NormTest(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -315,7 +315,9 @@ class TestSaveLoadAny(unittest.TestCase):
paddle.save(tensor, path)
t_dygraph = paddle.load(path)
np_dygraph = paddle.load(path, return_numpy=True)
-self.assertTrue(isinstance(t_dygraph, paddle.fluid.core.VarBase))
+self.assertTrue(
+isinstance(t_dygraph, (paddle.fluid.core.VarBase,
+paddle.fluid.core.eager.Tensor)))
self.assertTrue(np.array_equal(tensor.numpy(), np_dygraph))
self.assertTrue(np.array_equal(tensor.numpy(), t_dygraph.numpy()))
paddle.enable_static()
@@ -685,27 +687,34 @@ class TestSaveLoadAny(unittest.TestCase):
np.array(v), np.array(load_tensor2['k2'][k])))
self.assertTrue(load_tensor2['epoch'] == 123)
-self.assertTrue(isinstance(load_tensor3[0], fluid.core.VarBase))
+self.assertTrue(
+isinstance(load_tensor3[0], (fluid.core.VarBase,
+fluid.core.eager.Tensor)))
self.assertTrue(np.array_equal(load_tensor3[0].numpy(), obj3[0]))
-self.assertTrue(isinstance(load_tensor3[1], fluid.core.VarBase))
+self.assertTrue(
+isinstance(load_tensor3[1], (fluid.core.VarBase,
+fluid.core.eager.Tensor)))
self.assertTrue(np.array_equal(load_tensor3[1].numpy(), obj3[1]))
for k, v in state_dict.items():
self.assertTrue(
isinstance(load_tensor3[2]["state_dict"][k],
fluid.core.VarBase))
isinstance(load_tensor3[2]["state_dict"][k], (
fluid.core.VarBase, fluid.core.eager.Tensor)))
self.assertTrue(
np.array_equal(load_tensor3[2]["state_dict"][k].numpy(),
np.array(v)))
for k, v in state_dict.items():
self.assertTrue(
isinstance(load_tensor3[2]["opt"][k], fluid.core.VarBase))
isinstance(load_tensor3[2]["opt"][k], (
fluid.core.VarBase, fluid.core.eager.Tensor)))
self.assertTrue(
np.array_equal(load_tensor3[2]["opt"][k].numpy(),
np.array(v)))
-self.assertTrue(isinstance(load_tensor4[0], fluid.core.VarBase))
+self.assertTrue(
+isinstance(load_tensor4[0], (fluid.core.VarBase,
+fluid.core.eager.Tensor)))
self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0]))
load_array1 = paddle.load(path1, return_numpy=True)
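
These assertions accept both tensor flavors because paddle.load returns whichever type the current mode produces. Usage following the test, with an illustrative temp path:

    import os
    import numpy as np
    import paddle

    tensor = paddle.to_tensor(np.random.rand(3, 4).astype('float32'))
    os.makedirs('tmp_save_load', exist_ok=True)     # illustrative location
    path = os.path.join('tmp_save_load', 't.pdtensor')
    paddle.save(tensor, path)
    t = paddle.load(path)                           # VarBase or eager.Tensor
    arr = paddle.load(path, return_numpy=True)      # plain ndarray
    assert np.array_equal(t.numpy(), arr)
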
@@ -54,7 +54,7 @@ class TestRenormAPI(unittest.TestCase):
def test_dygraph_api(self):
self.input_data()
# case axis none
-with fluid.dygraph.guard():
+with fluid.dygraph.guard(fluid.CPUPlace()):
input = [[[2.0, 2, -2], [3, 0.3, 3]], [[2, -8, 2], [3.1, 3.7, 3]]]
x = paddle.to_tensor(input, stop_gradient=False)
y = paddle.renorm(x, 1.0, 2, 2.05)
@@ -94,4 +94,5 @@ class TestRenormAPI(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -23,6 +23,7 @@ from test_multiclass_nms_op import iou
from test_multiclass_nms_op import nms
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
+import paddle
def multiclass_nms(prediction, class_num, keep_top_k, nms_threshold):
@@ -518,4 +519,5 @@ class TestRetinanetDetectionOutOpError(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -178,4 +178,5 @@ class SmoothL1Loss(unittest.TestCase):
if __name__ == "__main__":
+paddle.enable_static()
unittest.main()
@@ -22,7 +22,7 @@ import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
-# Situation 1: repeat_times is a list (without tensor)
+#Situation 1: repeat_times is a list (without tensor)
class TestTileOpRank1(OpTest):
def setUp(self):
self.op_type = "tile"
@@ -248,4 +248,5 @@ class TestTileAPI(unittest.TestCase):
if __name__ == "__main__":
+paddle.enable_static()
unittest.main()
@@ -1361,4 +1361,5 @@ class TestVarBaseCopyGradientFrom(unittest.TestCase):
if __name__ == '__main__':
+paddle.enable_static()
unittest.main()
@@ -68,8 +68,9 @@ def to_list(value):
def to_numpy(var):
-assert isinstance(var, (Variable, fluid.core.VarBase)), "not a variable"
-if isinstance(var, fluid.core.VarBase):
+assert isinstance(var, (Variable, fluid.core.VarBase,
+fluid.core.eager.Tensor)), "not a variable"
+if isinstance(var, (fluid.core.VarBase, fluid.core.eager.Tensor)):
return var.numpy()
t = global_scope().find_var(var.name).get_tensor()
return np.array(t)
@@ -282,7 +282,7 @@ class Accuracy(Metric):
Return:
Tensor: the accuracy of current step.
"""
-if isinstance(correct, paddle.Tensor):
+if isinstance(correct, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
correct = correct.numpy()
num_samples = np.prod(np.array(correct.shape[:-1]))
accs = []
@@ -410,12 +410,12 @@ class Precision(Metric):
the shape should keep the same as preds.
The data type is 'int32' or 'int64'.
"""
-if isinstance(preds, paddle.Tensor):
+if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
preds = preds.numpy()
elif not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
-if isinstance(labels, paddle.Tensor):
+if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
labels = labels.numpy()
elif not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
@@ -543,12 +543,12 @@ class Recall(Metric):
the shape should keep the same as preds.
Shape: [batch_size, 1], Dtype: 'int32' or 'int64'.
"""
-if isinstance(preds, paddle.Tensor):
+if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
preds = preds.numpy()
elif not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
-if isinstance(labels, paddle.Tensor):
+if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
labels = labels.numpy()
elif not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
@@ -698,12 +698,12 @@ class Auc(Metric):
(batch_size, 1), labels[i] is either o or 1,
representing the label of the instance i.
"""
-if isinstance(labels, paddle.Tensor):
+if isinstance(labels, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
labels = labels.numpy()
elif not _is_numpy_(labels):
raise ValueError("The 'labels' must be a numpy ndarray or Tensor.")
-if isinstance(preds, paddle.Tensor):
+if isinstance(preds, (paddle.Tensor, paddle.fluid.core.eager.Tensor)):
preds = preds.numpy()
elif not _is_numpy_(preds):
raise ValueError("The 'preds' must be a numpy ndarray or Tensor.")
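
Each metric first normalizes tensor inputs to ndarray before updating its counters, so both tensor types are accepted. A usage sketch with Precision, where scores above 0.5 count as positive predictions:

    import paddle

    m = paddle.metric.Precision()
    preds = paddle.to_tensor([0.1, 0.7, 0.8, 0.3])
    labels = paddle.to_tensor([0, 1, 1, 0])
    m.update(preds, labels)  # tensors converted via .numpy() internally
    print(m.accumulate())    # 1.0: both predicted positives are true positives
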
@@ -462,7 +462,7 @@ def is_tensor(x):
print(check) #False
"""
-return isinstance(x, Tensor)
+return isinstance(x, (Tensor, paddle.fluid.core.eager.Tensor))
def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True):
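
is_tensor now also reports True for eager Tensors. Quick check:

    import paddle

    x = paddle.to_tensor([1.0, 2.0])
    print(paddle.is_tensor(x))      # True, for VarBase and eager Tensor alike
    print(paddle.is_tensor([1.0]))  # False, a plain list is not a Tensor
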
@@ -263,6 +263,9 @@ def fill_diagonal_tensor(x, y, offset=0, dim1=0, dim2=1, name=None):
setattr(core.VarBase, 'fill_diagonal_tensor', fill_diagonal_tensor)
+if core._in_eager_mode():
+setattr(core.eager.Tensor, 'fill_diagonal_tensor', fill_diagonal_tensor)
@dygraph_only
def tolist(x):
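
Since the method is monkey-patched onto both tensor classes, it is callable the same way in either mode. A usage sketch with the default dim1=0, dim2=1 diagonal (output values assume the documented semantics):

    import paddle

    x = paddle.ones([3, 3])
    y = paddle.to_tensor([10.0, 20.0, 30.0])
    out = x.fill_diagonal_tensor(y)  # returns a new tensor; x is unchanged
    print(out)
    # [[10.,  1.,  1.],
    #  [ 1., 20.,  1.],
    #  [ 1.,  1., 30.]]
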
@@ -1335,7 +1335,7 @@ def renorm(x, p, axis, max_norm):
raise ValueError("the axis:{} should not be less than -1 * length of input_shape:{}".format(axis,-1 * len(input_shape)))
axis = axis + len(input_shape)
if paddle.in_dynamic_mode():
-out = core.ops.renorm(x, 'p',p, 'axis',axis, 'max_norm', max_norm)
+out = _C_ops.renorm(x, 'p',p, 'axis',axis, 'max_norm', max_norm)
return out
inputs = {'X': x}
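
In dynamic mode the op now dispatches through _C_ops rather than the stale core.ops alias. Python-level usage, mirroring the renorm test earlier in this commit:

    import paddle

    x = paddle.to_tensor([[[2.0, 2, -2], [3, 0.3, 3]],
                          [[2, -8, 2], [3.1, 3.7, 3]]])
    # clamp the p-norm of each slice taken along axis 2 to max_norm
    y = paddle.renorm(x, p=1.0, axis=2, max_norm=2.05)
    print(y.shape)  # [2, 2, 3]
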