Unverified commit c3796061, authored by Zhanlue Yang, committed via GitHub

Refactored python-level trace_op to call through _C_ops instead of Tracer::TraceOp, under eager_mode (#38338)

* Replaced core.ops with _C_ops

* Refactored python-level trace_op to call through _C_ops instead of Tracer::TraceOp, under eager_mode

* Modified trace_op interface

* Refactored trace_op logic for eager mode

* Added Eager Dygraph support for OpTest

* Fixed CI issues

* Fixed CI failures

* Fixed Coverage CI Issues

* Fixed XPU CI Issues
Parent 8c5c1046
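The core of the change is the eager-mode branch added to `Tracer.trace_op` (see the tracer.py hunk below): instead of going through `Tracer::TraceOp`, it resolves the generated dygraph function from `_C_ops`, builds the positional argument list from the `core_ops_args_info` / `core_ops_args_type_info` maps emitted by the eager code generator, flattens the attributes into name/value pairs, and copies the returns back into the caller's output tensors with `reconstruct_from_`. Below is a minimal sketch of that dispatch path; the helper name is hypothetical and it is simplified from the diff (it only handles the plain-tensor argument kind and a single return, while the real code also handles the "list" and "int" kinds and tuple returns), so treat it as illustrative rather than the actual implementation.

```python
# Hypothetical helper (not part of the diff) mirroring the eager-mode branch
# this commit adds to Tracer.trace_op; only the plain-tensor argument kind
# and the single-return case are handled here.
from paddle import _C_ops


def eager_trace_op_sketch(op_type, inputs, outputs, attrs):
    # Generated dygraph forward function, e.g. _C_ops.scale
    func = _C_ops.__dict__[op_type]
    # Argument layout emitted by the eager code generator
    arg_names = _C_ops.get_core_ops_args_info()[op_type]

    # Positional args follow the generated signature; each name is looked up
    # in the caller-provided inputs first, then outputs.
    arg_list = []
    for name in arg_names:
        var = inputs.get(name, outputs.get(name))
        arg_list.append(var[0] if isinstance(var, list) else var)

    # Attributes are flattened into alternating name/value positional args.
    attrs_list = []
    for k, v in attrs.items():
        attrs_list.extend([k, v])

    returns = func(*arg_list, *attrs_list)

    # Copy the result back into the caller's output tensor in place,
    # matching trace_op's reconstruct_from_(src, blocking=False) calls.
    key = list(outputs.keys())[0]
    out = outputs[key][0] if isinstance(outputs[key], list) else outputs[key]
    out.reconstruct_from_(returns, False)
```

Under `_test_eager_guard()`, a call such as `trace_op('scale', {'X': x}, {'Out': x}, {'scale': 0.5})` would resolve to `_C_ops.scale` through this path, which is exactly what the new test_eager_trace_op.py in this diff exercises.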
@@ -37,6 +37,8 @@ std::unordered_map<std::string, std::vector<std::string>>
core_ops_returns_info = {};
std::unordered_map<std::string, std::vector<std::string>> core_ops_args_info =
{};
std::unordered_map<std::string, std::vector<std::string>>
core_ops_args_type_info = {};
/* --- Static maps to handle corner cases --- */
static std::unordered_map<std::string, paddle::framework::AttributeMap>
@@ -1225,10 +1227,16 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
*/
VLOG(6) << "Generating Dygraph Forward Function";
const char* FORWARD_FUNCTION_TEMPLATE =
" VLOG(3) << \"Running Eager Forward Op: %s\";\n";
std::string generated_function_body =
paddle::string::Sprintf(FORWARD_FUNCTION_TEMPLATE, op_type);
std::string dygraph_function_args_str = "";
core_ops_args_info[op_type] = {};
core_ops_args_type_info[op_type] = {};
core_ops_args_info[op_type].resize(in_vars.size());
core_ops_args_type_info[op_type].resize(in_vars.size());
/* ------ Dygraph forward function generation ------ */
generated_function_body += " // Dygraph Forward Pass\n";
@@ -1246,10 +1254,14 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
"const std::vector<egr::EagerTensor>& %s";
input_args_str_list[input_position] =
paddle::string::Sprintf(FWD_INS_ARG_TEMPLATE, input_name);
core_ops_args_type_info[op_type][input_position] = "list";
} else {
const char* FWD_INS_ARG_TEMPLATE = "const egr::EagerTensor& %s";
input_args_str_list[input_position] =
paddle::string::Sprintf(FWD_INS_ARG_TEMPLATE, input_name);
core_ops_args_type_info[op_type][input_position] = "tensor";
}
core_ops_args_info[op_type][input_position] = input_name;
@@ -1318,11 +1330,14 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
paddle::string::Sprintf(FWD_NUM_ARG_TEMPLATE, output_var_name);
dygraph_function_args_str += arg_str;
core_ops_args_type_info[op_type].push_back("list");
} else {
const char* FWD_NUM_ARG_TEMPLATE = ", egr::EagerTensor* %s";
std::string arg_str =
paddle::string::Sprintf(FWD_NUM_ARG_TEMPLATE, output_var_name);
dygraph_function_args_str += arg_str;
core_ops_args_type_info[op_type].push_back("tensor");
}
const char* FWD_OUTS_CONTENT_TEMPLATE =
"{ \"%s\", egr::EagerUtils::TrySyncToVars(%s) },";
@@ -1344,6 +1359,7 @@ static std::pair<std::string, std::string> GenerateForwardFunctionContents(
outs_contents_str += paddle::string::Sprintf(FWD_OUTS_CONTENT_TEMPLATE,
output_name, outnum);
core_ops_args_info[op_type].push_back(outnum);
core_ops_args_type_info[op_type].push_back("int");
} else {
const char* FWD_OUTS_CONTENT_TEMPLATE =
"{ \"%s\", "
@@ -1811,6 +1827,11 @@ static std::string GenerateGradNodeCCContents(
}
*/
const char* EAGER_LOG_TEMPLATE =
" VLOG(3) << \"Running Eager Backward Node: GradNode%s\";\n";
std::string generated_grad_function_body =
paddle::string::Sprintf(EAGER_LOG_TEMPLATE, fwd_op_type);
// This is a Copy
auto op_base_infos = bwd_info.GetOpBaseInfos();
@@ -1829,7 +1850,6 @@ static std::string GenerateGradNodeCCContents(
op_base_infos.emplace_back(std::move(op_base_info));
}
std::string generated_grad_function_body = "";
size_t outs_size = 0;
for (size_t i = 0; i < op_base_infos.size(); i++) {
const auto& op_base_info = op_base_infos[i];
@@ -2030,6 +2050,9 @@ static std::string GenerateDygraphHFileIncludes() {
dygraph_forward_api_includes_str +=
"extern std::unordered_map<std::string, std::vector<std::string>> "
"core_ops_args_info;\n";
dygraph_forward_api_includes_str +=
"extern std::unordered_map<std::string, std::vector<std::string>> "
"core_ops_args_type_info;\n";
dygraph_forward_api_includes_str +=
"extern std::unordered_map<std::string, std::vector<std::string>> "
"core_ops_returns_info;\n\n";
@@ -2126,16 +2149,20 @@ static std::string GenerateCoreOpsReturnsInfo() {
"std::unordered_map<std::string, std::vector<std::string>> "
"core_ops_args_info = { %s };\n"
"std::unordered_map<std::string, std::vector<std::string>> "
"core_ops_args_type_info = { %s };\n"
"std::unordered_map<std::string, std::vector<std::string>> "
"core_ops_returns_info = { %s };\n";
std::string core_ops_args_info_init_str =
ConvertCoreOpsInfosToString(core_ops_args_info);
std::string core_ops_args_type_info_init_str =
ConvertCoreOpsInfosToString(core_ops_args_type_info);
std::string core_ops_returns_info_init_str =
ConvertCoreOpsInfosToString(core_ops_returns_info);
std::string core_ops_info_str = paddle::string::Sprintf(
Core_Ops_Returns_MAP_TEMPLATE, core_ops_args_info_init_str,
core_ops_args_type_info_init_str, core_ops_returns_info_init_str);
return core_ops_info_str;
}
...
@@ -121,6 +121,30 @@ static PyObject* eager_tensor_method__copy_to(EagerTensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method_reconstruct_from_(EagerTensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_SYNC_TRY
egr::EagerTensor src_tensor =
CastPyArg2EagerTensor(PyTuple_GET_ITEM(args, 0), 0);
bool blocking = CastPyArg2AttrBoolean(PyTuple_GET_ITEM(args, 1), 1);
std::string orig_name = self->eager_tensor.name();
VLOG(6) << "Start Reconstructing Tensor from" << src_tensor.name() << " to "
<< orig_name;
self->eager_tensor.copy_(src_tensor, blocking);
// Steal Tensor from src tensor
self->eager_tensor.set_tensor(src_tensor.Tensor());
// Recover source name
self->eager_tensor.set_name(orig_name);
VLOG(6) << "Finished Reconstructing Tensor from" << src_tensor.name()
<< " to " << self->eager_tensor.name();
Py_INCREF(Py_None);
return Py_None;
EAGER_CATCH_AND_THROW_RETURN_NULL
}
static PyObject* eager_tensor_method_copy_(EagerTensorObject* self,
PyObject* args, PyObject* kwargs) {
EAGER_SYNC_TRY
@@ -387,6 +411,9 @@ PyMethodDef variable_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"copy_", (PyCFunction)(void (*)(void))eager_tensor_method_copy_,
METH_VARARGS | METH_KEYWORDS, NULL},
{"reconstruct_from_",
(PyCFunction)(void (*)(void))eager_tensor_method_reconstruct_from_,
METH_VARARGS | METH_KEYWORDS, NULL},
{"retain_grads", (PyCFunction)(void (*)(void))eager_tensor_retain_grads,
METH_VARARGS | METH_KEYWORDS, NULL},
{"_clear_gradient",
...
@@ -313,6 +313,21 @@ static std::string GenerateCoreOpsInfoMap() {
" }\n"
"}\n"
"\n"
"static PyObject * eager_get_core_ops_args_type_info(PyObject *self) {\n"
" PyThreadState *tstate = nullptr;\n"
" try\n"
" {\n"
" return ToPyObject(core_ops_args_type_info);\n"
" }\n"
" catch(...) {\n"
" if (tstate) {\n"
" PyEval_RestoreThread(tstate);\n"
" }\n"
" ThrowExceptionToPython(std::current_exception());\n"
" return nullptr;\n"
" }\n"
"}\n"
"\n"
"static PyObject * eager_get_core_ops_returns_info(PyObject *self) {\n"
" PyThreadState *tstate = nullptr;\n"
" try\n"
@@ -399,6 +414,10 @@ int main(int argc, char* argv[]) {
"{\"get_core_ops_args_info\", "
"(PyCFunction)(void(*)(void))eager_get_core_ops_args_info, METH_NOARGS, "
"\"C++ interface function for eager_get_core_ops_args_info.\"},\n"
"{\"get_core_ops_args_type_info\", "
"(PyCFunction)(void(*)(void))eager_get_core_ops_args_type_info, "
"METH_NOARGS, "
"\"C++ interface function for eager_get_core_ops_args_type_info.\"},\n"
" {\"get_core_ops_returns_info\", "
"(PyCFunction)(void(*)(void))eager_get_core_ops_returns_info, "
"METH_NOARGS, \"C++ interface function for "
...
@@ -19,6 +19,7 @@ import six
from collections import defaultdict
from paddle.fluid import core
from paddle.fluid import framework
from paddle import _C_ops
class Tracer(core.Tracer):
@@ -46,6 +47,84 @@ class Tracer(core.Tracer):
attrs,
stop_gradient=False,
inplace_map=None):
if framework._in_eager_mode():
# inputs : {"sum": [tensor], ...}
# outputs : {"sum": [tensor], ...}
function_ptr = _C_ops.__dict__[type]
core_ops_args_info = _C_ops.get_core_ops_args_info()
core_ops_args_type_info = _C_ops.get_core_ops_args_type_info()
core_ops_returns_info = _C_ops.get_core_ops_returns_info()
op_args = core_ops_args_info[type]
op_args_type = core_ops_args_type_info[type]
op_returns = core_ops_returns_info[type]
arg_list = []
for i in range(len(op_args)):
arg_name = op_args[i]
arg_type = op_args_type[i]
if arg_name in inputs.keys():
arg_to_append = inputs[arg_name]
elif arg_name in outputs.keys():
arg_to_append = outputs[arg_name]
else:
if "Num" in arg_name:
# Remove "Num" suffix to get out_name
out_name = arg_name[:-3]
assert out_name in outputs.keys()
num_outs = len(outputs[out_name])
arg_to_append = num_outs
else:
arg_to_append = None
if arg_to_append is None:
arg_list.append(arg_to_append)
elif arg_type == "tensor":
if isinstance(arg_to_append, list):
arg_list.append(arg_to_append[0])
else:
arg_list.append(arg_to_append)
elif arg_type == "list":
assert isinstance(arg_to_append, list)
arg_list.append(arg_to_append)
else:
assert arg_type == "int"
assert isinstance(arg_to_append, int)
arg_list.append(arg_to_append)
attrs_list = []
for k, v in attrs.items():
attrs_list.append(k)
attrs_list.append(v)
returns = function_ptr(*arg_list, *attrs_list)
if isinstance(returns, tuple):
for i in range(len(op_returns)):
retname = op_returns[i]
if retname in outputs.keys():
# Replaced outputs by function returns
if isinstance(returns[i], list):
for j in range(len(returns[i])):
outputs[retname][j].reconstruct_from_(
returns[i][j], False)
else:
outputs[retname][0].reconstruct_from_(returns[i],
False)
elif isinstance(returns, list):
assert len(outputs.keys()) == 1
key = list(outputs.keys())[0]
for j in range(len(returns)):
outputs[key][j].reconstruct_from_(returns[j], False)
else:
assert len(outputs.keys()) == 1
key = list(outputs.keys())[0]
if isinstance(outputs[key], list):
outputs[key][0].reconstruct_from_(returns, False)
else:
outputs[key].reconstruct_from_(returns, False)
else:
self.trace(type, inputs, outputs, attrs,
framework._current_expected_place(), self._has_grad and
not stop_gradient, inplace_map if inplace_map else {})
...
@@ -104,6 +104,10 @@ foreach(TEST_OP ${MIXED_DIST_TEST_OPS})
list(REMOVE_ITEM TEST_OPS ${TEST_OP})
endforeach()
if(ON_INFER)
LIST(REMOVE_ITEM TEST_OPS test_eager_trace_op)
endif()
if(NOT WITH_GPU)
LIST(REMOVE_ITEM TEST_OPS test_fused_feedforward_op)
LIST(REMOVE_ITEM TEST_OPS test_fused_attention_op)
...
@@ -25,10 +25,12 @@ import time
import itertools
import collections
from collections import defaultdict
from copy import copy
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.backward import append_backward
from paddle.fluid.op import Operator
from paddle.fluid.executor import Executor
@@ -495,7 +497,7 @@ class OpTest(unittest.TestCase):
type=self.op_type,
inputs=inputs,
outputs=outputs,
attrs=copy(self.attrs) if hasattr(self, "attrs") else dict())
# infer variable type and infer shape in compile-time
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
@@ -1111,7 +1113,8 @@ class OpTest(unittest.TestCase):
no_check_set=None,
equal_nan=False,
check_dygraph=True,
inplace_atol=None,
check_eager=False):
self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
if self.dtype == np.float64 and \
self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_OUTPUT_THRESHOLD_OP_LIST:
@@ -1120,6 +1123,7 @@ class OpTest(unittest.TestCase):
if self.is_bfloat16_op():
if self.is_mkldnn_op():
check_dygraph = False
check_eager = False
if hasattr(self, 'force_fp32_output') and getattr(
self, 'force_fp32_output'):
atol = 1e-2
@@ -1136,6 +1140,10 @@ class OpTest(unittest.TestCase):
if check_dygraph:
dygraph_outs = self._calc_dygraph_output(
place, no_check_set=no_check_set)
if check_eager:
with _test_eager_guard():
eager_dygraph_outs = self._calc_dygraph_output(
place, no_check_set=no_check_set)
outs, fetch_list = self._calc_output(place, no_check_set=no_check_set)
for out_name, out_dup in Operator.get_op_outputs(self.op_type):
@@ -1178,6 +1186,13 @@ class OpTest(unittest.TestCase):
sub_out_name, dygraph_outs, place)
imperative_actual_t = np.array(imperative_actual.value()
.get_tensor())
if check_eager:
with _test_eager_guard():
eager_imperative_actual = find_imperative_actual(
sub_out_name, eager_dygraph_outs, place)
eager_imperative_actual_t = eager_imperative_actual.numpy(
)
idx = find_actual(sub_out_name, fetch_list)
actual = outs[idx]
actual_t = np.array(actual)
@@ -1197,6 +1212,16 @@ class OpTest(unittest.TestCase):
equal_nan=equal_nan),
"Output (" + sub_out_name + ") has diff at " +
str(place) + " in dygraph mode")
if check_eager:
with _test_eager_guard():
self.assertTrue(
np.allclose(
eager_imperative_actual_t,
expect_t,
atol=atol,
equal_nan=equal_nan),
"Output (" + sub_out_name + ") has diff at " +
str(place) + " in eager dygraph mode")
if isinstance(expect, tuple):
self.assertListEqual(
actual.recursive_sequence_lengths(), expect[1],
@@ -1209,12 +1234,27 @@ class OpTest(unittest.TestCase):
"Output (" + out_name +
") has different lod at " + str(place) +
" in dygraph mode")
if check_eager:
with _test_eager_guard():
self.assertListEqual(
eager_imperative_actual.value().get_tensor()
.recursive_sequence_lengths(), expect[1],
"Output (" + out_name +
") has different lod at " + str(place) +
" in eager dygraph mode")
else:
if check_dygraph:
imperative_actual = find_imperative_actual(
out_name, dygraph_outs, place)
imperative_actual_t = np.array(imperative_actual.value()
.get_tensor())
if check_eager:
with _test_eager_guard():
eager_imperative_actual = find_imperative_actual(
out_name, eager_dygraph_outs, place)
eager_imperative_actual_t = eager_imperative_actual.numpy(
)
idx = find_actual(out_name, fetch_list)
actual = outs[idx]
actual_t = np.array(actual)
@@ -1275,6 +1315,32 @@ class OpTest(unittest.TestCase):
str(place) + "\nExpect " + str(expect_t) + "\n" +
"But Got" + str(imperative_actual_t) + " in class "
+ self.__class__.__name__)
if check_eager:
with _test_eager_guard():
if self.is_bfloat16_op():
if eager_imperative_actual_t.dtype == np.uint16:
eager_imperative_actual_t = convert_uint16_to_float(
eager_imperative_actual_t)
if expect_t.dtype == np.uint16:
expect_t = convert_uint16_to_float(expect_t)
if six.moves.reduce(lambda x, y: x * y,
eager_imperative_actual_t.shape,
1) == 0 and six.moves.reduce(
lambda x, y: x * y,
expect_t.shape, 1) == 0:
pass
else:
self.assertTrue(
np.allclose(
eager_imperative_actual_t,
expect_t,
atol=atol,
rtol=rtol,
equal_nan=equal_nan),
"Output (" + out_name + ") has diff at " +
str(place) + "\nExpect " + str(expect_t) + "\n"
+ "But Got" + str(eager_imperative_actual_t) +
" in class " + self.__class__.__name__)
if isinstance(expect, tuple):
self.assertListEqual(actual.recursive_sequence_lengths(),
expect[1], "Output (" + out_name +
@@ -1284,7 +1350,15 @@ class OpTest(unittest.TestCase):
imperative_actual.value().get_tensor()
.recursive_sequence_lengths(), expect[1],
"Output (" + out_name + ") has different lod at " +
str(place) + " in eager dygraph mode")
if check_eager:
with _test_eager_guard():
self.assertListEqual(
eager_imperative_actual.value().get_tensor()
.recursive_sequence_lengths(), expect[1],
"Output (" + out_name +
") has different lod at " + str(place) +
" in eager dygraph mode")
# Note(zhiqiu): inplace_atol should be only set when op doesn't ensure
# computational consistency.
@@ -1306,7 +1380,9 @@ class OpTest(unittest.TestCase):
self.check_inplace_output_with_place(
place, no_check_set=no_check_set, inplace_atol=inplace_atol)
if check_eager:
return outs, dygraph_outs, eager_dygraph_outs, fetch_list
elif check_dygraph:
return outs, dygraph_outs, fetch_list
else:
return outs, fetch_list
@@ -1377,7 +1453,8 @@ class OpTest(unittest.TestCase):
no_check_set=None,
equal_nan=False,
check_dygraph=True,
inplace_atol=None,
check_eager=False):
self.__class__.op_type = self.op_type
if self.is_mkldnn_op():
self.__class__.use_mkldnn = True
@@ -1387,10 +1464,18 @@ class OpTest(unittest.TestCase):
places = self._get_places()
for place in places:
res = self.check_output_with_place(
place,
atol,
no_check_set,
equal_nan,
check_dygraph,
inplace_atol,
check_eager=check_eager)
if check_eager:
assert check_dygraph == True
outs, dygraph_outs, eager_dygraph_outs, fetch_list = res
elif check_dygraph:
outs, dygraph_outs, fetch_list = res
else:
outs, fetch_list = res
@@ -1461,14 +1546,23 @@ class OpTest(unittest.TestCase):
max_relative_error=0.005,
user_defined_grads=None,
user_defined_grad_outputs=None,
check_dygraph=True,
check_eager=False):
self._check_grad_helper()
places = self._get_places()
for place in places:
self.check_grad_with_place(
place,
inputs_to_check,
output_names,
no_grad_set,
numeric_grad_delta,
in_place,
max_relative_error,
user_defined_grads,
user_defined_grad_outputs,
check_dygraph,
check_eager=check_eager)
def check_grad_with_place(self,
place,
@@ -1481,7 +1575,8 @@ class OpTest(unittest.TestCase):
user_defined_grads=None,
user_defined_grad_outputs=None,
check_dygraph=True,
numeric_place=None,
check_eager=False):
self.scope = core.Scope()
op_inputs = self.inputs if hasattr(self, "inputs") else dict()
op_outputs = self.outputs if hasattr(self, "outputs") else dict()
@@ -1490,6 +1585,7 @@ class OpTest(unittest.TestCase):
self._check_grad_helper()
if self.is_bfloat16_op() and self.is_mkldnn_op():
check_dygraph = False
check_eager = False
if self.dtype == np.float64 and \
self.op_type not in op_threshold_white_list.NEED_FIX_FP64_CHECK_GRAD_THRESHOLD_OP_LIST:
@@ -1592,6 +1688,22 @@ class OpTest(unittest.TestCase):
max_relative_error,
"Gradient Check On %s" % str(place))
if check_eager:
with _test_eager_guard():
eager_dygraph_grad = self._get_dygraph_grad(
inputs_to_check, place, output_names,
user_defined_grad_outputs, no_grad_set)
fp32_grads = []
for grad in eager_dygraph_grad:
if grad.dtype == np.uint16:
grad = convert_uint16_to_float(grad)
max_relative_error = 0.03 if max_relative_error < 0.03 else max_relative_error
fp32_grads.append(grad)
eager_dygraph_grad = fp32_grads
self._assert_is_close(numeric_grads, eager_dygraph_grad,
inputs_to_check, max_relative_error,
"Gradient Check On %s" % str(place))
def _find_var_in_dygraph(self, output_vars, name):
if name in output_vars:
return output_vars[name]
...
@@ -78,7 +78,8 @@ class XPUOpTest(OpTest):
no_check_set=None,
equal_nan=False,
check_dygraph=True,
inplace_atol=None,
check_eager=False):
self.infer_dtype_from_inputs_outputs(self.inputs, self.outputs)
#xpu not support float64
if self.dtype == np.float64:
@@ -105,7 +106,8 @@ class XPUOpTest(OpTest):
user_defined_grads=None,
user_defined_grad_outputs=None,
check_dygraph=True,
numeric_place=None,
check_eager=False):
if place == None:
place = paddle.XPUPlace(0)
...
@@ -41,7 +41,7 @@ class TestDiagV2Op(OpTest):
def test_check_output(self):
paddle.enable_static()
self.check_output(check_eager=True)
def init_config(self):
pass
...
@@ -40,10 +40,10 @@ class TestDigammaOp(OpTest):
self.dtype = np.float64
def test_check_output(self):
self.check_output(check_eager=True)
def test_check_grad_normal(self):
self.check_grad(['X'], 'Out', check_eager=True)
class TestDigammaOpFp32(TestDigammaOp):
...
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle
import paddle.fluid.core as core
from paddle import _C_ops
import paddle.fluid as fluid
from paddle.fluid import Program, program_guard
from paddle.fluid.framework import _test_eager_guard
class TestEagerTraceOp(unittest.TestCase):
def test_branches(self):
with _test_eager_guard():
data = np.random.random([1, 1]).astype(np.float32)
x = paddle.to_tensor(data)
paddle.fluid.framework._dygraph_tracer().trace_op(
'broadcast_tensors', {'X': [x, x],
'Out': [x, x]}, {'Out': [x, x]}, {})
paddle.fluid.framework._dygraph_tracer().trace_op(
'scale', {'X': x}, {'Out': x}, {'scale': 0.5})
scale = paddle.to_tensor(np.random.random([1]).astype(np.float32))
paddle.fluid.framework._dygraph_tracer().trace_op(
'instance_norm', {'Scale': [scale],
'X': [x]}, {'Y': [x]}, {})
paddle.fluid.framework._dygraph_tracer().trace_op(
'coalesce_tensor', {'Input': [x]}, {'Output': [x]},
{'dtype': int(core.VarDesc.VarType.FP32)})
if __name__ == "__main__":
unittest.main()
@@ -37,10 +37,10 @@ class TestTruncOp(OpTest):
self.dtype = np.float64
def test_check_output(self):
self.check_output(check_eager=True)
def test_check_grad(self):
self.check_grad(['X'], 'Out', numeric_grad_delta=1e-5, check_eager=True)
class TestFloatTruncOp(TestTruncOp):
...