Unverified commit 4ccd5cb8, authored by 0x45f, committed by GitHub

Refine eager run_program OP for dy2st UT (#40768)

* Refine eager run_program OP for dy2st UT

* append run_program error string and refine run_program_grad

* remove some comments

* refine ConstructXGradTensors
Parent e6cbd72d
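
For context, here is a minimal sketch (not part of this commit; function and variable names are illustrative) of the dynamic-to-static pattern the dy2st unit tests exercise: a @declarative function is traced into a static program, and its forward and backward replay through the eager run_program op refined below.

import paddle
from paddle.fluid.dygraph.jit import declarative

@declarative
def add_and_reduce(x):
    return paddle.mean(x + 2)

x = paddle.randn([2, 2])
x.stop_gradient = False
out = add_and_reduce(x)  # forward replays the traced program via run_program
out.backward()           # backward goes through GradNodeRunProgram
print(x.grad)            # assumes eager mode is enabled; grad of the mean w.r.t. x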
@@ -51,13 +51,12 @@ static std::vector<std::string> GetTensorsName(
 }
 
 static void CheckInputVarStatus(const Tensor &tensor) {
-  PADDLE_ENFORCE_EQ(
-      tensor.defined() && phi::DenseTensor::classof(tensor.impl().get()), true,
-      paddle::platform::errors::InvalidArgument(
-          "The input tensor %s of "
-          "RunProgram(Grad)Op holds "
-          "wrong type. Expect type is DenseTensor.",
-          tensor.name()));
+  PADDLE_ENFORCE_EQ(tensor.defined() && tensor.is_dense_tensor(), true,
+                    paddle::platform::errors::InvalidArgument(
+                        "The input tensor %s of "
+                        "RunProgram(Grad)Op holds "
+                        "wrong type. Expect type is DenseTensor.",
+                        tensor.name()));
   PADDLE_ENFORCE_EQ(tensor.initialized(), true,
                     paddle::platform::errors::InvalidArgument(
@@ -74,7 +73,7 @@ static void CheckOutputVarStatus(const paddle::framework::Variable &src_var,
                     paddle::platform::errors::InvalidArgument(
                         "dst_tensor shall be defined."));
 
-  if (phi::DenseTensor::classof(dst_tensor.impl().get())) {
+  if (dst_tensor.is_dense_tensor()) {
     auto &src_tensor = src_var.Get<phi::DenseTensor>();
     PADDLE_ENFORCE_EQ(phi::DenseTensor::classof(&src_tensor), true,
                       paddle::platform::errors::InvalidArgument(
@@ -88,7 +87,7 @@ static void CheckOutputVarStatus(const paddle::framework::Variable &src_var,
                           "RunProgram(Grad)Op's internal "
                           "scope is not initialized.",
                           name));
-  } else if (phi::SelectedRows::classof(dst_tensor.impl().get())) {
+  } else if (dst_tensor.is_selected_rows()) {
     auto &src_tensor = src_var.Get<phi::SelectedRows>();
     PADDLE_ENFORCE_EQ(phi::SelectedRows::classof(&src_tensor), true,
                       paddle::platform::errors::InvalidArgument(
@@ -159,9 +158,6 @@ static void ShareTensorsFromScope(
             name));
     CheckOutputVarStatus(*var, *tensors[i]);
     // share tensor
-    // TODO(dev): Determine Tensor type by scope.var
-    // auto tensor_base = tensors[i]->impl();
-    // if (phi::DenseTensor::classof(tensor_base.get())) {
     if (var->IsType<phi::DenseTensor>()) {
       auto &src_tensor = var->Get<phi::DenseTensor>();
       auto *dst_tensor = const_cast<phi::DenseTensor *>(
@@ -169,7 +165,6 @@ static void ShareTensorsFromScope(
       VLOG(2) << "share " << name << " from scope";
       *dst_tensor = src_tensor;
     } else if (var->IsType<phi::SelectedRows>()) {
-      // } else if (phi::SelectedRows::classof(tensor_base.get())) {
       auto &src_tensor = var->Get<phi::SelectedRows>();
       auto *dst_tensor = const_cast<phi::SelectedRows *>(
           dynamic_cast<const phi::SelectedRows *>(tensors[i]->impl().get()));
@@ -202,7 +197,6 @@ inline void RunProgramAPI(
           "The OutScope of RunProgramGradOp should only hold one scope."));
 
   // Step 2. prepare executor and init persistable variables
   // NOTE(Aurelius84): While training some models, forward can be called many
   // times and then apply backpropagation all at once, such as Reinforcement
   // Learning. Tensor data in multi-step training should be saved into single
@@ -277,11 +271,6 @@ inline void RunProgramGradAPI(
   // if all output vars are set to stop_gradient, grad op no need to executed
   if (x_grad.empty() && params_grad.empty()) return;
 
-  // TODO(dev): Remove this line hard code. And need to deal with the out_grad
-  // name problem.
-  // const_cast<paddle::experimental::Tensor &>(out_grad[0])
-  //     .set_name("matmul_v2_0.tmp_0@GRAD");
   auto *global_block =
       BOOST_GET_CONST(paddle::framework::BlockDesc *, attrs.at("global_block"));
   auto orig_end_op_index = BOOST_GET_CONST(int64_t, attrs.at("end_op_index"));
@@ -381,8 +370,8 @@ class GradNodeRunProgram : public egr::GradNodeBase {
     VLOG(3) << "out_grads[0].size() : " << grads[0].size();
     std::vector<paddle::experimental::Tensor> x_grad;
     std::vector<paddle::experimental::Tensor> params_grad;
-    ConstructGradTensors(x_, &x_grad);
-    ConstructGradTensors(params_, &params_grad);
+    ConstructXGradTensors(x_, &x_grad);
+    ConstructParamGradTensors(params_, &params_grad);
     std::vector<paddle::experimental::Tensor *> x_grad_ptr;
     std::vector<paddle::experimental::Tensor *> params_grad_ptr;
     for (auto &i : x_grad) {
@@ -392,9 +381,6 @@ class GradNodeRunProgram : public egr::GradNodeBase {
       params_grad_ptr.emplace_back(&i);
     }
 
-    // auto x_grad_ptr = ConstructGradTensors(x_);
-    // auto params_grad_ptr = ConstructGradTensors(params_);
     PADDLE_ENFORCE_EQ(
         grads[0].size(), fwd_out_names_.size(),
         paddle::platform::errors::InvalidArgument(
@@ -412,7 +398,6 @@ class GradNodeRunProgram : public egr::GradNodeBase {
                       params_grad_ptr);
     VLOG(3) << "End Eager Backward Node: GradNodeRunProgram";
 
     return {x_grad, params_grad};
-    // return {x_grad, details::DereferenceTensors(params_grad_ptr)};
   }
 
   void ClearTensorWrappers() override { VLOG(6) << "Do nothing here now"; }
@@ -447,29 +432,35 @@ class GradNodeRunProgram : public egr::GradNodeBase {
   }
 
  protected:
-  void ConstructGradTensors(
-      const std::vector<paddle::experimental::Tensor> &fwd_tensors,
-      std::vector<paddle::experimental::Tensor> *grad_tensors) {
+  void ConstructXGradTensors(
+      const std::vector<paddle::experimental::Tensor> &x,
+      std::vector<paddle::experimental::Tensor> *x_grad) {
     // TODO(dev): Need an elegant way to determine inforamtion of grad_tensor,
     // such as: name, tensor type(DenseTensor or SelectedRows).
-    VLOG(3) << "fwd_tensors.size(): " << fwd_tensors.size();
-    for (auto &fwd_t : fwd_tensors) {
-      if (phi::DenseTensor::classof(fwd_t.impl().get())) {
-        grad_tensors->emplace_back(std::make_shared<phi::DenseTensor>());
-      } else if (phi::SelectedRows::classof(fwd_t.impl().get())) {
-        grad_tensors->emplace_back(std::make_shared<phi::SelectedRows>());
+    for (auto &t : x) {
+      if (t.is_dense_tensor()) {
+        x_grad->emplace_back(std::make_shared<phi::DenseTensor>());
+      } else if (t.is_selected_rows()) {
+        x_grad->emplace_back(std::make_shared<phi::SelectedRows>());
       }
-      auto &grad_t = grad_tensors->back();
-      grad_t.set_name(fwd_t.name() + "@GRAD");
+      x_grad->back().set_name(t.name() + "@GRAD");
     }
   }
 
-  void ConstructGradTensors(
-      const std::vector<paddle::experimental::Tensor> &fwd_tensors) {
-    VLOG(3) << "fwd_tensors.size(): " << fwd_tensors.size();
-    for (auto &fwd_t : fwd_tensors) {
-      auto grad_tesnor = egr::EagerUtils::unsafe_autograd_meta(fwd_t)->Grad();
-      grad_tesnor.set_name(fwd_t.name() + "@GRAD");
+  void ConstructParamGradTensors(
+      const std::vector<paddle::experimental::Tensor> &param,
+      std::vector<paddle::experimental::Tensor> *param_grad) {
+    for (auto &t : param) {
+      auto t_meta = egr::EagerUtils::unsafe_autograd_meta(t);
+      auto t_grad = egr::EagerUtils::unsafe_autograd_meta(t)->Grad();
+      if (t_meta->StopGradient()) {
+        param_grad->emplace_back();
+      } else if (t_grad.is_dense_tensor()) {
+        param_grad->emplace_back(std::make_shared<phi::DenseTensor>());
+      } else if (t_grad.is_selected_rows()) {
+        param_grad->emplace_back(std::make_shared<phi::SelectedRows>());
+      }
+      param_grad->back().set_name(t.name() + "@GRAD");
     }
   }
......
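
A hedged sketch of what the StopGradient branch in ConstructParamGradTensors above implies for dy2st training under eager mode: a parameter marked stop_gradient gets an empty grad slot instead of a freshly allocated DenseTensor, so it should end up with no gradient after backward. The layer and shapes below are illustrative, not taken from this commit.

import paddle
from paddle.fluid.dygraph.jit import declarative

linear = paddle.nn.Linear(4, 4)
linear.bias.stop_gradient = True       # this parameter should receive no gradient

@declarative
def forward(x):
    return paddle.mean(linear(x))

loss = forward(paddle.randn([2, 4]))
loss.backward()
print(linear.weight.grad is not None)  # expected True: its grad slot is a DenseTensor
print(linear.bias.grad is None)        # expected True: the StopGradient branch leaves it empty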
@@ -271,6 +271,7 @@ void EagerUtils::GetOutput(const std::shared_ptr<EagerVariable>& out,
                         "shared_ptr, this error may indicate some outputs "
                         "are nullptr"));
   out_var->set_impl(out->GetTensorBase());
+  out_var->set_name(out->name());
 }
 
 void EagerUtils::GetOutputs(
......
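
The one-line GetOutput change above also copies the internal variable name onto the returned eager tensor; the test added near the bottom of this diff depends on it. A small hedged sketch of the observable effect (assuming eager mode is enabled):

import paddle

t = paddle.randn([2, 2])
print(t.name)  # expected to contain "eager_tmp", per the new assertion in test_egr_python_api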
@@ -14,6 +14,7 @@
 #pragma once
 
 #include <iostream>
+#include "paddle/phi/core/enforce.h"
 
 static PyObject *eager_api_run_program(PyObject *self, PyObject *args,
                                        PyObject *kwargs) {
@@ -33,13 +34,24 @@ static PyObject *eager_api_run_program(PyObject *self, PyObject *args,
     run_program_dygraph_function(X, Params, Out, OutScope, DOut, attrs);
     PyEval_RestoreThread(tstate);
     tstate = nullptr;
+    Py_RETURN_NONE;
+  } catch (paddle::platform::EnforceNotMet &exception) {
+    if (tstate) {
+      PyEval_RestoreThread(tstate);
+    }
+    std::ostringstream sout;
+    sout << exception.what();
+    sout << " [operator < run_program > error]";
+    exception.set_error_str(sout.str());
+    ThrowExceptionToPython(std::current_exception());
+    return nullptr;
   } catch (...) {
     if (tstate) {
       PyEval_RestoreThread(tstate);
    }
     ThrowExceptionToPython(std::current_exception());
+    return nullptr;
   }
-  Py_RETURN_NONE;
 }
 
 static PyMethodDef CustomEagerFinalStateMethods[] = {
......
@@ -959,11 +959,11 @@ static PyObject* tensor__set_grad_type(TensorObject* self, PyObject* args,
   EAGER_TRY
   auto var_type = pybind::CastPyArg2ProtoType(PyTuple_GET_ITEM(args, 0), 0);
   auto grad_tensor =
-      egr::EagerUtils::unsafe_autograd_meta(self->tensor)->Grad();
+      egr::EagerUtils::unsafe_autograd_meta(self->tensor)->MutableGrad();
   if (var_type == framework::proto::VarType::LOD_TENSOR) {
-    grad_tensor.set_impl(std::make_shared<phi::DenseTensor>());
+    grad_tensor->set_impl(std::make_shared<phi::DenseTensor>());
   } else if (var_type == framework::proto::VarType::SELECTED_ROWS) {
-    grad_tensor.set_impl(std::make_shared<phi::SelectedRows>());
+    grad_tensor->set_impl(std::make_shared<phi::SelectedRows>());
   }
   return Py_None;
   EAGER_CATCH_AND_THROW_RETURN_NULL
......
@@ -105,9 +105,8 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''):
         if not isinstance(expected_type, tuple):
             expected_type = (expected_type, )
         expected_type += (core.VarBase, )
-        # TODO(jiabin): uncomment it when we support declarative mode in eager
-        # if _in_eager_mode():
-        #     expected_type += (core.eager.Tensor, )
+        if core._in_eager_mode():
+            expected_type += (core.eager.Tensor, )
     elif isinstance(input, core.VarBase):
         raise TypeError(
             "Please use `with fluid.dygraph.guard()` as context or `fluid.enable_dygraph()` to switch to imperative mode firstly. "
......
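
The check_type change above matters while a function is being transformed by dy2st, where inputs may already be core.eager.Tensor objects rather than VarBase. A hedged, self-contained sketch of the accept-both-tensor-types pattern it enables (check_type itself is not called here; the expected types are illustrative):

import paddle
import paddle.fluid.core as core

expected_type = (paddle.static.Variable, )
expected_type += (core.VarBase, )
if core._in_eager_mode():
    expected_type += (core.eager.Tensor, )

x = paddle.randn([2, 2])             # VarBase or eager Tensor, depending on the mode
print(isinstance(x, expected_type))  # True in either dygraph flavor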
@@ -20,6 +20,7 @@ import unittest
 import paddle
 from paddle.fluid.dygraph.jit import declarative
 from paddle.fluid.dygraph.dygraph_to_static.program_translator import ProgramTranslator
+import paddle.fluid.core as core
 
 from ifelse_simple_func import *
@@ -379,7 +380,7 @@ class TestDy2StIfElseRetInt1(unittest.TestCase):
         return out
 
     def test_ast_to_func(self):
-        self.assertIsInstance(self.out[0], paddle.Tensor)
+        self.assertIsInstance(self.out[0], (paddle.Tensor, core.eager.Tensor))
         self.assertIsInstance(self.out[1], int)
@@ -390,8 +391,8 @@ class TestDy2StIfElseRetInt2(TestDy2StIfElseRetInt1):
         self.out = self.get_dy2stat_out()
 
     def test_ast_to_func(self):
-        self.assertIsInstance(self.out[0], paddle.Tensor)
-        self.assertIsInstance(self.out[1], paddle.Tensor)
+        self.assertIsInstance(self.out[0], (paddle.Tensor, core.eager.Tensor))
+        self.assertIsInstance(self.out[1], (paddle.Tensor, core.eager.Tensor))
 
 
 class TestDy2StIfElseRetInt3(TestDy2StIfElseRetInt1):
@@ -401,7 +402,7 @@ class TestDy2StIfElseRetInt3(TestDy2StIfElseRetInt1):
         self.out = self.get_dy2stat_out()
 
     def test_ast_to_func(self):
-        self.assertIsInstance(self.out, paddle.Tensor)
+        self.assertIsInstance(self.out, (paddle.Tensor, core.eager.Tensor))
 
 
 class TestDy2StIfElseRetInt4(TestDy2StIfElseRetInt1):
......
@@ -118,7 +118,8 @@ class TestWithNestedOutput(unittest.TestCase):
         self.assertTrue(len(dygraph_res) == len(static_res))
 
         for dy_var, st_var in zip(dygraph_res, static_res):
-            if isinstance(dy_var, fluid.core.VarBase):
+            if isinstance(dy_var,
+                          (fluid.core.VarBase, fluid.core.eager.Tensor)):
                 self.assertTrue(np.allclose(dy_var.numpy(), st_var.numpy()))
             else:
                 self.assertTrue(dy_var, st_var)
......
@@ -218,7 +218,7 @@ class TestReturnBase(unittest.TestCase):
         res = self.dygraph_func(self.input)
         if isinstance(res, (tuple, list)):
             return tuple(r.numpy() for r in res)
-        elif isinstance(res, core.VarBase):
+        elif isinstance(res, (core.VarBase, core.eager.Tensor)):
             return res.numpy()
         return res
......
@@ -251,6 +251,9 @@ class EagerVariablePropertiesAndMethodsTestCase(unittest.TestCase):
         self.assertTrue(egr_tensor12.place._equals(paddle.fluid.CPUPlace()))
         self.assertTrue(np.array_equal(egr_tensor12.numpy(), x))
 
+        egr_tensor13 = paddle.randn([2, 2])
+        self.assertTrue("eager_tmp" in egr_tensor13.name)
+
         with self.assertRaisesRegexp(
                 ValueError, "The shape of Parameter should not be None"):
             eager_param = EagerParamBase(shape=None, dtype="float32")
......