Unverified commit 7c555f4e, authored by 0x45f, committed by GitHub

Fix test_run_program_op.py (#41141)

Parent 1faefc93
@@ -185,7 +185,13 @@ inline void RunProgramAPI(
   VLOG(2) << "RunProgramOpKernel Compute";
   auto start_op_index = BOOST_GET_CONST(int64_t, attrs.at("start_op_index"));
   auto end_op_index = BOOST_GET_CONST(int64_t, attrs.at("end_op_index"));
-  auto is_test = BOOST_GET_CONST(bool, attrs.at("is_test"));
+  // In the original run_program OP, the default value of the is_test
+  // attribute is false, we should check if there is is_test parameter
+  // in attrs
+  auto is_test = false;
+  if (attrs.count("is_test")) {
+    is_test = BOOST_GET_CONST(bool, attrs.at("is_test"));
+  }
   auto program_id = BOOST_GET_CONST(int64_t, attrs.at("program_id"));
   // NOTE(chenweihang): In order not to add new variable type, use vector
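For reference, a minimal Python sketch (the resolve_is_test helper is hypothetical and not part of the patch) of the lookup-with-default pattern the new C++ code applies: a missing is_test entry falls back to the op's historical default of false.

# Hypothetical illustration of the attribute fallback implemented above:
# attrs may or may not carry is_test, and a missing entry means the
# historical default False.
def resolve_is_test(attrs):
    # dict.get with a default mirrors attrs.count("is_test") + BOOST_GET_CONST
    return attrs.get('is_test', False)

print(resolve_is_test({'start_op_index': 0}))                    # False
print(resolve_is_test({'start_op_index': 0, 'is_test': True}))   # True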
@@ -447,12 +453,11 @@ class GradNodeRunProgram : public egr::GradNodeBase {
       const std::vector<paddle::experimental::Tensor> &param,
       std::vector<paddle::experimental::Tensor> *param_grad) {
     for (auto &t : param) {
-      auto t_meta = egr::EagerUtils::unsafe_autograd_meta(t);
       auto t_grad = egr::EagerUtils::unsafe_autograd_meta(t)->Grad();
       // In eager mode, the number of param_grad should be the same as
       // param, so here an empty Tensor is added for the param with
       // stop_gradient=True
-      if (t_meta->StopGradient()) {
+      if (!t_grad.defined()) {
        param_grad->emplace_back();
      } else if (t_grad.is_dense_tensor()) {
        param_grad->emplace_back(std::make_shared<phi::DenseTensor>());
...
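The defined() check above works because, in eager mode, a tensor created with stop_gradient=True never receives a gradient. A hedged dygraph sketch (plain Paddle API, independent of run_program) showing that its .grad stays None after backward:

import paddle

x = paddle.to_tensor([1.0, 2.0], stop_gradient=False)
w = paddle.to_tensor([3.0, 4.0], stop_gradient=True)   # frozen, like a stop_gradient param
loss = (x * w).sum()
loss.backward()
print(x.grad is None)   # False: the trainable input received a gradient
print(w.grad is None)   # True: no grad is ever defined for stop_gradient=True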
@@ -20,10 +20,12 @@ import numpy as np
 import six

 import paddle
+from paddle import _C_ops
 import paddle.fluid as fluid
 from paddle import compat as cpt
 from paddle.fluid import core, framework, executor
 from paddle.fluid.layers.utils import _hash_with_id
+from paddle.fluid.framework import _in_eager_mode_

 paddle.enable_static()
@@ -95,11 +97,9 @@ class RunProgramOpTest(unittest.TestCase):
         return fluid.default_main_program().desc, fwd_op_num

     def prepare_attrs(self):
-        return {
-            'global_block': self.program_desc.block(0),
-            'start_op_index': 0,
-            'end_op_index': self.fwd_op_num
-        }
+        return ('global_block', self.program_desc.block(0), 'start_op_index', 0,
+                'end_op_index', self.fwd_op_num, 'program_id',
+                _hash_with_id(self.program_desc))

     def get_param_grad_names(self):
         grad_names = []
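prepare_attrs now returns a flat tuple rather than a dict because the _C_ops entry point takes attributes as interleaved name/value arguments, later unpacked as *self.attrs. A hedged sketch with a hypothetical make_run_program_attrs helper (not in the test), in the same order prepare_attrs builds them:

from paddle.fluid.layers.utils import _hash_with_id

def make_run_program_attrs(program_desc, fwd_op_num):
    # Interleaved attribute names and values, ready to be splatted into the
    # op call positionally.
    return ('global_block', program_desc.block(0),
            'start_op_index', 0,
            'end_op_index', fwd_op_num,
            'program_id', _hash_with_id(program_desc))

# consumed later roughly as:
# _C_ops.run_program(x, params, out, out_scope, dout, *make_run_program_attrs(desc, n))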
@@ -127,8 +127,12 @@ class RunProgramOpTest(unittest.TestCase):
     def prepare_dygraph_input(self, place, return_param_list=False):
         def create_var_base(is_input, name, np_value, stop_gradient):
-            var = core.VarBase(
-                value=np_value, name=name, place=place, zero_copy=True)
+            if _in_eager_mode_:
+                var = core.eager.Tensor(
+                    value=np_value, name=name, place=place, zero_copy=True)
+            else:
+                var = core.VarBase(
+                    value=np_value, name=name, place=place, zero_copy=True)
             var.stop_gradient = stop_gradient
             return var
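A condensed, hedged sketch of the branch above: the same keyword arguments build either a core.eager.Tensor (eager mode) or a legacy core.VarBase. The make_var helper and the CPUPlace usage are illustrative, not part of the test.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid import core
from paddle.fluid.framework import _in_eager_mode_

def make_var(name, np_value, place, stop_gradient):
    # Pick the eager Tensor class when eager mode is on, otherwise VarBase;
    # both accept the same constructor keywords used in the test.
    cls = core.eager.Tensor if _in_eager_mode_ else core.VarBase
    var = cls(value=np_value, name=name, place=place, zero_copy=True)
    var.stop_gradient = stop_gradient
    return var

with fluid.dygraph.guard(core.CPUPlace()):
    v = make_var('x', np.ones([2, 3], dtype='float32'), core.CPUPlace(), False)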
@@ -162,12 +166,15 @@ class RunProgramOpTest(unittest.TestCase):
         for name in self.output_names['Out']:
             outputs['Out'].append(create_var_base(False, name))

-        outputs['OutScope'] = framework._varbase_creator(
-            type=core.VarDesc.VarType.STEP_SCOPES,
-            name="program_out_scope",
-            persistable=True)
-        inner_scope = core.Scope()
-        outputs['OutScope'].value().set_scope(inner_scope)
+        if _in_eager_mode_:
+            outputs['OutScope'] = [core.Scope()]
+        else:
+            outputs['OutScope'] = framework._varbase_creator(
+                type=core.VarDesc.VarType.STEP_SCOPES,
+                name="program_out_scope",
+                persistable=True)
+            inner_scope = core.Scope()
+            outputs['OutScope'].value().set_scope(inner_scope)

         outputs['DOut'] = [create_var_base(False, "Fake_var")]
         return outputs
@@ -175,34 +182,28 @@ class RunProgramOpTest(unittest.TestCase):
     def calc_dygraph_output(self, place):
         self.program_desc, self.fwd_op_num = self.get_program_desc()
         self.attrs = self.prepare_attrs()
-        self.attrs['program_id'] = _hash_with_id(self.program_desc)

         with fluid.dygraph.guard(place):
             inputs = self.prepare_dygraph_input(place)
             outputs = self.prepare_dygraph_output()

-            framework._dygraph_tracer().trace_op(
-                type=self.op_type,
-                inputs=inputs,
-                outputs=outputs,
-                attrs=self.attrs)
+            _C_ops.run_program(inputs['X'], inputs['Params'], outputs['Out'],
+                               outputs['OutScope'], outputs['DOut'],
+                               *self.attrs)

             return outputs['Out']

     def calc_dygraph_grad(self, place):
         self.program_desc, self.fwd_op_num = self.get_program_desc()
         self.attrs = self.prepare_attrs()
-        self.attrs['program_id'] = _hash_with_id(self.program_desc)

         with fluid.dygraph.guard(place):
             # Step 1. run forward
             inputs, input_param_list = self.prepare_dygraph_input(place, True)
             outputs = self.prepare_dygraph_output()

-            framework._dygraph_tracer().trace_op(
-                type=self.op_type,
-                inputs=inputs,
-                outputs=outputs,
-                attrs=self.attrs)
+            _C_ops.run_program(inputs['X'], inputs['Params'], outputs['Out'],
+                               outputs['OutScope'], outputs['DOut'],
+                               *self.attrs)

             for param in input_param_list:
                 var_type = self._get_grad_vartype(param.name)
...
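Taken together, the patched test no longer routes the forward pass through framework._dygraph_tracer().trace_op; it calls the op directly via _C_ops with positional inputs/outputs and the flattened attrs. A hedged, condensed sketch of the calc_dygraph_output flow (the helper methods are the test's own, shown above):

import paddle.fluid as fluid
from paddle import _C_ops

def run_forward(test, place):
    # Condensed restatement of calc_dygraph_output: build the program desc and
    # attrs, then invoke run_program directly.
    test.program_desc, test.fwd_op_num = test.get_program_desc()
    test.attrs = test.prepare_attrs()
    with fluid.dygraph.guard(place):
        inputs = test.prepare_dygraph_input(place)
        outputs = test.prepare_dygraph_output()
        _C_ops.run_program(inputs['X'], inputs['Params'], outputs['Out'],
                           outputs['OutScope'], outputs['DOut'], *test.attrs)
        return outputs['Out']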