Unverified commit 7c555f4e authored by 0x45f, committed by GitHub

Fix test_run_program_op.py (#41141)

Parent 1faefc93
@@ -185,7 +185,13 @@ inline void RunProgramAPI(
VLOG(2) << "RunProgramOpKernel Compute";
auto start_op_index = BOOST_GET_CONST(int64_t, attrs.at("start_op_index"));
auto end_op_index = BOOST_GET_CONST(int64_t, attrs.at("end_op_index"));
- auto is_test = BOOST_GET_CONST(bool, attrs.at("is_test"));
+ // In the original run_program OP, the default value of the is_test
+ // attribute is false, we should check if there is is_test parameter
+ // in attrs
+ auto is_test = false;
+ if (attrs.count("is_test")) {
+   is_test = BOOST_GET_CONST(bool, attrs.at("is_test"));
+ }
auto program_id = BOOST_GET_CONST(int64_t, attrs.at("program_id"));
// NOTE(chenweihang): In order not to add new variable type, use vector
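Note: the hunk above turns `is_test` from a required attribute into an optional one that defaults to `false`, so eager-mode callers that never set it no longer fail on the lookup. A minimal Python sketch of the same defensive-lookup pattern (illustrative only; `attrs` here is a plain dict, not Paddle's attribute map):

```python
# Hypothetical attribute dict standing in for the op's attrs map.
attrs = {"start_op_index": 0, "end_op_index": 5, "program_id": 42}

# Old behaviour: a hard lookup such as attrs["is_test"] raises KeyError
# whenever the caller does not pass is_test at all.
# New behaviour: fall back to the op's default value, False.
is_test = attrs.get("is_test", False)
print(is_test)  # False when the attribute is absent
```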
@@ -447,12 +453,11 @@ class GradNodeRunProgram : public egr::GradNodeBase {
const std::vector<paddle::experimental::Tensor> &param,
std::vector<paddle::experimental::Tensor> *param_grad) {
for (auto &t : param) {
- auto t_meta = egr::EagerUtils::unsafe_autograd_meta(t);
+ auto t_grad = egr::EagerUtils::unsafe_autograd_meta(t)->Grad();
// In eager mode, the number of param_grad should be the same as
// param, so here an empty Tensor is added for the param with
// stop_gradient=True
- if (t_meta->StopGradient()) {
+ if (!t_grad.defined()) {
param_grad->emplace_back();
} else if (t_grad.is_dense_tensor()) {
param_grad->emplace_back(std::make_shared<phi::DenseTensor>());
......
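The grad-node hunk above switches from inspecting the parameter's `StopGradient` flag to checking whether its gradient tensor is actually defined; either way, an empty Tensor is appended so `param_grad` stays aligned with `param`. A small stand-alone sketch of why a frozen parameter produces no gradient (assumes a recent Paddle build with dynamic graph mode, the default):

```python
import paddle

x = paddle.randn([2, 3])
x.stop_gradient = False   # participates in autograd
w = paddle.randn([3, 4])
w.stop_gradient = True    # frozen parameter: no gradient is produced

loss = paddle.matmul(x, w).sum()
loss.backward()

print(x.grad is not None)  # True  -> gradient is defined
print(w.grad)              # None  -> undefined; the grad node above appends
                           #          an empty Tensor for it instead of skipping
```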
@@ -20,10 +20,12 @@ import numpy as np
import six
import paddle
+ from paddle import _C_ops
import paddle.fluid as fluid
from paddle import compat as cpt
from paddle.fluid import core, framework, executor
from paddle.fluid.layers.utils import _hash_with_id
+ from paddle.fluid.framework import _in_eager_mode_
paddle.enable_static()
@@ -95,11 +97,9 @@ class RunProgramOpTest(unittest.TestCase):
return fluid.default_main_program().desc, fwd_op_num
def prepare_attrs(self):
- return {
- 'global_block': self.program_desc.block(0),
- 'start_op_index': 0,
- 'end_op_index': self.fwd_op_num
- }
+ return ('global_block', self.program_desc.block(0), 'start_op_index', 0,
+         'end_op_index', self.fwd_op_num, 'program_id',
+         _hash_with_id(self.program_desc))
def get_param_grad_names(self):
grad_names = []
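`prepare_attrs` now returns a flat name/value sequence instead of a dict, because the test below forwards it with `*self.attrs` straight into `_C_ops.run_program` rather than through `trace_op`. A hedged sketch of that flattening, using a hypothetical helper:

```python
# Hypothetical helper: flatten an attribute dict into the alternating
# ('name', value, 'name', value, ...) tuple shape returned above.
def flatten_attrs(attrs_dict):
    flat = []
    for name, value in attrs_dict.items():
        flat.extend((name, value))
    return tuple(flat)

attrs = flatten_attrs({"start_op_index": 0,
                       "end_op_index": 5,
                       "program_id": 42})
print(attrs)
# ('start_op_index', 0, 'end_op_index', 5, 'program_id', 42)
# later consumed as: some_op(..., *attrs)
```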
@@ -127,6 +127,10 @@ class RunProgramOpTest(unittest.TestCase):
def prepare_dygraph_input(self, place, return_param_list=False):
def create_var_base(is_input, name, np_value, stop_gradient):
+ if _in_eager_mode_:
+ var = core.eager.Tensor(
+ value=np_value, name=name, place=place, zero_copy=True)
+ else:
var = core.VarBase(
value=np_value, name=name, place=place, zero_copy=True)
var.stop_gradient = stop_gradient
@@ -162,6 +166,9 @@ class RunProgramOpTest(unittest.TestCase):
for name in self.output_names['Out']:
outputs['Out'].append(create_var_base(False, name))
+ if _in_eager_mode_:
+ outputs['OutScope'] = [core.Scope()]
+ else:
outputs['OutScope'] = framework._varbase_creator(
type=core.VarDesc.VarType.STEP_SCOPES,
name="program_out_scope",
@@ -175,34 +182,28 @@ class RunProgramOpTest(unittest.TestCase):
def calc_dygraph_output(self, place):
self.program_desc, self.fwd_op_num = self.get_program_desc()
self.attrs = self.prepare_attrs()
- self.attrs['program_id'] = _hash_with_id(self.program_desc)
with fluid.dygraph.guard(place):
inputs = self.prepare_dygraph_input(place)
outputs = self.prepare_dygraph_output()
- framework._dygraph_tracer().trace_op(
- type=self.op_type,
- inputs=inputs,
- outputs=outputs,
- attrs=self.attrs)
+ _C_ops.run_program(inputs['X'], inputs['Params'], outputs['Out'],
+                    outputs['OutScope'], outputs['DOut'],
+                    *self.attrs)
return outputs['Out']
def calc_dygraph_grad(self, place):
self.program_desc, self.fwd_op_num = self.get_program_desc()
self.attrs = self.prepare_attrs()
- self.attrs['program_id'] = _hash_with_id(self.program_desc)
with fluid.dygraph.guard(place):
# Step 1. run forward
inputs, input_param_list = self.prepare_dygraph_input(place, True)
outputs = self.prepare_dygraph_output()
- framework._dygraph_tracer().trace_op(
- type=self.op_type,
- inputs=inputs,
- outputs=outputs,
- attrs=self.attrs)
+ _C_ops.run_program(inputs['X'], inputs['Params'], outputs['Out'],
+                    outputs['OutScope'], outputs['DOut'],
+                    *self.attrs)
for param in input_param_list:
var_type = self._get_grad_vartype(param.name)
......
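Both `calc_dygraph_output` and `calc_dygraph_grad` now bypass `framework._dygraph_tracer().trace_op` and call the op entry point directly, splatting the flattened attributes after the tensor lists. A toy stand-in for that calling convention (a hypothetical stub, not Paddle's real `run_program`; the C++ side presumably rebuilds a name-to-value map, as the `attrs.at(...)` lookups in the first hunk suggest):

```python
# Toy stub: tensor lists first, then alternating attribute name/value pairs.
def run_program_stub(xs, params, outs, scopes, douts, *attr_pairs):
    attrs = dict(zip(attr_pairs[::2], attr_pairs[1::2]))
    return attrs

attrs = ('start_op_index', 0, 'end_op_index', 5, 'program_id', 42)
print(run_program_stub([], [], [], [], [], *attrs))
# {'start_op_index': 0, 'end_op_index': 5, 'program_id': 42}
```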