Unverified commit a0631364, authored by xiongkun and committed by GitHub

Fix test calc gradient (#37672)

* add scope_guard

* 1. fix control flow cases 2. fix calc_gradient
Parent: 74fdba7c
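The scope_guard part of this change is about test isolation: the standalone executor syncs variables from the process-wide global scope into its VariableScope (see the "Sync Variable" logging and the TODO about pre-existing names in the hunks below), so a variable left behind by an earlier test can collide with a later variable of the same name. Running the test body against a fresh paddle.static.Scope() avoids that. The snippet below is a minimal sketch of this mechanism, not code from the PR; it assumes only the public paddle.static scope APIs, and the assertions are illustrative.

import paddle

paddle.enable_static()

# Simulate something an earlier test left behind in the shared global scope.
paddle.static.global_scope().var('x')
assert paddle.static.global_scope().find_var('x') is not None

# Inside scope_guard, executors resolve variables against the fresh Scope,
# which starts out empty, so the leftover 'x' cannot collide with a new one.
with paddle.static.scope_guard(paddle.static.Scope()):
    assert paddle.static.global_scope().find_var('x') is None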
@@ -16,6 +16,9 @@
 #include "paddle/fluid/framework/executor_gc_helper.h"
 #include "paddle/fluid/framework/new_executor/data_transfer.h"
+#include "paddle/fluid/operators/controlflow/conditional_block_op_helper.h"
+#include "paddle/fluid/operators/controlflow/recurrent_op_helper.h"
+#include "paddle/fluid/operators/controlflow/while_op_helper.h"
 namespace paddle {
 namespace framework {
@@ -127,6 +130,9 @@ void build_variable_scope(const framework::BlockDesc& block,
   for (auto& var_desc : block.AllVars()) {
     auto var_name = var_desc->Name();
+    // TODO(xiongkun): user may create a variable with a name that exists before.
+    // Under such circumstances, we should raise an error. Currently we can't
+    // get the var_desc of startup_program, so leave it for later.
     if (var_name == framework::kEmptyVarName) {
       continue;
     }
@@ -149,7 +155,7 @@
 }
 void create_all_ops(const framework::BlockDesc& block,
-                    std::vector<std::shared_ptr<OperatorBase>>* ops) {
+                    std::vector<std::unique_ptr<OperatorBase>>* ops) {
   for (auto& op : block.AllOps()) {
     VLOG(3) << "CreateOp from : " << op->Type();
@@ -164,7 +170,7 @@ void create_all_ops(const framework::BlockDesc& block,
     }
     auto op_base =
         info.Creator()(op->Type(), inputs_names, outputs_names, op_attr_map);
-    ops->emplace_back(std::shared_ptr<OperatorBase>(op_base));
+    ops->emplace_back(std::unique_ptr<OperatorBase>(op_base));
   }
 }
@@ -260,10 +266,24 @@ void build_op_func_list(const platform::Place& place,
   Scope* local_scope = use_local_scope ? var_scope->GetMutableLocalScope()
                                        : var_scope->GetMutableScope();
   auto& all_op_kernels = OperatorWithKernel::AllOpKernels();
+  std::vector<std::unique_ptr<OperatorBase>>
+      ops_unique;  // its elements will be moved to vec_func_list
+  // Step 1: create all ops for current block.
+  create_all_ops(block, &ops_unique);
+  // If gc is enabled and block size > 1
+  const ProgramDesc& main_program = *block.Program();
+  operators::PrepareSafeEagerDeletionOnConditionalOpAndConditionalGradOp(
+      main_program, block.ID(), ops_unique);
+  operators::PrepareSafeEagerDeletionOnWhileOpAndWhileGradOp(
+      main_program, block.ID(), ops_unique);
+  operators::PrepareSafeEagerDeletionOnRecurrentOpAndRecurrentGradOp(
+      main_program, block.ID(), ops_unique);
   std::vector<std::shared_ptr<OperatorBase>>
       ops;  // its elements will be moved to vec_func_list
-  // Step 1: create all ops for current block.
-  create_all_ops(block, &ops);
+  for (auto& op_unique : ops_unique) {
+    ops.emplace_back(std::move(op_unique));
+  }
   auto unused_var_map = get_unused_vars(block, ops);
   for (size_t i = 0; i < ops.size(); ++i) {
......
@@ -33,6 +33,7 @@ StandaloneExecutor::StandaloneExecutor(const platform::Place& place,
   if (scope) {
     auto name_list = scope->LocalVarNames();
     for (auto name : name_list) {
+      VLOG(4) << "Sync Variable from variable scope: " << name;
      auto v = scope->Var(name);
      if (!global_scope_.HasVar(name)) {
        global_scope_.AddVar(name, *v);
@@ -87,8 +88,9 @@ void StandaloneExecutor::BuildVariableScope(const framework::ProgramDesc& pdesc,
     if (var->Name() == framework::kEmptyVarName) {
       continue;
     }
     if (!var_scope->HasVar(var->Name())) {
+      VLOG(4) << "Create variable from startup_prog: "
+              << var->Proto()->SerializeAsString();
       var_scope->AddVar(var->Name(), var);
     }
   }
......
@@ -14,6 +14,7 @@
 from __future__ import print_function
+import paddle
 import unittest
 import numpy as np
 import paddle.fluid as fluid
@@ -83,19 +84,20 @@ class TestDoubleGrad(unittest.TestCase):
 class TestGradientWithPrune(unittest.TestCase):
     def test_prune(self):
-        x = fluid.data(name='x', shape=[3], dtype='float32')
-        x.stop_gradient = False
-        x1, x2, x3 = fluid.layers.split(x, dim=0, num_or_sections=3)
-        y = x1 * 2
-        x1_grad = fluid.gradients(y, x)
+        with paddle.fluid.scope_guard(paddle.static.Scope()):
+            x = fluid.data(name='x', shape=[3], dtype='float32')
+            x.stop_gradient = False
+            x1, x2, x3 = fluid.layers.split(x, dim=0, num_or_sections=3)
+            y = x1 * 2
+            x1_grad = fluid.gradients(y, x)
-        exe = fluid.Executor(fluid.CPUPlace())
-        main = fluid.default_main_program()
-        exe.run(fluid.default_startup_program())
-        out = exe.run(main,
-                      feed={'x': np.ones([3]).astype('float32')},
-                      fetch_list=[x1_grad])
-        self.assertTrue(np.array_equal(out[0], [2., 0., 0.]))
+            exe = fluid.Executor(fluid.CPUPlace())
+            main = fluid.default_main_program()
+            exe.run(fluid.default_startup_program())
+            out = exe.run(main,
+                          feed={'x': np.ones([3]).astype('float32')},
+                          fetch_list=[x1_grad])
+            self.assertTrue(np.array_equal(out[0], [2., 0., 0.]))
 if __name__ == "__main__":
......
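For other static-graph unit tests that need the same isolation, one way to package the pattern is a small context manager that pairs fresh Programs with a fresh Scope. This is a hypothetical helper, not part of this PR (the PR itself only adds the scope_guard); the name isolated_static_test is made up, and only documented paddle.static APIs are used.

import contextlib

import numpy as np
import paddle

paddle.enable_static()


@contextlib.contextmanager
def isolated_static_test():
    # Hypothetical helper: give the wrapped block its own main/startup Program
    # and its own Scope so it cannot see ops or variables from other tests.
    main_prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog, startup_prog):
        with paddle.static.scope_guard(paddle.static.Scope()):
            yield main_prog, startup_prog


with isolated_static_test() as (main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[3], dtype='float32')
    x.stop_gradient = False
    y = x * 2.0
    x_grad, = paddle.static.gradients(y, x)

    exe = paddle.static.Executor(paddle.CPUPlace())
    exe.run(startup_prog)
    out, = exe.run(main_prog,
                   feed={'x': np.ones([3], dtype='float32')},
                   fetch_list=[x_grad])
    print(out)  # expected [2. 2. 2.], since dy/dx of y = 2 * x is 2 everywhere

The program_guard is a common companion to the scope_guard used in the test fix above: it keeps each test's ops out of the shared default main program, just as the fresh Scope keeps its variables out of the shared global scope.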