Unverified commit bf222f19, authored by Yiqun Liu, committed by GitHub

Use sub scope in tensor_array_to_tensor op. (#14524)

test=develop
Parent 840c1b29
@@ -392,8 +392,8 @@ void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope,
   int64_t max_memory_size = GetEagerDeletionThreshold();
   std::unique_ptr<GarbageCollector<Tensor>> gc;
-  // WhileOp would set keep_kids to false
-  // WhileGradOp would need the scopes created in WhileOp
+  // WhileOp would set keep_kids to true,
+  // because WhileGradOp needs the scopes created in WhileOp.
   // Perhaps, we should not perform eager deletion in WhileOp
   // The scopes and variables created by WhileOp would be deleted
   // in WhileGradOp.
...
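The corrected comment above encodes a real lifetime constraint: the child scopes a while-style op creates during the forward pass must survive until the gradient op consumes them, so the forward op keeps its kid scopes instead of dropping them. The standalone C++ sketch below models that idea with a toy Scope class; NewScope(), DropKids(), and keep_kids only loosely mirror the Paddle framework names and are not the real API.

```cpp
// Toy model of a scope tree (not Paddle's framework::Scope).
#include <iostream>
#include <memory>
#include <vector>

class Scope {
 public:
  // Create a child scope owned by this scope (loosely modelled on NewScope()).
  Scope &NewScope() {
    kids_.push_back(std::make_unique<Scope>());
    return *kids_.back();
  }
  // Drop all child scopes (loosely modelled on DropKids()).
  void DropKids() { kids_.clear(); }
  size_t KidCount() const { return kids_.size(); }

 private:
  std::vector<std::unique_ptr<Scope>> kids_;
};

int main() {
  Scope global;
  // Forward pass: a While-style op creates one child scope per loop step.
  for (int step = 0; step < 3; ++step) {
    global.NewScope();
  }

  // keep_kids == true: the step scopes stay alive for the gradient op.
  bool keep_kids = true;
  if (!keep_kids) {
    global.DropKids();  // with keep_kids == false the step scopes would be gone
  }

  // Backward pass: a WhileGrad-style op expects the forward step scopes.
  std::cout << "step scopes still available for backward: " << global.KidCount()
            << "\n";  // prints 3 because keep_kids is true
  return 0;
}
```

With keep_kids set to false the forward step scopes would already have been dropped by the time the gradient op runs, which is exactly the situation the updated comment warns against.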
@@ -174,7 +174,6 @@ bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
   inference::Timer timer;
   timer.tic();
   // set feed variable
-  std::vector<framework::LoDTensor> feeds;
   framework::Scope *scope = sub_scope_ ? sub_scope_ : scope_.get();
   if (!SetFeed(inputs, scope)) {
     LOG(ERROR) << "fail to set feed";
...
@@ -138,7 +138,6 @@ bool NativePaddlePredictor::Run(const std::vector<PaddleTensor> &inputs,
   Timer timer;
   timer.tic();
   // set feed variable
-  std::vector<framework::LoDTensor> feeds;
   framework::Scope *scope = sub_scope_ != nullptr ? sub_scope_ : scope_.get();
   if (!SetFeed(inputs, scope)) {
     LOG(ERROR) << "fail to set feed";
...
@@ -106,9 +106,9 @@ class LoDTensorArray2TensorOp : public framework::OperatorBase {
     out_inx_dim[0] = inx.size();
     out_inx.Resize(out_inx_dim);
+    auto &local_scope = scope.NewScope();
     std::string var_name = "out_index";
-    framework::Variable *tmp_index_var =
-        const_cast<framework::Scope &>(scope).Var(var_name);
+    framework::Variable *tmp_index_var = local_scope.Var(var_name);
     auto &tmp_index_tensor =
         *(tmp_index_var->GetMutable<paddle::framework::LoDTensor>());
     tmp_index_tensor.Resize(out_inx_dim);
@@ -128,12 +128,12 @@ class LoDTensorArray2TensorOp : public framework::OperatorBase {
     out_dims[axis] = out_dim_sum;
     out.Resize(out_dims);
-    LodTensorArray2LodTensorVector(scope, base_name, Input("X"), &names);
-    // Invoke Reshape Op
+    LodTensorArray2LodTensorVector(local_scope, base_name, Input("X"), &names);
+    // Invoke concat Op
     auto concat_op = framework::OpRegistry::CreateOp(
         "concat", {{"X", names}}, {{"Out", {Output("Out")}}}, attrs);
-    concat_op->Run(scope, place);
+    concat_op->Run(local_scope, place);
   }
 };
...
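The LoDTensorArray2TensorOp change above follows a common pattern: an operator that receives the caller's scope as a const reference should not const_cast it to create temporaries (here the "out_index" variable and the inputs of the internal concat op), but should allocate them in a fresh child scope and run the nested op against that child. Below is a minimal standalone sketch of the idea; Scope, Variable, and RunImpl are toy stand-ins for illustration, not Paddle's actual framework classes.

```cpp
// Toy sketch of the sub-scope pattern (not Paddle's real Scope/OperatorBase).
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Variable {};  // stand-in for framework::Variable

class Scope {
 public:
  // Child scopes are owned by the parent; NewScope() is callable on a const
  // reference here, mirroring how the patch calls scope.NewScope() in RunImpl.
  Scope &NewScope() const {
    kids_.push_back(std::make_unique<Scope>());
    return *kids_.back();
  }
  Variable *Var(const std::string &name) { return &vars_[name]; }
  bool Has(const std::string &name) const { return vars_.count(name) > 0; }

 private:
  std::map<std::string, Variable> vars_;
  mutable std::vector<std::unique_ptr<Scope>> kids_;
};

// The operator body receives the caller's scope as const.
void RunImpl(const Scope &scope) {
  // Old approach: const_cast<Scope &>(scope).Var("out_index") mutates the
  // caller's scope. New approach: keep the temporary in a child scope.
  auto &local_scope = scope.NewScope();
  local_scope.Var("out_index");
  std::cout << "temporary in local scope: " << local_scope.Has("out_index")
            << ", leaked into parent scope: " << scope.Has("out_index") << "\n";
}

int main() {
  Scope parent;
  RunImpl(parent);  // prints "temporary in local scope: 1, leaked into parent scope: 0"
  return 0;
}
```

Keeping temporaries in a sub scope leaves the parent scope seen by the rest of the program unchanged, and the temporaries can later be released simply by deleting the child scope.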