/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/framework/executor.h"

#include "paddle/fluid/framework/feed_fetch_method.h"
#include "paddle/fluid/framework/lod_rank_table.h"
#include "paddle/fluid/framework/lod_tensor_array.h"
#include "paddle/fluid/framework/ngraph_operator.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/reader.h"
#include "paddle/fluid/operators/detail/macros.h"
#include "paddle/fluid/platform/place.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(use_mkldnn, false, "Use MKLDNN to run");
DEFINE_bool(use_ngraph, false, "Use NGRAPH to run");

namespace paddle {
namespace framework {
namespace {
// Block ids start from 0. This id (-1) is used to represent the code block
// that wraps the first block, block 0.
int kProgramId = -1;
}  // namespace

ExecutorPrepareContext::ExecutorPrepareContext(
    const framework::ProgramDesc& prog, size_t block_id)
    : prog_(prog), block_id_(block_id) {
  if (GetEagerDeletionThreshold() >= 0) {
    ref_cnts_ = GetNonPersistableReferenceCount<int>(prog_, block_id_);
  }
}

ExecutorPrepareContext::~ExecutorPrepareContext() {
  VLOG(50) << "destroy ExecutorPrepareContext";
}

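// Decrease the reference count of every input/output variable of `op`; any
// tensor whose count drops to zero is handed to the garbage collector `gc`
// for deletion.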
template <typename RefCntMap>
static void DeleteUnusedTensors(const Scope& scope, const OperatorBase* op,
                                GarbageCollector<Tensor>* gc,
                                RefCntMap* ref_cnts) {
  std::unordered_set<Tensor*> erase_tensors;

  auto handler = [&](const VariableNameMap& name_map) {
    for (auto& name_pair : name_map) {
      for (auto& name : name_pair.second) {
        auto it = ref_cnts->find(name);
        if (it == ref_cnts->end()) continue;
        if ((it->second)-- == 1) {
          auto* var = scope.FindVar(name);
          if (var != nullptr) {
            VLOG(100) << "Erase tensor \'" << name << "\'";
            if (var->IsType<LoDTensor>()) {
              erase_tensors.insert(var->GetMutable<LoDTensor>());
            } else if (var->IsType<SelectedRows>()) {
              erase_tensors.insert(
                  var->GetMutable<SelectedRows>()->mutable_value());
            }
          }
        }
      }
    }
  };

  handler(op->Inputs());
  handler(op->Outputs());

  if (!erase_tensors.empty()) {
    gc->Add(erase_tensors);
  }
}

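// Called when FLAGS_use_ngraph is set: collapse each interval of
// nGraph-fusable operators in ctx->ops_ into a single FusedOperator. Only
// effective when built with PADDLE_WITH_NGRAPH; otherwise a warning is logged.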
static void EnableFusedOp(ExecutorPrepareContext* ctx) {
#ifdef PADDLE_WITH_NGRAPH
  VLOG(3) << "use_ngraph=True";
  auto intervals = FusedOperator::FusedOpIntervals(&ctx->ops_);
  for (auto& interval : intervals) {
    auto* fused_op = new FusedOperator(ctx->prog_, ctx->block_id_,
                                       interval.at(0), interval.at(1));
    *interval[0] = std::unique_ptr<OperatorBase>(fused_op);
  }
  for (auto it = intervals.rbegin(); it != intervals.rend(); ++it) {
    ctx->ops_.erase(it->at(0) + 1, it->at(1));
  }
#else
  LOG(WARNING)
      << "'NGRAPH' is not supported, Please re-compile with WITH_NGRAPH option";
#endif
}

Executor::Executor(const platform::Place& place) : place_(place) {}

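// Send a "complete" notification over RPC (e.g. to a parameter server) so the
// remote side knows this executor is done. A no-op unless compiled with
// PADDLE_WITH_DISTRIBUTE.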
void Executor::Close() {
#ifdef PADDLE_WITH_DISTRIBUTE
  // TODO(typhoonzero): complete message will need to use real trainer_id,
  // except 0.
  ::paddle::operators::distributed::RPCClient::GetInstance<
      ::paddle::operators::distributed::GRPCClient>(0)
      ->SendComplete();
#endif
}

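// Allocate the holder of `var` that matches `var_type`. RAW variables are
// left for the operator to initialize; an unknown type raises an error.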
void InitializeVariable(Variable* var, proto::VarType::Type var_type) {
  if (var_type == proto::VarType::LOD_TENSOR) {
    var->GetMutable<LoDTensor>();
  } else if (var_type == proto::VarType::SELECTED_ROWS) {
    var->GetMutable<SelectedRows>();
  } else if (var_type == proto::VarType::FEED_MINIBATCH) {
    var->GetMutable<FeedFetchList>();
  } else if (var_type == proto::VarType::FETCH_LIST) {
    var->GetMutable<FeedFetchList>();
  } else if (var_type == proto::VarType::STEP_SCOPES) {
    var->GetMutable<std::vector<framework::Scope*>>();
  } else if (var_type == proto::VarType::LOD_RANK_TABLE) {
    var->GetMutable<LoDRankTable>();
  } else if (var_type == proto::VarType::LOD_TENSOR_ARRAY) {
    var->GetMutable<LoDTensorArray>();
  } else if (var_type == proto::VarType::PLACE_LIST) {
    var->GetMutable<platform::PlaceList>();
  } else if (var_type == proto::VarType::READER) {
    var->GetMutable<ReaderHolder>();
  } else if (var_type == proto::VarType::RAW) {
    // GetMutable will be called in operator
  } else {
    PADDLE_THROW(
        "Variable type %d is not in "
        "[LOD_TENSOR, SELECTED_ROWS, FEED_MINIBATCH, FETCH_LIST, "
        "LOD_RANK_TABLE, PLACE_LIST, READER, RAW]",
        var_type);
  }
}

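// Instantiate every variable declared in the block: persistable variables are
// created in the outermost (ancestor) scope so they survive a single run,
// while non-persistable ones live in the current scope.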
void Executor::CreateVariables(const ProgramDesc& pdesc, Scope* scope,
                               int block_id) {
  auto& global_block = pdesc.Block(block_id);

  const Scope* ancestor_scope = scope;
  while (ancestor_scope->parent()) {
    ancestor_scope = ancestor_scope->parent();
  }

  if (ancestor_scope != scope) {
    for (auto& var : global_block.AllVars()) {
      if (var->Name() == framework::kEmptyVarName) {
        continue;
      }

      if (var->Persistable()) {
        auto* ptr = const_cast<Scope*>(ancestor_scope)->Var(var->Name());
        InitializeVariable(ptr, var->GetType());
        VLOG(30) << "Create Variable " << var->Name()
                 << " global, which pointer is " << ptr;
      } else {
        auto* ptr = scope->Var(var->Name());
        InitializeVariable(ptr, var->GetType());
        VLOG(30) << "Create Variable " << var->Name()
                 << " locally, which pointer is " << ptr;
      }
    }
  } else {
    for (auto& var : global_block.AllVars()) {
      auto* ptr = scope->Var(var->Name());
      InitializeVariable(ptr, var->GetType());
      VLOG(30) << "Create variable " << var->Name() << ", which pointer is "
               << ptr;
    }
  }
}

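// Prepare the given block of `pdesc` and execute it immediately in `scope`.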
void Executor::Run(const ProgramDesc& pdesc, Scope* scope, int block_id,
                   bool create_local_scope, bool create_vars) {
  platform::RecordBlock b(block_id);
  if (FLAGS_use_mkldnn) EnableMKLDNN(pdesc);
  auto ctx = Prepare(pdesc, block_id);
  RunPreparedContext(ctx.get(), scope, create_local_scope, create_vars);
}

// Check whether the block already has feed operators and feed_holder.
// Return false if the block does not have any feed operators.
// If some feed operators have been prepended to the block, check that
// the info contained in these feed operators matches the feed_targets
// and feed_holder_name. Raise exception when any mismatch is found.
// Return true if the block has feed operators and holder of matching info.
static bool has_feed_operators(
    const BlockDesc& block,
    const std::map<std::string, const LoDTensor*>& feed_targets,
    const std::string& feed_holder_name) {
  size_t feed_count = 0;
  for (auto* op : block.AllOps()) {
    if (op->Type() == kFeedOpType) {
      feed_count++;
      // The input variable of a feed op should be named feed_holder_name.
      PADDLE_ENFORCE_EQ(op->Input("X")[0], feed_holder_name,
                        "Input to feed op should be '%s'", feed_holder_name);
      std::string feed_target_name = op->Output("Out")[0];
      PADDLE_ENFORCE(
          feed_targets.find(feed_target_name) != feed_targets.end(),
          "Feed operator output name '%s' cannot be found in 'feed_targets'",
          feed_target_name);
    }
  }

  if (feed_count > 0) {
    PADDLE_ENFORCE_EQ(
        feed_count, feed_targets.size(),
        "The number of feed operators should match 'feed_targets'");

    if (!feed_holder_name.empty()) {
      // When feed operators are present, so should the feed_holder.
      auto var = block.FindVar(feed_holder_name);
      PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable",
                              feed_holder_name);
      PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FEED_MINIBATCH,
                        "'%s' variable should be 'FEED_MINIBATCH' type",
                        feed_holder_name);
    }
  }

  return feed_count > 0;
}

// Check whether the block already has fetch operators and fetch_holder.
// Return false if the block does not have any fetch operators.
// If some fetch operators have been appended to the block, check that
// the info contained in these fetch operators matches the fetch_targets
// and fetch_holder_name. Raise exception when any mismatch is found.
// Return true if the block has fetch operators and holder of matching info.
static bool has_fetch_operators(
    const BlockDesc& block,
    const std::map<std::string, LoDTensor*>& fetch_targets,
    const std::string& fetch_holder_name) {
  size_t fetch_count = 0;
  for (auto* op : block.AllOps()) {
    if (op->Type() == kFetchOpType) {
      fetch_count++;
      // The output variable of a fetch op should be named fetch_holder_name.
      PADDLE_ENFORCE_EQ(op->Output("Out")[0], fetch_holder_name,
                        "Output of fetch op should be '%s'", fetch_holder_name);
      std::string fetch_target_name = op->Input("X")[0];
      PADDLE_ENFORCE(
          fetch_targets.find(fetch_target_name) != fetch_targets.end(),
          "Fetch operator input name '%s' cannot be found in 'fetch_targets'",
          fetch_target_name);
    }
  }

  if (fetch_count > 0) {
    PADDLE_ENFORCE_EQ(
        fetch_count, fetch_targets.size(),
        "The number of fetch operators should match 'fetch_targets'");

    if (!fetch_holder_name.empty()) {
      // When fetch operators are present, so should the fetch_holder.
      auto var = block.FindVar(fetch_holder_name);
      PADDLE_ENFORCE_NOT_NULL(var, "Block should already have a '%s' variable",
                              fetch_holder_name);
      PADDLE_ENFORCE_EQ(var->GetType(), proto::VarType::FETCH_LIST,
                        "'%s' variable should be 'FETCH_LIST' type",
                        fetch_holder_name);
    }
  }

  return fetch_count > 0;
}

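// Run a whole program with explicit feed/fetch targets. If the program does
// not already contain matching feed/fetch operators, work on a copy and
// insert the missing holder variables and operators before running block 0.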
void Executor::Run(const ProgramDesc& program, Scope* scope,
                   std::map<std::string, const LoDTensor*>* feed_targets,
                   std::map<std::string, LoDTensor*>* fetch_targets,
                   bool create_local_scope, bool create_vars,
                   const std::string& feed_holder_name,
                   const std::string& fetch_holder_name) {
  platform::RecordBlock b(kProgramId);
  if (FLAGS_use_mkldnn) EnableMKLDNN(program);
  bool has_feed_ops =
      has_feed_operators(program.Block(0), *feed_targets, feed_holder_name);
  bool has_fetch_ops =
      has_fetch_operators(program.Block(0), *fetch_targets, fetch_holder_name);

  ProgramDesc* copy_program = const_cast<ProgramDesc*>(&program);
  std::unique_ptr<ProgramDesc> unique_ptr_of_copy_program;
  if (!has_feed_ops || !has_fetch_ops) {
    unique_ptr_of_copy_program.reset(new ProgramDesc(program));
    copy_program = unique_ptr_of_copy_program.get();
  }
  auto* global_block = copy_program->MutableBlock(0);

  if (!has_feed_ops) {
    // create feed_holder variable
    auto* feed_holder = global_block->Var(feed_holder_name);
    feed_holder->SetType(proto::VarType::FEED_MINIBATCH);
    feed_holder->SetPersistable(true);

    int i = 0;
    for (auto& feed_target : (*feed_targets)) {
      std::string var_name = feed_target.first;
      VLOG(30) << "feed target's name: " << var_name;

      // prepend feed op
      auto* op = global_block->PrependOp();
      op->SetType(kFeedOpType);
      op->SetInput("X", {feed_holder_name});
      op->SetOutput("Out", {var_name});
      op->SetAttr("col", {static_cast<int>(i)});
      op->CheckAttrs();

      i++;
    }
  }

  if (!has_fetch_ops) {
    // create fetch_holder variable
    auto* fetch_holder = global_block->Var(fetch_holder_name);
    fetch_holder->SetType(proto::VarType::FETCH_LIST);
    fetch_holder->SetPersistable(true);

    int i = 0;
    for (auto& fetch_target : (*fetch_targets)) {
      std::string var_name = fetch_target.first;
      VLOG(30) << "fetch target's name: " << var_name;

      // append fetch op
      auto* op = global_block->AppendOp();
      op->SetType(kFetchOpType);
      op->SetInput("X", {var_name});
      op->SetOutput("Out", {fetch_holder_name});
      op->SetAttr("col", {static_cast<int>(i)});
      op->CheckAttrs();

      i++;
    }
  }

  auto ctx = Prepare(*copy_program, 0);
  RunPreparedContext(ctx.get(), scope, feed_targets, fetch_targets,
                     create_local_scope, create_vars, feed_holder_name,
                     fetch_holder_name);
}

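// Build an ExecutorPrepareContext for one block by creating all of its
// operators up front; the context can then be executed repeatedly with
// RunPreparedContext without paying the creation cost again.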
std::unique_ptr<ExecutorPrepareContext> Executor::Prepare(
    const ProgramDesc& program, int block_id) {
  std::unique_ptr<ExecutorPrepareContext> ctx(
      new ExecutorPrepareContext(program, block_id));
  PADDLE_ENFORCE_LT(static_cast<size_t>(block_id), program.Size());
  auto& block = program.Block(block_id);
  for (auto& op_desc : block.AllOps()) {
    ctx->ops_.push_back(OpRegistry::CreateOp(*op_desc));
  }
  if (FLAGS_use_ngraph) EnableFusedOp(ctx.get());
  return ctx;
}

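// Batch version of Prepare: build one context for each block id in
// `block_ids` of the same program.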
std::vector<std::shared_ptr<ExecutorPrepareContext>> Executor::Prepare(
    const ProgramDesc& program, const std::vector<int>& block_ids) {
  std::vector<std::shared_ptr<ExecutorPrepareContext>> result;
  for (auto& bid : block_ids) {
    auto* ctx = new ExecutorPrepareContext(program, bid);
    PADDLE_ENFORCE_LT(static_cast<size_t>(bid), program.Size());
    auto& block = program.Block(bid);
    for (auto& op_desc : block.AllOps()) {
      ctx->ops_.push_back(OpRegistry::CreateOp(*op_desc));
    }
    result.push_back(std::shared_ptr<ExecutorPrepareContext>(ctx));
  }
  return result;
}

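// Execute a prepared block: optionally create a local scope and the block's
// variables, run every operator in order, and, when eager deletion is
// enabled, release tensors through a garbage collector as soon as their
// reference count reaches zero.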
void Executor::RunPreparedContext(ExecutorPrepareContext* ctx, Scope* scope,
                                  bool create_local_scope, bool create_vars,
                                  bool keep_kids) {
  PADDLE_ENFORCE_NOT_NULL(scope);
  Scope* local_scope = scope;
  if (create_vars) {
    if (create_local_scope) {
      local_scope = &scope->NewScope();
    }
    CreateVariables(ctx->prog_, local_scope, ctx->block_id_);
  }

  int64_t max_memory_size = GetEagerDeletionThreshold();
  std::unique_ptr<GarbageCollector<Tensor>> gc;
  // WhileOp would set keep_kids to false
  // WhileGradOp would need the scopes created in WhileOp
  // Perhaps, we should not perform eager deletion in WhileOp
  // The scopes and variables created by WhileOp would be deleted
  // in WhileGradOp.
  if (max_memory_size >= 0 && !keep_kids) {
    ctx->ResetReferenceCount();
#ifdef PADDLE_WITH_CUDA
    if (platform::is_gpu_place(place_)) {
      gc.reset(new DefaultStreamGarbageCollector<Tensor>(
          boost::get<platform::CUDAPlace>(place_), max_memory_size));
    } else {
#endif
      gc.reset(new CPUGarbageCollector<Tensor>(
          boost::get<platform::CPUPlace>(place_), max_memory_size));
#ifdef PADDLE_WITH_CUDA
    }
#endif
  }

  for (auto& op : ctx->ops_) {
    op->Run(*local_scope, place_);

    if (gc != nullptr) {
      DeleteUnusedTensors(*local_scope, op.get(), gc.get(),
                          &(ctx->cur_ref_cnts_));
    }

    if (FLAGS_benchmark) {
      VLOG(20) << "Memory used after operator " + op->Type() + " running: "
               << memory::memory_usage(place_);
    }
  }

  if (gc != nullptr) {
    gc->Wait();
  } else {
    platform::DeviceContextPool::Instance().Get(place_)->Wait();
  }

  if (local_scope != scope) {
    scope->DeleteScope(local_scope);
  } else {
    if (!keep_kids) {
      // By default, we should delete all kid scopes after the executor runs,
      // because some operators may create local scopes while running, such as
      // while_op. But when while_op also creates a local executor to run its
      // sub-block, the sub scopes it created should not be dropped
      // immediately, because while_grad_op will use some variables created
      // during the while_op run, so we need to keep the kids and wait for the
      // outer executor to drop them.
      scope->DropKids();
    }
  }

  if (FLAGS_benchmark) {
    VLOG(20) << "-------------------------------------------------------";
    VLOG(20) << "Memory used after deleting local scope: "
             << memory::memory_usage(place_);
    VLOG(20) << "-------------------------------------------------------";
  }
}

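// Run a prepared context whose program already contains matching feed/fetch
// operators: copy the feed_targets into the feed holder, execute the block,
// then read the results out of the fetch holder into fetch_targets.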
void Executor::RunPreparedContext(
    ExecutorPrepareContext* ctx, Scope* scope,
    std::map<std::string, const LoDTensor*>* feed_targets,
    std::map<std::string, LoDTensor*>* fetch_targets, bool create_local_scope,
    bool create_vars, const std::string& feed_holder_name,
    const std::string& fetch_holder_name) {
  auto& global_block = ctx->prog_.Block(ctx->block_id_);

  PADDLE_ENFORCE(
      has_feed_operators(global_block, *feed_targets, feed_holder_name),
      "Program in ExecutorPrepareContext should have feed_ops.");
  PADDLE_ENFORCE(
      has_fetch_operators(global_block, *fetch_targets, fetch_holder_name),
      "Program in the prepared context should have fetch_ops.");

  // map the data of feed_targets to feed_holder
  for (auto* op : global_block.AllOps()) {
    if (op->Type() == kFeedOpType) {
      std::string feed_target_name = op->Output("Out")[0];
      int idx = boost::get<int>(op->GetAttr("col"));
      SetFeedVariable(scope, *(*feed_targets)[feed_target_name],
                      feed_holder_name, idx);
    }
  }

  RunPreparedContext(ctx, scope, create_local_scope, create_vars);

  // obtain the data of fetch_targets from fetch_holder
  for (auto* op : global_block.AllOps()) {
    if (op->Type() == kFetchOpType) {
      std::string fetch_target_name = op->Input("X")[0];
      int idx = boost::get<int>(op->GetAttr("col"));
      *(*fetch_targets)[fetch_target_name] =
          GetFetchVariable(*scope, fetch_holder_name, idx);
    }
  }
}

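// Flip the `use_mkldnn` attribute to true on every operator that declares it,
// across all blocks of the program. Logs a warning instead when Paddle was
// built without PADDLE_WITH_MKLDNN.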
void Executor::EnableMKLDNN(const ProgramDesc& program) {
#ifdef PADDLE_WITH_MKLDNN
  VLOG(30) << "use_mkldnn=True";
  for (size_t bid = 0; bid < program.Size(); ++bid) {
    auto* block = const_cast<ProgramDesc&>(program).MutableBlock(bid);
    for (auto* op : block->AllOps()) {
      if (op->HasAttr("use_mkldnn")) {
        op->SetAttr("use_mkldnn", true);
      }
    }
  }
#else
  LOG(WARNING)
      << "'MKLDNN' is not supported, Please re-compile with WITH_MKLDNN option";
#endif
}
}  // namespace framework
}  // namespace paddle