/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>
#include <sstream>
#include <string>
#include <vector>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Check whether operators produce NAN/INF or not. It will be "
            "extremely slow, so please use this flag wisely.");

namespace paddle {
namespace framework {

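// Preferred (place, library) combinations for kernel selection, ordered from
// most to least desirable: CUDA with cuDNN, plain CUDA, CPU with MKL-DNN,
// plain CPU.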
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};
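
// Returns the element type held by a LoDTensor or SelectedRows variable;
// throws for any other variable type.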
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return var->Get<framework::LoDTensor>().type();
  } else if (var->IsType<framework::SelectedRows>()) {
    return var->Get<framework::SelectedRows>().value().type();
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

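// Best-effort shape lookup used for debug output: returns DDim({-1}) instead
// of throwing when the variable is missing or uninitialized.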
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(tensor.type());
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(tensor.type());
    }
  } else {
    return "";
  }
}

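// Returns the number of rows of a SelectedRows variable, or -1 if the
// variable is missing or holds a different type.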
static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

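// Resolves every input/output name to a Variable* up front so that
// InferShape and the kernel can avoid repeated scope lookups.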
RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

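// Public entry point for running an operator: selects the CUDA device if
// needed, runs RunImpl (under the profiler when it is enabled), and enriches
// any EnforceNotMet exception with the recorded Python/C++ call stacks.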
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  try {
    VLOG(4) << place << " " << DebugStringEx(&scope);
    if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
      PADDLE_THROW("Cannot run operator on place %s", place);
#else
      auto dev_id = boost::get<platform::CUDAPlace>(place).device;
      platform::SetDeviceId(dev_id);
#endif
    }

    // The profiler has a process-wide mutex, which causes a serious
    // performance issue in concurrency scenarios, so an `if` is used here.
    // Please do not remove the `if`; ask @Superjomn if there is any concern.
    if (platform::IsProfileEnabled()) {
      platform::DeviceContextPool& pool =
          platform::DeviceContextPool::Instance();
      platform::RecordEvent record_event(Type(), pool.Get(place));
      RunImpl(scope, place);
    } else {
      RunImpl(scope, place);
    }

    VLOG(3) << place << " " << DebugStringEx(&scope);
  } catch (platform::EnforceNotMet exception) {
    if (Attrs().count("sub_block") != 0) {
      throw exception;
    }

    auto& callstack = Attr<std::vector<std::string>>(
        OpProtoAndCheckerMaker::OpCreationCallstackAttrName());

    if (callstack.empty()) {
      throw exception;
    }
    std::ostringstream sout;
    sout << "Invoke operator " << Type() << " error.\n";
    sout << "Python Callstacks: \n";
    for (auto& line : callstack) {
      sout << line;
    }
    sout << "C++ Callstacks: \n";
    sout << exception.err_str_;
    exception.err_str_ = sout.str();
    throw exception;
  } catch (...) {
    std::rethrow_exception(std::current_exception());
  }
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

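// Builds a human-readable description of this op. When a scope is supplied,
// each variable is annotated with its dtype, dims, LoD and, for SelectedRows,
// its row count.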
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 ToTypeName(var.Type()));
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 ToTypeName(var->Type()));
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one input", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one output", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

const Variable* ExecutionContext::LegacyInputVar(
    const std::string& name) const {
  auto ipt = op_.Input(name);
  return ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::LegacyOutputVar(const std::string& name) const {
  auto opt = op_.Output(name);
  return opt == kEmptyVarName ? nullptr : scope_.FindVar(opt);
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const Tensor* ExecutionContext::LegacyInput<Tensor>(
    const std::string& name) const {
  return LegacyInput<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) {
    return {};
  }
  const std::vector<Variable*>& vars = it->second;
  std::vector<const Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](Variable* var) -> const Tensor* {
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "should be LoDTensor, but the received type is %s",
                       ToTypeName(var->Type()));
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
const std::vector<const Tensor*> ExecutionContext::LegacyMultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> const Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, ToTypeName(var->Type()));
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
Tensor* ExecutionContext::LegacyOutput<Tensor>(const std::string& name) const {
  return LegacyOutput<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, ToTypeName(var->Type()));
                   return var->GetMutable<LoDTensor>();
                 });
  return res;
}

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

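// InferShapeContext implementation used at run time: shape queries and
// updates operate directly on the variables resolved in the RuntimeContext.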
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope,
                           const RuntimeContext& ctx)
      : op_(op), scope_(scope), ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0) return false;
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one input", name);
    return in[0] != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one output", name);
    return out[0] != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end() || it->second.empty()) {
      return false;
    }
    for (auto& input : it->second) {
      if (input == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end() || it->second.empty()) {
      return false;
    }
    for (auto& output : it->second) {
      if (output == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];

    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", in, out);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second.at(i);
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = out_it->second.at(j);
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void DecreaseLoDLevel(const std::string& in, const std::string& out,
                        size_t i = 0, size_t j = 0) const override {
    PADDLE_THROW("DecreaseLoDLevel is only used at compile time.");
  }

  bool IsRuntime() const override { return true; }

  // TODO(paddle-dev): Can this be template?
  std::vector<InferShapeVarPtr> GetInputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = InputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  std::vector<InferShapeVarPtr> GetOutputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = OutputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  DDim GetInputDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Input(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    return this->GetDim(vars[0]);
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    return GetDims(vars);
  }

  std::vector<proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    return GetVarTypes(InputVars(name));
  }

  std::vector<proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    return GetVarTypes(OutputVars(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto& vars = OutputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Output(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    SetDim(vars[0], dim);
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto& vars = OutputVars(name);
    SetDims(vars, dims);
  }

 protected:
  DDim GetDim(Variable* var) const {
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variables "
          "type_id is %s.",
          ToTypeName(var->Type()));
    }
  }

  std::vector<DDim> GetDims(const std::vector<Variable*>& vars) const {
    std::vector<DDim> ret;
    ret.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(ret),
                   [this](Variable* var) { return this->GetDim(var); });
    return ret;
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("Only compile time supports this method");
  }

  void SetDim(Variable* var, const DDim& dim) {
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                   ToTypeName(var->Type()));
    }
  }

  void SetDims(const std::vector<Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
    PADDLE_ENFORCE_EQ(length, dims.size());
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time supports this method");
  }

  std::vector<proto::VarType::Type> GetVarTypes(
      const std::vector<Variable*>& vars) const {
    std::vector<proto::VarType::Type> retv;
    retv.resize(vars.size());
    std::transform(vars.begin(), vars.end(), retv.begin(),
                   std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
                             this, std::placeholders::_1));
    return retv;
  }

  proto::VarType::Type GetVarType(Variable* var) const {
    return ToVarType(var->Type());
  }

 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    PADDLE_ENFORCE(it != ctx_.inputs.end(),
                   "Operator %s does not have the input %s.", op_.Type(), name);
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    PADDLE_ENFORCE(it != ctx_.outputs.end(),
                   "Operator %s does not have the output %s.", op_.Type(),
                   name);
    return it->second;
  }

  const OperatorBase& op_;
  const Scope& scope_;
  const RuntimeContext& ctx_;
};

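// Asserts that a FP32/FP64 tensor contains neither NaN nor Inf; used by the
// FLAGS_check_nan_inf path below since the scan is expensive.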
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (tensor.type() != proto::VarType::FP32 &&
      tensor.type() != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope, ctx);
  this->InferShape(&infer_shape_ctx);
}

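// Core kernel dispatch: resolve variables, pick the expected kernel type,
// transform input data if the chosen kernel requires it, run InferShape, and
// finally invoke the kernel on the (possibly transferred) scope.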
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeContext ctx(Inputs(), Outputs(), scope);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = this->GetExpectedKernelType(
      ExecutionContext(*this, scope, *dev_ctx, ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      PrepareData(scope, expected_kernel_key, &transfered_inplace_vars, &ctx);

  // exec scope is the scope that kernel actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, ctx);
  this->InferShape(&infer_shape_ctx);
  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext
  // not Scope. Imperative mode only passes inputs and gets outputs.
  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx, ctx));

  if (!transfered_inplace_vars.empty()) {
    // there are inplace variables that have been transferred.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}

void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name));
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
                   var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

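// Transforms inputs (device, layout or dtype) into what the chosen kernel
// expects. Returns the scope holding the transformed copies, or nullptr if
// nothing needed to be transformed.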
Scope* OperatorWithKernel::PrepareData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];

    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      auto& var_name = var_name_item.second[i];
      auto* var = input_vars[i];

      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, the scopes will be reused across
      // batches, so the `new_scope` here would result in GPU memory explosion
      // over the running of operators.
      // We use a thread_local cache to fix that issue: the key in the cache
      // is the combination of the `scope` argument, from_kernel_type and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if some
      // changes on this logic might not be tested on the other scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it should
      // be called by a NaiveExecutor; the NaiveExecutor caches the scopes and
      // variables, which behaves quite differently.
      if (!run_by_executor_) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      input_vars[i] = trans_var;

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

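// Scans all initialized input tensors and returns their common element type,
// enforcing that every input agrees on it.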
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  int data_type = -1;
  for (auto& input : this->inputs_) {
    const std::vector<const Variable*> vars = ctx.MultiInputVar(input.first);
    for (size_t i = 0; i < vars.size(); ++i) {
      const Variable* var = vars[i];
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          PADDLE_ENFORCE(t->IsInitialized(), "Input %s(%lu) is not initialized",
                         input.first, i);
          int tmp = static_cast<int>(t->type());
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get (%d) != (%d)",
              Type(), data_type, tmp);
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}
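
// A concrete operator can pin its kernel choice by overriding
// GetExpectedKernelType. A minimal sketch (MyOp and its "X" input are
// hypothetical, not part of this file):
//
//   OpKernelType MyOp::GetExpectedKernelType(
//       const ExecutionContext& ctx) const {
//     // Derive the data type from input "X" only, and request a plain
//     // kernel with no particular layout.
//     return OpKernelType(ctx.Input<Tensor>("X")->type(), ctx.GetPlace(),
//                         DataLayout::kAnyLayout, LibraryType::kPlain);
//   }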

}  // namespace framework
}  // namespace paddle