/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>
#include <sstream>
#include <string>
#include <vector>
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/details/op_handle_base.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Check whether an operator produces NaN/Inf or not. It will be "
            "extremely slow, so please use this flag wisely.");
DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");

namespace paddle {
namespace framework {

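// Kernel (place, library) pairs, listed from the most preferred to the least
// preferred.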
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

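// Returns the data type of the variable's underlying tensor: a LoDTensor
// carries it directly, while a SelectedRows carries it on its value tensor.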
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return var->Get<framework::LoDTensor>().type();
  } else if (var->IsType<framework::SelectedRows>()) {
    return var->Get<framework::SelectedRows>().value().type();
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

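// The helpers below (GetDims, VarInited, GetDtype, GetRowSize, GetLoD) are
// best-effort probes used to build the debug string in DebugStringEx(); they
// return sentinel values instead of throwing when a variable is absent or
// uninitialized.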
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(tensor.type());
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(tensor.type());
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

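// Resolves the operator's input/output variable names to Variable pointers in
// the given scope once, so kernels can look them up without searching the
// Scope again.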
RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

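// Entry point shared by all operators: binds the device, runs the kernel via
// RunImpl() (under a profiler event if profiling is on), and on failure
// rewrites the exception message to carry the Python and C++ call stacks.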
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  try {
    VLOG(4) << place << " " << DebugStringEx(&scope);
    if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
      PADDLE_THROW("Cannot run operator on place %s", place);
#else
      auto dev_id = boost::get<platform::CUDAPlace>(place).device;
      platform::SetDeviceId(dev_id);
#endif
    }

    // The profiler has a process-wide mutex, which results in serious
    // performance issues in concurrency scenarios. Here we use an `if` to fix
    // this issue.
    // Please do not remove the `if`; ask @Superjomn if there is any concern.
    if (platform::IsProfileEnabled()) {
      platform::RecordEvent record_event(Type());
      RunImpl(scope, place);
    } else {
      RunImpl(scope, place);
    }

    VLOG(3) << place << " " << DebugStringEx(&scope);
  } catch (platform::EnforceNotMet exception) {
    if (Attrs().count("sub_block") != 0) {
      throw std::move(exception);
    }

    auto& callstack = Attr<std::vector<std::string>>(
        OpProtoAndCheckerMaker::OpCreationCallstackAttrName());

    if (callstack.empty()) {
      throw std::move(exception);
    }
    std::ostringstream sout;
    sout << "Invoking operator " << Type() << " failed.\n";
    sout << "Python Callstacks: \n";
    for (auto& line : callstack) {
      sout << line;
    }
    sout << "C++ Callstacks: \n";
    sout << exception.err_str_;
    exception.err_str_ = sout.str();
    throw std::move(exception);
  } catch (...) {
    std::rethrow_exception(std::current_exception());
  }
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

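// Produces a one-line summary of the op and, when a scope is given, each
// variable's row_size/dtype/dims/LoD. Illustrative output (variable names
// invented; the exact rendering of dims and LoD comes from their operator<<):
//   Op(elementwise_add), inputs:{X[x:float[2, 3]({})]},
//   outputs:{Out[out:float[2, 3]({})]}.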
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 ToTypeName(var.Type()));
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 ToTypeName(var->Type()));
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one variable", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one variable", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) {
    return {};
  }
  const std::vector<Variable*>& vars = it->second;
  std::vector<const Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](Variable* var) -> const Tensor* {
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "should be LoDTensor, but the received type is %s",
                       ToTypeName(var->Type()));
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) {
    return {};
  }
  const std::vector<Variable*>& vars = it->second;
  std::vector<Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](Variable* var) -> Tensor* {
                   return var == nullptr ? nullptr
                                         : var->GetMutable<LoDTensor>();
                 });
  return res;
}

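// An operator supports GPU if any of its registered kernels targets a GPU
// place; operators with no registered kernels (e.g. control-flow ops) are
// assumed to support it.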
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

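// Runtime counterpart of the compile-time InferShapeContext: it answers shape
// queries from the real variables held by a RuntimeContext, so the same
// InferShape() code works in both worlds.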
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope,
                           const RuntimeContext& ctx)
      : op_(op), ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0) return false;
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one variable", name);
    return in[0] != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one variable", name);
    return out[0] != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end() || it->second.empty()) {
      return false;
    }
    for (auto& input : it->second) {
      if (input == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end() || it->second.empty()) {
      return false;
    }
    for (auto& output : it->second) {
      if (output == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];

    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", in, out);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim can only be LoDTensor "
          "or SelectedRows.");
    }
  }
Q
  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second.at(i);
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = out_it->second.at(j);
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN being populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPKernels, set_layout(kMKLDNN) should be
    //    called in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void DecreaseLoDLevel(const std::string& in, const std::string& out,
                        size_t i = 0, size_t j = 0) const override {
    PADDLE_THROW("DecreaseLoDLevel is only used at compile time.");
  }

  bool IsRuntime() const override { return true; }

  // TODO(paddle-dev): Can this be a template?
  std::vector<InferShapeVarPtr> GetInputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = InputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  std::vector<InferShapeVarPtr> GetOutputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = OutputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  DDim GetInputDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Input(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    return this->GetDim(vars[0]);
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    return GetDims(vars);
  }

  std::vector<proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    return GetVarTypes(InputVars(name));
  }

  std::vector<proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    return GetVarTypes(OutputVars(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto& vars = OutputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Output(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    SetDim(vars[0], dim);
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto& vars = OutputVars(name);
    SetDims(vars, dims);
  }

 protected:
  DDim GetDim(Variable* var) const {
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable's "
          "type_id is %s.",
          ToTypeName(var->Type()));
    }
  }

  std::vector<DDim> GetDims(const std::vector<Variable*>& vars) const {
    std::vector<DDim> ret;
    ret.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(ret),
                   [this](Variable* var) { return this->GetDim(var); });
    return ret;
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("Only compile time supports this method");
  }

  void SetDim(Variable* var, const DDim& dim) {
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                   ToTypeName(var->Type()));
    }
  }

  void SetDims(const std::vector<Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
    PADDLE_ENFORCE_EQ(length, dims.size());
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time supports this method");
  }

  std::vector<proto::VarType::Type> GetVarTypes(
      const std::vector<Variable*>& vars) const {
    std::vector<proto::VarType::Type> retv;
    retv.resize(vars.size());
    std::transform(vars.begin(), vars.end(), retv.begin(),
                   std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
                             this, std::placeholders::_1));
    return retv;
  }

  proto::VarType::Type GetVarType(Variable* var) const {
    return ToVarType(var->Type());
  }

 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    PADDLE_ENFORCE(it != ctx_.inputs.end(),
                   "Operator %s does not have the input %s.", op_.Type(), name);
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    PADDLE_ENFORCE(it != ctx_.outputs.end(),
                   "Operator %s does not have the output %s.", op_.Type(),
                   name);
    return it->second;
  }

  const OperatorBase& op_;
  const RuntimeContext& ctx_;
};

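// Asserts that a floating-point tensor contains neither Inf nor NaN; empty
// tensors and non-FP32/FP64 tensors are skipped.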
static void CheckTensorNANOrInf(const std::string& op_type,
                                const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (tensor.type() != proto::VarType::FP32 &&
      tensor.type() != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Operator %s output Tensor %s contains Inf", op_type, name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Operator %s output Tensor %s contains NAN", op_type, name);
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope, ctx);
  this->InferShape(&infer_shape_ctx);
}

std::vector<KernelConfig>* OperatorWithKernel::GetKernelConfig(
    const OpKernelType& key) const {
  auto config_iter = kernel_configs_map_.find(key);
  std::vector<KernelConfig>* kernel_configs = nullptr;
  if (config_iter != kernel_configs_map_.end()) {
    kernel_configs = &(config_iter->second);
  }
  return kernel_configs;
}

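// Core execution path of an operator with kernels: choose a kernel by the
// expected OpKernelType, transfer mismatched inputs (device/layout/dtype),
// run InferShape, invoke the kernel, and copy in-place outputs back.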
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  const Scope* cur_scope = &scope;
  // RuntimeContext is used to relate the input/output names of the Operator
  // with the corresponding variables in the Scope.
  // In the same Scope, since the input/output names of the Operator do not
  // change during execution, the RuntimeContext can be created only at the
  // first iteration of the execution to save the elapsed time.
  // Note that the Scope should not be the local scope, since a local scope
  // would be cleaned regularly.
  if (scope.FindVar(details::kLocalExecScopeName)) {
    runtime_ctx_.reset(new RuntimeContext(Inputs(), Outputs(), scope));
  } else if (!runtime_ctx_ || pre_scope_ != cur_scope) {
    runtime_ctx_.reset(new RuntimeContext(Inputs(), Outputs(), scope));
    pre_scope_ = cur_scope;
  }
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has a kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels registered for the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = this->GetExpectedKernelType(
      ExecutionContext(*this, scope, *dev_ctx, *runtime_ctx_, nullptr));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to the PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have a kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  std::vector<KernelConfig>* kernel_configs =
      GetKernelConfig(expected_kernel_key);

  // do data transform
  std::vector<std::string> transferred_inplace_vars;
  auto* transfer_scope =
      PrepareData(scope, expected_kernel_key, &transferred_inplace_vars,
                  runtime_ctx_.get());

  // exec scope is the scope that the kernel is actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, *runtime_ctx_);
  this->InferShape(&infer_shape_ctx);
  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext
  // and not Scope. Imperative mode only passes inputs and gets outputs.
  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx,
                                       *runtime_ctx_, kernel_configs));

  if (!transferred_inplace_vars.empty()) {
    // there are inplace variables that have been transferred.
    TransferInplaceVarsBack(scope, transferred_inplace_vars, *transfer_scope);
  }

  /* For profiling/benchmark only */
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(type_, vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(type_, vname,
                            var->Get<framework::SelectedRows>().value());
      }
    }
  }
}

void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* origin_var = scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(origin_var, "The var[%s] should not be nullptr.",
                            var_name);
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(origin_var);
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var, "The var[%s] should not be nullptr.",
                            var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

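// Transfers the input tensors whose place/layout/dtype do not match the
// expected kernel type into a (possibly cached) transfer scope; returns that
// scope, or nullptr when no transfer was needed.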
Scope* OperatorWithKernel::PrepareData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transferred_inplace_vars,
    RuntimeContext* ctx) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];

    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      auto& var_name = var_name_item.second[i];
      auto* var = input_vars[i];

      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transferred_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, the scopes will be reused across
      // batches, so the `new_scope` here would result in GPU memory explosion
      // over the running of operators.
      // We use a thread_local cache to fix that issue: the key in the cache is
      // the combination of the `scope` argument, from_kernel_type, and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if some
      // changes on this logic might not be tested in the other scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it should
      // be called by a NaiveExecutor; the NaiveExecutor caches the scopes and
      // variables, whose behavior is a lot different.
      if (!run_by_executor_) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      input_vars[i] = trans_var;

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

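// Scans all initialized input tensors and enforces that they share a single
// data type, which then becomes the kernel's data type.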
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = default_data_type;
  for (auto& input : this->inputs_) {
    const std::vector<const Variable*> vars = ctx.MultiInputVar(input.first);
    for (size_t i = 0; i < vars.size(); ++i) {
      const Variable* var = vars[i];
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          PADDLE_ENFORCE(t->IsInitialized(), "Input %s(%lu) is not initialized",
                         input.first, i);
          proto::VarType::Type tmp = t->type();
          PADDLE_ENFORCE(
              tmp == data_type || data_type == default_data_type,
              "DataType of Paddle Op %s must be the same. Get (%s) != (%s)",
              Type(), DataTypeToString(data_type), DataTypeToString(tmp));
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != default_data_type,
                 "DataType should be indicated by input");
  return data_type;
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle