/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>
#include <sstream>
#include <string>
#include <vector>
#include "gflags/gflags.h"
#include "glog/logging.h"
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Check whether an operator produces NaN/Inf values. This check "
            "is extremely slow, so please use this flag wisely.");

namespace paddle {
namespace framework {

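// Kernel (place, library) combinations in descending priority: CUDNN on
// GPU, plain GPU, MKLDNN on CPU, then plain CPU.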
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

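// Returns the element data type of a LoDTensor or SelectedRows variable.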
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return var->Get<framework::LoDTensor>().type();
  } else if (var->IsType<framework::SelectedRows>()) {
    return var->Get<framework::SelectedRows>().value().type();
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

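// Returns the dims of the tensor held by `name` in `scope`, or DDim({-1})
// when the variable is missing, uninitialized, or of an unsupported type.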
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(tensor.type());
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(tensor.type());
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

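// Resolves all input/output variable names of an operator to Variable
// pointers in `scope` once, so kernels need not look them up repeatedly.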
RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

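// Runs the operator on `place`: selects the CUDA device when needed, wraps
// RunImpl in a profiling event when the profiler is enabled, and attaches
// the Python/C++ call stacks to any EnforceNotMet thrown by a non-sub-block
// operator.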
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  try {
    if (VLOG_IS_ON(4)) {
      VLOG(4) << place << " " << DebugStringEx(&scope);
    }
    if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
      PADDLE_THROW("Cannot run operator on place %s", place);
#else
      auto dev_id = boost::get<platform::CUDAPlace>(place).device;
      platform::SetDeviceId(dev_id);
#endif
    }

    // The profiler holds a process-wide mutex, which causes a serious
    // performance issue in concurrency scenarios, so we only enter the
    // RecordEvent path when profiling is enabled.
    // Please do not remove the `if`; ask @Superjomn if there is any concern.
    if (platform::IsProfileEnabled()) {
      platform::DeviceContextPool& pool =
          platform::DeviceContextPool::Instance();
      platform::RecordEvent record_event(Type(), pool.Get(place));
      RunImpl(scope, place);
    } else {
      RunImpl(scope, place);
    }

    if (VLOG_IS_ON(3)) {
      VLOG(3) << place << " " << DebugStringEx(&scope);
    }
  } catch (platform::EnforceNotMet& exception) {
    if (Attrs().count("sub_block") != 0) {
      throw exception;
    }

    auto& callstack = Attr<std::vector<std::string>>(
        OpProtoAndCheckerMaker::OpCreationCallstackAttrName());

    if (callstack.empty()) {
      throw exception;
    }
    std::ostringstream sout;
    sout << "Invoke operator " << Type() << " error.\n";
    sout << "Python Callstacks: \n";
    for (auto& line : callstack) {
      sout << line;
    }
    sout << "C++ Callstacks: \n";
    sout << exception.err_str_;
    exception.err_str_ = sout.str();
    throw exception;
  } catch (...) {
    std::rethrow_exception(std::current_exception());
  }
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

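// Builds a human-readable description of the operator; when `scope` is
// given, it also reports row size, dtype, dims and LoD of every input and
// output variable.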
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var.Type().name());
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not contain more than one variable", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not contain more than one variable",
                    name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

const Variable* ExecutionContext::LegacyInputVar(
    const std::string& name) const {
  auto ipt = op_.Input(name);
  return ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::LegacyOutputVar(const std::string& name) const {
  auto opt = op_.Output(name);
  return opt == kEmptyVarName ? nullptr : scope_.FindVar(opt);
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const Tensor* ExecutionContext::LegacyInput<Tensor>(
    const std::string& name) const {
  return LegacyInput<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) {
    return {};
  }
  const std::vector<Variable*>& vars = it->second;
  std::vector<const Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](Variable* var) -> const Tensor* {
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "should be LoDTensor, but the received type is %s",
                       var->Type().name());
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
const std::vector<const Tensor*> ExecutionContext::LegacyMultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> const Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
Tensor* ExecutionContext::LegacyOutput<Tensor>(const std::string& name) const {
  return LegacyOutput<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return var->GetMutable<LoDTensor>();
                 });
  return res;
}

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU.
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

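// InferShapeContext implementation used at runtime: shapes are read from
// and written to the Variables already resolved in a RuntimeContext.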
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope,
                           const RuntimeContext& ctx)
      : op_(op), scope_(scope), ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.empty()) return false;
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not contain more than one variable",
                      name);
    return in[0] != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.empty()) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not contain more than one variable",
                      name);
    return out[0] != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end() || it->second.empty()) {
      return false;
    }
    for (auto& input : it->second) {
      if (input == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end() || it->second.empty()) {
      return false;
    }
    for (auto& output : it->second) {
      if (output == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Input %s should have more than %llu arguments", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Output %s should have more than %llu arguments", out, j);

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];

    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", in, out);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Input %s should have more than %llu arguments", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Output %s should have more than %llu arguments", out, j);

    Variable* in_var = in_it->second.at(i);
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = out_it->second.at(j);
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void DecreaseLoDLevel(const std::string& in, const std::string& out,
                        size_t i = 0, size_t j = 0) const override {
    PADDLE_THROW("DecreaseLoDLevel is only used in compile time.");
  }

  bool IsRuntime() const override { return true; }

  // TODO(paddle-dev): Can this be template?
  std::vector<InferShapeVarPtr> GetInputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = InputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  std::vector<InferShapeVarPtr> GetOutputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = OutputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  DDim GetInputDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Input(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    return this->GetDim(vars[0]);
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    return GetDims(vars);
  }

  std::vector<proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    return GetVarTypes(InputVars(name));
  }

  std::vector<proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    return GetVarTypes(OutputVars(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto& vars = OutputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Output(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    SetDim(vars[0], dim);
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto& vars = OutputVars(name);
    SetDims(vars, dims);
  }

 protected:
  DDim GetDim(Variable* var) const {
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but the Variable's "
          "type_id is %s.",
          var->Type().name());
    }
  }

  std::vector<DDim> GetDims(const std::vector<Variable*>& vars) const {
    std::vector<DDim> ret;
    ret.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(ret),
                   [this](Variable* var) { return this->GetDim(var); });
    return ret;
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("GetRepeatedDims is only supported at compile time.");
  }

  void SetDim(Variable* var, const DDim& dim) {
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable's type_id is %s, expected LoDTensor/SelectedRows.",
                   var->Type().name());
    }
  }

  void SetDims(const std::vector<Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
    PADDLE_ENFORCE_EQ(length, dims.size());
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("SetRepeatedDims is only supported at compile time.");
  }

  std::vector<proto::VarType::Type> GetVarTypes(
      const std::vector<Variable*>& vars) const {
    std::vector<proto::VarType::Type> retv;
    retv.resize(vars.size());
    std::transform(vars.begin(), vars.end(), retv.begin(),
                   std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
                             this, std::placeholders::_1));
    return retv;
  }

  proto::VarType::Type GetVarType(Variable* var) const {
    return ToVarType(var->Type());
  }

 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    PADDLE_ENFORCE(it != ctx_.inputs.end(),
                   "Operator %s does not have the input %s.", op_.Type(), name);
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    PADDLE_ENFORCE(it != ctx_.outputs.end(),
                   "Operator %s does not have the outputs %s.", op_.Type(),
                   name);
    return it->second;
  }

  const OperatorBase& op_;
  const Scope& scope_;
  const RuntimeContext& ctx_;
};

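// Asserts that an FP32/FP64 tensor contains neither Inf nor NaN; empty
// tensors and other dtypes are skipped.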
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (tensor.type() != proto::VarType::FP32 &&
      tensor.type() != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope, ctx);
  this->InferShape(&infer_shape_ctx);
}

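// Kernel dispatch: resolve variables, choose the expected kernel (with an
// MKLDNN-to-plain fallback), transfer inputs that need it, run InferShape,
// and finally invoke the kernel on the proper device context.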
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeContext ctx(Inputs(), Outputs(), scope);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels registered for the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = this->GetExpectedKernelType(
      ExecutionContext(*this, scope, *dev_ctx, ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: fallbacking to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      PrepareData(scope, expected_kernel_key, &transfered_inplace_vars, &ctx);

  // exec_scope is the scope that the kernel is actually executed in.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, ctx);
  this->InferShape(&infer_shape_ctx);
  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext,
  // not Scope. Imperative mode only passes inputs and gets outputs.
  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx, ctx));

  if (!transfered_inplace_vars.empty()) {
    // Some in-place variables have been transferred; share them back.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /* For profiling/benchmark only */
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}

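// Shares the transformed tensors of in-place variables from the transfer
// scope back into the original scope.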
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to it's original scope";
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name));
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
                   var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

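// Transfers input tensors whose place/layout/dtype does not match the
// expected kernel type into a (possibly cached) transfer scope; returns
// that scope, or nullptr when no transfer was needed.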
Scope* OperatorWithKernel::PrepareData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];

    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      auto& var_name = var_name_item.second[i];
      auto* var = input_vars[i];

      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, scopes are reused across batches, so the
      // `new_scope` here would result in GPU memory explosion over the
      // running of operators.
      // We use a thread_local cache to fix that issue: the key in the cache
      // is the combination of the `scope` argument, from_kernel_type and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if
      // some changes on this logic might not be tested on the other
      // scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it
      // should be called by a NaiveExecutor; the NaiveExecutor caches the
      // scopes and variables, which behaves quite differently.
      if (!run_by_executor_) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      input_vars[i] = trans_var;

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

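// Deduces the operator's data type from its initialized input tensors and
// enforces that all of them agree on it.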
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  std::string last_input_name;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          PADDLE_ENFORCE(t->IsInitialized(), "Input %s is not initialized: %s",
                         ipt_name, DebugString());
          int tmp = static_cast<int>(t->type());
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get %s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
          data_type = tmp;
          last_input_name = ipt_name;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

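// Default kernel type deduction: use the common input data type on the
// current place; per-variable kernel types keep the tensor's own place and
// layout.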
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle