operator.cc 33.5 KB
Newer Older
1
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Q
Qiao Longfei 已提交
2 3 4 5 6 7 8 9 10 11 12 13

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
D
dzhwinter 已提交
14

15 16
#include <gflags/gflags.h>
#include <glog/logging.h>
17

18
#include <algorithm>
19

Y
Yi Wang 已提交
20 21
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
22
#include "paddle/fluid/framework/lod_tensor.h"
23
#include "paddle/fluid/framework/operator.h"
Y
Yi Wang 已提交
24
#include "paddle/fluid/framework/shape_inference.h"
25
#include "paddle/fluid/framework/transfer_scope_cache.h"
Y
Yi Wang 已提交
26
#include "paddle/fluid/framework/var_type.h"
27
#include "paddle/fluid/platform/profiler.h"
Q
Qiao Longfei 已提交
28

D
dzhwinter 已提交
29
DECLARE_bool(benchmark);
C
chengduoZH 已提交
30 31 32
DEFINE_bool(check_nan_inf, false,
            "Checking whether operator produce NAN/INF or not. It will be "
            "extremely slow so please use this flag wisely.");
D
dzhwinter 已提交
33

Q
Qiao Longfei 已提交
34 35 36
namespace paddle {
namespace framework {

37 38 39 40 41 42
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};
D
dzhwinter 已提交
43

Q
qiaolongfei 已提交
44 45
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
Y
Yu Yang 已提交
46
    return var->Get<framework::LoDTensor>().type();
Q
qiaolongfei 已提交
47
  } else if (var->IsType<framework::SelectedRows>()) {
Y
Yu Yang 已提交
48
    return var->Get<framework::SelectedRows>().value().type();
Q
qiaolongfei 已提交
49 50 51 52 53
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

54 55
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
56
  Variable* var = scope.FindVar(name);
Q
qiaolongfei 已提交
57 58
  if (var == nullptr) {
    return DDim({-1});
Q
Qiao Longfei 已提交
59 60
  }

M
minqiyang 已提交
61 62
  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
M
minqiyang 已提交
63
    if (UNLIKELY(!tensor.IsInitialized())) {
64
      return DDim({-1});
65
    }
M
minqiyang 已提交
66 67 68 69 70 71 72
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
73 74 75 76 77
  } else {
    return DDim({-1});
  }
}

Q
Qiao Longfei 已提交
78 79 80 81 82 83
static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

D
dzhwinter 已提交
84 85 86 87 88
static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }
89

M
minqiyang 已提交
90 91 92
  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
93 94
      return "";
    }
Y
Yu Yang 已提交
95
    return DataTypeToString(tensor.type());
M
minqiyang 已提交
96
  } else if (var->IsType<SelectedRows>()) {
Q
Qiao Longfei 已提交
97 98 99 100
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
Y
Yu Yang 已提交
101
      return DataTypeToString(tensor.type());
Q
Qiao Longfei 已提交
102
    }
D
dzhwinter 已提交
103 104 105 106 107
  } else {
    return "";
  }
}

108 109 110 111 112 113
static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

M
minqiyang 已提交
114 115
  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
116 117 118 119 120
  }

  return -1;
}

Q
Qiao Longfei 已提交
121 122 123 124 125 126 127 128
static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

M
minqiyang 已提交
129 130 131
  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
132 133
      return default_lod;
    }
M
minqiyang 已提交
134
    return tensor.lod();
Q
Qiao Longfei 已提交
135 136 137 138 139
  } else {
    return default_lod;
  }
}

X
Xin Pan 已提交
140 141 142 143 144
RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
X
Xin Pan 已提交
145
    input_vars.reserve(var_name_item.second.size());
X
Xin Pan 已提交
146 147 148 149 150 151
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
X
Xin Pan 已提交
152
    output_vars.reserve(var_name_item.second.size());
X
Xin Pan 已提交
153 154 155 156 157 158
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

159
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
M
minqiyang 已提交
160
  VLOG(4) << place << " " << DebugStringEx(&scope);
161
  if (platform::is_gpu_place(place)) {
162
#ifndef PADDLE_WITH_CUDA
163
    PADDLE_THROW("Cannot run operator on place %s", place);
164
#else
165 166
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
167 168
#endif
  }
169

P
peizhilin 已提交
170 171 172
  // The profile has a process-wide mutex, results in serious performance issue
  // in concurrency scenerio. Here use an `if` to fix this issue.
  // Please not remove the `if`, ask @Superjomn if there are any concern.
173 174 175 176
  if (platform::IsProfileEnabled()) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    platform::RecordEvent record_event(Type(), pool.Get(place));
    RunImpl(scope, place);
P
peizhilin 已提交
177
  } else {
178 179
    RunImpl(scope, place);
  }
M
minqiyang 已提交
180
  VLOG(3) << place << " " << DebugStringEx(&scope);
181 182
}

183 184 185 186 187 188 189 190
bool OperatorBase::HasInputs(const std::string& name) const {
  if (inputs_.find(name) != inputs_.end()) {
    return true;
  } else {
    return false;
  }
}

191
std::string OperatorBase::Input(const std::string& name) const {
Y
Yu Yang 已提交
192
  auto& ins = Inputs(name);
Y
Yu Yang 已提交
193
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
194 195
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
Y
Yu Yang 已提交
196
  return ins.empty() ? kEmptyVarName : ins[0];
Y
Yan Chunwei 已提交
197 198
}

Y
Yu Yang 已提交
199 200
const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
Y
Yu Yang 已提交
201
  auto it = inputs_.find(name);
202 203
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
Y
Yu Yang 已提交
204
  return it->second;
Y
Yan Chunwei 已提交
205 206
}

207
bool OperatorBase::HasOutputs(const std::string& name) const {
208
  if (outputs_.find(name) != outputs_.end()) {
209 210 211 212 213 214
    return true;
  } else {
    return false;
  }
}

215
std::string OperatorBase::Output(const std::string& name) const {
Y
Yu Yang 已提交
216
  auto& outs = Outputs(name);
Y
Yu Yang 已提交
217
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
218 219
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
Y
Yu Yang 已提交
220
  return outs.empty() ? kEmptyVarName : outs[0];
Y
Yan Chunwei 已提交
221 222
}

Y
Yu Yang 已提交
223 224
const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
Y
Yu Yang 已提交
225
  auto it = outputs_.find(name);
226 227
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
Y
Yu Yang 已提交
228
  return it->second;
Y
Yan Chunwei 已提交
229 230
}

231
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
Q
Qiao Longfei 已提交
232
  std::stringstream ss;
Y
Yu Yang 已提交
233
  ss << "Op(" << type_ << "), inputs:{";
Y
Yu Yang 已提交
234 235
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
Y
Yu Yang 已提交
236 237
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
Q
Qiao Longfei 已提交
238 239
      auto var_name = input.second[i];
      ss << var_name;
240
      if (scope) {
Q
Qiao Longfei 已提交
241 242 243 244 245 246 247 248 249 250 251
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
252
        }
253
      }
Y
Yu Yang 已提交
254 255 256
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
257
    }
Y
Yu Yang 已提交
258
    ss << "]";
Y
Yu Yang 已提交
259 260
    ++it;
    if (it != inputs_.end()) {
261 262
      ss << ", ";
    }
Q
Qiao Longfei 已提交
263
  }
Y
Yu Yang 已提交
264
  ss << "}, outputs:{";
Y
Yu Yang 已提交
265 266
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
Y
Yu Yang 已提交
267 268
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
Q
Qiao Longfei 已提交
269 270
      auto var_name = output.second[i];
      ss << var_name;
271
      if (scope) {
Q
Qiao Longfei 已提交
272 273 274 275 276 277 278
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
C
chengduo 已提交
279 280
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
Q
Qiao Longfei 已提交
281 282
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
283
        }
284
      }
Y
Yu Yang 已提交
285 286 287
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
288
    }
Y
Yu Yang 已提交
289
    ss << "]";
Y
Yu Yang 已提交
290 291
    ++it;
    if (it != outputs_.end()) {
292 293
      ss << ", ";
    }
Q
Qiao Longfei 已提交
294
  }
Y
Yu Yang 已提交
295
  ss << "}.";
Q
Qiao Longfei 已提交
296 297 298
  return ss.str();
}

Y
Yu Yang 已提交
299
OperatorBase::OperatorBase(const std::string& type,
Y
Yu Yang 已提交
300 301
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
Y
Yu Yang 已提交
302 303
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
304 305
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
Y
Yu Yang 已提交
306
}
307

Q
qijun 已提交
308 309
std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
Y
Yu Yang 已提交
310
  for (auto& o : inputs_) {
Q
qijun 已提交
311 312 313 314 315 316
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

Y
Yu Yang 已提交
317 318 319 320 321 322 323 324 325 326
std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
Y
Yu Yang 已提交
327
  auto& info = OpInfoMap::Instance().Get(Type());
Y
Yu Yang 已提交
328 329

  // get all OpProto::Var for outputs
Y
Yu Yang 已提交
330
  for (auto& o : info.Proto().outputs()) {
Y
Yu Yang 已提交
331 332 333 334 335 336 337 338 339
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
D
dongzhihong 已提交
340 341
}

342 343 344
void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
Y
Yu Yang 已提交
345
  if (op_info == nullptr || op_info->proto_ == nullptr) return;
346 347

  for (auto& in : op_info->Proto().inputs()) {
348 349 350 351
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
352 353 354
  }

  for (auto& out : op_info->Proto().outputs()) {
355 356 357 358 359
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

B
baojun-nervana 已提交
376
static bool VarIsTensor(const Variable& var) {
C
chengduo 已提交
377
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
378 379
}

C
chengduo 已提交
380
const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
C
chengduo 已提交
381 382 383 384
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
Q
QI JUN 已提交
385
  } else {
Y
Yang Yang 已提交
386
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
C
chengduo 已提交
387
                 var.Type().name());
Q
QI JUN 已提交
388 389 390
  }
}

C
chengduo 已提交
391
Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
Q
QI JUN 已提交
392
  if (var->IsType<LoDTensor>()) {
393
    return var->GetMutable<LoDTensor>();
Q
QI JUN 已提交
394
  } else if (var->IsType<SelectedRows>()) {
395
    return var->GetMutable<SelectedRows>()->mutable_value();
Q
QI JUN 已提交
396
  } else {
Y
Yang Yang 已提交
397 398
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
Q
QI JUN 已提交
399 400 401
  }
}

402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433
bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one inputs", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one inputs", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

X
Xin Pan 已提交
434 435 436 437 438 439 440 441 442 443
const Variable* ExecutionContext::InputVar(const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

X
clean  
Xin Pan 已提交
444 445 446 447
const Variable* ExecutionContext::LegacyInputVar(
    const std::string& name) const {
  auto ipt = op_.Input(name);
  return ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
X
Xin Pan 已提交
448 449
}

X
clean  
Xin Pan 已提交
450
Variable* ExecutionContext::OutputVar(const std::string& name) const {
X
Xin Pan 已提交
451 452 453 454 455 456 457 458 459
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

X
clean  
Xin Pan 已提交
460 461 462 463 464
Variable* ExecutionContext::LegacyOutputVar(const std::string& name) const {
  auto opt = op_.Output(name);
  return opt == kEmptyVarName ? nullptr : scope_.FindVar(opt);
}

465
template <>
466
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
C
chengduo 已提交
467
  return Input<LoDTensor>(name);
468 469
}

X
Xin Pan 已提交
470
template <>
X
clean  
Xin Pan 已提交
471
const Tensor* ExecutionContext::LegacyInput<Tensor>(
X
Xin Pan 已提交
472
    const std::string& name) const {
X
clean  
Xin Pan 已提交
473
  return LegacyInput<LoDTensor>(name);
X
Xin Pan 已提交
474 475
}

476
template <>
477
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
478 479 480 481
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
482
  std::transform(names.begin(), names.end(), std::back_inserter(res),
C
chengduo 已提交
483
                 [&](const std::string& sub_name) -> const Tensor* {
484
                   auto var = scope_.FindVar(sub_name);
C
chengduo 已提交
485 486 487 488 489 490
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return &(var->Get<LoDTensor>());
491
                 });
492 493 494 495
  return res;
}

template <>
496
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
C
chengduo 已提交
497
  return Output<LoDTensor>(name);
498 499
}

X
Xin Pan 已提交
500
template <>
X
clean  
Xin Pan 已提交
501 502
Tensor* ExecutionContext::LegacyOutput<Tensor>(const std::string& name) const {
  return LegacyOutput<LoDTensor>(name);
X
Xin Pan 已提交
503 504
}

505
template <>
506
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
507 508 509 510
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
511
  std::transform(names.begin(), names.end(), std::back_inserter(res),
C
chengduo 已提交
512
                 [&](const std::string& sub_name) -> Tensor* {
513
                   auto var = scope_.FindVar(sub_name);
C
chengduo 已提交
514 515 516 517 518 519
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return var->GetMutable<LoDTensor>();
520
                 });
521 522 523
  return res;
}

Y
Yu Yang 已提交
524 525 526 527 528 529 530 531 532 533 534 535 536 537 538
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operator must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

539 540
class RuntimeInferShapeContext : public InferShapeContext {
 public:
X
Xin Pan 已提交
541 542 543
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope,
                           const RuntimeContext& ctx)
      : op_(op), scope_(scope), ctx_(ctx) {}
544 545

  bool HasInput(const std::string& name) const override {
546
    // has only one input
X
Xin Pan 已提交
547
    const auto& ins = ctx_.inputs;
548 549
    auto it = ins.find(name);
    if (it == ins.end()) {
550 551
      return false;
    }
552
    const auto& in = it->second;
X
Xin Pan 已提交
553
    if (in.size() == 0) return false;
T
tensor-tang 已提交
554
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
F
fengjiayi 已提交
555
                      "Input %s should not have more than one inputs", name);
X
Xin Pan 已提交
556
    return in[0] != nullptr;
557 558 559
  }

  bool HasOutput(const std::string& name) const override {
560
    // has only one output
X
Xin Pan 已提交
561
    const auto& outs = ctx_.outputs;
562 563
    auto it = outs.find(name);
    if (it == outs.end()) {
564 565
      return false;
    }
566
    const auto& out = it->second;
X
Xin Pan 已提交
567
    if (out.size() == 0) {
568 569
      return false;
    }
T
tensor-tang 已提交
570 571
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one outputs", name);
X
Xin Pan 已提交
572
    return out[0] != nullptr;
573 574 575
  }

  bool HasInputs(const std::string& name) const override {
X
Xin Pan 已提交
576 577
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
X
fix  
Xin Pan 已提交
578
    if (it == ins.end() || it->second.empty()) {
579 580
      return false;
    }
X
Xin Pan 已提交
581 582
    for (auto& input : it->second) {
      if (input == nullptr) {
583 584 585 586 587 588 589
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
X
Xin Pan 已提交
590 591
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
X
fix  
Xin Pan 已提交
592
    if (it == outs.end() || it->second.empty()) {
593 594
      return false;
    }
X
Xin Pan 已提交
595 596
    for (auto& output : it->second) {
      if (output == nullptr) {
597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

615 616
  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
X
Xin Pan 已提交
617 618 619 620 621 622 623 624 625
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];
626 627

    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
X
Xin Pan 已提交
628 629
                   "The type of %s and %s is not the same.", in_var->Type(),
                   out_var->Type());
630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

Q
Qiao Longfei 已提交
648 649
  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
X
Xin Pan 已提交
650 651 652 653 654 655 656 657
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second.at(i);
Q
Qiao Longfei 已提交
658
    if (!in_var->IsType<LoDTensor>()) return;
X
Xin Pan 已提交
659
    Variable* out_var = out_it->second.at(j);
Q
Qiao Longfei 已提交
660 661 662 663 664
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());
D
dzhwinter 已提交
665

M
mozga-intel 已提交
666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684
// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to shared info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
D
dzhwinter 已提交
685 686
  }

C
chengduo 已提交
687 688 689 690 691
  void DecreaseLoDLevel(const std::string& in, const std::string& out,
                        size_t i = 0, size_t j = 0) const override {
    PADDLE_THROW("DecreaseLoDLevel is only used in compile time.");
  }

692 693
  bool IsRuntime() const override { return true; }

694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712
  // TODO(paddle-dev): Can this be template?
  std::vector<InferShapeVarPtr> GetInputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = InputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  std::vector<InferShapeVarPtr> GetOutputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = OutputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

713
 protected:
714 715
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
F
fengjiayi 已提交
716
    PADDLE_ENFORCE_NOT_NULL(var);
717 718 719 720 721
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
F
fengjiayi 已提交
722 723 724 725 726 727 728
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

F
fengjiayi 已提交
729
  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
Y
Yu Yang 已提交
730
    PADDLE_THROW("Only compile time support this method");
731 732 733 734 735 736 737 738 739
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
Y
Yang Yang 已提交
740 741
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
742 743 744
    }
  }

F
fengjiayi 已提交
745 746
  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
Y
Yu Yang 已提交
747
    PADDLE_THROW("Only compile time support this method");
F
fengjiayi 已提交
748 749
  }

750
  proto::VarType::Type GetVarType(const std::string& name) const override {
751 752 753 754
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

755 756 757 758 759 760 761 762 763 764 765 766 767 768
 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    PADDLE_ENFORCE(it != ctx_.inputs.end(),
                   "Operator %s does not have the input %s.", op_.Type(), name);
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    PADDLE_ENFORCE(it != ctx_.outputs.end(),
                   "Operator %s does not have the outputs %s.", op_.Type(),
                   name);
    return it->second;
F
fengjiayi 已提交
769 770
  }

771 772
  const OperatorBase& op_;
  const Scope& scope_;
X
Xin Pan 已提交
773
  const RuntimeContext& ctx_;
774 775
};

C
chengduoZH 已提交
776 777 778 779 780
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
Y
Yu Yang 已提交
781 782
  if (tensor.type() != proto::VarType::FP32 &&
      tensor.type() != proto::VarType::FP64) {
C
chengduoZH 已提交
783 784 785 786 787 788 789 790
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

B
baojun-nervana 已提交
791
void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
X
Xin Pan 已提交
792 793 794
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope, ctx);
B
baojun-nervana 已提交
795 796 797
  this->InferShape(&infer_shape_ctx);
}

798 799
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
X
Xin Pan 已提交
800
  RuntimeContext ctx(Inputs(), Outputs(), scope);
Y
Yu Yang 已提交
801
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
802
  auto* dev_ctx = pool.Get(place);
803

804 805 806 807
  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
Y
Yu Yang 已提交
808 809
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
810 811
  }

Q
qiaolongfei 已提交
812 813
  OpKernelMap& kernels = kernels_iter->second;

X
Xin Pan 已提交
814 815
  auto expected_kernel_key = this->GetExpectedKernelType(
      ExecutionContext(*this, scope, *dev_ctx, ctx));
M
minqiyang 已提交
816
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;
Q
qiaolongfei 已提交
817

818
  auto kernel_iter = kernels.find(expected_kernel_key);
819
#ifdef PADDLE_WITH_MKLDNN
P
Paweł Żelazko 已提交
820
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
821 822
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
M
minqiyang 已提交
823
    VLOG(3) << "missing MKLDNN kernel: fallbacking to PLAIN one";
824 825 826 827 828
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
829 830 831 832 833
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

Y
yuyang18 已提交
834 835 836
  // do data transformScope &transfer_scope;
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
X
Xin Pan 已提交
837
      PrepareData(scope, expected_kernel_key, &transfered_inplace_vars, &ctx);
838

Y
yuyang18 已提交
839 840 841 842 843 844
  // exec scope is the scope that kernel actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
845
  }
Q
QI JUN 已提交
846

X
Xin Pan 已提交
847
  RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, ctx);
X
Xin Pan 已提交
848
  this->InferShape(&infer_shape_ctx);
X
clean  
Xin Pan 已提交
849 850
  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext
  // not Scope. Imperative mode only pass inputs and get outputs.
X
Xin Pan 已提交
851
  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx, ctx));
D
dzhwinter 已提交
852

Y
yuyang18 已提交
853 854 855
  if (!transfered_inplace_vars.empty()) {
    // there is inplace variable has been transfered.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
856 857
  }

D
dzhwinter 已提交
858
  /*For profiling/benchmark only*/
D
dzhwinter 已提交
859
  if (FLAGS_benchmark) {
Y
yuyang18 已提交
860
    dev_ctx->Wait();
D
dzhwinter 已提交
861
  }
C
chengduoZH 已提交
862 863 864

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
Y
yuyang18 已提交
865
      auto* var = exec_scope.FindVar(vname);
C
chengduoZH 已提交
866 867 868
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
869 870
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
C
chengduoZH 已提交
871 872 873
      }
    }
  }
Q
Qiao Longfei 已提交
874
}
X
Xin Pan 已提交
875

Y
yuyang18 已提交
876 877 878 879
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
M
minqiyang 已提交
880
    VLOG(3) << "share inplace var " + var_name + " back to it's original scope";
C
chengduo 已提交
881 882
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name));
C
chengduo 已提交
883 884 885
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
                   var_name);
C
chengduo 已提交
886
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
Y
yuyang18 已提交
887 888 889 890
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

X
Xin Pan 已提交
891
Scope* OperatorWithKernel::PrepareData(
Y
yuyang18 已提交
892
    const Scope& scope, const OpKernelType& expected_kernel_key,
X
Xin Pan 已提交
893 894
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx) const {
Y
yuyang18 已提交
895 896
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
X
Xin Pan 已提交
897 898 899 900
    std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];

    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      auto& var_name = var_name_item.second[i];
X
Xin Pan 已提交
901
      auto* var = input_vars[i];
X
Xin Pan 已提交
902

Y
yuyang18 已提交
903
      // Only tensor can be tranfer to another device.
C
chengduo 已提交
904
      if (var == nullptr || !VarIsTensor(*var)) {
Y
yuyang18 已提交
905 906 907
        continue;
      }

C
chengduo 已提交
908
      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
Y
yuyang18 已提交
909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

M
minqiyang 已提交
926 927
      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;
Y
yuyang18 已提交
928

929 930 931 932 933 934 935 936 937 938 939 940 941 942 943
      // In the inference scenerio, the scopes will be reused across the
      // batches, so the `new_scope` here will result in GPU memroy explosion
      // over the  running of operators.
      // We use a thread_local cache to fix that issue, the key in the cache is
      // the combination of the `scope` argument, from_kernel_type,
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if some
      // changes on this logic for this macro might not tested on the other
      // scenerios.
      // If this op is not called by an Executor or ParallelExecutor, it should
      // called by a NaiveExecutor, the NaiveExecutor will cache the scopes and
      // variables, that behavior a lot different.
      if (!run_by_executor_) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
944
      }
945
      if (!new_scope) {
Y
yuyang18 已提交
946 947 948 949
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
X
fix  
Xin Pan 已提交
950
      input_vars[i] = trans_var;
951

Y
yuyang18 已提交
952
      Tensor out;
Y
yuyang18 已提交
953
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
Y
yuyang18 已提交
954 955 956 957 958 959
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}
Q
Qiao Longfei 已提交
960

961
proto::VarType::Type OperatorWithKernel::IndicateDataType(
Y
Yu Yang 已提交
962 963 964
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
965
  std::string last_input_name;
Y
Yu Yang 已提交
966 967 968 969 970 971 972 973 974 975 976 977 978
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
S
fix bug  
sneaxiy 已提交
979 980
          PADDLE_ENFORCE(t->IsInitialized(), "Input %s is not initialized: %s",
                         ipt_name, DebugString());
Y
Yu Yang 已提交
981
          int tmp = static_cast<int>(t->type());
982 983
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
984 985
              "DataType of Paddle Op %s must be the same. Get %s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
Y
Yu Yang 已提交
986
          data_type = tmp;
987
          last_input_name = ipt_name;
Y
Yu Yang 已提交
988 989 990 991 992
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
993
  return static_cast<proto::VarType::Type>(data_type);
Y
Yu Yang 已提交
994
}
995

996 997 998 999 1000 1001 1002 1003
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
M
mozga-intel 已提交
1004 1005
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
1006 1007
}

Q
Qiao Longfei 已提交
1008
}  // namespace framework
L
liaogang 已提交
1009
}  // namespace paddle