/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Checking whether operator produce NAN/INF or not. It will be "
            "extremely slow so please use this flag wisely.");

namespace paddle {
namespace framework {

// Combine two hash values to a single hash.
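// (A variant of the boost::hash_combine recipe; 0x9e3779b9 is the 32-bit
// golden-ratio constant.)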
inline size_t CombineHash(size_t seed, size_t a) {
  return (seed ^ a) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

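// Candidate (Place, LibraryType) pairs, ordered from highest to lowest
// priority; intended for the kernel-fallback selection that is sketched (and
// currently commented out) in OperatorWithKernel::RunImpl.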
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

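// Returns the element data type of the tensor held by `var`, which must be
// a LoDTensor or a SelectedRows.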
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

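// Returns the dims of variable `name` in `scope`, or DDim({-1}) when the
// variable is absent or uninitialized. For SelectedRows, `get_actual_dim`
// selects the value's dims instead of the complete dims.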
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(ToDataType(tensor.type()));
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(ToDataType(tensor.type()));
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

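// A minimal usage sketch (hypothetical op_desc; OpRegistry::CreateOp is
// assumed to be available via op_registry.h):
//   auto op = OpRegistry::CreateOp(op_desc);
//   op->Run(scope, platform::CPUPlace());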
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(40) << place << " " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }

// The profiler has a process-wide mutex, which results in a serious
// performance issue in concurrency scenarios. Here we use an `if` to fix
// this issue. Please do not remove the `if`; ask @Superjomn if there is any
// concern.
#ifndef _WIN32
  if (platform::IsProfileEnabled()) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    platform::RecordEvent record_event(Type(), pool.Get(place));
    RunImpl(scope, place);
  } else
#endif
  {
    RunImpl(scope, place);
  }
  VLOG(30) << place << " " << DebugStringEx(&scope);
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

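// Builds a human-readable summary "Op(type), inputs:{...}, outputs:{...}";
// when `scope` is non-null, each variable is annotated with its init state,
// row size (for SelectedRows), dtype, dims, and LoD.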
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expected LoDTensor/SelectedRows.",
                 var.Type().name());
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expected LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not contain more than one variable", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not contain more than one variable",
                    name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> const Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return var->GetMutable<LoDTensor>();
                 });
  return res;
}

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU.
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

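// An InferShapeContext backed by a live Scope: constructed in
// OperatorWithKernel::RunImpl so that InferShape runs against the actual
// runtime variables rather than compile-time descriptions.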
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = op_.Inputs();
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0 || in[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not contain more than one variable",
                      name);
    return scope_.FindVar(in[0]) != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = op_.Outputs();
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0 || out[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not contain more than one variable",
                      name);
    return scope_.FindVar(out[0]) != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    const std::string& input_n = Inputs(in)[i];
    const std::string& output_n = Outputs(out)[j];

    Variable* in_var = scope_.FindVar(input_n);
    Variable* out_var = scope_.FindVar(output_n);
    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", output_n,
                   input_n);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    const std::vector<std::string>& inputs = Inputs(in);
    const std::vector<std::string>& outputs = Outputs(out);
    PADDLE_ENFORCE_LT(i, inputs.size());
    PADDLE_ENFORCE_LT(j, outputs.size());
    Variable* in_var = scope_.FindVar(inputs.at(i));
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = scope_.FindVar(outputs.at(j));
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence-related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN being wrongly populated into a non-MKLDNN
    //    OPKernel. In every MKLDNN OPKernel, set_layout(kMKLDNN) should be
    //    called in Compute().
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("This method is only supported at compile time.");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expected LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("This method is only supported at compile time.");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

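// Asserts that a float/double tensor contains no Inf/NaN values; applied to
// output variables when FLAGS_check_nan_inf is set.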
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (!IsType<float>(tensor.type()) && !IsType<double>(tensor.type())) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has a kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels registered for the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key =
      this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx));
  VLOG(30) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(30) << "missing MKLDNN kernel: falling back to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform; transformed inputs live in a separate transfer scope
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars);

  // exec_scope is the scope that the kernel actually executes on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx));

  if (!transfered_inplace_vars.empty()) {
    // Some in-place variables have been transferred; share them back.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /* For profiling/benchmark only */
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}
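
// Shares the data of each transferred in-place variable from the transfer
// scope back into the corresponding tensor in the original scope.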
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(30) << "share inplace var " + var_name +
                    " back to its original scope";
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name));
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
                   var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

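// For each input that needs a transform (device/layout/dtype) to match the
// expected kernel type, create a transformed copy in a transfer scope and
// return that scope; return nullptr when no transform was needed. Inputs
// that are also outputs are recorded in `transfered_inplace_vars` so their
// data can be shared back afterwards.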
Scope* OperatorWithKernel::TryTransferData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars) const {
// In the inference scenario, the scopes will be reused across batches, so the
// `new_scope` here would result in GPU memory explosion over the running of
// operators.
// We use a thread_local cache to fix that issue: the key in the cache is the
// combination of the `scope` argument, from_kernel_type, and
// target_kernel_type.
// Have a discussion with @Superjomn or the inference developers if any change
// to this logic is needed, since this macro path might not be tested in the
// other scenarios.
#ifdef PADDLE_ON_INFERENCE
  thread_local std::unordered_map<size_t, Scope*> infer_transfer_scope_cache;
#endif

  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(30) << "Transform Variable " << var_name << " from "
               << kernel_type_for_var << " to " << expected_kernel_key;

#ifdef PADDLE_ON_INFERENCE
      size_t infer_cache_key =
          CombineHash(OpKernelType::Hash()(kernel_type_for_var),
                      OpKernelType::Hash()(expected_kernel_key));
      infer_cache_key =
          CombineHash(infer_cache_key, std::hash<const Scope*>()(&scope));

      auto it = infer_transfer_scope_cache.find(infer_cache_key);
      if (it != infer_transfer_scope_cache.end()) {
        new_scope = infer_transfer_scope_cache[infer_cache_key];
      } else {
        new_scope = &scope.NewScope();
        infer_transfer_scope_cache[infer_cache_key] = new_scope;
      }
#endif

      if (new_scope == nullptr) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

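// Infers the operator's data type by scanning all of its input tensors,
// enforcing that every initialized input shares the same type.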
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  std::string last_input_name;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Got %s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
          data_type = tmp;
          last_input_name = ipt_name;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

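// Default kernel selection: the common input data type on the current place,
// with the default (plain) layout and library.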
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle