/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Checking whether an operator produces NaN/Inf or not. It will be "
            "extremely slow, so please use this flag wisely.");

namespace paddle {
namespace framework {

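// Kernel lookup priority, from highest to lowest. Note that the selection
// loop that would consume this table is still commented out as a TODO in
// RunImpl() below; actual dispatch currently goes through
// GetExpectedKernelType().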
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

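// Returns the element type of the tensor held by a variable, whether the
// variable stores a LoDTensor directly or wraps one inside a SelectedRows.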
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return var->Get<framework::LoDTensor>().type();
  } else if (var->IsType<framework::SelectedRows>()) {
    return var->Get<framework::SelectedRows>().value().type();
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

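// The static helpers below (GetDims, VarInited, GetDtype, GetRowSize, GetLoD)
// are best-effort introspection utilities used only by DebugStringEx(); they
// return neutral defaults (e.g. DDim({-1}), "", -1) instead of throwing when
// a variable is missing or uninitialized.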
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(tensor.type());
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(tensor.type());
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

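// The entry point shared by all operators: pins the CUDA device when running
// on a GPU place, then dispatches to RunImpl(). The profiling wrapper is only
// entered when the profiler is on; see the comment below about its
// process-wide mutex.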
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(4) << place << " " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }

  // The profiler holds a process-wide mutex, which causes serious performance
  // degradation in concurrent scenarios, so we only enter it behind this
  // `if`. Please do not remove the `if`; ask @Superjomn if there is any
  // concern.
  if (platform::IsProfileEnabled()) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    platform::RecordEvent record_event(Type(), pool.Get(place));
    RunImpl(scope, place);
  } else {
    RunImpl(scope, place);
  }
  VLOG(3) << place << " " << DebugStringEx(&scope);
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

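// Renders the operator as a human-readable string. When a scope is passed,
// every variable name is annotated with its init state, row size (for
// SelectedRows), dtype, dims and LoD, producing roughly (illustrative, not
// verbatim output):
//   Op(mul), inputs:{X[x:float[2, 4]({})], Y[y:float[4, 3]({})]}, ...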
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

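// A variable is "tensor-like" if it yields a Tensor either directly
// (LoDTensor) or through value() (SelectedRows). The two accessors below give
// unified const and mutable access to that underlying tensor.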
static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var.Type().name());
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one inputs", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one inputs", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> const Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return var->GetMutable<LoDTensor>();
                 });
  return res;
}

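// An operator supports GPU if any of its registered kernels runs on a GPU
// place; operators with no registered kernels at all (e.g. control-flow ops)
// are assumed to run anywhere.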
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU.
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

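// InferShapeContext backed by a live Scope: shape queries are answered from
// the actual runtime variables rather than compile-time descriptions, which
// is why the compile-time-only methods (DecreaseLoDLevel, GetRepeatedDims,
// SetRepeatedDims) throw below.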
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = op_.Inputs();
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0 || in[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one input", name);
    return scope_.FindVar(in[0]) != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = op_.Outputs();
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0 || out[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one output", name);
    return scope_.FindVar(out[0]) != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    const std::string& input_n = Inputs(in)[i];
    const std::string& output_n = Outputs(out)[j];

    Variable* in_var = scope_.FindVar(input_n);
    Variable* out_var = scope_.FindVar(output_n);
    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", output_n,
                   input_n);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    const std::vector<std::string>& inputs = Inputs(in);
    const std::vector<std::string>& outputs = Outputs(out);
    PADDLE_ENFORCE_LT(i, inputs.size());
    PADDLE_ENFORCE_LT(j, outputs.size());
    Variable* in_var = scope_.FindVar(inputs.at(i));
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = scope_.FindVar(outputs.at(j));
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter): reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence-related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN being wrongly populated into a non-MKLDNN
    //    OpKernel. In every MKLDNN OpKernel, set_layout(kMKLDNN) should be
    //    called in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void DecreaseLoDLevel(const std::string& in, const std::string& out,
                        size_t i = 0, size_t j = 0) const override {
    PADDLE_THROW("DecreaseLoDLevel is only used in compile time.");
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("GetRepeatedDims is only supported at compile time.");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
Y
                   name, var->Type().name());
657 658 659
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("SetRepeatedDims is only supported at compile time.");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

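// Used when FLAGS_check_nan_inf is set. Only FP32/FP64 tensors with allocated
// memory are scanned; other dtypes are silently skipped.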
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (tensor.type() != proto::VarType::FP32 &&
      tensor.type() != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

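// The kernel-based execution pipeline:
//   1. run InferShape() against the live scope,
//   2. pick a kernel via GetExpectedKernelType(),
//   3. move mismatching inputs into a transfer scope (TryTransferData),
//   4. run the kernel on the resulting exec scope,
//   5. share transferred in-place variables back, then run the optional
//      benchmark wait and NaN/Inf checks.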
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has a kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key =
      this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to the PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform; transformed inputs are placed in a transfer scope.
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars);

  // exec scope is the scope that the kernel is actually executed in.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
749
  }
Q
Y
yuyang18 已提交
751
  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx));
D
dzhwinter 已提交
752

Y
yuyang18 已提交
753 754 755
  if (!transfered_inplace_vars.empty()) {
    // there is inplace variable has been transfered.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}
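
// Shares each transferred in-place variable's transformed tensor back into
// its original-scope counterpart, so in-place semantics hold across the
// transfer scope.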
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name));
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
                   var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

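// Walks every input tensor and, whenever its place/layout/dtype disagrees
// with the expected kernel type, copies a transformed version into a
// (possibly cached) transfer scope. Returns nullptr when nothing needed a
// transform. Inputs that are also outputs are recorded in
// transfered_inplace_vars so RunImpl() can share them back afterwards.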
Scope* OperatorWithKernel::TryTransferData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, the scopes will be reused across batches,
      // so creating a `new_scope` here would make GPU memory usage grow
      // without bound as the operators run.
      // We use a thread_local cache to fix that issue; the key in the cache is
      // the combination of the `scope` argument, from_kernel_type, and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers before
      // changing this logic, since a change might not be tested in the other
      // scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it should
      // be called by a NaiveExecutor; the NaiveExecutor caches the scopes and
      // variables, which behaves quite differently.
      if (!run_by_executor_) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

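// Infers the operator's data type by scanning every initialized input tensor;
// enforces that all inputs agree on a single type and throws if none of them
// carries one.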
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  std::string last_input_name;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(t->type());
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get %s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
          data_type = tmp;
          last_input_name = ipt_name;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

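// Default dispatch policy: the expected kernel type is the input data type on
// the context's place, and a per-variable kernel type keeps the expected data
// type but adopts the tensor's own place and layout. Concrete operators may
// override both to customize dispatch (e.g. to request MKLDNN or CUDNN
// kernels).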
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle