/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Checking whether an operator produces NaN/Inf or not. It will be "
            "extremely slow so please use this flag wisely.");
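// Usage note (hedged): both flags are plain gflags, so with standard gflags
// command-line parsing they can be toggled at launch, e.g.
//   ./some_paddle_binary --benchmark --check_nan_inf=true
// (the binary name is hypothetical). FLAGS_benchmark and FLAGS_check_nan_inf
// are consumed in OperatorWithKernel::RunImpl below.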

namespace paddle {
namespace framework {

std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(ToDataType(tensor.type()));
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(ToDataType(tensor.type()));
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(4) << place << " " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }

// The profiler holds a process-wide mutex, which causes serious performance
// issues in concurrent scenarios. The `if` below avoids taking that lock when
// profiling is disabled. Please do not remove the `if`; ask @Superjomn if
// there is any concern.
#ifndef _WIN32
  if (platform::IsProfileEnabled()) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    platform::RecordEvent record_event(Type(), pool.Get(place));
    RunImpl(scope, place);
  } else  // NOLINT
#endif
  {
    RunImpl(scope, place);
  }
  VLOG(3) << place << " " << DebugStringEx(&scope);
}

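// Illustrative usage (a hedged sketch, not code from this file): an operator
// is normally created through the registry and then driven via Run(). The
// "mul" op and the variable names below are hypothetical.
//
//   auto op = paddle::framework::OpRegistry::CreateOp(
//       "mul", {{"X", {"x"}}, {"Y", {"y"}}}, {{"Out", {"out"}}}, {});
//   op->Run(scope, platform::CPUPlace());
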
bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

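// For reference, the string built above looks roughly like the following
// (a hypothetical "mul" op; the exact dtype/dims/LoD text depends on the
// variables actually present in the scope):
//
//   Op(mul), inputs:{X[x:float[2, 3]({})], Y[y:float[3, 4]({})]},
//   outputs:{Out[out:float[2, 4]({})]}.
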
OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate outputs
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

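// For illustration: assuming kTempVarName is "@TEMP@", a temporary output of
// a "mul" op would be renamed to something like "@TEMP@mul@0", where the
// trailing counter comes from gUniqId.
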
static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var.Type().name());
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one input", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one output", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> const Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return var->GetMutable<LoDTensor>();
                 });
  return res;
}

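// Illustrative usage inside a kernel's Compute() (a hedged sketch; the slot
// names "X", "Y" and "Out" are hypothetical):
//
//   void Compute(const framework::ExecutionContext& ctx) const override {
//     auto* x = ctx.Input<Tensor>("X");       // resolved as LoDTensor
//     auto ys = ctx.MultiInput<Tensor>("Y");  // duplicable input slot
//     auto* out = ctx.Output<Tensor>("Out");
//     // ...
//   }
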
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU.
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = op_.Inputs();
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0 || in[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one input", name);
    return scope_.FindVar(in[0]) != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = op_.Outputs();
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0 || out[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one output", name);
    return scope_.FindVar(out[0]) != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    const std::string& input_n = Inputs(in)[i];
    const std::string& output_n = Outputs(out)[j];

    Variable* in_var = scope_.FindVar(input_n);
    Variable* out_var = scope_.FindVar(output_n);
    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", input_n,
                   output_n);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    const std::vector<std::string>& inputs = Inputs(in);
    const std::vector<std::string>& outputs = Outputs(out);
    PADDLE_ENFORCE_LT(i, inputs.size());
    PADDLE_ENFORCE_LT(j, outputs.size());
    Variable* in_var = scope_.FindVar(inputs.at(i));
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = scope_.FindVar(outputs.at(j));
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter): reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence-related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // FIXME: ugly workaround below.
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. in ShareLoD). Instead,
    //    the layout of the output tensor should be set "manually" in Compute()
    //    of each OpKernel. The reason the layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that a layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when the input layout is kMKLDNN.
    //    This avoids kMKLDNN being populated wrongly into a non-MKLDNN
    //    OpKernel. In all MKLDNN OpKernels, set_layout(kMKLDNN) should be
    //    called in Compute().
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("Only compile time supports this method");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time supports this method");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

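// Hedged sketch of how an operator's InferShape() typically drives this
// context; GetInputDim/SetOutputDim are helpers on the InferShapeContext base
// class, and the op/slot names below are hypothetical:
//
//   void MyOp::InferShape(framework::InferShapeContext* ctx) const {
//     PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must be set.");
//     ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
//     ctx->ShareLoD("X", "Out");
//   }
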
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (!IsType<float>(tensor.type()) && !IsType<double>(tensor.type())) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // Check whether op[type] has a kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter): a kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key =
      this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to the PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // Do data transform.
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars);

  // The exec scope is the scope that the kernel is actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx));

  if (!transfered_inplace_vars.empty()) {
    // Some in-place variables have been transferred; share them back.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /* For profiling/benchmark only. */
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}

void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name));
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
                   var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

Scope* OperatorWithKernel::TryTransferData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, scopes are reused across batches, so a
      // `new_scope` created here would make GPU memory usage explode over the
      // running of the operators.
      // We use a thread_local cache to fix that issue: the key in the cache is
      // the combination of the `scope` argument, from_kernel_type, and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if any
      // change to this logic might not be tested in the other scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it should
      // be called by a NaiveExecutor; the NaiveExecutor caches the scopes and
      // variables, and that behavior is quite different.
      if (!run_by_executor_) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  std::string last_input_name;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get %s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
          data_type = tmp;
          last_input_name = ipt_name;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}
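
// Hedged sketch: operators with special dispatch needs override the defaults
// above. For instance, a hypothetical op that always keys its kernel on the
// dtype of input "X" could write:
//
//   OpKernelType MyOp::GetExpectedKernelType(
//       const ExecutionContext& ctx) const {
//     return OpKernelType(
//         framework::ToDataType(ctx.Input<Tensor>("X")->type()),
//         ctx.GetPlace());
//   }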

}  // namespace framework
}  // namespace paddle