/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define GLOG_NO_ABBREVIATED_SEVERITIES
#define GOOGLE_GLOG_DLL_DECL

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Checks whether an operator produces NaN/Inf or not. It is "
            "extremely slow, so please use this flag wisely.");

namespace paddle {
namespace framework {

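// Candidate (place, library) pairs for kernel selection, ordered from most to
// least preferred; see the kernel-fallback TODO in OperatorWithKernel::RunImpl.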
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};
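// Returns the proto data type of the tensor held by `var`, which must contain
// either a LoDTensor or a SelectedRows.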
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

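// Returns the dims of the variable `name` in `scope`, or DDim({-1}) when the
// variable is absent or uninitialized. For SelectedRows, `get_actual_dim`
// selects the value dims instead of the complete dims.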
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(ToDataType(tensor.type()));
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(ToDataType(tensor.type()));
    }
  } else {
    return "";
  }
}

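// Returns the row count for SelectedRows variables, and -1 otherwise.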
static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

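// Returns the LoD of the variable `name`, or an empty LoD when it is absent,
// uninitialized, or not a LoDTensor.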
static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(40) << place << " " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }

  // The profiler holds a process-wide mutex, which causes serious performance
  // problems in concurrent scenarios, so it is only touched when profiling is
  // enabled. Please do not remove the `if`; ask @Superjomn if there is any
  // concern.
  if (platform::IsProfileEnabled()) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    platform::RecordEvent record_event(Type(), pool.Get(place));
    RunImpl(scope, place);
  } else {
    RunImpl(scope, place);
  }
  VLOG(30) << place << " " << DebugStringEx(&scope);
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

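// Pretty-prints the operator and, when a scope is given, annotates every
// input/output variable with its row size, dtype, dims, and LoD.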
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate outputs
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

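// Enforces that every non-dispensable input and output declared in the op
// proto has actually been set on this operator.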
void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

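// Extracts the underlying Tensor from a Variable holding either a LoDTensor or
// a SelectedRows (in which case its value tensor is returned).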
const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var.Type().name());
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

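// HasInput/HasOutput report whether the single variable bound to the given
// slot exists in the scope; both enforce that the slot holds at most one
// variable.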
bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one input", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one output", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> const Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return var->GetMutable<LoDTensor>();
                 });
  return res;
}

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

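// InferShapeContext implementation backed by a live Scope, used when shapes
// are inferred at run time (IsRuntime() returns true).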
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = op_.Inputs();
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0 || in[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one input", name);
    return scope_.FindVar(in[0]) != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = op_.Outputs();
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0 || out[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one output", name);
    return scope_.FindVar(out[0]) != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

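  // Shares the dims of input `in[i]` with output `out[j]`; for SelectedRows
  // the rows and height are shared as well.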
  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    const std::string& input_n = Inputs(in)[i];
    const std::string& output_n = Outputs(out)[j];

    Variable* in_var = scope_.FindVar(input_n);
    Variable* out_var = scope_.FindVar(output_n);
    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", input_n,
                   output_n);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

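  // Shares the LoD (and, except for MKLDNN-layout inputs, the layout) of
  // LoDTensor input `in[i]` with output `out[j]`.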
  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    const std::vector<std::string>& inputs = Inputs(in);
    const std::vector<std::string>& outputs = Outputs(out);
    PADDLE_ENFORCE_LT(i, inputs.size());
    PADDLE_ENFORCE_LT(j, outputs.size());
    Variable* in_var = scope_.FindVar(inputs.at(i));
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = scope_.FindVar(outputs.at(j));
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("Only compile time support this method");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time support this method");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

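// Enforces that a float/double tensor contains neither Inf nor NaN; other
// dtypes and empty tensors are skipped.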
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (!IsType<float>(tensor.type()) && !IsType<double>(tensor.type())) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

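// Runs shape inference, selects a kernel from the expected kernel type (with
// an MKLDNN-to-plain fallback), transfers input data across places/layouts
// when needed, invokes the kernel, and finally copies any transferred in-place
// variables back to the original scope.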
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // Check whether op[type] has a kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels registered for the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key =
      this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx));
  VLOG(30) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(30) << "missing MKLDNN kernel: falling back to the PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // Do data transform. Inputs that need transforming are copied into
  // transfer_scope, and transferred in-place variables are recorded so they
  // can be copied back after the kernel runs.
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars);

  // The exec scope is the scope that the kernel is actually executed in.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx));

  if (!transfered_inplace_vars.empty()) {
    // Some in-place variables have been transferred; copy them back.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /* For profiling/benchmark only */
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}
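// Copies in-place variables that were transferred into transfer_scope back
// into their original scope.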
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(30) << "share inplace var " + var_name +
                    " back to its original scope";
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name));
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
                   var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

Scope* OperatorWithKernel::TryTransferData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(30) << "Transform Variable " << var_name << " from "
               << kernel_type_for_var << " to " << expected_kernel_key;

      if (new_scope == nullptr) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

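// Infers the operator's data type by scanning every initialized input tensor
// and enforcing that they all agree.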
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  std::string last_input_name;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get %s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
          data_type = tmp;
          last_input_name = ipt_name;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

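// Default kernel-type deduction: take the data type from the inputs and the
// place from the execution context; the per-variable kernel type keeps the
// tensor's own place and layout.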
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle