/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define GLOG_NO_ABBREVIATED_SEVERITIES
#define GOOGLE_GLOG_DLL_DECL

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Check whether an operator produces NaN/Inf values. It is "
            "extremely slow, so use this flag wisely.");

namespace paddle {
namespace framework {

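// kKernelPriority lists (place, library) pairs from most to least preferred
// when selecting a kernel: cuDNN on GPU, plain GPU, MKLDNN on CPU, and
// finally plain CPU. The fallback selection that would walk this table is
// still a TODO in OperatorWithKernel::RunImpl() below.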
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

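// Returns the element type (e.g. proto::VarType::FP32) held by a variable
// that wraps either a LoDTensor or a SelectedRows. A minimal usage sketch
// (the variable name "X" is illustrative):
//
//   auto* var = scope.FindVar("X");
//   proto::VarType::Type dtype = GetDataTypeOfVar(var);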
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(ToDataType(tensor.type()));
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(ToDataType(tensor.type()));
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

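// Run() is the public entry point: it validates the place (a GPU place in a
// CPU-only build throws), binds the CUDA device, records a profiler event
// named after the op type, and then dispatches to the virtual RunImpl().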
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(4) << place << " " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  platform::RecordEvent record_event(Type(), pool.Get(place));
  RunImpl(scope, place);
  VLOG(3) << place << " " << DebugStringEx(&scope);
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

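// Builds a one-line description of the op. With a scope, each variable is
// annotated in place: "name[uninited]" for unset variables, otherwise
// name[row_size=..]:dtype[dims](lod) for inputs (outputs omit the dtype).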
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

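// Replaces every output placeholder equal to kTempVarName with a unique name
// of the form <kTempVarName><op_type>@<id>, where <id> comes from a global
// atomic counter, so temporary outputs never collide across ops.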
void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable* var) {
  return var->IsType<LoDTensor>() || var->IsType<SelectedRows>();
}

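// The two helpers below view a variable as a Tensor: a LoDTensor is returned
// directly, while a SelectedRows variable yields its underlying value tensor.
// Even the const version needs a mutable Variable*, because the SelectedRows
// branch has to call mutable_value().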
static const Tensor* GetTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expected LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expected LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

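// HasInput/HasOutput answer "is this single-variable slot bound to a real
// variable in the scope?". They are stricter than OperatorBase::HasInputs/
// HasOutputs, which only check that the slot name exists in the op's maps.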
bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not contain more than one variable.",
                    name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not contain more than one variable.",
                    name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr
                        : GetTensorFromVar(const_cast<Variable*>(var));
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}

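// An op supports GPU when any of its registered kernels runs on a GPU place;
// ops with no registered kernels at all are treated as place-agnostic. A
// minimal usage sketch (the op name is illustrative):
//
//   if (!OpSupportGPU("lookup_table")) {
//     // fall back to a CPU place for this op
//   }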
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU.
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

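// RuntimeInferShapeContext adapts a concrete (op, scope) pair to the
// InferShapeContext interface, so the InferShape() an op writes once can run
// both at compile time against a program description and here at run time
// against real variables (hence IsRuntime() returning true below).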
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = op_.Inputs();
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0 || in[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not contain more than one variable.",
                      name);
    return scope_.FindVar(in[0]) != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = op_.Outputs();
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0 || out[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not contain more than one variable.",
                      name);
    return scope_.FindVar(out[0]) != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

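  // ShareDim copies the shape of input slot `in`'s i-th variable to output
  // slot `out`'s j-th variable: plain dims for LoDTensor, and rows/height
  // plus value dims for SelectedRows. ShareLoD below does the same for LoD.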
  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    const std::string& input_n = Inputs(in)[i];
    const std::string& output_n = Outputs(out)[j];

    Variable* in_var = scope_.FindVar(input_n);
    Variable* out_var = scope_.FindVar(output_n);
    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", output_n,
                   input_n);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    const std::vector<std::string>& inputs = Inputs(in);
    const std::vector<std::string>& outputs = Outputs(out);
    PADDLE_ENFORCE_LT(i, inputs.size());
    PADDLE_ENFORCE_LT(j, outputs.size());
    Variable* in_var = scope_.FindVar(inputs.at(i));
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = scope_.FindVar(outputs.at(j));
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter): reuse ShareLoD in most operators.
// ShareLayout needs to be called explicitly in sequence-related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // FIXME: ugly workaround below.
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. in ShareLoD). Instead,
    //    the layout of the output tensor should be set "manually" in the
    //    Compute() of each OpKernel. The reason the layout should NOT be
    //    shared between input and output "automatically" (now by
    //    InferShape()->ShareLoD()) is that a layout transform may occur
    //    after InferShape().
    // Workaround:
    //    Skip set_layout() when the input layout is kMKLDNN.
    //    This avoids kMKLDNN being wrongly propagated into a non-MKLDNN
    //    OpKernel. In every MKLDNN OpKernel, set_layout(kMKLDNN) should be
    //    called in Compute().
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void ShareLayout(const std::string& in, const std::string& out, size_t i = 0,
                   size_t j = 0) const {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("GetRepeatedDims is only supported at compile time.");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time support this method");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

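// Used under FLAGS_check_nan_inf: only initialized float/double tensors are
// scanned; empty tensors and other dtypes are skipped.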
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (!IsType<float>(tensor.type()) && !IsType<double>(tensor.type())) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

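// RunImpl is the kernel-execution pipeline:
//   1. run InferShape() through a RuntimeInferShapeContext;
//   2. ask the op for its expected kernel type and look the kernel up,
//      with an optional MKLDNN -> plain fallback;
//   3. transfer mismatched inputs into a child scope (TryTransferData);
//   4. invoke the kernel on that scope;
//   5. copy transferred in-place variables back, then honor the
//      benchmark and NaN/Inf debugging flags.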
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // Check whether op[type] has a kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels registered for the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key =
      this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to the PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // Do data transform if needed.
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars);

  // exec_scope is the scope that the kernel actually executes in.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
747
  }
Q
Y
yuyang18 已提交
749
  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx));
D
dzhwinter 已提交
750

Y
yuyang18 已提交
751 752 753
  if (!transfered_inplace_vars.empty()) {
    // there is inplace variable has been transfered.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
754 755
  }

D
dzhwinter 已提交
756
  /*For profiling/benchmark only*/
D
dzhwinter 已提交
757
  if (FLAGS_benchmark) {
Y
yuyang18 已提交
758
    dev_ctx->Wait();
D
dzhwinter 已提交
759
  }
C
chengduoZH 已提交
760 761 762

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
Y
yuyang18 已提交
763
      auto* var = exec_scope.FindVar(vname);
C
chengduoZH 已提交
764 765 766
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
767 768
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
C
chengduoZH 已提交
769 770 771
      }
    }
  }
Q
Y
yuyang18 已提交
773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to it's original scope";
    auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name));
    auto* transformed_tensor =
        GetTensorFromVar(transfer_scope.FindVar(var_name));
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

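// TryTransferData walks every input tensor and, when its (place, layout,
// dtype) disagrees with the expected kernel type, materializes a transformed
// copy in a child scope; it returns nullptr when nothing had to move. Inputs
// that are also outputs are recorded in transfered_inplace_vars so RunImpl
// can copy them back afterwards.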
Scope* OperatorWithKernel::TryTransferData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(var)) {
        continue;
      }

      auto* tensor_in = GetTensorFromVar(var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      if (new_scope == nullptr) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

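// IndicateDataType scans every initialized input tensor and enforces that
// they all share one element type, which becomes the kernel's data type.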
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  std::string last_input_name;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Got %s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
          data_type = tmp;
          last_input_name = ipt_name;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

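// The default kernel choice is "the common input dtype, on the op's place".
// Ops with special needs override GetExpectedKernelType; a hedged sketch of
// such an override (the op name CustomOp is illustrative):
//
//   OpKernelType CustomOp::GetExpectedKernelType(
//       const ExecutionContext& ctx) const {
//     // Always run on CPU regardless of the executor's place.
//     return OpKernelType(IndicateDataType(ctx), platform::CPUPlace());
//   }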
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle