/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define GLOG_NO_ABBREVIATED_SEVERITIES
#define GOOGLE_GLOG_DLL_DECL

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Check whether an operator produces NaN/Inf values. The check "
            "is extremely slow, so use this flag wisely.");

namespace paddle {
namespace framework {

std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}
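// GetDataTypeOfVar usage sketch (the scope and the variable name "X" below
// are hypothetical): given an initialized LoDTensor variable, its data type
// can be queried and printed like this:
//
//   const Variable* var = scope.FindVar("X");
//   proto::VarType::Type dtype = GetDataTypeOfVar(var);
//   VLOG(3) << "X holds " << DataTypeToString(dtype);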

static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(ToDataType(tensor.type()));
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(ToDataType(tensor.type()));
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(4) << place << " " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }
  VLOG(3) << "start pool";
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  platform::RecordEvent record_event(Type(), pool.Get(place));
  VLOG(3) << "start RunImpl";
  RunImpl(scope, place);
  VLOG(3) << place << " " << DebugStringEx(&scope);
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}
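// For a hypothetical `mul` op, DebugStringEx(&scope) produces a line roughly
// like the one below (exact dim/LoD text depends on the DDim and LoD stream
// operators; dtype is printed for inputs only):
//
//   Op(mul), inputs:{X[x:float[2, 3]({})], Y[y:float[3, 4]({})]},
//   outputs:{Out[out[2, 4]({})]}.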

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate outputs
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}
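// GenerateTemporaryNames example: every output slot named kTempVarName is
// rewritten in place to kTempVarName + type_ + "@" + <unique id>; e.g. the
// first such output of a hypothetical `mul` op ends in "mul@0".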

static bool VarIsTensor(const Variable* var) {
  return var->IsType<LoDTensor>() || var->IsType<SelectedRows>();
}

static const Tensor* GetTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one input", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one output", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr
                        : GetTensorFromVar(const_cast<Variable*>(var));
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}
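// OpSupportGPU usage sketch: a caller could pick a place per op before
// fetching a device context (the op name below is illustrative):
//
//   platform::Place place = OpSupportGPU("lookup_table")
//                               ? platform::Place(platform::CUDAPlace(0))
//                               : platform::Place(platform::CPUPlace());
//   auto* dev_ctx = platform::DeviceContextPool::Instance().Get(place);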

class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto& ins = Inputs(name);
    size_t length = ins.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Input %s should not have more than one input", name);
    auto ipt = ins[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto& outs = Outputs(name);
    size_t length = outs.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Output %s should not have more than one output", name);
    auto ipt = outs[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void ShareLayout(const std::string& in, const std::string& out, size_t i = 0,
                   size_t j = 0) const {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("This method is only supported at compile time");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
Y
                   name, var->Type().name());
626 627 628
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("This method is only supported at compile time");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (!IsType<float>(tensor.type()) && !IsType<double>(tensor.type())) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}
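// CheckTensorNANOrInf is only invoked from RunImpl below when the
// check_nan_inf gflag is on (with Paddle's Python frontend this is typically
// toggled through the FLAGS_check_nan_inf environment variable); tensors
// whose element type is neither float nor double are silently skipped.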

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  VLOG(3) << "start Infershape";
  this->InferShape(&infer_shape_ctx);
  VLOG(3) << "Infershape Pass";
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has a kernel registered.
  VLOG(3) << "Start Kernels";
  auto& all_op_kernels = AllOpKernels();
  VLOG(3) << "Kernel map finish";
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels registered for the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }
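  //
  // A minimal sketch of that selection, assuming the input data type has
  // already been resolved (`data_type` below is hypothetical):
  //
  //   for (auto& candidate : kKernelPriority) {
  //     OpKernelType candidate_key(data_type, std::get<0>(candidate),
  //                                DataLayout::kAnyLayout,
  //                                std::get<1>(candidate));
  //     auto it = kernels.find(candidate_key);
  //     if (it != kernels.end()) break;  // found: it->second is the kernel
  //   }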

  auto expected_kernel_key =
      this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx));
  VLOG(3) << "expected_kernel_key: " << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to the PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars);

  // exec scope is the scope that kernel actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx));

  if (!transfered_inplace_vars.empty()) {
    // some in-place variables were transferred; share them back into the
    // original scope.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name));
    auto* transformed_tensor =
        GetTensorFromVar(transfer_scope.FindVar(var_name));
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

Scope* OperatorWithKernel::TryTransferData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(var)) {
        continue;
      }

      auto* tensor_in = GetTensorFromVar(var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      if (new_scope == nullptr) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}
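// TryTransferData example: if an input tensor lives on the CPU but the
// expected kernel key names a CUDA place (or a different layout/dtype), the
// tensor is copied into a child scope and the kernel later runs against that
// scope; in-place outputs are copied back afterwards by
// TransferInplaceVarsBack.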

proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  std::string last_input_name;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same across inputs. Got "
              "%s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
          data_type = tmp;
          last_input_name = ipt_name;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}
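// Override sketch: a concrete operator can request a specific library or
// layout by overriding GetExpectedKernelType; assuming a hypothetical
// MyConvOp subclass of OperatorWithKernel:
//
//   OpKernelType MyConvOp::GetExpectedKernelType(
//       const ExecutionContext& ctx) const {
//     return OpKernelType(IndicateDataType(ctx), ctx.GetPlace(),
//                         DataLayout::kAnyLayout, LibraryType::kCUDNN);
//   }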

}  // namespace framework
}  // namespace paddle