/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define GLOG_NO_ABBREVIATED_SEVERITIES
#define GOOGLE_GLOG_DLL_DECL

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"
DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Checking whether operators produce NAN/INF or not. It will be "
            "extremely slow, so please use this flag wisely.");
namespace paddle {
namespace framework {

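// Kernel preference order used for (planned) automatic kernel selection:
// entries earlier in the list are preferred when more than one kernel could
// serve an operator. See the kernel-fallback TODO in RunImpl below.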
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}
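
// Illustrative only: callers typically use the returned enum to dispatch on
// the runtime element type of a variable, e.g.
//   if (GetDataTypeOfVar(var) == proto::VarType::FP32) { /* float path */ }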

static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(ToDataType(tensor.type()));
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(ToDataType(tensor.type()));
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(4) << place << " " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }

  // The profiler has a process-wide mutex, which results in a serious
  // performance issue in concurrency scenarios. Here we use an `if` to fix
  // this issue. Please do not remove the `if`; ask @Superjomn if there is
  // any concern.
#ifndef _WIN32
  if (platform::IsProfileEnabled()) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    platform::RecordEvent record_event(Type(), pool.Get(place));
    RunImpl(scope, place);
  } else
#endif
  {
    RunImpl(scope, place);
  }
  VLOG(3) << place << " " << DebugStringEx(&scope);
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}
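
// Example of the produced string (illustrative, not verbatim output):
//   Op(mul), inputs:{X[x:float[64, 100]({})], Y[w:float[100, 10]({})]},
//   outputs:{Out[out[uninited]]}.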

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}
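
// Illustrative example (assuming kTempVarName is "@TEMP@"): an output slot
// holding "@TEMP@" for a "mul" op becomes something like "@TEMP@mul@0",
// where the trailing counter comes from the process-wide atomic gUniqId.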

static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetTensorFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var.Type().name());
  }
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one variable", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one variable", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr : GetTensorFromVar(*var);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(*var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}
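
// Illustrative only: a device-placement pass (e.g. one exposed to Python)
// might use this as
//   if (!OpSupportGPU(op->Type())) { /* keep the op on CPU */ }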

class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = op_.Inputs();
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0 || in[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one variable", name);
    return scope_.FindVar(in[0]) != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = op_.Outputs();
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0 || out[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one variable", name);
    return scope_.FindVar(out[0]) != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    const std::string& input_n = Inputs(in)[i];
    const std::string& output_n = Outputs(out)[j];

    Variable* in_var = scope_.FindVar(input_n);
    Variable* out_var = scope_.FindVar(output_n);
    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", input_n,
                   output_n);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }
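
  // Note: ShareDim copies shape metadata only (dims, plus rows/height for
  // SelectedRows); it does not share the underlying tensor memory.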

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    const std::vector<std::string>& inputs = Inputs(in);
    const std::vector<std::string>& outputs = Outputs(out);
    PADDLE_ENFORCE_LT(i, inputs.size());
    PADDLE_ENFORCE_LT(j, outputs.size());
    Variable* in_var = scope_.FindVar(inputs.at(i));
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = scope_.FindVar(outputs.at(j));
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN being populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPKernels, set_layout(kMKLDNN) should be
    //    called in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("Only compile time supports this method");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time supports this method");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (!IsType<float>(tensor.type()) && !IsType<double>(tensor.type())) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}
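
// Overview of RunImpl below (descriptive summary, not new behavior):
//   1. run InferShape through a RuntimeInferShapeContext;
//   2. pick the kernel matching GetExpectedKernelType (a missing MKLDNN
//      kernel may fall back to a plain CPU kernel);
//   3. let TryTransferData transform inputs whose actual kernel type differs
//      from the expected one, possibly into a child scope;
//   4. run the kernel on that scope, copy transferred in-place variables
//      back, then run the optional benchmark / NaN-Inf checks.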

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key =
      this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform; transformed inputs go into transfer_scope.
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars);

  // exec scope is the scope that the kernel actually executes on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx));

  if (!transfered_inplace_vars.empty()) {
    // some in-place variables have been transferred; copy them back.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}

void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name));
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
                   var_name);
    auto* transformed_tensor = GetTensorFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

Scope* OperatorWithKernel::TryTransferData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetTensorFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      if (new_scope == nullptr) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}
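
// Note: when TryTransferData creates a child scope, the transferred inputs
// live in that scope and the kernel runs there; RunImpl later calls
// TransferInplaceVarsBack so that in-place outputs reach the original scope.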
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  std::string last_input_name;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get %s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
          data_type = tmp;
          last_input_name = ipt_name;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}
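
// Behavior note: all initialized tensor inputs must share one element type;
// e.g. an op fed two FP32 tensors infers FP32, while mixing FP32 and FP64
// inputs trips the PADDLE_ENFORCE above.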
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle