/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Checking whether operator produce NAN/INF or not. It will be "
            "extremely slow so please use this flag wisely.");
DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");
DEFINE_int32(min_param_size_to_use_multithread, 0,
             "minimum parameter size required before multi-threading is used "
             "inside an op");
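
// These flags are standard gflags: a binary that links this file can set them
// on the command line (e.g. --check_nan_inf=true --inner_op_parallelism=4) or
// assign the corresponding FLAGS_* globals in code.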

namespace paddle {
namespace framework {

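// kKernelPriority lists (place, library) pairs from most to least preferred.
// The fallback pass that would walk this list during kernel selection is not
// implemented yet; see the commented-out candidate loop in
// OperatorWithKernel::RunImpl below.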
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}
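
// A minimal usage sketch for GetDataTypeOfVar (the variable name below is
// illustrative, not defined in this file):
//   auto* var = scope.FindVar("fc_0.w_0");
//   proto::VarType::Type dtype = GetDataTypeOfVar(var);
//   VLOG(3) << "dtype: " << DataTypeToString(dtype);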

static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(ToDataType(tensor.type()));
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(ToDataType(tensor.type()));
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(4) << place << " " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }

  // The profiler holds a process-wide mutex, which causes serious performance
  // issues in concurrent scenarios. The `if` below avoids entering it when
  // profiling is disabled. Please do not remove the `if`; ask @Superjomn if
  // there is any concern.
  if (platform::IsProfileEnabled()) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    platform::RecordEvent record_event(Type(), pool.Get(place));
    RunImpl(scope, place);
  } else {
    RunImpl(scope, place);
  }
  VLOG(3) << place << " " << DebugStringEx(&scope);
}
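
// Sketch of how a caller typically drives an operator; OpRegistry::CreateOp
// lives in op_registry.h, and op_desc/scope are illustrative locals:
//   auto op = OpRegistry::CreateOp(op_desc);
//   op->Run(scope, platform::CPUPlace());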

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}
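
// DebugStringEx above renders one line per operator, of roughly this shape
// (illustrative; the exact dims/LoD text comes from the DDim/LoD stream
// operators):
//   Op(elementwise_add), inputs:{X[x:float[2, 3]({})], Y[y:float[2, 3]({})]},
//   outputs:{Out[out:float[2, 3]({})]}.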

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}
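
// Example of the renaming above: with type_ == "mul", an output slot holding
// kTempVarName ("@TEMP@") becomes "@TEMP@mul@0", "@TEMP@mul@1", ...; the
// counter is a process-wide atomic.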

static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var.Type().name());
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one variable", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one variable", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}
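
// Kernels use HasInput/HasOutput above to guard optional (dispensable) slots,
// e.g. (a sketch; "Bias" is an illustrative slot name):
//   if (ctx.HasInput("Bias")) {
//     auto* bias = ctx.Input<Tensor>("Bias");
//     ...
//   }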

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> const Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return var->GetMutable<LoDTensor>();
                 });
  return res;
}
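
// Inside an OpKernel's Compute(), the specializations above are typically
// used as follows (a sketch; the slot names "X" and "Out" are illustrative):
//   auto* x = ctx.Input<Tensor>("X");            // single-variable slot
//   auto outs = ctx.MultiOutput<Tensor>("Out");  // multi-variable slot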

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}
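
// Sketch: OpSupportGPU("while") is true because control-flow operators
// register no kernels at all, while OpSupportGPU("conv2d") reflects whether
// any conv2d kernel is registered on a GPU place.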

class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = op_.Inputs();
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0 || in[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one variable", name);
    return scope_.FindVar(in[0]) != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = op_.Outputs();
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0 || out[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one variable", name);
    return scope_.FindVar(out[0]) != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    const std::string& input_n = Inputs(in)[i];
    const std::string& output_n = Outputs(out)[j];

    Variable* in_var = scope_.FindVar(input_n);
    Variable* out_var = scope_.FindVar(output_n);
    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", output_n,
                   input_n);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    const std::vector<std::string>& inputs = Inputs(in);
    const std::vector<std::string>& outputs = Outputs(out);
    PADDLE_ENFORCE_LT(i, inputs.size());
    PADDLE_ENFORCE_LT(j, outputs.size());
    Variable* in_var = scope_.FindVar(inputs.at(i));
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = scope_.FindVar(outputs.at(j));
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void DecreaseLoDLevel(const std::string& in, const std::string& out,
                        size_t i = 0, size_t j = 0) const override {
    PADDLE_THROW("DecreaseLoDLevel is only used in compile time.");
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("GetRepeatedDims is only supported at compile time.");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("SetRepeatedDims is only supported at compile time.");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (!IsType<float>(tensor.type()) && !IsType<double>(tensor.type())) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
}

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // Check whether op[type] has a kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key =
      this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to the PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // Do the data transform; this may create a transfer scope.
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars);

  // exec_scope is the scope that the kernel is actually executed in.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx));

  if (!transfered_inplace_vars.empty()) {
    // Some in-place variables have been transferred; share them back.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /* For profiling/benchmark only */
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name));
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
                   var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

Scope* OperatorWithKernel::TryTransferData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, the scopes will be reused across batches,
      // so the `new_scope` here would result in GPU memory explosion over the
      // running of operators.
      // We use a thread_local cache to fix that issue: the key in the cache is
      // the combination of the `scope` argument, from_kernel_type, and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if you
      // change this logic, since it might not be tested in the other
      // scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it should
      // be called by a NaiveExecutor; the NaiveExecutor will cache the scopes
      // and variables, and that behavior is a lot different.
      if (!run_by_executor_) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  std::string last_input_name;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          PADDLE_ENFORCE(t->IsInitialized(), "Input %s is not initialized: %s",
                         ipt_name, DebugString());
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get %s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
          data_type = tmp;
          last_input_name = ipt_name;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}
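
// Operators with special dispatch needs (e.g. cuDNN or MKLDNN variants)
// override GetExpectedKernelType / GetKernelTypeForVar. The defaults above
// infer the kernel data type from the inputs and keep each input variable's
// place and layout unchanged.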

}  // namespace framework
}  // namespace paddle