/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#define GLOG_NO_ABBREVIATED_SEVERITIES
#define GOOGLE_GLOG_DLL_DECL

#include "paddle/fluid/framework/operator.h"
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <algorithm>
#include <atomic>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Check whether operators produce NaN/Inf values. This check is "
            "extremely slow, so please use the flag wisely.");

namespace paddle {
namespace framework {

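// Reference kernel fallback order: CUDNN on GPU first, then a plain GPU
// kernel, then MKLDNN on CPU, then a plain CPU kernel.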
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

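// Returns the element type held by a variable, which must wrap either a
// LoDTensor or a SelectedRows.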
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

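// Returns the dims of the tensor named `name` for debug output, or
// DDim({-1}) when the variable is absent, uninitialized, or of another type.
// For SelectedRows, `get_actual_dim` selects the value dims instead of the
// complete dims.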
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

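// Returns a printable dtype string for debug output; empty (or "uninited")
// when the variable is absent or holds no initialized tensor.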
static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(ToDataType(tensor.type()));
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(ToDataType(tensor.type()));
    }
  } else {
    return "";
  }
}

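// Returns the row count of a SelectedRows variable, or -1 for anything else.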
static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

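// Returns the LoD of a LoDTensor variable, or an empty LoD otherwise.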
static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

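// Binds the device, optionally records a profiler event around RunImpl(),
// and enriches any EnforceNotMet error with the Python call stack captured
// at op-creation time.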
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  try {
    if (VLOG_IS_ON(4)) {
      VLOG(4) << place << " " << DebugStringEx(&scope);
    }
    if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
      PADDLE_THROW("Cannot run operator on place %s", place);
#else
      auto dev_id = boost::get<platform::CUDAPlace>(place).device;
      platform::SetDeviceId(dev_id);
#endif
    }

    if (platform::IsProfileEnabled()) {
      platform::DeviceContextPool& pool =
          platform::DeviceContextPool::Instance();
      // Keep the RecordEvent alive across RunImpl(); a profiling scope that
      // closes before the kernel runs would record nothing useful.
      platform::RecordEvent record_event(Type(), pool.Get(place));
      RunImpl(scope, place);
    } else {
      RunImpl(scope, place);
    }

    if (VLOG_IS_ON(3)) {
      VLOG(3) << place << " " << DebugStringEx(&scope);
    }
  } catch (platform::EnforceNotMet exception) {
    if (Attrs().count("sub_block") != 0) {
      throw exception;
    }

    auto& callstack = Attr<std::vector<std::string>>(
        OpProtoAndCheckerMaker::OpCreationCallstackAttrName());

    if (callstack.empty()) {
      throw exception;
    }
    std::ostringstream sout;
    sout << "An error occurred while running operator " << Type() << ".\n";
    sout << "Python Callstacks: \n";
    for (auto& line : callstack) {
      sout << line;
    }
    sout << "C++ Callstacks: \n";
    sout << exception.err_str_;
    exception.err_str_ = sout.str();
    throw exception;
  } catch (...) {
    std::rethrow_exception(std::current_exception());
  }
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

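// Pretty-prints the operator and its arguments; when a scope is supplied,
// each variable is annotated with its runtime state (row size, dtype, dims,
// LoD).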
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable* var) {
  return var->IsType<LoDTensor>() || var->IsType<SelectedRows>();
}

static const Tensor* GetTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable holds type_id %s; expected LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable holds type_id %s; expected LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one input", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one output", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr
                        : GetTensorFromVar(const_cast<Variable*>(var));
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}

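// A kernel typically reaches its arguments through the specializations
// above; a sketch ("X" and "Out" are illustrative argument names, not ones
// defined in this file):
//   const Tensor* x = ctx.Input<Tensor>("X");
//   Tensor* out = ctx.Output<Tensor>("Out");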
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU.
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

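// InferShapeContext backed by real variables in a Scope, used when shapes
// are inferred at run time rather than from the compile-time program
// description (hence IsRuntime() returns true).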
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = op_.Inputs();
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0 || in[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one input", name);
    return scope_.FindVar(in[0]) != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = op_.Outputs();
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0 || out[0] == kEmptyVarName) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one output", name);
    return scope_.FindVar(out[0]) != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to shared info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void ShareLayout(const std::string& in, const std::string& out, size_t i = 0,
                   size_t j = 0) const {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("GetRepeatedDims is only supported at compile time.");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW(
          "Variable %s holds type_id %s; expected LoDTensor/SelectedRows.",
          name, var->Type().name());
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("SetRepeatedDims is only supported at compile time.");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

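// Asserts that a float/double tensor contains neither Inf nor NaN values;
// empty tensors and other dtypes are skipped.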
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (!IsType<float>(tensor.type()) && !IsType<double>(tensor.type())) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

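// Kernel dispatch: infer shapes, pick the expected kernel (with an MKLDNN
// fallback to a plain kernel), transfer inputs that need a different
// place/layout/dtype into a child scope, run the kernel there, and copy
// transformed in-place outputs back.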
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels registered for the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key =
      this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to the plain one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // Do data transform: inputs that need it are copied into a transfer scope.
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars);

  // exec scope is the scope that kernel actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx));

  if (!transfered_inplace_vars.empty()) {
    // Some in-place variables were transferred; share them back.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name));
    auto* transformed_tensor =
        GetTensorFromVar(transfer_scope.FindVar(var_name));
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

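// Walks every input; any tensor whose kernel type differs from the expected
// one gets a transformed copy in a child scope. Returns that scope, or
// nullptr when nothing needed transforming; transformed in-place variables
// are reported through `transfered_inplace_vars`.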
Scope* OperatorWithKernel::TryTransferData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(var)) {
        continue;
      }

      auto* tensor_in = GetTensorFromVar(var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      if (new_scope == nullptr) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

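// Scans all initialized input tensors and returns their common element type,
// enforcing that every input agrees.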
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  std::string last_input_name;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of inputs to Paddle Op %s must all be the same. Got "
              "%s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
          data_type = tmp;
          last_input_name = ipt_name;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle