/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Checking whether an operator produces NAN/INF or not. It will be "
            "extremely slow, so please use this flag wisely.");

namespace paddle {
namespace framework {

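// Kernel (place, library) candidates ordered from highest to lowest
// priority; intended for the kernel fallback selection sketched (but not
// yet enabled) in OperatorWithKernel::RunImpl.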
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

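// Debug helper: returns the dims of variable `name` in `scope`, or
// DDim({-1}) when the variable is absent, uninitialized, or of an
// unsupported type.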
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(ToDataType(tensor.type()));
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(ToDataType(tensor.type()));
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

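// Common entry point for all operators: pins the CUDA device when running
// on a GPU place, records a profiler event, then dispatches to RunImpl().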
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(4) << place << " " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  platform::RecordEvent record_event(Type(), pool.Get(place));
  RunImpl(scope, place);
  VLOG(3) << place << " " << DebugStringEx(&scope);
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

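// Builds a human-readable description of the operator. When a scope is
// given, every variable is annotated with its initialization state, row
// size (SelectedRows only), dtype (inputs only), dims, and LoD.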
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate outputs
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable* var) {
  return var->IsType<LoDTensor>() || var->IsType<SelectedRows>();
}

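// Returns the Tensor stored in a Variable: the LoDTensor itself, or the
// value tensor of a SelectedRows. Throws for any other variable type.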
static const Tensor* GetTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

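// HasInput/HasOutput report whether a single-variable slot exists in the
// scope; both enforce that the slot holds at most one variable.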
bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one variable", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one variable", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr
                        : GetTensorFromVar(const_cast<Variable*>(var));
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

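// InferShapeContext implementation used at runtime: dims, LoD, and var
// types are read from (and written to) real variables in a Scope rather
// than from the compile-time program description.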
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = op_.Inputs();
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() != 1 || in[0] == kEmptyVarName) {
      return false;
    }
    return scope_.FindVar(in[0]) != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = op_.Outputs();
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() != 1 || out[0] == kEmptyVarName) {
      return false;
    }
    return scope_.FindVar(out[0]) != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN being populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPKernels, set_layout(kMKLDNN) should be
    //    called in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void ShareLayout(const std::string& in, const std::string& out, size_t i = 0,
                   size_t j = 0) const {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("This method is only supported at compile time.");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time support this method");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

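// Asserts that `tensor` holds no Inf/NaN values. Empty tensors and dtypes
// other than float/double are skipped.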
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (!IsType<float>(tensor.type()) && !IsType<double>(tensor.type())) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

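// Core execution path for operators with kernels: run InferShape, pick the
// kernel matching the expected OpKernelType (with an MKLDNN fallback),
// transfer inputs between places/layouts if necessary, invoke the kernel,
// then apply the optional benchmark wait and NaN/Inf checks.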
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels registered for the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key =
      this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars);

  // exec_scope is the scope that the kernel is actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx));

  if (!transfered_inplace_vars.empty()) {
    // some in-place variables have been transferred; share them back to the
    // original scope.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /* For profiling/benchmark only */
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}
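// Share the transformed tensors of in-place variables from the transfer
// scope back into their original variables, preserving in-place semantics.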
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to it's original scope";
    auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name));
    auto* transformed_tensor =
        GetTensorFromVar(transfer_scope.FindVar(var_name));
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

Scope* OperatorWithKernel::TryTransferData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(var)) {
        continue;
      }

      auto* tensor_in = GetTensorFromVar(var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      if (new_scope == nullptr) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

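// Infers the operator's data type from its input tensors, enforcing that
// all initialized inputs share the same type.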
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  std::string last_input_name;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get %s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
          data_type = tmp;
          last_input_name = ipt_name;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

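// Default kernel dispatch: key the expected kernel on the data type
// inferred from the inputs and the current place, and keep each variable's
// own place and layout when deciding per-variable transforms.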
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle