/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Check whether an operator produces NAN/INF or not. It will be "
            "extremely slow, so please use this flag wisely.");

namespace paddle {
namespace framework {

std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};
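
// kKernelPriority is consulted from highest to lowest priority: CUDNN on
// GPU, plain GPU, MKLDNN on CPU, then plain CPU. The fallback selection loop
// is still a TODO in RunImpl below; a minimal sketch of how it could consume
// this table (HasKernelFor is a hypothetical helper, not part of this file):
//
//   for (auto& candidate : kKernelPriority) {
//     platform::Place place;
//     LibraryType library;
//     std::tie(place, library) = candidate;
//     if (HasKernelFor(place, library)) {
//       // pick this (place, library) pair and stop searching
//       break;
//     }
//   }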

proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}
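
// The helpers below (GetDims, GetDtype, GetRowSize, GetLoD) only feed
// DebugStringEx: they deliberately return sentinel values (DDim({-1}), an
// empty string, -1, or an empty LoD) instead of throwing when a variable is
// missing or uninitialized, so debug printing never aborts a run.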

static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsInitialized()) {
    if (var->IsType<LoDTensor>()) {
      const LoDTensor& tensor = var->Get<LoDTensor>();
      if (tensor.IsInitialized()) {
        return tensor.dims();
      } else {
        return DDim({-1});
      }
    } else if (var->IsType<SelectedRows>()) {
      if (get_actual_dim) {
        return var->Get<SelectedRows>().value().dims();
      } else {
        return var->Get<SelectedRows>().GetCompleteDims();
      }
    } else {
      return DDim({-1});
    }
  } else {
    return DDim({-1});
  }
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsInitialized()) {
    if (var->IsType<LoDTensor>()) {
      const LoDTensor& tensor = var->Get<LoDTensor>();
      if (tensor.IsInitialized()) {
        return DataTypeToString(ToDataType(tensor.type()));
      } else {
        return "";
      }
    } else if (var->IsType<SelectedRows>()) {
      return DataTypeToString(
          ToDataType(var->Get<SelectedRows>().value().type()));
    } else {
      return "";
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsInitialized()) {
    if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().rows().size();
    }
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsInitialized()) {
    if (var->IsType<LoDTensor>()) {
      const LoDTensor& tensor = var->Get<LoDTensor>();
      if (tensor.IsInitialized()) {
        return tensor.lod();
      } else {
        return default_lod;
      }
    } else {
      return default_lod;
    }
  } else {
    return default_lod;
  }
}

void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(10) << "- " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }
  RunImpl(scope, place);
  VLOG(10) << "+ " << DebugStringEx(&scope);
}
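
// Example of driving an operator through OperatorBase::Run (a minimal
// sketch; it assumes an op named "mul" has been registered and that the
// scope already holds variables "x", "w" and "out". OpRegistry::CreateOp's
// exact signature may differ across versions):
//
//   auto op = paddle::framework::OpRegistry::CreateOp(
//       "mul", {{"X", {"x"}}, {"Y", {"w"}}}, {{"Out", {"out"}}}, {});
//   op->Run(scope, paddle::platform::CPUPlace());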

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      ss << input.second[i];
      if (scope) {
        int row_size = GetRowSize(*scope, input.second[i]);
        if (row_size >= 0) {
          ss << "[row_size=" << row_size << "]";
        }
        std::string dtype = GetDtype(*scope, input.second[i]);
        ss << ":" << dtype;
        ss << "[" << GetDims(*scope, input.second[i], true) << "]";
        ss << "(" << GetLoD(*scope, input.second[i]) << ")";
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      ss << output.second[i];
      if (scope) {
        int row_size = GetRowSize(*scope, output.second[i]);
        if (row_size >= 0) {
          ss << "[row_size=" << row_size << "]";
        }
        ss << "[" << GetDims(*scope, output.second[i], true) << "]";
        ss << "(" << GetLoD(*scope, output.second[i]) << ")";
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}
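
// Illustrative output of DebugStringEx when a scope is supplied ("(...)"
// stands for the printed LoD; dims, dtype and row_size depend on the scope
// contents, and note that dtype is only printed for inputs in this version):
//
//   Op(mul), inputs:{X[x:float[64, 784](...)], Y[w:float[784, 100](...)]},
//   outputs:{Out[out[64, 100](...)]}.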

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate outputs
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}
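
// GenerateTemporaryNames rewrites every output equal to kTempVarName into a
// unique name by appending the op type and a global counter. For example,
// assuming kTempVarName is "@TEMP@", a temporary output of the first mul op
// becomes "@TEMP@mul@0".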

static bool VarIsTensor(const Variable* var) {
  return var->IsType<LoDTensor>() || var->IsType<SelectedRows>();
}

static const Tensor* GetTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one inputs", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one inputs", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr
                        : GetTensorFromVar(const_cast<Variable*>(var));
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}
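
// Typical use of these specializations inside an OpKernel::Compute (a
// minimal sketch; "X", "Y" and "Out" stand for whatever parameter names the
// op declares):
//
//   void Compute(const ExecutionContext& ctx) const override {
//     auto* x = ctx.Input<Tensor>("X");        // nullptr if unset
//     auto ys = ctx.MultiInput<Tensor>("Y");   // one entry per variable
//     auto* out = ctx.Output<Tensor>("Out");   // mutable output tensor
//     // ... compute out from x and ys ...
//   }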

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU.
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}
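
// Note: an op type absent from AllOpKernels() (e.g. a control-flow op that
// has no kernels at all) is reported as GPU-capable; otherwise at least one
// registered kernel must live on a GPU place.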

class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto& ins = Inputs(name);
    size_t length = ins.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Input %s should not contain more than one variable.",
                      name);
    auto ipt = ins[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto& outs = Outputs(name);
    size_t length = outs.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Output %s should not contain more than one variable.",
                      name);
    auto ipt = outs[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN being populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPKernels, set_layout(kMKLDNN) should be
    //    called in Compute().
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void ShareLayout(const std::string& in, const std::string& out, size_t i = 0,
                   size_t j = 0) const {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("Only compile time supports this method");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time supports this method");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (!IsType<float>(tensor.type()) && !IsType<double>(tensor.type())) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}
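
// RunImpl below executes one operator: it runs InferShape via
// RuntimeInferShapeContext, picks a kernel by the expected OpKernelType
// (with an MKLDNN-to-plain fallback), transfers input data to the expected
// place/layout if needed, invokes the kernel, shares transferred in-place
// outputs back, and finally honors the --benchmark and --check_nan_inf
// flags.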

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // For profiling, don't move out of this function because that will result
  // in the failure of multi-GPU profiling.
  platform::RecordEvent record_event(Type(), dev_ctx);
  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key =
      this->GetExpectedKernelType(ExecutionContext(*this, scope, *dev_ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to the PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      TryTransferData(scope, expected_kernel_key, &transfered_inplace_vars);

  // exec scope is the scope that kernel actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx));

  if (!transfered_inplace_vars.empty()) {
    // some in-place variables have been transferred; share them back to the
    // original scope.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}

void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to it's original scope";
    auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name));
    auto* transformed_tensor =
        GetTensorFromVar(transfer_scope.FindVar(var_name));
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

Scope* OperatorWithKernel::TryTransferData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(var)) {
        continue;
      }

      auto* tensor_in = GetTensorFromVar(var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      if (new_scope == nullptr) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get %d != %d", Type(),
              data_type, tmp);
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}
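
// Ops override GetExpectedKernelType to pin a particular kernel. A minimal
// sketch for a hypothetical CUDNN-backed op (assuming the four-argument
// OpKernelType constructor taking a layout and a library type):
//
//   OpKernelType MyOp::GetExpectedKernelType(
//       const ExecutionContext& ctx) const {
//     return OpKernelType(IndicateDataType(ctx), ctx.GetPlace(),
//                         DataLayout::kAnyLayout, LibraryType::kCUDNN);
//   }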

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle