/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>
#include <sstream>
#include <string>
#include <unordered_set>
#include <vector>
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_call_stack.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DECLARE_bool(check_nan_inf);
DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");
DEFINE_bool(fast_check_nan_inf, false,
            "Fast checking NAN/INF after each operation. It will be a little "
            "bit slow, but much faster than check_nan_inf.");

namespace paddle {
namespace framework {

std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

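// The static helpers below (GetDimsDebug, VarInited, GetDtype, GetRowSize,
// GetLoDDebug) gather best-effort metadata about a variable in a scope for
// the operator debug string; each one falls back to a sentinel value
// (DDim({-1}), "", or an empty LoD) when the variable is missing,
// uninitialized, or of an unsupported type.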
static DDim GetDimsDebug(const Scope& scope, const std::string& name,
                         bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(tensor.type());
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(tensor.type());
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoDDebug(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    return tensor.lod();
  } else {
    return default_lod;
  }
}

RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

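// Top-level entry point for running an operator: binds the CUDA device when
// the target place is a GPU, wraps RunImpl in a profiler event only when
// profiling is enabled, and attaches the operator's call stack to any
// EnforceNotMet exception before rethrowing it.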
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  try {
    VLOG(4) << place << " " << DebugStringEx(&scope);
    if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
      PADDLE_THROW("Cannot run operator on place %s", place);
#else
      auto dev_id = boost::get<platform::CUDAPlace>(place).device;
      platform::SetDeviceId(dev_id);
#endif
    }

    // The profiler has a process-wide mutex, which causes serious performance
    // issues in concurrency scenarios. Here we use an `if` to avoid the
    // profiling path when profiling is disabled.
    // Please do not remove the `if`; ask @Superjomn if there are any concerns.
    if (platform::IsProfileEnabled()) {
      platform::RecordEvent record_event(Type());
      RunImpl(scope, place);
    } else {
      RunImpl(scope, place);
    }
    VLOG(3) << place << " " << DebugStringEx(&scope);
  } catch (platform::EnforceNotMet& exception) {
    framework::InsertCallStackInfo(Type(), Attrs(), &exception);
    throw std::move(exception);
  } catch (platform::EOFException&) {
    std::rethrow_exception(std::current_exception());
  } catch (std::exception& ex) {
    LOG(WARNING) << Type() << " raises an exception "
                 << platform::demangle(typeid(ex).name()) << ", " << ex.what();
    std::rethrow_exception(std::current_exception());
  } catch (...) {
    LOG(WARNING) << Type() << " raises an unknown exception";
    std::rethrow_exception(std::current_exception());
  }
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(
      ins.size(), 1UL,
      platform::errors::AlreadyExists(
          "Operator %s's input %s should contain only one variable.", type_,
          name));
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  if (outputs_.find(name) != outputs_.end()) {
    return true;
  } else {
    return false;
  }
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

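// Builds a human-readable description of the operator and, when a scope is
// given, of each argument's row size, dtype, dims, and LoD. Inputs the
// operator declared as "no need buffer" report "unknown_dtype" because their
// contents may have been freed. Illustrative output (hypothetical op/vars):
//   Op(scale), inputs:{X[x:float[64, 32]({})]}, outputs:{Out[out:float[64, 32]({})]}.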
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";

  const std::unordered_set<std::string>* no_need_buffer_vars = nullptr;
  if (info_ && info_->NoNeedBufferVarsInferer()) {
    no_need_buffer_vars =
        &(Info().NoNeedBufferVarsInferer()(Inputs(), Outputs(), Attrs()));
    if (no_need_buffer_vars->empty()) no_need_buffer_vars = nullptr;
  }

  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    bool is_no_need_buffer_var =
        (no_need_buffer_vars && no_need_buffer_vars->count(input.first) > 0);
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = is_no_need_buffer_var
                                  ? "unknown_dtype"
                                  : GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type),
      inputs_(inputs),
      outputs_(outputs),
      attrs_(attrs),
      // NOTE(zjl): why op_info may be nullptr?
      info_(OpInfoMap::Instance().GetNullable(type)) {
  // In dygraph mode, every OperatorBase is constructed by
  // framework::OpRegistry::CreateOp(type, {}, {}, {}, false).
  // Inputs, outputs and attrs are passed as empty maps
  // to improve the execution efficiency of dygraph.
  if (inputs_.size() > 0 || outputs_.size() > 0) {
    GenerateTemporaryNames();
    CheckAllInputOutputSet();
  }
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = Info();

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  if (info_ == nullptr || info_->proto_ == nullptr) return;

  for (auto& in : info_->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : info_->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

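// Helpers that view a Variable as a Tensor: VarIsTensor tells whether the
// variable holds a LoDTensor or SelectedRows, and the two accessors below
// return the underlying tensor (the value() tensor for SelectedRows),
// throwing for any other variable type.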
static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 ToTypeName(var.Type()));
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 ToTypeName(var->Type()));
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  auto* var = InputVar(name);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  auto* var = OutputVar(name);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(
      it->second.size(), 1UL,
      platform::errors::AlreadyExists(
          "Operator %s's input %s should contain only one variable.",
          op_.Type(), name));
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto vars = MultiInputVar(name);
  if (vars.size() == 0) {
    return {};
  }
  std::vector<const Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](const Variable* var) -> const Tensor* {
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "should be LoDTensor, but the received type is %s",
                       ToTypeName(var->Type()));
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto vars = MultiOutputVar(name);

  if (vars.size() == 0) {
    return {};
  }
  std::vector<Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](Variable* var) -> Tensor* {
                   return var == nullptr ? nullptr
                                         : var->GetMutable<LoDTensor>();
                 });
  return res;
}

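// Returns true when the operator has at least one kernel registered for a GPU
// place; operators with no registered kernels at all (e.g. control-flow ops)
// are reported as GPU-capable. Illustrative use (hypothetical call site):
//   platform::Place place =
//       OpSupportGPU("some_op") ? platform::Place(platform::CUDAPlace(0))
//                               : platform::Place(platform::CPUPlace());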
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

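// Runtime implementation of InferShapeContext: it resolves input/output
// variables through the pre-built RuntimeContext (rather than by name lookup
// in a scope), so shape inference during execution reads and writes the
// actual LoDTensor/SelectedRows dims.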
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope,
                           const RuntimeContext& ctx)
      : op_(op), ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0) return false;
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one input", name);
    return in[0] != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one output", name);
    return out[0] != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end() || it->second.empty()) {
      return false;
    }
    for (auto& input : it->second) {
      if (input == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end() || it->second.empty()) {
      return false;
    }
    for (auto& output : it->second) {
      if (output == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  std::vector<std::string> Inputs(const std::string& name) const override {
    return op_.Inputs(name);
  }

  std::vector<std::string> Outputs(const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];

    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", in, out);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareAllLoD(const std::string& in,
                   const std::string& out) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE_NE(in_it, ctx_.inputs.end(),
                      platform::errors::NotFound(
                          "Input [%s] not found in Op [%s]", in, op_.Type()));
    PADDLE_ENFORCE_NE(
        out_it, ctx_.outputs.end(),
        platform::errors::NotFound("Output [%s] not found in Op [%s]", out,
                                   op_.Type()));

    auto& in_var_list = in_it->second;
    auto& out_var_list = out_it->second;

    PADDLE_ENFORCE_EQ(
        in_var_list.size(), out_var_list.size(),
        platform::errors::PreconditionNotMet(
            "Op [%s]: Input var size should be equal to output var size",
            op_.Type()));

    auto& out_var_names = op_.Outputs(out);

    for (size_t i = 0; i < in_var_list.size(); ++i) {
      if (out_var_names[i] == framework::kEmptyVarName) {
        continue;
      }

      Variable* in_var = in_var_list[i];
      if (!in_var->IsType<LoDTensor>()) return;
      Variable* out_var = out_var_list[i];
      PADDLE_ENFORCE_EQ(out_var->IsType<LoDTensor>(), true,
                        platform::errors::PreconditionNotMet(
                            "The %d-th output of Output(%s) must be LoDTensor.",
                            i, out_var_names[i]));
      auto& in_tensor = in_var->Get<LoDTensor>();
      auto* out_tensor = out_var->GetMutable<LoDTensor>();
      out_tensor->set_lod(in_tensor.lod());
#ifdef PADDLE_WITH_MKLDNN
      if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
        out_tensor->set_layout(in_tensor.layout());
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second.at(i);
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = out_it->second.at(j);
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto& in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN being populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPKernels, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  int32_t GetLoDLevel(const std::string& in, size_t i = 0) const override {
    PADDLE_THROW(
        "GetLoDLevel is only used at compile time. The calculation of the "
        "output's actual lod differs among operators, so it should be set "
        "in the runtime kernel.");
  }

  void SetLoDLevel(const std::string& out, int32_t lod_level,
                   size_t j = 0) const override {
    PADDLE_THROW(
        "SetLoDLevel is only used at compile time. The calculation of the "
        "output's actual lod differs among operators, so it should be set "
        "in the runtime kernel.");
  }

  bool IsRuntime() const override { return true; }

  // TODO(paddle-dev): Can this be template?
  std::vector<InferShapeVarPtr> GetInputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = InputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  std::vector<InferShapeVarPtr> GetOutputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = OutputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  DDim GetInputDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Input(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    return this->GetDim(vars[0]);
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    return GetDims(vars);
  }

  std::vector<proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    return GetVarTypes(InputVars(name));
  }

  std::vector<proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    return GetVarTypes(OutputVars(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto& vars = OutputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Output(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    SetDim(vars[0], dim);
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto& vars = OutputVars(name);
    SetDims(vars, dims);
  }

 protected:
  DDim GetDim(Variable* var) const {
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variables "
          "type_id is %s.",
          ToTypeName(var->Type()));
    }
  }

  std::vector<DDim> GetDims(const std::vector<Variable*>& vars) const {
    std::vector<DDim> ret;
    ret.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(ret),
                   [this](Variable* var) { return this->GetDim(var); });
    return ret;
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("Only compile time support this method");
  }

  void SetDim(Variable* var, const DDim& dim) {
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                   ToTypeName(var->Type()));
    }
  }

  void SetDims(const std::vector<Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
    PADDLE_ENFORCE_EQ(length, dims.size());
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time support this method");
  }

  std::vector<proto::VarType::Type> GetVarTypes(
      const std::vector<Variable*>& vars) const {
    std::vector<proto::VarType::Type> retv;
    retv.resize(vars.size());
    std::transform(vars.begin(), vars.end(), retv.begin(),
                   std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
                             this, std::placeholders::_1));
    return retv;
  }

  proto::VarType::Type GetVarType(Variable* var) const {
    return ToVarType(var->Type());
  }

 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    PADDLE_ENFORCE(it != ctx_.inputs.end(),
                   "Operator %s does not have the input %s.", op_.Type(), name);
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    PADDLE_ENFORCE(it != ctx_.outputs.end(),
                   "Operator %s does not have the outputs %s.", op_.Type(),
                   name);
    return it->second;
  }

  const OperatorBase& op_;
  const RuntimeContext& ctx_;
};

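// Checks a tensor for Inf/NaN values; only FP32/FP64 tensors with allocated
// memory are inspected, and an enforce failure names the offending operator
// and output variable.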
static void CheckTensorNANOrInf(const std::string& op_type,
                                const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (tensor.type() != proto::VarType::FP32 &&
      tensor.type() != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Operator %s output Tensor %s contains Inf", op_type, name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Operator %s output Tensor %s contains NAN", op_type, name);
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope, ctx);
  this->InferShape(&infer_shape_ctx);
}

std::vector<KernelConfig>* OperatorWithKernel::GetKernelConfig(
    const OpKernelType& key) const {
  auto config_iter = kernel_configs_map_.find(key);
  std::vector<KernelConfig>* kernel_configs = nullptr;
  if (config_iter != kernel_configs_map_.end()) {
    kernel_configs = &(config_iter->second);
  }
  return kernel_configs;
}

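// Dispatch entry for operators with kernels. The attribute
// kEnableCacheRuntimeContext lets an operator reuse one RuntimeContext across
// runs as long as the scope does not change; otherwise a fresh RuntimeContext
// is built from the input/output name maps on every call.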
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  // To reduce the elapsed time of HasAttr, we use bool variables to record the
  // results of HasAttr.
  if (!enable_cache_runtime_context_ && HasAttr(kEnableCacheRuntimeContext))
    enable_cache_runtime_context_ = true;
  if (!all_kernels_must_compute_runtime_shape_ &&
      HasAttr(kAllKernelsMustComputeRuntimeShape))
    all_kernels_must_compute_runtime_shape_ = true;
  if (!enable_cache_runtime_context_) {
    RuntimeContext ctx(Inputs(), Outputs(), scope);
    RunImpl(scope, place, &ctx);
  } else {
    const Scope* cur_scope = &scope;
    if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
      std::lock_guard<std::mutex> lock(cache_update_mutex_);
      if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
        runtime_ctx_.reset(new RuntimeContext(Inputs(), Outputs(), scope));
        pre_scope_ = cur_scope;
      }
    }
    RunImpl(scope, place, runtime_ctx_.get());
  }
}

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place,
                                 RuntimeContext* runtime_ctx) const {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
    ChooseKernel(*runtime_ctx, scope, place);
  }

  std::vector<KernelConfig>* kernel_configs = GetKernelConfig(*kernel_type_);

  // do data transform
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      PrepareData(scope, *kernel_type_, &transfered_inplace_vars, runtime_ctx);

  // exec_scope is the scope that the kernel is actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(kernel_type_->place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(kernel_type_->place_);
  }

  if (!all_kernels_must_compute_runtime_shape_) {
    RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, *runtime_ctx);
    this->InferShape(&infer_shape_ctx);
  }
  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext,
  // not Scope. Imperative mode only passes inputs and gets outputs.
  (*kernel_func_)(ExecutionContext(*this, exec_scope, *dev_ctx, *runtime_ctx,
                                   kernel_configs));

  if (!transfered_inplace_vars.empty()) {
    // some inplace variables have been transferred; share them back.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /* For profiling/benchmark only */
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_fast_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      // only check inserted vars,
      // please see executor.py for details of fast_check_nan_inf
      if (vname.rfind("debug_var") == 0) {
        VLOG(3) << "debugging nan/inf in var " << vname;

        auto* var = exec_scope.FindVar(vname);
        if (var == nullptr) continue;
        if (var->IsType<framework::LoDTensor>()) {
          CheckTensorNANOrInf(type_, vname, var->Get<framework::LoDTensor>());
        } else if (var->IsType<framework::SelectedRows>()) {
          CheckTensorNANOrInf(type_, vname,
                              var->Get<framework::SelectedRows>().value());
        }
      }
    }
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(type_, vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(type_, vname,
                            var->Get<framework::SelectedRows>().value());
      }
    }
  }

  // To solve issue #15032 (discussed with @Luotao for cpu inference): do not
  // cache the transfer scope in this case; delete it after the run to avoid a
  // memory leak.
  if (transfer_scope && !run_by_executor_ && !enable_cache_transfer_scope_) {
    scope.DeleteScope(transfer_scope);
  }
}

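// Selects the kernel to run: asks the operator for its expected kernel key
// (place, data type, layout, library), falls back from MKLDNN to a plain CPU
// kernel when no MKLDNN kernel is registered, and caches the chosen kernel
// type and function under a mutex.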
void OperatorWithKernel::ChooseKernel(const RuntimeContext& ctx,
                                      const Scope& scope,
                                      const platform::Place& place) const {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels registered for the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = this->GetExpectedKernelType(
      ExecutionContext(*this, scope, *dev_ctx, ctx, nullptr));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to the PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have a kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  std::lock_guard<std::mutex> lock(cache_update_mutex_);
  if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
    kernel_type_.reset(new OpKernelType(expected_kernel_key));
    kernel_func_.reset(new OpKernelFunc(kernel_iter->second));
  }
}

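// After the kernel has run in a transfer scope, share each transformed
// inplace variable's data back into the corresponding variable of the
// original scope.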
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* origin_var = scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(origin_var, "The var[%s] should not be nullptr.",
                            var_name);
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(origin_var);
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var, "The var[%s] should not be nullptr.",
                            var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

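// Prepares kernel inputs: inputs marked as "no need buffer" are skipped, and
// every initialized input tensor whose per-variable kernel type differs from
// the expected kernel key is transformed (layout / data type / place) into a
// transfer scope, which may be a cached thread-local scope for inference.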
Scope* OperatorWithKernel::PrepareData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx) const {
  Scope* new_scope = nullptr;

  const std::unordered_set<std::string>* no_buffer_ins = nullptr;
  if (info_) {
    auto& no_buffer_inferer = info_->NoNeedBufferVarsInferer();
    // Some ops may not register a NoNeedBufferVarsInferer
    if (no_buffer_inferer) {
      no_buffer_ins = &(no_buffer_inferer(Inputs(), Outputs(), Attrs()));
      if (no_buffer_ins->empty()) no_buffer_ins = nullptr;
    }
  }

  for (auto& var_name_item : Inputs()) {
    if (no_buffer_ins && no_buffer_ins->count(var_name_item.first) > 0) {
      VLOG(7) << "Skip scanning input " << var_name_item.first
              << " in Operator " << type_;
      continue;
    }

    std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];

    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      auto& var_name = var_name_item.second[i];
      auto* var = input_vars[i];

      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, the scopes are reused across batches, so
      // the `new_scope` created here would cause GPU memory to grow without
      // bound over the running of operators.
      // We use a thread_local cache to fix that issue; the key in the cache is
      // the combination of the `scope` argument, from_kernel_type and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if some
      // changes to this logic might not be tested on the other scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it should
      // be called by a NaiveExecutor; the NaiveExecutor caches the scopes and
      // variables, which behaves quite differently.
      //
      // To solve issue #15032 (discussed with @Luotao for cpu inference): for
      // all cpu-kernel cases without GPU participation, do not do transfer
      // scope caching here; tests show cpu inference performance is not
      // impacted.
      enable_cache_transfer_scope_ = false;
      if (!run_by_executor_ &&
          (platform::is_gpu_place(kernel_type_for_var.place_) ||
           platform::is_gpu_place(expected_kernel_key.place_))) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
        enable_cache_transfer_scope_ = true;
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }
      // For inference, if a gpu model has an op which could only run on CPU,
      // each result for different inputs would be the same as the first one.
      // The reason is that if a gpu tensor is the input of a cpu kernel,
      // we create a new cpu tensor in the new scope.
      // However, if enable_cache_runtime_context_, we get the cpu tensor each
      // time, not the gpu tensor.
      // Thus, we set pre_scope_ = nullptr to trigger `new RuntimeContext()` in
      // RunImpl().
      if (enable_cache_runtime_context_) {
        pre_scope_ = nullptr;
      }

      auto* trans_var = new_scope->Var(var_name);
      input_vars[i] = trans_var;

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

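// Scans every variable registered under the given input name and records the
// tensor data type it finds, enforcing that all initialized tensors of a
// duplicable input share one consistent type.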
void OperatorWithKernel::ParseInputDataType(
    const ExecutionContext& ctx, const std::string& name,
    proto::VarType::Type* data_type) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  const std::vector<Variable*> vars = ctx.MultiInputVar(name);
  for (size_t i = 0; i < vars.size(); ++i) {
    const Variable* var = vars[i];
    if (var != nullptr) {
      const Tensor* t = nullptr;
      if (var->IsType<Tensor>()) {
        t = &var->Get<Tensor>();
      } else if (var->IsType<LoDTensor>()) {
        t = &var->Get<LoDTensor>();
      } else if (var->IsType<SelectedRows>()) {
        t = &(var->Get<SelectedRows>().value());
      }
      if (t != nullptr) {
        PADDLE_ENFORCE_EQ(
            t->IsInitialized(), true,
            platform::errors::InvalidArgument(
                "The Tensor in the %s Op's Input Variable %s(%s) is "
                "not initialized.",
                Type(), name, ctx.InputNames(name).at(i)));
        proto::VarType::Type tmp = t->type();
        PADDLE_ENFORCE(
            tmp == *data_type || *data_type == default_data_type,
            platform::errors::InvalidArgument(
                "The DataType of %s Op's duplicable Variable %s must be "
                "consistent. The current variable type is (%s), but the "
                "previous variable type is (%s).",
                Type(), name, DataTypeToString(tmp),
                DataTypeToString(*data_type)));
        *data_type = tmp;
      }
    }
  }
}

proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = default_data_type;
  for (auto& input : ctx.InNameList()) {
    ParseInputDataType(ctx, input, &data_type);
  }
  PADDLE_ENFORCE_NE(data_type, default_data_type,
                    "DataType should be indicated by input Variable.");
  return data_type;
}

proto::VarType::Type OperatorWithKernel::IndicateVarDataType(
    const ExecutionContext& ctx, const std::string& name) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = default_data_type;
  ParseInputDataType(ctx, name, &data_type);
  PADDLE_ENFORCE_NE(
      data_type, default_data_type,
      "The Input Variable(%s) of %s Op used to determine kernel data type "
      "is empty or not LoDTensor or SelectedRows.",
      name, Type());
  return data_type;
}

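// Default kernel-type policy: use the data type inferred from the inputs and
// the current execution place; the per-variable variant keeps the tensor's
// own place and layout. Concrete operators commonly override these to request
// a specific layout or library for their kernels.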
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle