/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>
#include <sstream>
#include <string>
#include <unordered_set>
#include <vector>
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_call_stack.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DECLARE_bool(check_nan_inf);
DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");
DEFINE_bool(fast_check_nan_inf, false,
            "Fast checking NAN/INF after each operation. It is slightly "
            "slower than no check, but much faster than check_nan_inf");

namespace paddle {
namespace framework {

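// Kernel (place, library) pairs, ordered from highest to lowest priority.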
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return var->Get<framework::LoDTensor>().type();
  } else if (var->IsType<framework::SelectedRows>()) {
    return var->Get<framework::SelectedRows>().value().type();
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

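// Returns the dims of the variable `name` for debug printing; DDim({-1})
// marks a variable that is missing, uninitialized, or of an unsupported type.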
static DDim GetDimsDebug(const Scope& scope, const std::string& name,
                         bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(tensor.type());
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(tensor.type());
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoDDebug(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

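// Caches the Variable* pointers of all inputs and outputs up front, so that
// kernels do not have to repeat the by-name scope lookups.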
RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

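// Entry point for executing an operator, e.g. op->Run(scope, place). Sets the
// device, optionally records a profiler event, delegates to RunImpl, and
// attaches the operator call stack to any EnforceNotMet before rethrowing.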
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  try {
    VLOG(4) << place << " " << DebugStringEx(&scope);
    if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
      PADDLE_THROW("Cannot run operator on place %s", place);
#else
      auto dev_id = boost::get<platform::CUDAPlace>(place).device;
      platform::SetDeviceId(dev_id);
#endif
    }

    // The profiler has a process-wide mutex, which causes a serious
    // performance issue in concurrency scenarios. Here we use an `if` to fix
    // this issue. Please do not remove the `if`; ask @Superjomn if there is
    // any concern.
    if (platform::IsProfileEnabled()) {
      platform::RecordEvent record_event(Type());
      RunImpl(scope, place);
    } else {
      RunImpl(scope, place);
    }
    VLOG(3) << place << " " << DebugStringEx(&scope);
  } catch (platform::EnforceNotMet& exception) {
    framework::InsertCallStackInfo(Type(), Attrs(), &exception);
    throw std::move(exception);
  } catch (...) {
    std::rethrow_exception(std::current_exception());
  }
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

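// Renders the operator with its inputs and outputs; when a scope is given,
// each variable is annotated with its row size, dtype, dims, and LoD.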
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type),
      inputs_(inputs),
      outputs_(outputs),
      attrs_(attrs),
      // NOTE(zjl): why op_info may be nullptr?
      info_(OpInfoMap::Instance().GetNullable(type)) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = Info();

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  if (info_ == nullptr || info_->proto_ == nullptr) return;

  for (auto& in : info_->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : info_->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id is %s, expected LoDTensor/SelectedRows.",
                 ToTypeName(var.Type()));
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id is %s, expected LoDTensor/SelectedRows.",
                 ToTypeName(var->Type()));
  }
}

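// HasInput/HasOutput report whether a single-variable slot is both declared
// on the operator and resolves to a non-null variable in the scope.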
bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one input", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one output", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) {
    return {};
  }
  const std::vector<Variable*>& vars = it->second;
  std::vector<const Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](Variable* var) -> const Tensor* {
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "should be LoDTensor, but the received type is %s",
                       ToTypeName(var->Type()));
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) {
    return {};
  }
  const std::vector<Variable*>& vars = it->second;
  std::vector<Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](Variable* var) -> Tensor* {
                   return var == nullptr ? nullptr
                                         : var->GetMutable<LoDTensor>();
                 });
  return res;
}

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

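// InferShapeContext implementation backed by the cached RuntimeContext; used
// at runtime, so IsRuntime() returns true and dims are read from (and written
// to) the variables themselves.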
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope,
                           const RuntimeContext& ctx)
      : op_(op), ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0) return false;
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one input", name);
    return in[0] != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one output", name);
    return out[0] != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end() || it->second.empty()) {
      return false;
    }
    for (auto& input : it->second) {
      if (input == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end() || it->second.empty()) {
      return false;
    }
    for (auto& output : it->second) {
      if (output == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];

    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", in, out);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second.at(i);
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = out_it->second.at(j);
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to shared info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when the input layout is kMKLDNN.
    //    This avoids kMKLDNN being wrongly propagated into a non-MKLDNN
    //    OPKernel. In every MKLDNN OPKernel, set_layout(kMKLDNN) should be
    //    called in Compute().
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void DecreaseLoDLevel(const std::string& in, const std::string& out,
                        size_t i = 0, size_t j = 0) const override {
    PADDLE_THROW("DecreaseLoDLevel is only used in compile time.");
  }

  bool IsRuntime() const override { return true; }

  // TODO(paddle-dev): Can this be template?
  std::vector<InferShapeVarPtr> GetInputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = InputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  std::vector<InferShapeVarPtr> GetOutputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = OutputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  DDim GetInputDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Input(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    return this->GetDim(vars[0]);
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    return GetDims(vars);
  }

  std::vector<proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    return GetVarTypes(InputVars(name));
  }

  std::vector<proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    return GetVarTypes(OutputVars(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto& vars = OutputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Output(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    SetDim(vars[0], dim);
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto& vars = OutputVars(name);
    SetDims(vars, dims);
  }

 protected:
  DDim GetDim(Variable* var) const {
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but the Variable's "
          "type_id is %s.",
          ToTypeName(var->Type()));
    }
  }

  std::vector<DDim> GetDims(const std::vector<Variable*>& vars) const {
    std::vector<DDim> ret;
    ret.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(ret),
                   [this](Variable* var) { return this->GetDim(var); });
    return ret;
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("This method is only supported at compile time");
  }

  void SetDim(Variable* var, const DDim& dim) {
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable type_id is %s, expected LoDTensor/SelectedRows.",
                   ToTypeName(var->Type()));
    }
  }

  void SetDims(const std::vector<Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
    PADDLE_ENFORCE_EQ(length, dims.size());
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("This method is only supported at compile time");
  }

  std::vector<proto::VarType::Type> GetVarTypes(
      const std::vector<Variable*>& vars) const {
    std::vector<proto::VarType::Type> retv;
    retv.resize(vars.size());
    std::transform(vars.begin(), vars.end(), retv.begin(),
                   std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
                             this, std::placeholders::_1));
    return retv;
  }

  proto::VarType::Type GetVarType(Variable* var) const {
    return ToVarType(var->Type());
  }

 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    PADDLE_ENFORCE(it != ctx_.inputs.end(),
                   "Operator %s does not have the input %s.", op_.Type(), name);
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    PADDLE_ENFORCE(it != ctx_.outputs.end(),
                   "Operator %s does not have the outputs %s.", op_.Type(),
                   name);
    return it->second;
  }

  const OperatorBase& op_;
  const RuntimeContext& ctx_;
};

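// Enforces that an FP32/FP64 tensor contains neither Inf nor NaN; empty
// tensors and other data types are skipped.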
static void CheckTensorNANOrInf(const std::string& op_type,
                                const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (tensor.type() != proto::VarType::FP32 &&
      tensor.type() != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Operator %s output Tensor %s contains Inf", op_type, name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Operator %s output Tensor %s contains NAN", op_type, name);
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope, ctx);
  this->InferShape(&infer_shape_ctx);
}

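// Returns the kernel configs cached for `key`, or nullptr if none were
// registered.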
std::vector<KernelConfig>* OperatorWithKernel::GetKernelConfig(
    const OpKernelType& key) const {
  auto config_iter = kernel_configs_map_.find(key);
  std::vector<KernelConfig>* kernel_configs = nullptr;
  if (config_iter != kernel_configs_map_.end()) {
    kernel_configs = &(config_iter->second);
  }
  return kernel_configs;
}

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  // To reduce the elapsed time of HasAttr, we use bool variables to cache its
  // results.
  if (!enable_cache_runtime_context_ && HasAttr(kEnableCacheRuntimeContext))
    enable_cache_runtime_context_ = true;
  if (!all_kernels_must_compute_runtime_shape_ &&
      HasAttr(kAllKernelsMustComputeRuntimeShape))
    all_kernels_must_compute_runtime_shape_ = true;
  if (!enable_cache_runtime_context_) {
    RuntimeContext ctx(Inputs(), Outputs(), scope);
    RunImpl(scope, place, &ctx);
  } else {
    const Scope* cur_scope = &scope;
    if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
      std::lock_guard<std::mutex> lock(cache_update_mutex_);
      if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
        runtime_ctx_.reset(new RuntimeContext(Inputs(), Outputs(), scope));
        pre_scope_ = cur_scope;
      }
    }
    RunImpl(scope, place, runtime_ctx_.get());
  }
}

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place,
                                 RuntimeContext* runtime_ctx) const {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
    ChooseKernel(*runtime_ctx, scope, place);
  }

  std::vector<KernelConfig>* kernel_configs = GetKernelConfig(*kernel_type_);

  // Do data transform; transformed variables live in transfer_scope.
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      PrepareData(scope, *kernel_type_, &transfered_inplace_vars, runtime_ctx);

  // exec scope is the scope that kernel actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(kernel_type_->place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(kernel_type_->place_);
  }

  if (!all_kernels_must_compute_runtime_shape_) {
    RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, *runtime_ctx);
    this->InferShape(&infer_shape_ctx);
  }
  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext
  // not Scope. Imperative mode only pass inputs and get outputs.
  (*kernel_func_)(ExecutionContext(*this, exec_scope, *dev_ctx, *runtime_ctx,
                                   kernel_configs));

  if (!transfered_inplace_vars.empty()) {
    // Some inplace variables have been transferred; share them back.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_fast_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      // only check inserted vars,
      // please see executor.py for details of fast_check_nan_inf
      if (vname.rfind("debug_var") == 0) {
        VLOG(3) << "debugging nan/inf in var " << vname;

        auto* var = exec_scope.FindVar(vname);
        if (var == nullptr) continue;
        if (var->IsType<framework::LoDTensor>()) {
          CheckTensorNANOrInf(type_, vname, var->Get<framework::LoDTensor>());
        } else if (var->IsType<framework::SelectedRows>()) {
          CheckTensorNANOrInf(type_, vname,
                              var->Get<framework::SelectedRows>().value());
        }
      }
    }
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(type_, vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(type_, vname,
                            var->Get<framework::SelectedRows>().value());
      }
    }
  }

  // To solve issue #15032 (discussed with @Luotao): for cpu inference the
  // transfer scope is not cached, so in this case delete it after the run to
  // avoid a memory leak.
  if (transfer_scope && !run_by_executor_ && !enable_cache_transfer_scope_) {
    scope.DeleteScope(transfer_scope);
  }
}

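// Selects the kernel matching GetExpectedKernelType(), falling back from
// MKLDNN to a plain kernel when necessary, and caches kernel_type_ and
// kernel_func_ under cache_update_mutex_.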
void OperatorWithKernel::ChooseKernel(const RuntimeContext& ctx,
                                      const Scope& scope,
                                      const platform::Place& place) const {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = this->GetExpectedKernelType(
      ExecutionContext(*this, scope, *dev_ctx, ctx, nullptr));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to the PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  std::lock_guard<std::mutex> lock(cache_update_mutex_);
  if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
    kernel_type_.reset(new OpKernelType(expected_kernel_key));
    kernel_func_.reset(new OpKernelFunc(kernel_iter->second));
  }
}

void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* origin_var = scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(origin_var, "The var[%s] should not be nullptr.",
                            var_name);
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(origin_var);
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var, "The var[%s] should not be nullptr.",
                            var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

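// Transforms inputs whose per-variable kernel type (place, layout, or dtype)
// differs from the expected kernel type into a transfer scope, recording any
// transferred inplace variables. Returns the transfer scope, or nullptr when
// no transform was needed.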
Scope* OperatorWithKernel::PrepareData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx) const {
  Scope* new_scope = nullptr;

  std::unordered_set<std::string> no_buffer_ins;
  if (info_) {
    auto& no_buffer_inferer = info_->NoNeedBufferVarsInferer();
    // Some op may not register NoNeedBufferVarsInferer
    if (no_buffer_inferer) {
      no_buffer_ins = no_buffer_inferer(Inputs(), Outputs(), Attrs());
    }
  }

  for (auto& var_name_item : Inputs()) {
    // NOTE(zjl): STL does not guarantee fast std::unordered_set::count when
    // the set is empty. At least the STL implementation on my Mac computes
    // the hash code of the search key even when the set is empty.
    if (!no_buffer_ins.empty() &&
        no_buffer_ins.count(var_name_item.first) > 0) {
      VLOG(7) << "Skip scanning input " << var_name_item.first
              << " in Operator " << type_;
      continue;
    }

    std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];

    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      auto& var_name = var_name_item.second[i];
      auto* var = input_vars[i];

      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, the scopes will be reused across batches,
      // so the `new_scope` here would cause a GPU memory explosion over the
      // running of operators.
      // We use a thread_local cache to fix that issue; the key in the cache
      // is the combination of the `scope` argument, from_kernel_type, and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if any
      // change to this logic might not be tested on the other scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it is
      // called by a NaiveExecutor, which caches the scopes and variables;
      // that behavior is quite different.
      //
      // To solve issue #15032 (discussed with @Luotao about cpu inference):
      // for all-cpu kernel cases without GPU participation we do not cache
      // the transfer scope here; tests show cpu inference performance is not
      // impacted.
      enable_cache_transfer_scope_ = false;
      if (!run_by_executor_ &&
          (platform::is_gpu_place(kernel_type_for_var.place_) ||
           platform::is_gpu_place(expected_kernel_key.place_))) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
        enable_cache_transfer_scope_ = true;
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }
      // For inference, if a gpu model has an op which could only run on CPU,
      // the results for different inputs would all equal the first one.
      // The reason is that if a gpu tensor is the input of a cpu kernel,
      // we create a new cpu tensor in the new scope.
      // However, if enable_cache_runtime_context_, we get the cpu tensor each
      // time, not the gpu tensor.
      // Thus, we set pre_scope_ = nullptr to trigger `new RuntimeContext()` in
      // RunImpl().
      if (enable_cache_runtime_context_) {
        pre_scope_ = nullptr;
      }

      auto* trans_var = new_scope->Var(var_name);
      input_vars[i] = trans_var;

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

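// Infers the operator's data type by scanning all initialized input tensors
// and enforcing that they all share a single type.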
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = default_data_type;
  for (auto& input : this->inputs_) {
    const std::vector<const Variable*> vars = ctx.MultiInputVar(input.first);
    for (size_t i = 0; i < vars.size(); ++i) {
      const Variable* var = vars[i];
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          PADDLE_ENFORCE(t->IsInitialized(), "Input %s(%lu) is not initialized",
                         input.first, i);
          proto::VarType::Type tmp = t->type();
          PADDLE_ENFORCE(
              tmp == data_type || data_type == default_data_type,
              "DataType of Paddle Op %s %s must be the same. Got (%s) != (%s)",
              Type(), input.first, DataTypeToString(data_type),
              DataTypeToString(tmp));
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != default_data_type,
                 "DataType should be indicated by input");
  return data_type;
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle