/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>
#include <sstream>
#include <string>
#include <unordered_set>
#include <vector>
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_call_stack.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DECLARE_bool(check_nan_inf);
DEFINE_int32(inner_op_parallelism, 0, "number of threads for inner op");
DEFINE_bool(fast_check_nan_inf, false,
            "Fast checking NAN/INF after each operation. It will be a little "
            "bit slow, but much faster than check_nan_inf.");

namespace paddle {
namespace framework {

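// Kernel (place, library) pairs listed from the highest priority to the
// lowest.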
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

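// Returns the dims of the variable `name` in `scope` for debug strings.
// For SelectedRows, `get_actual_dim` chooses between the dims of the
// underlying value tensor and the complete (height-based) dims. Returns
// {-1} when the variable is absent or of an unsupported type.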
static DDim GetDimsDebug(const Scope& scope, const std::string& name,
                         bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

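// Returns true iff the variable `name` exists in `scope` and has been
// initialized.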
static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

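// Returns a printable data type string of the variable `name` for debug
// strings; returns "" (or "uninited" for an uninitialized SelectedRows)
// when there is no initialized tensor to inspect.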
static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(tensor.type());
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(tensor.type());
    }
  } else {
    return "";
  }
}

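// Returns the row count of a SelectedRows variable, or -1 when the
// variable is absent or not a SelectedRows.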
static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

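// Returns the LoD of a LoDTensor variable for debug strings; falls back
// to an empty LoD for absent or non-LoDTensor variables.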
static LoD GetLoDDebug(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    return tensor.lod();
  } else {
    return default_lod;
  }
}

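// Resolves all input/output variable names against `scope` once, so that
// later kernel execution can use the cached Variable* pointers instead of
// repeated scope lookups.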
RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

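// Public entry of operator execution: binds the CUDA device if necessary,
// runs RunImpl (wrapped in a profiler event when profiling is enabled),
// and attaches the op's call stack to an EnforceNotMet before rethrowing.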
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  try {
    VLOG(4) << place << " " << DebugStringEx(&scope);
    if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
      PADDLE_THROW("Cannot run operator on place %s", place);
#else
      auto dev_id = boost::get<platform::CUDAPlace>(place).device;
      platform::SetDeviceId(dev_id);
#endif
    }

    // The profiler holds a process-wide mutex, which causes serious
    // performance issues in concurrency scenarios. The `if` below avoids
    // entering the profiler when it is disabled; please do not remove it.
    // Ask @Superjomn if there is any concern.
    if (platform::IsProfileEnabled()) {
      platform::RecordEvent record_event(Type());
      RunImpl(scope, place);
    } else {
      RunImpl(scope, place);
    }
    VLOG(3) << place << " " << DebugStringEx(&scope);
  } catch (platform::EnforceNotMet& exception) {
    framework::InsertCallStackInfo(Type(), Attrs(), &exception);
    throw std::move(exception);
  } catch (platform::EOFException&) {
    std::rethrow_exception(std::current_exception());
  } catch (std::exception& ex) {
    LOG(WARNING) << Type() << " raises an exception "
                 << platform::demangle(typeid(ex).name()) << ", " << ex.what();
    std::rethrow_exception(std::current_exception());
  } catch (...) {
    LOG(WARNING) << Type() << " raises an unknown exception";
    std::rethrow_exception(std::current_exception());
  }
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";

  const std::unordered_set<std::string>* no_need_buffer_vars = nullptr;
  if (info_ && info_->NoNeedBufferVarsInferer()) {
    no_need_buffer_vars =
        &(Info().NoNeedBufferVarsInferer()(Inputs(), Outputs(), Attrs()));
    if (no_need_buffer_vars->empty()) no_need_buffer_vars = nullptr;
  }

  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    bool is_no_need_buffer_var =
        (no_need_buffer_vars && no_need_buffer_vars->count(input.first) > 0);
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = is_no_need_buffer_var
                                  ? "unknown_dtype"
                                  : GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDimsDebug(*scope, var_name, true) << "]";
          ss << "(" << GetLoDDebug(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type),
      inputs_(inputs),
      outputs_(outputs),
      attrs_(attrs),
      // NOTE(zjl): why op_info may be nullptr?
      info_(OpInfoMap::Instance().GetNullable(type)) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = Info();

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  if (info_ == nullptr || info_->proto_ == nullptr) return;

  for (auto& in : info_->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : info_->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 ToTypeName(var.Type()));
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 ToTypeName(var->Type()));
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  auto* var = InputVar(name);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  auto* var = OutputVar(name);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) {
    return {};
  }
  const std::vector<Variable*>& vars = it->second;
  std::vector<const Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](Variable* var) -> const Tensor* {
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "should be LoDTensor, but the received type is %s",
                       ToTypeName(var->Type()));
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) {
    return {};
  }
  const std::vector<Variable*>& vars = it->second;
  std::vector<Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](Variable* var) -> Tensor* {
                   return var == nullptr ? nullptr
                                         : var->GetMutable<LoDTensor>();
                 });
  return res;
}

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU.
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

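// InferShapeContext implementation used at runtime: it reads and writes
// shapes directly through the Variable* pointers cached in RuntimeContext
// rather than through the compile-time program description.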
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope,
                           const RuntimeContext& ctx)
      : op_(op), ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0) return false;
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one variable", name);
    return in[0] != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one variable", name);
    return out[0] != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end() || it->second.empty()) {
      return false;
    }
    for (auto& input : it->second) {
      if (input == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end() || it->second.empty()) {
      return false;
    }
    for (auto& output : it->second) {
      if (output == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];

    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", in, out);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second.at(i);
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = out_it->second.at(j);
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto& in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter): reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence-related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void DecreaseLoDLevel(const std::string& in, const std::string& out,
                        size_t i = 0, size_t j = 0) const override {
    PADDLE_THROW(
        "DecreaseLoDLevel is only used at compile time. The calculation of "
        "the output's actual LoD differs among operators, so it should be "
        "set in the runtime kernel.");
  }

  void IncreaseLoDLevel(const std::string& in, const std::string& out,
                        size_t i = 0, size_t j = 0) const override {
    PADDLE_THROW(
        "IncreaseLoDLevel is only used at compile time. The calculation of "
        "the output's actual LoD differs among operators, so it should be "
        "set in the runtime kernel.");
  }

  bool IsRuntime() const override { return true; }

  // TODO(paddle-dev): Can this be template?
  std::vector<InferShapeVarPtr> GetInputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = InputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  std::vector<InferShapeVarPtr> GetOutputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = OutputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  DDim GetInputDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Input(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    return this->GetDim(vars[0]);
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    return GetDims(vars);
  }

  std::vector<proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    return GetVarTypes(InputVars(name));
  }

  std::vector<proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    return GetVarTypes(OutputVars(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto& vars = OutputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Output(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    SetDim(vars[0], dim);
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto& vars = OutputVars(name);
    SetDims(vars, dims);
  }

 protected:
  DDim GetDim(Variable* var) const {
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variables "
          "type_id is %s.",
          ToTypeName(var->Type()));
    }
  }

  std::vector<DDim> GetDims(const std::vector<Variable*>& vars) const {
    std::vector<DDim> ret;
    ret.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(ret),
                   [this](Variable* var) { return this->GetDim(var); });
    return ret;
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("Only compile time support this method");
  }

  void SetDim(Variable* var, const DDim& dim) {
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                   ToTypeName(var->Type()));
    }
  }

  void SetDims(const std::vector<Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
    PADDLE_ENFORCE_EQ(length, dims.size());
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time support this method");
  }

  std::vector<proto::VarType::Type> GetVarTypes(
      const std::vector<Variable*>& vars) const {
    std::vector<proto::VarType::Type> retv;
    retv.resize(vars.size());
    std::transform(vars.begin(), vars.end(), retv.begin(),
                   std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
                             this, std::placeholders::_1));
    return retv;
  }

  proto::VarType::Type GetVarType(Variable* var) const {
    return ToVarType(var->Type());
  }

 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    PADDLE_ENFORCE(it != ctx_.inputs.end(),
                   "Operator %s does not have the input %s.", op_.Type(), name);
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    PADDLE_ENFORCE(it != ctx_.outputs.end(),
                   "Operator %s does not have the output %s.", op_.Type(),
                   name);
    return it->second;
  }

  const OperatorBase& op_;
  const RuntimeContext& ctx_;
};

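// Enforces that an FP32/FP64 output tensor contains no Inf/NAN values;
// empty tensors and other data types are skipped.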
static void CheckTensorNANOrInf(const std::string& op_type,
                                const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (tensor.type() != proto::VarType::FP32 &&
      tensor.type() != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Operator %s output Tensor %s contains Inf", op_type, name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Operator %s output Tensor %s contains NAN", op_type, name);
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope, ctx);
  this->InferShape(&infer_shape_ctx);
}

std::vector<KernelConfig>* OperatorWithKernel::GetKernelConfig(
    const OpKernelType& key) const {
  auto config_iter = kernel_configs_map_.find(key);
  std::vector<KernelConfig>* kernel_configs = nullptr;
  if (config_iter != kernel_configs_map_.end()) {
    kernel_configs = &(config_iter->second);
  }
  return kernel_configs;
}

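// Builds (or reuses, when kEnableCacheRuntimeContext is attached to this
// op) a RuntimeContext and forwards to the three-argument RunImpl below.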
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  // To reduce the elapsed time of HasAttr, we use bool variables to record
  // the results of HasAttr.
  if (!enable_cache_runtime_context_ && HasAttr(kEnableCacheRuntimeContext))
    enable_cache_runtime_context_ = true;
  if (!all_kernels_must_compute_runtime_shape_ &&
      HasAttr(kAllKernelsMustComputeRuntimeShape))
    all_kernels_must_compute_runtime_shape_ = true;
  if (!enable_cache_runtime_context_) {
    RuntimeContext ctx(Inputs(), Outputs(), scope);
    RunImpl(scope, place, &ctx);
  } else {
    const Scope* cur_scope = &scope;
    if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
      std::lock_guard<std::mutex> lock(cache_update_mutex_);
      if (runtime_ctx_.get() == nullptr || pre_scope_ != cur_scope) {
        runtime_ctx_.reset(new RuntimeContext(Inputs(), Outputs(), scope));
        pre_scope_ = cur_scope;
      }
    }
    RunImpl(scope, place, runtime_ctx_.get());
  }
}

void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place,
                                 RuntimeContext* runtime_ctx) const {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
    ChooseKernel(*runtime_ctx, scope, place);
  }

  std::vector<KernelConfig>* kernel_configs = GetKernelConfig(*kernel_type_);

  // Do data transform.
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      PrepareData(scope, *kernel_type_, &transfered_inplace_vars, runtime_ctx);

  // exec_scope is the scope that the kernel is actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(kernel_type_->place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(kernel_type_->place_);
  }

  if (!all_kernels_must_compute_runtime_shape_) {
    RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, *runtime_ctx);
    this->InferShape(&infer_shape_ctx);
  }
  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext
  // not Scope. Imperative mode only pass inputs and get outputs.
  (*kernel_func_)(ExecutionContext(*this, exec_scope, *dev_ctx, *runtime_ctx,
                                   kernel_configs));

  if (!transfered_inplace_vars.empty()) {
    // There are in-place variables that have been transferred.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /* For profiling/benchmark only */
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_fast_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      // only check inserted vars,
      // please see executor.py for details of fast_check_nan_inf
      if (vname.rfind("debug_var") == 0) {
        VLOG(3) << "debugging nan/inf in var " << vname;

        auto* var = exec_scope.FindVar(vname);
        if (var == nullptr) continue;
        if (var->IsType<framework::LoDTensor>()) {
          CheckTensorNANOrInf(type_, vname, var->Get<framework::LoDTensor>());
        } else if (var->IsType<framework::SelectedRows>()) {
          CheckTensorNANOrInf(type_, vname,
                              var->Get<framework::SelectedRows>().value());
        }
      }
    }
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(type_, vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(type_, vname,
                            var->Get<framework::SelectedRows>().value());
      }
    }
  }

  // To solve issue #15032 (after a discussion with @Luotao): for CPU
  // inference the transfer scope is not cached, so delete it after the run
  // to avoid a memory leak.
  if (transfer_scope && !run_by_executor_ && !enable_cache_transfer_scope_) {
    scope.DeleteScope(transfer_scope);
  }
}

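// Looks up the kernel matching GetExpectedKernelType() among the kernels
// registered for this op type (with an MKLDNN-to-plain fallback) and
// caches the chosen kernel type and kernel function.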
void OperatorWithKernel::ChooseKernel(const RuntimeContext& ctx,
                                      const Scope& scope,
                                      const platform::Place& place) const {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = this->GetExpectedKernelType(
      ExecutionContext(*this, scope, *dev_ctx, ctx, nullptr));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: fallbacking to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  std::lock_guard<std::mutex> lock(cache_update_mutex_);
  if (kernel_type_.get() == nullptr || kernel_func_.get() == nullptr) {
    kernel_type_.reset(new OpKernelType(expected_kernel_key));
    kernel_func_.reset(new OpKernelFunc(kernel_iter->second));
  }
}

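// Shares the data of each transformed in-place variable in transfer_scope
// back into the corresponding variable of the original scope.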
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to it's original scope";
    auto* origin_var = scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(origin_var, "The var[%s] should not be nullptr.",
                            var_name);
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(origin_var);
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE_NOT_NULL(var, "The var[%s] should not be nullptr.",
                            var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

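// For each tensor input whose (place, layout, dtype) does not match what
// the chosen kernel expects, creates a transformed copy in a (possibly
// cached) transfer scope and rebinds the input to it. Returns the transfer
// scope, or nullptr when no transformation was needed.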
Scope* OperatorWithKernel::PrepareData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx) const {
  Scope* new_scope = nullptr;

  const std::unordered_set<std::string>* no_buffer_ins = nullptr;
  if (info_) {
    auto& no_buffer_inferer = info_->NoNeedBufferVarsInferer();
    // Some ops may not register NoNeedBufferVarsInferer.
    if (no_buffer_inferer) {
      no_buffer_ins = &(no_buffer_inferer(Inputs(), Outputs(), Attrs()));
      if (no_buffer_ins->empty()) no_buffer_ins = nullptr;
    }
  }

  for (auto& var_name_item : Inputs()) {
    if (no_buffer_ins && no_buffer_ins->count(var_name_item.first) > 0) {
      VLOG(7) << "Skip scanning input " << var_name_item.first
              << " in Operator " << type_;
      continue;
    }

    std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];

    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      auto& var_name = var_name_item.second[i];
      auto* var = input_vars[i];

      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, the scopes are reused across batches, so
      // the `new_scope` here would cause GPU memory to grow without bound
      // over the running of operators.
      // We use a thread_local cache to fix that issue; the key in the cache
      // is the combination of the `scope` argument, from_kernel_type, and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if any
      // change to this logic might not be tested in the other scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it is
      // called by a NaiveExecutor, which caches the scopes and variables
      // itself and behaves quite differently.
      //
      // To solve issue #15032 (after a discussion with @Luotao): for CPU
      // inference, i.e. all-CPU kernels without GPU participation, we do not
      // cache the transfer scope here; tests show CPU inference performance
      // is not impacted.
      enable_cache_transfer_scope_ = false;
      if (!run_by_executor_ &&
          (platform::is_gpu_place(kernel_type_for_var.place_) ||
           platform::is_gpu_place(expected_kernel_key.place_))) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
        enable_cache_transfer_scope_ = true;
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }
      // For inference, if a GPU model has an op which can only run on CPU,
      // the results for different inputs would all equal the first one.
      // The reason is that when a GPU tensor is the input of a CPU kernel,
      // we create a new CPU tensor in the new scope.
      // However, with enable_cache_runtime_context_ we would keep getting
      // that cached CPU tensor each time instead of the fresh GPU tensor.
      // Thus, we set pre_scope_ = nullptr to trigger `new RuntimeContext()`
      // in RunImpl().
      if (enable_cache_runtime_context_) {
        pre_scope_ = nullptr;
      }

      auto* trans_var = new_scope->Var(var_name);
      input_vars[i] = trans_var;

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

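// Folds the dtypes of all tensors under the input slot `name` into
// *data_type, enforcing that duplicable inputs share one consistent dtype.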
void OperatorWithKernel::ParseInputDataType(
    const ExecutionContext& ctx, const std::string& name,
    proto::VarType::Type* data_type) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  const std::vector<const Variable*> vars = ctx.MultiInputVar(name);
  for (size_t i = 0; i < vars.size(); ++i) {
    const Variable* var = vars[i];
    if (var != nullptr) {
      const Tensor* t = nullptr;
      if (var->IsType<Tensor>()) {
        t = &var->Get<Tensor>();
      } else if (var->IsType<LoDTensor>()) {
        t = &var->Get<LoDTensor>();
      } else if (var->IsType<SelectedRows>()) {
        t = &(var->Get<SelectedRows>().value());
      }
      if (t != nullptr) {
        PADDLE_ENFORCE_EQ(t->IsInitialized(), true,
                          "The Tensor in the %s Op's Input Variable %s(%s) is "
                          "not initialized.",
                          Type(), name, ctx.Inputs(name).at(i));
        proto::VarType::Type tmp = t->type();
        PADDLE_ENFORCE(tmp == *data_type || *data_type == default_data_type,
                       "The DataType of %s Op's duplicable Variable %s must be "
                       "consistent. The current variable type is (%s), but the "
                       "previous variable type is (%s).",
                       Type(), name, DataTypeToString(tmp),
                       DataTypeToString(*data_type));
        *data_type = tmp;
      }
    }
  }
}

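// Infers the kernel data type by scanning all inputs; fails when no
// initialized tensor input indicates one.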
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = default_data_type;
  for (auto& input : ctx.Context().inputs) {
    ParseInputDataType(ctx, input.first, &data_type);
  }
  PADDLE_ENFORCE_NE(data_type, default_data_type,
                    "DataType should be indicated by input Variable.");
  return data_type;
}

proto::VarType::Type OperatorWithKernel::IndicateVarDataType(
    const ExecutionContext& ctx, const std::string& name) const {
  proto::VarType::Type default_data_type =
      static_cast<proto::VarType::Type>(-1);
  proto::VarType::Type data_type = default_data_type;
  ParseInputDataType(ctx, name, &data_type);
  PADDLE_ENFORCE_NE(
      data_type, default_data_type,
      "The Input Variable(%s) of %s Op used to determine kernel data type "
      "is empty or is neither LoDTensor nor SelectedRows.",
      name, Type());
  return data_type;
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle