/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Checking whether operator produce NAN/INF or not. It will be "
            "extremely slow so please use this flag wisely.");

namespace paddle {
namespace framework {

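// Candidate (place, library) pairs for kernel selection, listed from most to
// least preferred.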
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return var->Get<framework::LoDTensor>().type();
  } else if (var->IsType<framework::SelectedRows>()) {
    return var->Get<framework::SelectedRows>().value().type();
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(tensor.type());
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(tensor.type());
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

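// RuntimeContext resolves every input and output variable name of an operator
// to its Variable* in the given scope once, so later accesses need no name
// lookup.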
RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(4) << place << " " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }

  // The profiler has a process-wide mutex, which causes serious performance
  // issues in concurrent scenarios. The `if` below avoids acquiring it when
  // profiling is disabled. Please do not remove the `if`; ask @Superjomn if
  // there is any concern.
  if (platform::IsProfileEnabled()) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    platform::RecordEvent record_event(Type(), pool.Get(place));
    RunImpl(scope, place);
  } else {
    RunImpl(scope, place);
  }
  VLOG(3) << place << " " << DebugStringEx(&scope);
}

void OperatorBase::Run(const RuntimeContext& ctx,
                       const platform::Place& place) {
  RunImpl(ctx, place);
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

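// Builds a human-readable description of the operator. When a scope is given,
// each variable is annotated with its row size (for SelectedRows), dtype,
// dims, and LoD.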
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var.Type().name());
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one inputs", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one inputs", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

const Variable* ExecutionContext::LegacyInputVar(
    const std::string& name) const {
  auto ipt = op_.Input(name);
  return ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::LegacyOutputVar(const std::string& name) const {
  auto opt = op_.Output(name);
  return opt == kEmptyVarName ? nullptr : scope_.FindVar(opt);
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const Tensor* ExecutionContext::LegacyInput<Tensor>(
    const std::string& name) const {
  return LegacyInput<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) {
    return {};
  }
  const std::vector<Variable*>& vars = it->second;
  std::vector<const Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](Variable* var) -> const Tensor* {
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "should be LoDTensor, but the received type is %s",
                       var->Type().name());
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
const std::vector<const Tensor*> ExecutionContext::LegacyMultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> const Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return &(var->Get<LoDTensor>());
514
                 });
515 516 517 518
  return res;
}

template <>
519
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
C
chengduo 已提交
520
  return Output<LoDTensor>(name);
521 522
}

X
X
clean  
Xin Pan 已提交
524 525
Tensor* ExecutionContext::LegacyOutput<Tensor>(const std::string& name) const {
  return LegacyOutput<LoDTensor>(name);
X

528
template <>
529
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
530 531 532 533
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
534
  std::transform(names.begin(), names.end(), std::back_inserter(res),
C
chengduo 已提交
535
                 [&](const std::string& sub_name) -> Tensor* {
536
                   auto var = scope_.FindVar(sub_name);
C
chengduo 已提交
537 538 539 540 541 542
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return var->GetMutable<LoDTensor>();
543
                 });
544 545 546
  return res;
}

Y
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

562 563
class RuntimeInferShapeContext : public InferShapeContext {
 public:
X
                           const RuntimeContext& ctx)
      : op_(op), scope_(scope), ctx_(ctx) {}
567 568

  bool HasInput(const std::string& name) const override {
569
    // has only one input
X
571 572
    auto it = ins.find(name);
    if (it == ins.end()) {
573 574
      return false;
    }
575
    const auto& in = it->second;
X
T
tensor-tang 已提交
577
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
F
fengjiayi 已提交
578
                      "Input %s should not have more than one inputs", name);
X
580 581 582
  }

  bool HasOutput(const std::string& name) const override {
583
    // has only one output
X
585 586
    auto it = outs.find(name);
    if (it == outs.end()) {
587 588
      return false;
    }
589
    const auto& out = it->second;
X
591 592
      return false;
    }
T
tensor-tang 已提交
593 594
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one outputs", name);
X
596 597 598
  }

  bool HasInputs(const std::string& name) const override {
X
    auto it = ins.find(name);
X
fix  
Xin Pan 已提交
601
    if (it == ins.end() || it->second.empty()) {
602 603
      return false;
    }
X
      if (input == nullptr) {
606 607 608 609 610 611 612
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
X
    auto it = outs.find(name);
X
fix  
Xin Pan 已提交
615
    if (it == outs.end() || it->second.empty()) {
616 617
      return false;
    }
X
      if (output == nullptr) {
620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];

    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", in, out);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second.at(i);
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = out_it->second.at(j);
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to shared info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void DecreaseLoDLevel(const std::string& in, const std::string& out,
                        size_t i = 0, size_t j = 0) const override {
    PADDLE_THROW("DecreaseLoDLevel is only used in compile time.");
  }

  bool IsRuntime() const override { return true; }

  // TODO(paddle-dev): Can this be template?
  std::vector<InferShapeVarPtr> GetInputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = InputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  std::vector<InferShapeVarPtr> GetOutputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = OutputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  DDim GetInputDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Input(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    return this->GetDim(vars[0]);
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    return GetDims(vars);
  }

  std::vector<proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    return GetVarTypes(InputVars(name));
  }

  std::vector<proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    return GetVarTypes(OutputVars(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto& vars = OutputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Output(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    SetDim(vars[0], dim);
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto& vars = OutputVars(name);
    SetDims(vars, dims);
  }

 protected:
  DDim GetDim(Variable* var) const {
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variables "
          "type_id is %s.",
          var->Type().name());
    }
  }

  std::vector<DDim> GetDims(const std::vector<Variable*>& vars) const {
    std::vector<DDim> ret;
    ret.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(ret),
                   [this](Variable* var) { return this->GetDim(var); });
    return ret;
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("Only compile time supports this method");
  }

  void SetDim(Variable* var, const DDim& dim) {
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                   var->Type().name());
    }
  }

  void SetDims(const std::vector<Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
    PADDLE_ENFORCE_EQ(length, dims.size());
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time supports this method");
  }

  std::vector<proto::VarType::Type> GetVarTypes(
      const std::vector<Variable*>& vars) const {
    std::vector<proto::VarType::Type> retv;
    retv.resize(vars.size());
    std::transform(vars.begin(), vars.end(), retv.begin(),
                   std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
                             this, std::placeholders::_1));
    return retv;
  }

  proto::VarType::Type GetVarType(Variable* var) const {
    return ToVarType(var->Type());
  }

 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    PADDLE_ENFORCE(it != ctx_.inputs.end(),
                   "Operator %s does not have the input %s.", op_.Type(), name);
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    PADDLE_ENFORCE(it != ctx_.outputs.end(),
                   "Operator %s does not have the outputs %s.", op_.Type(),
                   name);
    return it->second;
  }

  const OperatorBase& op_;
  const Scope& scope_;
  const RuntimeContext& ctx_;
};

static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (tensor.type() != proto::VarType::FP32 &&
      tensor.type() != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope, ctx);
  this->InferShape(&infer_shape_ctx);
}

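// Runs the operator with a kernel: build the RuntimeContext, pick the kernel
// that matches the expected kernel type, transfer mismatched inputs into a
// transfer scope if necessary, infer shapes, and invoke the kernel.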
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeContext ctx(Inputs(), Outputs(), scope);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = this->GetExpectedKernelType(
      ExecutionContext(*this, scope, *dev_ctx, ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // Do data transform.
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      PrepareData(scope, expected_kernel_key, &transfered_inplace_vars, &ctx);

  // exec scope is the scope that kernel actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, ctx);
  this->InferShape(&infer_shape_ctx);
  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext
  // not Scope. Imperative mode only passes inputs and gets outputs.
  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx, ctx));

  if (!transfered_inplace_vars.empty()) {
    // Some in-place variables have been transferred to the transfer scope;
    // share them back to the original scope.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}

void OperatorWithKernel::RunImpl(const RuntimeContext& ctx,
                                 const platform::Place& place) const {
  Scope scope;
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = this->GetExpectedKernelType(
      ExecutionContext(*this, scope, *dev_ctx, ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  RuntimeInferShapeContext infer_shape_ctx(*this, scope, ctx);
  this->InferShape(&infer_shape_ctx);
  kernel_iter->second(ExecutionContext(*this, scope, *dev_ctx, ctx));
}

void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name));
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
                   var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

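// Transfers input tensors whose layout/place/dtype do not match the expected
// kernel type into a (possibly cached) transfer scope. Returns the transfer
// scope, or nullptr if no transform was needed.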
Scope* OperatorWithKernel::PrepareData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];

    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      auto& var_name = var_name_item.second[i];
      auto* var = input_vars[i];

      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, scopes are reused across batches, so the
      // `new_scope` created here would cause GPU memory usage to grow without
      // bound as operators keep running.
      // We use a thread_local cache to fix this issue; the key in the cache is
      // the combination of the `scope` argument, from_kernel_type, and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if some
      // change to this logic might not be tested in the other scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it should
      // be called by a NaiveExecutor, which caches the scopes and variables;
      // that behavior is quite different.
      if (!run_by_executor_) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      input_vars[i] = trans_var;

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

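// Infers the operator's data type from its input tensors; all initialized
// inputs must share the same type.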
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  int data_type = -1;
  for (auto& input : this->inputs_) {
    for (const Variable* var : ctx.MultiInputVar(input.first)) {
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          PADDLE_ENFORCE(t->IsInitialized(), "Input %s is not initialized",
                         input.first);
          int tmp = static_cast<int>(t->type());
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get (%d) != (%d)",
              Type(), data_type, tmp);
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle