/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>
#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Checking whether operator produce NAN/INF or not. It will be "
            "extremely slow so please use this flag wisely.");

namespace paddle {
namespace framework {

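// Preferred kernel lookup order: CUDNN on GPU, then plain GPU, then MKLDNN
// on CPU, then plain CPU.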
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return var->Get<framework::LoDTensor>().type();
  } else if (var->IsType<framework::SelectedRows>()) {
    return var->Get<framework::SelectedRows>().value().type();
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(tensor.type());
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(tensor.type());
    }
  } else {
    return "";
  }
}

static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

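// RuntimeContext resolves every declared input/output name to its Variable*
// once, so shape inference and kernel execution do not have to repeat the
// name lookup in the Scope.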
RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

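// Executor-facing entry point: selects the device for GPU places, optionally
// records a profiler event, and delegates the real work to RunImpl().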
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(4) << place << " " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }

  // The profiler has a process-wide mutex, which causes a serious performance
  // issue in concurrency scenarios. Here we use an `if` to fix this issue.
  // Please do not remove the `if`; ask @Superjomn if there are any concerns.
  if (platform::IsProfileEnabled()) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    platform::RecordEvent record_event(Type(), pool.Get(place));
    RunImpl(scope, place);
  } else {
    RunImpl(scope, place);
  }
  VLOG(3) << place << " " << DebugStringEx(&scope);
}

void OperatorBase::RunPrepared(const RuntimeContext& ctx,
                               const platform::Place& place) {
  RunImplPrepared(ctx, place);
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  if (outputs_.find(name) != outputs_.end()) {
    return true;
  } else {
    return false;
  }
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

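// Builds a human-readable summary of the operator. When a Scope is given,
// each variable is annotated with its row size, dtype, dims and LoD.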
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

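// Helpers that view a Variable either as a LoDTensor or as the value tensor
// stored inside a SelectedRows.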
static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var.Type().name());
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

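// HasInput/HasOutput check that a single-variable slot is declared on the
// operator and bound to an existing Variable in the scope.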
bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one inputs", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one inputs", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

const Variable* ExecutionContext::LegacyInputVar(
    const std::string& name) const {
  auto ipt = op_.Input(name);
  return ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::LegacyOutputVar(const std::string& name) const {
  auto opt = op_.Output(name);
  return opt == kEmptyVarName ? nullptr : scope_.FindVar(opt);
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const Tensor* ExecutionContext::LegacyInput<Tensor>(
    const std::string& name) const {
  return LegacyInput<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) {
    return {};
  }
  const std::vector<Variable*>& vars = it->second;
  std::vector<const Tensor*> res;
  res.reserve(vars.size());
  std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                 [&](Variable* var) -> const Tensor* {
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "should be LoDTensor, but the received type is %s",
                       var->Type().name());
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
const std::vector<const Tensor*> ExecutionContext::LegacyMultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> const Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
Tensor* ExecutionContext::LegacyOutput<Tensor>(const std::string& name) const {
  return LegacyOutput<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return var->GetMutable<LoDTensor>();
                 });
  return res;
}

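// An operator supports GPU if any of its registered kernels can run on a GPU
// place; operators without kernels (control ops) are assumed to support it.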
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operator must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

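// InferShapeContext implementation used at run time: all shape queries and
// updates operate directly on the Variables held by the RuntimeContext.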
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope,
                           const RuntimeContext& ctx)
      : op_(op), scope_(scope), ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0) return false;
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one inputs", name);
    return in[0] != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one outputs", name);
    return out[0] != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end() || it->second.empty()) {
      return false;
    }
    for (auto& input : it->second) {
      if (input == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end() || it->second.empty()) {
      return false;
    }
    for (auto& output : it->second) {
      if (output == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];

    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", in, out);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second.at(i);
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = out_it->second.at(j);
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void DecreaseLoDLevel(const std::string& in, const std::string& out,
                        size_t i = 0, size_t j = 0) const override {
    PADDLE_THROW("DecreaseLoDLevel is only used in compile time.");
  }

  bool IsRuntime() const override { return true; }

  // TODO(paddle-dev): Can this be template?
  std::vector<InferShapeVarPtr> GetInputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = InputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  std::vector<InferShapeVarPtr> GetOutputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = OutputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  DDim GetInputDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Input(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    return this->GetDim(vars[0]);
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    return GetDims(vars);
  }

  std::vector<proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    return GetVarTypes(InputVars(name));
  }

  std::vector<proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    return GetVarTypes(OutputVars(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto& vars = OutputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Output(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    SetDim(vars[0], dim);
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto& vars = OutputVars(name);
    SetDims(vars, dims);
  }

 protected:
  DDim GetDim(Variable* var) const {
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variables "
          "type_id is %s.",
          var->Type().name());
    }
  }

  std::vector<DDim> GetDims(const std::vector<Variable*>& vars) const {
    std::vector<DDim> ret;
    ret.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(ret),
                   [this](Variable* var) { return this->GetDim(var); });
    return ret;
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("Only compile time support this method");
  }

  void SetDim(Variable* var, const DDim& dim) {
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                   var->Type().name());
    }
  }

  void SetDims(const std::vector<Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
    PADDLE_ENFORCE_EQ(length, dims.size());
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time support this method");
  }

  std::vector<proto::VarType::Type> GetVarTypes(
      const std::vector<Variable*>& vars) const {
    std::vector<proto::VarType::Type> retv;
    retv.resize(vars.size());
    std::transform(vars.begin(), vars.end(), retv.begin(),
                   std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
                             this, std::placeholders::_1));
    return retv;
  }

  proto::VarType::Type GetVarType(Variable* var) const {
    return ToVarType(var->Type());
  }

 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    PADDLE_ENFORCE(it != ctx_.inputs.end(),
                   "Operator %s does not have the input %s.", op_.Type(), name);
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    PADDLE_ENFORCE(it != ctx_.outputs.end(),
                   "Operator %s does not have the outputs %s.", op_.Type(),
                   name);
    return it->second;
  }

  const OperatorBase& op_;
  const Scope& scope_;
  const RuntimeContext& ctx_;
};

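// Used when FLAGS_check_nan_inf is set: fails if a FP32/FP64 tensor contains
// Inf or NaN values.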
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (tensor.type() != proto::VarType::FP32 &&
      tensor.type() != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope, ctx);
  this->InferShape(&infer_shape_ctx);
}

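// Core execution path: pick a kernel for the expected kernel type (with an
// MKLDNN fallback), transfer inputs if needed, run shape inference on the
// exec scope, invoke the kernel, and copy in-place results back.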
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeContext ctx(Inputs(), Outputs(), scope);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = this->GetExpectedKernelType(
      ExecutionContext(*this, scope, *dev_ctx, ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: fallbacking to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform; transformed inputs are placed in a transfer scope.
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      PrepareData(scope, expected_kernel_key, &transfered_inplace_vars, &ctx);

  // exec scope is the scope that kernel actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, ctx);
  this->InferShape(&infer_shape_ctx);
  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext
  // not Scope. Imperative mode only pass inputs and get outputs.
  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx, ctx));

  if (!transfered_inplace_vars.empty()) {
    // there are in-place variables that have been transferred.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}

void OperatorWithKernel::RunImplPrepared(const RuntimeContext& ctx,
                                         const platform::Place& place) const {
  Scope dummy_scope;
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = this->GetExpectedKernelType(
      ExecutionContext(*this, dummy_scope, *dev_ctx, ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: fallbacking to PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  RuntimeInferShapeContext infer_shape_ctx(*this, dummy_scope, ctx);
  this->InferShape(&infer_shape_ctx);
  kernel_iter->second(ExecutionContext(*this, dummy_scope, *dev_ctx, ctx));
}

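// After an in-place variable was transformed in the transfer scope, share its
// data back into the tensor held by the original scope.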
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to it's original scope";
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name));
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
                   var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

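// Transfers every input tensor whose place/layout/dtype does not match the
// expected kernel type into a (possibly cached) transfer scope; returns that
// scope, or nullptr if nothing needed to be transferred.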
Scope* OperatorWithKernel::PrepareData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];

    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      auto& var_name = var_name_item.second[i];
      auto* var = input_vars[i];

      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, the scopes will be reused across the
      // batches, so the `new_scope` here will result in GPU memory explosion
      // over the running of operators.
      // We use a thread_local cache to fix that issue, the key in the cache is
      // the combination of the `scope` argument, from_kernel_type,
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if some
      // changes on this logic for this macro might not be tested on the other
      // scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it should
      // be called by a NaiveExecutor; the NaiveExecutor will cache the scopes
      // and variables, which behaves quite differently.
      if (!run_by_executor_) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      input_vars[i] = trans_var;

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

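// Deduces the operator's data type by scanning all initialized input tensors
// and enforcing that they agree on a single type.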
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  int data_type = -1;
  for (auto& input : this->inputs_) {
    const std::vector<const Variable*> vars = ctx.MultiInputVar(input.first);
    for (size_t i = 0; i < vars.size(); ++i) {
      const Variable* var = vars[i];
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          PADDLE_ENFORCE(t->IsInitialized(), "Input %s(%lu)is not initialized",
                         input.first, i);
          int tmp = static_cast<int>(t->type());
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get (%d) != (%d)",
              Type(), data_type, tmp);
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

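// Default kernel type deduction: data type comes from the inputs, place from
// the execution context; the per-variable kernel type keeps the expected data
// type but uses the tensor's own place and layout.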
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle