/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/transfer_scope_cache.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Checking whether an operator produces NAN/INF or not. It will be "
            "extremely slow, so please use this flag wisely.");

namespace paddle {
namespace framework {

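// Preferred (place, library) pairs for kernel selection, ordered from
// highest to lowest priority.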
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

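// Returns the data type of the variable, which must hold either a LoDTensor
// or a SelectedRows.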
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return var->Get<framework::LoDTensor>().type();
  } else if (var->IsType<framework::SelectedRows>()) {
    return var->Get<framework::SelectedRows>().value().type();
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

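// Returns the dims of the variable named `name` in `scope`, or DDim({-1})
// when the variable is missing or uninitialized. For SelectedRows,
// `get_actual_dim` selects the value dims instead of the complete dims.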
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return DDim({-1});
    }
    return tensor.dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

static bool VarInited(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) return false;
  return var->IsInitialized();
}

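// Returns a printable dtype string for debugging, or an empty (or
// "uninited") marker when the variable is missing or uninitialized.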
static std::string GetDtype(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return "";
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "";
    }
    return DataTypeToString(tensor.type());
  } else if (var->IsType<SelectedRows>()) {
    auto tensor = var->Get<SelectedRows>().value();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return "uninited";
    } else {
      return DataTypeToString(tensor.type());
    }
  } else {
    return "";
  }
}

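// Returns the row count of a SelectedRows variable, or -1 for anything else.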
static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

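// Returns the LoD of a LoDTensor variable, or an empty LoD otherwise.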
static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    const LoDTensor& tensor = var->Get<LoDTensor>();
    if (UNLIKELY(!tensor.IsInitialized())) {
      return default_lod;
    }
    return tensor.lod();
  } else {
    return default_lod;
  }
}

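// Resolves every input/output name to its Variable* in `scope` up front, so
// later lookups do not need to search the Scope again.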
RuntimeContext::RuntimeContext(const VariableNameMap& innames,
                               const VariableNameMap& outnames,
                               const Scope& scope) {
  for (auto& var_name_item : innames) {
    std::vector<Variable*>& input_vars = inputs[var_name_item.first];
    input_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      input_vars.push_back(scope.FindVar(var_name));
    }
  }
  for (auto& var_name_item : outnames) {
    std::vector<Variable*>& output_vars = outputs[var_name_item.first];
    output_vars.reserve(var_name_item.second.size());
    for (auto& var_name : var_name_item.second) {
      output_vars.push_back(scope.FindVar(var_name));
    }
  }
}

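// The public entry point for running an op: binds the CUDA device if needed,
// wraps execution in a profiler event when profiling is enabled, and
// delegates the real work to RunImpl.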
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  VLOG(4) << place << " " << DebugStringEx(&scope);
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }

  // The profiler has a process-wide mutex, which results in a serious
  // performance issue in concurrency scenarios. Here we use an `if` to fix
  // this issue. Please do not remove the `if`; ask @Superjomn if there is
  // any concern.
  if (platform::IsProfileEnabled()) {
    platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
    platform::RecordEvent record_event(Type(), pool.Get(place));
    RunImpl(scope, place);
  } else {
    RunImpl(scope, place);
  }
  VLOG(3) << place << " " << DebugStringEx(&scope);
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

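// Builds a one-line description of the op. When `scope` is non-null, each
// variable name is annotated with its init state, row size, dtype, dims and
// LoD as found in that scope.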
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      auto var_name = input.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, var_name);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, var_name);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      auto var_name = output.second[i];
      ss << var_name;
      if (scope) {
        if (!VarInited(*scope, var_name)) {
          ss << "[uninited]";
        } else {
          int row_size = GetRowSize(*scope, output.second[i]);
          if (row_size >= 0) {
            ss << "[row_size=" << row_size << "]";
          }
          std::string dtype = GetDtype(*scope, output.second[i]);
          ss << ":" << dtype;
          ss << "[" << GetDims(*scope, var_name, true) << "]";
          ss << "(" << GetLoD(*scope, var_name) << ")";
        }
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

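// Makes each kTempVarName output unique by appending the op type and a
// process-wide atomic counter.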
void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var) {
  if (var.IsType<LoDTensor>()) {
    return static_cast<const Tensor*>(&(var.Get<LoDTensor>()));
  } else if (var.IsType<SelectedRows>()) {
    return &(var.Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var.Type().name());
  }
}

Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one inputs", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one inputs", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

const Variable* ExecutionContext::InputVar(const std::string& name) const {
  auto it = ctx_.inputs.find(name);
  if (it == ctx_.inputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

const Variable* ExecutionContext::LegacyInputVar(
    const std::string& name) const {
  auto ipt = op_.Input(name);
  return ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
}

Variable* ExecutionContext::OutputVar(const std::string& name) const {
  auto it = ctx_.outputs.find(name);
  if (it == ctx_.outputs.end()) return nullptr;

  PADDLE_ENFORCE_LE(it->second.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    op_.Type(), name);
  return it->second.empty() ? nullptr : it->second[0];
}

Variable* ExecutionContext::LegacyOutputVar(const std::string& name) const {
  auto opt = op_.Output(name);
  return opt == kEmptyVarName ? nullptr : scope_.FindVar(opt);
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  return Input<LoDTensor>(name);
}

template <>
const Tensor* ExecutionContext::LegacyInput<Tensor>(
    const std::string& name) const {
  return LegacyInput<LoDTensor>(name);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> const Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return &(var->Get<LoDTensor>());
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  return Output<LoDTensor>(name);
}

template <>
Tensor* ExecutionContext::LegacyOutput<Tensor>(const std::string& name) const {
  return LegacyOutput<LoDTensor>(name);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) -> Tensor* {
                   auto var = scope_.FindVar(sub_name);
                   if (var == nullptr) return nullptr;
                   PADDLE_ENFORCE(
                       var->IsType<LoDTensor>(),
                       "%s should be LoDTensor, but the received type is %s",
                       sub_name, var->Type().name());
                   return var->GetMutable<LoDTensor>();
                 });
  return res;
}

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU.
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

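// An InferShapeContext backed by the pre-resolved Variable pointers of a
// RuntimeContext, used to run InferShape at runtime.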
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope,
                           const RuntimeContext& ctx)
      : op_(op), scope_(scope), ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    // has only one input
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end()) {
      return false;
    }
    const auto& in = it->second;
    if (in.size() == 0) return false;
    PADDLE_ENFORCE_EQ(in.size(), 1UL,
                      "Input %s should not have more than one input", name);
    return in[0] != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    // has only one output
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end()) {
      return false;
    }
    const auto& out = it->second;
    if (out.size() == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(out.size(), 1UL,
                      "Output %s should not have more than one output", name);
    return out[0] != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    const auto& ins = ctx_.inputs;
    auto it = ins.find(name);
    if (it == ins.end() || it->second.empty()) {
      return false;
    }
    for (auto& input : it->second) {
      if (input == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    const auto& outs = ctx_.outputs;
    auto it = outs.find(name);
    if (it == outs.end() || it->second.empty()) {
      return false;
    }
    for (auto& output : it->second) {
      if (output == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareDim(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second[i];
    Variable* out_var = out_it->second[j];

    PADDLE_ENFORCE(in_var->Type() == out_var->Type(),
                   "The type of %s and %s is not the same.", in, out);

    if (in_var->IsType<framework::SelectedRows>()) {
      auto& in_sele_rows = in_var->Get<framework::SelectedRows>();
      auto out_sele_rows = out_var->GetMutable<framework::SelectedRows>();
      out_sele_rows->mutable_value()->Resize(in_sele_rows.value().dims());
      out_sele_rows->set_rows(in_sele_rows.rows());
      out_sele_rows->set_height(in_sele_rows.height());
    } else if (in_var->IsType<framework::LoDTensor>()) {
      auto& in_lod_tensor = in_var->Get<framework::LoDTensor>();
      auto* out_lod_tensor = out_var->GetMutable<framework::LoDTensor>();
      out_lod_tensor->Resize(in_lod_tensor.dims());
    } else {
      PADDLE_THROW(
          "Currently, the input type of ShareDim only can be LoDTensor "
          "or SelectedRows.");
    }
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    auto in_it = ctx_.inputs.find(in);
    auto out_it = ctx_.outputs.find(out);
    PADDLE_ENFORCE(in_it != ctx_.inputs.end() && in_it->second.size() > i,
                   "Inputs %s should have %llu argument", in, i);
    PADDLE_ENFORCE(out_it != ctx_.outputs.end() && out_it->second.size() > j,
                   "Outputs %s should have %llu argument", out, j);

    Variable* in_var = in_it->second.at(i);
    if (!in_var->IsType<LoDTensor>()) return;
    Variable* out_var = out_it->second.at(j);
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter): reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence-related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below.
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. in ShareLoD). Instead,
    //    the layout of the output tensor should be set "manually" in Compute()
    //    of each OpKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that a layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when the input layout is kMKLDNN.
    //    This is to avoid kMKLDNN being populated wrongly into a non-MKLDNN
    //    OpKernel. In all MKLDNN OpKernels, set_layout(kMKLDNN) should be
    //    called in Compute().
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void DecreaseLoDLevel(const std::string& in, const std::string& out,
                        size_t i = 0, size_t j = 0) const override {
    PADDLE_THROW("DecreaseLoDLevel is only used in compile time.");
  }

  bool IsRuntime() const override { return true; }

  // TODO(paddle-dev): Can this be a template?
  std::vector<InferShapeVarPtr> GetInputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = InputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  std::vector<InferShapeVarPtr> GetOutputVarPtrs(
      const std::string& name) override {
    const std::vector<Variable*>& vars = OutputVars(name);
    std::vector<InferShapeVarPtr> res;
    res.reserve(vars.size());
    res.insert(res.begin(), vars.begin(), vars.end());
    return res;
  }

  DDim GetInputDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Input(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    return this->GetDim(vars[0]);
  }

  std::vector<DDim> GetInputsDim(const std::string& name) const override {
    const std::vector<Variable*>& vars = InputVars(name);
    return GetDims(vars);
  }

  std::vector<proto::VarType::Type> GetInputsVarType(
      const std::string& name) const override {
    return GetVarTypes(InputVars(name));
  }

  std::vector<proto::VarType::Type> GetOutputsVarType(
      const std::string& name) const override {
    return GetVarTypes(OutputVars(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    auto& vars = OutputVars(name);
    PADDLE_ENFORCE_EQ(vars.size(), 1UL,
                      "Output(%s) should hold one element, but now it holds %d",
                      name, vars.size());
    SetDim(vars[0], dim);
  }

  void SetOutputsDim(const std::string& name,
                     const std::vector<DDim>& dims) override {
    auto& vars = OutputVars(name);
    SetDims(vars, dims);
  }

 protected:
  DDim GetDim(Variable* var) const {
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable's "
          "type_id is %s.",
          var->Type().name());
    }
  }

  std::vector<DDim> GetDims(const std::vector<Variable*>& vars) const {
    std::vector<DDim> ret;
    ret.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(ret),
                   [this](Variable* var) { return this->GetDim(var); });
    return ret;
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("GetRepeatedDims is only used in compile time.");
  }

  void SetDim(Variable* var, const DDim& dim) {
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                   var->Type().name());
    }
  }

  void SetDims(const std::vector<Variable*>& vars,
               const std::vector<DDim>& dims) {
    size_t length = vars.size();
    PADDLE_ENFORCE_EQ(length, dims.size());
    for (size_t i = 0; i < length; ++i) {
      if (vars[i] == nullptr) {
        continue;
      }
      SetDim(vars[i], dims[i]);
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("SetRepeatedDims is only used in compile time.");
  }

  std::vector<proto::VarType::Type> GetVarTypes(
      const std::vector<Variable*>& vars) const {
    std::vector<proto::VarType::Type> retv;
    retv.resize(vars.size());
    std::transform(vars.begin(), vars.end(), retv.begin(),
                   std::bind(std::mem_fn(&RuntimeInferShapeContext::GetVarType),
                             this, std::placeholders::_1));
    return retv;
  }

  proto::VarType::Type GetVarType(Variable* var) const {
    return ToVarType(var->Type());
  }

 private:
  const std::vector<Variable*>& InputVars(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    PADDLE_ENFORCE(it != ctx_.inputs.end(),
                   "Operator %s does not have the input %s.", op_.Type(), name);
    return it->second;
  }

  const std::vector<Variable*>& OutputVars(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    PADDLE_ENFORCE(it != ctx_.outputs.end(),
                   "Operator %s does not have the outputs %s.", op_.Type(),
                   name);
    return it->second;
  }

  const OperatorBase& op_;
  const Scope& scope_;
  const RuntimeContext& ctx_;
};

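// Enforces that a FP32/FP64 tensor contains no Inf/NAN values; empty tensors
// and other dtypes are skipped.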
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (tensor.type() != proto::VarType::FP32 &&
      tensor.type() != proto::VarType::FP64) {
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

void OperatorWithKernel::RuntimeInferShape(const Scope& scope,
                                           const platform::Place& place,
                                           const RuntimeContext& ctx) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope, ctx);
  this->InferShape(&infer_shape_ctx);
}

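// Selects and runs the kernel for this op: picks the expected kernel key
// (with an MKLDNN fallback), transfers mismatched inputs, runs InferShape,
// invokes the kernel, and finally restores transferred in-place variables.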
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeContext ctx(Inputs(), Outputs(), scope);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  OpKernelMap& kernels = kernels_iter->second;

  auto expected_kernel_key = this->GetExpectedKernelType(
      ExecutionContext(*this, scope, *dev_ctx, ctx));
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
#ifdef PADDLE_WITH_MKLDNN
  // workaround for missing MKLDNN kernel when FLAGS_use_mkldnn env var is set
  if (kernel_iter == kernels.end() &&
      expected_kernel_key.library_type_ == LibraryType::kMKLDNN) {
    VLOG(3) << "missing MKLDNN kernel: falling back to the PLAIN one";
    expected_kernel_key.library_type_ = LibraryType::kPlain;
    expected_kernel_key.data_layout_ = DataLayout::kAnyLayout;
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform
  std::vector<std::string> transfered_inplace_vars;
  auto* transfer_scope =
      PrepareData(scope, expected_kernel_key, &transfered_inplace_vars, &ctx);

  // exec scope is the scope that kernel actually executed on.
  const Scope& exec_scope =
      (transfer_scope == nullptr ? scope : *transfer_scope);

  if (!(expected_kernel_key.place_ == dev_ctx->GetPlace())) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  RuntimeInferShapeContext infer_shape_ctx(*this, exec_scope, ctx);
  this->InferShape(&infer_shape_ctx);
  // TODO(panyx0718): ExecutionContext should only depend on RuntimeContext,
  // not Scope. Imperative mode only passes inputs and gets outputs.
  kernel_iter->second(ExecutionContext(*this, exec_scope, *dev_ctx, ctx));

  if (!transfered_inplace_vars.empty()) {
    // some in-place variables have been transferred; share them back.
    TransferInplaceVarsBack(scope, transfered_inplace_vars, *transfer_scope);
  }

  /* For profiling/benchmark only */
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = exec_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      } else if (var->IsType<framework::SelectedRows>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::SelectedRows>().value());
      }
    }
  }
}

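// Shares the transferred tensors in `transfer_scope` back into the
// corresponding variables of the original scope.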
void OperatorWithKernel::TransferInplaceVarsBack(
    const Scope& scope, const std::vector<std::string>& inplace_vars,
    const Scope& transfer_scope) const {
  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* original_tensor =
        GetMutableLoDTensorOrSelectedRowsValueFromVar(scope.FindVar(var_name));
    auto* var = transfer_scope.FindVar(var_name);
    PADDLE_ENFORCE(var != nullptr, "The var[%s] should not be nullptr",
                   var_name);
    auto* transformed_tensor = GetLoDTensorOrSelectedRowsValueFromVar(*var);
    original_tensor->ShareDataWith(*transformed_tensor);
  }
}

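// Transfers input tensors whose place/layout/dtype does not match the
// expected kernel key into a (possibly cached) transfer scope; records the
// in-place variables that were transferred so they can be shared back.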
Scope* OperatorWithKernel::PrepareData(
    const Scope& scope, const OpKernelType& expected_kernel_key,
    std::vector<std::string>* transfered_inplace_vars,
    RuntimeContext* ctx) const {
  Scope* new_scope = nullptr;
  for (auto& var_name_item : Inputs()) {
    std::vector<Variable*>& input_vars = ctx->inputs[var_name_item.first];

    for (size_t i = 0; i < var_name_item.second.size(); ++i) {
      auto& var_name = var_name_item.second[i];
      auto* var = input_vars[i];

      // Only tensors can be transferred to another device.
      if (var == nullptr || !VarIsTensor(*var)) {
        continue;
      }

      auto* tensor_in = GetLoDTensorOrSelectedRowsValueFromVar(*var);
      if (!tensor_in->IsInitialized()) {
        continue;
      }

      auto kernel_type_for_var = GetKernelTypeForVar(
          var_name_item.first, *tensor_in, expected_kernel_key);

      if (!NeedTransform(kernel_type_for_var, expected_kernel_key)) {
        continue;
      }

      auto out_var_names = OutputVars(true);
      if (std::find(out_var_names.begin(), out_var_names.end(), var_name) !=
          out_var_names.end()) {
        transfered_inplace_vars->emplace_back(var_name);
      }

      VLOG(3) << "Transform Variable " << var_name << " from "
              << kernel_type_for_var << " to " << expected_kernel_key;

      // In the inference scenario, the scopes will be reused across batches,
      // so the `new_scope` here would result in GPU memory explosion over
      // the running of operators.
      // We use a thread_local cache to fix that issue: the key in the cache
      // is the combination of the `scope` argument, from_kernel_type and
      // target_kernel_type.
      // Have a discussion with @Superjomn or the inference developers if
      // some changes on this logic might not be tested on the other
      // scenarios.
      // If this op is not called by an Executor or ParallelExecutor, it
      // should be called by a NaiveExecutor; the NaiveExecutor caches the
      // scopes and variables, so its behavior is a lot different.
      if (!run_by_executor_) {
        new_scope = TryCreateTransferScope(kernel_type_for_var,
                                           expected_kernel_key, &scope);
      }
      if (!new_scope) {
        new_scope = &scope.NewScope();
      }

      auto* trans_var = new_scope->Var(var_name);
      input_vars[i] = trans_var;

      Tensor out;
      TransformData(expected_kernel_key, kernel_type_for_var, *tensor_in, &out);
      SetTensorToVariable(*var, out, trans_var);
    }
  }

  return new_scope;
}

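// Infers the op's data type from its first initialized input tensor and
// enforces that all initialized input tensors share that type.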
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  std::string last_input_name;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          PADDLE_ENFORCE(t->IsInitialized(), "Input %s is not initialized: %s",
                         ipt_name, DebugString());
          int tmp = static_cast<int>(t->type());
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get %s(%d) != %s(%d)",
              Type(), last_input_name, data_type, ipt_name, tmp);
          data_type = tmp;
          last_input_name = ipt_name;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle