/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Checking whether operators produce NAN/INF or not. It will be "
            "extremely slow, so please use this flag wisely.");

namespace paddle {
namespace framework {

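// Candidate (place, library) pairs for kernel selection, ordered from most
// to least preferred.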
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

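// Debug helper: returns the dims of the variable `name` in `scope`, or
// DDim({-1}) when the variable is absent or is neither a LoDTensor nor a
// SelectedRows.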
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    return var->Get<LoDTensor>().dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

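// Debug helper: returns the LoD of the variable `name` if it holds a
// LoDTensor, otherwise an empty LoD.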
static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    return var->Get<LoDTensor>().lod();
  } else {
    return default_lod;
  }
}

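// Binds the current thread to the target CUDA device when running on a GPU
// place, then dispatches to the type-specific RunImpl.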
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }
  RunImpl(scope, place);
}

bool OperatorBase::HasInputs(const std::string& name) const {
  return inputs_.find(name) != inputs_.end();
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  return outputs_.find(name) != outputs_.end();
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

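// Renders the operator as "Op(type), inputs:{...}, outputs:{...}"; when a
// scope is given, each variable is annotated with its dims and LoD.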
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      ss << input.second[i];
      if (scope) {
        ss << "[" << GetDims(*scope, input.second[i], true) << "]";
        ss << "(" << GetLoD(*scope, input.second[i]) << ")";
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      ss << output.second[i];
      if (scope) {
        ss << "[" << GetDims(*scope, output.second[i], true) << "]";
        ss << "(" << GetLoD(*scope, output.second[i]) << ")";
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

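// Enforces that every non-dispensable input and output declared in the op
// proto has actually been provided.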
void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

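// Helpers for variables whose payload is tensor-like: VarIsTensor tests for
// it, and the two getters below view the payload as a Tensor for both the
// LoDTensor and SelectedRows cases.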
static bool VarIsTensor(const Variable* var) {
  return var->IsType<LoDTensor>() || var->IsType<SelectedRows>();
}

static const Tensor* GetTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

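// Illustrative kernel-side usage (the slot names "X" and "Out" below are
// hypothetical, not fixed by this file):
//   auto* x = ctx.Input<Tensor>("X");      // nullptr if the input is unset
//   auto* out = ctx.Output<Tensor>("Out");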
template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr
                        : GetTensorFromVar(const_cast<Variable*>(var));
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}

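// An op supports GPU if any of its registered kernels runs on a GPU place;
// ops with no registered kernels at all are assumed to.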
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

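// InferShapeContext backed by a live Scope: shape queries read and write the
// actual runtime variables instead of compile-time descriptions.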
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto& ins = Inputs(name);
    size_t length = ins.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Input %s should not have more than one input", name);
    auto ipt = ins[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto& outs = Outputs(name);
    size_t length = outs.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Output %s should not have more than one output", name);
    auto ipt = outs[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

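  // Copies the LoD of input `in`[i] to output `out`[j]; the layout is shared
  // as well, except when the input layout is kMKLDNN (see the workaround
  // below).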
  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensor?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN being wrongly populated into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPKernels, set_layout(kMKLDNN) should be
    //    called in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void ShareLayout(const std::string& in, const std::string& out, size_t i = 0,
                   size_t j = 0) const {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("This method is only supported at compile time");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("This method is only supported at compile time");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

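// Raises an error if a float or double tensor contains any Inf or NaN value.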
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (tensor.type().hash_code() != typeid(float).hash_code() &&   // NOLINT
      tensor.type().hash_code() != typeid(double).hash_code()) {  // NOLINT
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

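// Kernel dispatch: run InferShape, look up the kernel matching the expected
// kernel type, transform any input whose (place, layout, type) differs from
// what the kernel expects into a child scope, run the kernel there, and
// finally share transformed in-place variables back into the original scope.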
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // For profiling, don't move out of this function because that will result
  // in the failure of multi-GPU profiling.
  platform::RecordEvent record_event(Type(), dev_ctx);
  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  ExecutionContext ctx(*this, scope, *dev_ctx);

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key = this->GetExpectedKernelType(ctx);
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform
  Scope& new_scope = scope.NewScope();

  std::vector<std::string> inplace_vars;
  for (auto& var_name_item : this->Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      if (var && VarIsTensor(var)) {
        auto* tensor_in = GetTensorFromVar(var);
        if (tensor_in->IsInitialized()) {
          auto kernel_type_for_var = this->GetKernelTypeForVar(
              var_name_item.first, *tensor_in, expected_kernel_key);
          if (TransFromNeeded(kernel_type_for_var, expected_kernel_key)) {
            auto out_var_names = OutputVars(true);
            if (std::find(out_var_names.begin(), out_var_names.end(),
                          var_name) != out_var_names.end()) {
              inplace_vars.push_back(var_name);
            }
            VLOG(3) << "Transform Variable " << var_name << " from "
                    << kernel_type_for_var << " to " << expected_kernel_key;
            auto* trans_var = new_scope.Var(var_name);
            std::shared_ptr<Tensor> out(new Tensor);
            DataTransform(expected_kernel_key, kernel_type_for_var, *tensor_in,
                          out.get());
            CopyVariableWithTensor(*var, *(out.get()), trans_var);
          }
        }
      }
    }
  }

  auto* new_dev_ctx = pool.Get(expected_kernel_key.place_);
  kernel_iter->second->Compute(
      ExecutionContext(*this, new_scope, *new_dev_ctx));

  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name));
    auto* transformed_tensor = GetTensorFromVar(new_scope.FindVar(var_name));
    original_tensor->ShareDataWith(*transformed_tensor);
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    new_dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = new_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      }
    }
  }
}

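// Scans all initialized input tensors and returns their common data type,
// enforcing that the inputs do not mix types.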
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get %d != %d", Type(),
              data_type, tmp);
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

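// Default kernel-selection hooks; ops override these when the kernel choice
// depends on more than the input data type and place.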
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle