/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
DEFINE_bool(check_nan_inf, false,
            "Checking whether operators produce NAN/INF or not. It will be "
            "extremely slow, so please use this flag wisely.");

namespace paddle {
namespace framework {

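// Kernel (place, library) priority for kernel selection, ordered from most
// to least preferred: cuDNN on GPU, plain GPU, MKLDNN on CPU, plain CPU.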
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

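// Returns the element type held by a LoDTensor or SelectedRows variable;
// any other variable type is an error.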
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

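// Fetches the dims of variable `name` in `scope`; DDim({-1}) marks a missing
// variable or an unsupported type. For SelectedRows, `get_actual_dim` picks
// the dims of the value tensor instead of the complete dims.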
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    return var->Get<LoDTensor>().dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

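// Row count of a SelectedRows variable, or -1 when the variable is missing
// or holds another type.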
static int GetRowSize(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return -1;
  }

  if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().rows().size();
  }

  return -1;
}

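// LoD of a LoDTensor variable; a missing variable or any other type yields
// the empty default LoD.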
static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    return var->Get<LoDTensor>().lod();
  } else {
    return default_lod;
  }
}

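// Common entry point for all operators: pins the CUDA device when running on
// a GPU place (or throws if built without CUDA), then defers to RunImpl.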
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }
  RunImpl(scope, place);
}

bool OperatorBase::HasInputs(const std::string& name) const {
  if (inputs_.find(name) != inputs_.end()) {
    return true;
  } else {
    return false;
  }
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

bool OperatorBase::HasOutputs(const std::string& name) const {
  if (outputs_.find(name) != outputs_.end()) {
    return true;
  } else {
    return false;
  }
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

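// Renders the op as "Op(type), inputs:{...}, outputs:{...}". When a scope is
// given, each variable is annotated with its row size (SelectedRows only),
// dims, and LoD.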
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      ss << input.second[i];
      if (scope) {
        int row_size = GetRowSize(*scope, input.second[i]);
        if (row_size >= 0) {
          ss << "[row_size=" << row_size << "]";
        }
        ss << "[" << GetDims(*scope, input.second[i], true) << "]";
        ss << "(" << GetLoD(*scope, input.second[i]) << ")";
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      ss << output.second[i];
      if (scope) {
        int row_size = GetRowSize(*scope, output.second[i]);
        if (row_size >= 0) {
          ss << "[row_size=" << row_size << "]";
        }
        ss << "[" << GetDims(*scope, output.second[i], true) << "]";
        ss << "(" << GetLoD(*scope, output.second[i]) << ")";
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate outputs
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    if (!in.dispensable()) {
      PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                     "Operator %s's input, %s, is not set", Type(), in.name());
    }
  }

  for (auto& out : op_info->Proto().outputs()) {
    if (!out.dispensable()) {
      PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                     "Operator %s's output, %s, is not set", Type(),
                     out.name());
    }
  }
}

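// Gives each kTempVarName placeholder output a unique name by appending the
// op type and a process-wide atomic counter.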
void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

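// A variable is tensor-like if it holds a LoDTensor or a SelectedRows, whose
// value is itself a tensor.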
static bool VarIsTensor(const Variable* var) {
  return var->IsType<LoDTensor>() || var->IsType<SelectedRows>();
}

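// The two accessors below extract the tensor stored in a variable (the value
// tensor for SelectedRows); they differ only in the constness of the result.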
static const Tensor* GetTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

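// HasInput/HasOutput hold only when the slot exists on the op, names exactly
// one variable, and that variable is present in the scope.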
bool ExecutionContext::HasInput(const std::string& name) const {
  if (!op_.HasInputs(name)) {
    return false;
  }
  auto& ins = Inputs(name);
  size_t length = ins.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Input %s should not have more than one inputs", name);
  auto arg = ins[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

bool ExecutionContext::HasOutput(const std::string& name) const {
  if (!op_.HasOutputs(name)) {
    return false;
  }
  auto& outs = Outputs(name);
  size_t length = outs.size();
  if (length == 0) {
    return false;
  }
  PADDLE_ENFORCE_EQ(length, 1UL,
                    "Output %s should not have more than one inputs", name);
  auto arg = outs[0];
  auto* var = arg == kEmptyVarName ? nullptr : scope_.FindVar(arg);
  return var != nullptr;
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr
                        : GetTensorFromVar(const_cast<Variable*>(var));
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}

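// An op supports GPU if any of its registered kernels targets a GPU place;
// ops with no kernels at all (e.g. control-flow ops) are treated as
// GPU-capable.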
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

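// InferShapeContext implementation backed by a live Scope, used when shapes
// are inferred at run time rather than at compile time.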
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto& ins = Inputs(name);
    size_t length = ins.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Input %s should not have more than one inputs", name);
    auto ipt = ins[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto& outs = Outputs(name);
    size_t length = outs.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Output %s should not have more than one inputs", name);
    auto ipt = outs[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    if (!op_.HasInputs(name)) {
      return false;
    }
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    if (!op_.HasOutputs(name)) {
      return false;
    }
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

// TODO(dzhwinter) : reuse ShareLoD in most operators.
// Need to call ShareLayout explicitly in sequence related ops.
// Shall we have a better method to share info between in/out Tensors?
#ifdef PADDLE_WITH_MKLDNN
    // Fix me: ugly workaround below
    // Correct solution:
    //    set_layout() should NOT be called here (i.e. ShareLoD). Instead,
    //    layout of output tensor should be set "manually" in Compute()
    //    of each OPKernel. The reason layout should NOT be shared between
    //    input and output "automatically" (now by InferShape()->ShareLoD())
    //    is that layout transform may occur after InferShape().
    // Workaround:
    //    Skip set_layout() when input layout is kMKLDNN
    //    This is to avoid kMKLDNN is populated wrongly into a non-MKLDNN
    //    OPKernel. In all MKLDNN OPkernel, set_layout(kMKLDNN) should be called
    //    in Compute()
    if (in_tensor.layout() != DataLayout::kMKLDNN)
#endif
      out_tensor->set_layout(in_tensor.layout());
  }

  void ShareLayout(const std::string& in, const std::string& out, size_t i = 0,
                   size_t j = 0) const {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    PADDLE_ENFORCE_NOT_NULL(var);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("Only compile time support this method");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time support this method");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

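// Backs FLAGS_check_nan_inf: skips empty and non-float/double tensors, then
// enforces that the tensor contains neither Inf nor NaN.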
static void CheckTensorNANOrInf(const std::string& name,
                                const framework::Tensor& tensor) {
  if (tensor.memory_size() == 0) {
    return;
  }
  if (tensor.type().hash_code() != typeid(float).hash_code() &&   // NOLINT
      tensor.type().hash_code() != typeid(double).hash_code()) {  // NOLINT
    return;
  }
  PADDLE_ENFORCE(!framework::TensorContainsInf(tensor),
                 "Tensor %s contains Inf", name);
  PADDLE_ENFORCE(!framework::TensorContainsNAN(tensor),
                 "Tensor %s contains NAN", name);
}

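// Kernel dispatch path: infer shapes, look up the kernel matching the
// expected kernel key, transform any inputs whose (place, layout, data type)
// disagree with that key into a child scope, run the kernel there, and
// finally share transformed in-place variables back to the original scope.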
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // For profiling, don't move out of this function because that will result
  // in the failure of multi-GPU profiling.
  platform::RecordEvent record_event(Type(), dev_ctx);
  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  ExecutionContext ctx(*this, scope, *dev_ctx);

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key = this->GetExpectedKernelType(ctx);
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform
  Scope& new_scope = scope.NewScope();

  std::vector<std::string> inplace_vars;
  for (auto& var_name_item : this->Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      if (var && VarIsTensor(var)) {
        auto* tensor_in = GetTensorFromVar(var);
        if (tensor_in->IsInitialized()) {
          auto kernel_type_for_var = this->GetKernelTypeForVar(
              var_name_item.first, *tensor_in, expected_kernel_key);
          if (TransFromNeeded(kernel_type_for_var, expected_kernel_key)) {
            auto out_var_names = OutputVars(true);
            if (std::find(out_var_names.begin(), out_var_names.end(),
                          var_name) != out_var_names.end()) {
              inplace_vars.push_back(var_name);
            }
            VLOG(3) << "Transform Variable " << var_name << " from "
                    << kernel_type_for_var << " to " << expected_kernel_key;
            auto* trans_var = new_scope.Var(var_name);
            std::shared_ptr<Tensor> out(new Tensor);
            DataTransform(expected_kernel_key, kernel_type_for_var, *tensor_in,
                          out.get());
            CopyVariableWithTensor(*var, *(out.get()), trans_var);
          }
        }
      }
    }
  }

  auto* new_dev_ctx = pool.Get(expected_kernel_key.place_);
  kernel_iter->second->Compute(
      ExecutionContext(*this, new_scope, *new_dev_ctx));

  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to it's original scope";
    auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name));
    auto* transformed_tensor = GetTensorFromVar(new_scope.FindVar(var_name));
    original_tensor->ShareDataWith(*transformed_tensor);
  }

  /* For profiling/benchmark only */
  if (FLAGS_benchmark) {
    new_dev_ctx->Wait();
  }

  if (FLAGS_check_nan_inf) {
    for (auto& vname : OutputVars(true)) {
      auto* var = new_scope.FindVar(vname);
      if (var == nullptr) continue;
      if (var->IsType<framework::LoDTensor>()) {
        CheckTensorNANOrInf(vname, var->Get<framework::LoDTensor>());
      }
    }
  }
}

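// Scans every tensor-holding input and returns their common data type,
// enforcing that all initialized inputs agree on it.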
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(
              tmp == data_type || data_type == -1,
              "DataType of Paddle Op %s must be the same. Get %d != %d", Type(),
              data_type, tmp);
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place(),
                      tensor.layout());
}

}  // namespace framework
}  // namespace paddle