operator.cc
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/framework/data_transform.h"
#include "paddle/framework/device_data_transform.h"
#include "paddle/framework/executor.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/shape_inference.h"
#include "paddle/framework/var_type.h"

DEFINE_bool(op_sync, false,
            "CUDA devices are asynchronous by default; set this flag to true "
            "to force ops to run in synchronous mode (e.g. for profiling).");

namespace paddle {
namespace framework {

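// Global priority list of (place, library) pairs consulted when choosing an
// op kernel. The Use*() helpers below rebuild this list, prepending each new
// entry so the most specific backend is tried first.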
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;

void UseCPU() {
  kKernelPriority.clear();
  /*Plain CPU*/
  auto pair0 = std::make_tuple(platform::CPUPlace(), LibraryType::kPlain);
  kKernelPriority.insert(kKernelPriority.begin(), pair0);
}

void UseMKLDNN() {
  UseCPU();
#if PADDLE_WITH_MKLML
  {
    /*MKLDNN Kernel*/
    auto pair0 = std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN);
    kKernelPriority.insert(kKernelPriority.begin(), pair0);
  }
#endif
}

void UseCUDA() {
  UseMKLDNN();
#if PADDLE_WITH_CUDA
  /*Plain GPU*/
  auto pair0 = std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain);
  kKernelPriority.insert(kKernelPriority.begin(), pair0);
#endif
}

void UseCUDNN() {
  UseCUDA();
#if PADDLE_WITH_CUDA
  if (platform::dynload::HasCUDNN()) {
    /*CUDNN Kernel*/
    auto pair0 = std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN);
    kKernelPriority.insert(kKernelPriority.begin(), pair0);
  }
#endif
}

void UseALL() {
  UseCPU();
  UseMKLDNN();
  UseCUDA();
  UseCUDNN();
}
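
// Note: each Use*() helper rebuilds kKernelPriority from scratch, and
// UseCUDNN() already chains UseCUDA() -> UseMKLDNN() -> UseCPU(), so
// UseALL() ends up equivalent to UseCUDNN(). A caller that prefers cuDNN
// kernels would therefore just invoke UseCUDNN() once before running ops.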

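// Returns the dimensions of the variable `name` in `scope`, or DDim({-1}) if
// the variable is missing or holds neither a LoDTensor nor a SelectedRows.
// Used only for the debug output below.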
static DDim GetDims(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  } else if (var->IsType<LoDTensor>()) {
    return var->Get<LoDTensor>().dims();
  } else if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().GetCompleteDims();
  } else {
    return DDim({-1});
  }
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have the output %s.", type_, name);
  return it->second;
}

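// Builds a human-readable description of the operator: its type plus every
// input/output slot with the variable names bound to it. When a scope is
// given, each variable name is annotated with its dimensions.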
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      ss << input.second[i];
      if (scope) {
        ss << "(" << GetDims(*scope, input.second[i]) << ")";
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      ss << output.second[i];
      if (scope) {
        ss << "(" << GetDims(*scope, output.second[i]) << ")";
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

void OperatorBase::Rename(const std::string& old_name,
                          const std::string& new_name) {
  for (auto& input : inputs_) {
    std::replace(input.second.begin(), input.second.end(), old_name, new_name);
  }
  for (auto& output : outputs_) {
    std::replace(output.second.begin(), output.second.end(), old_name,
                 new_name);
  }
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                   "Type %s's input %s is not set", Type(), in.name());
  }

  for (auto& out : op_info->Proto().outputs()) {
    PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                   "Type %s's output %s is not set", Type(), out.name());
  }
}

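// Rewrites every output named kTempVarName to a unique name of the form
// <kTempVarName><op_type>@<id>; the atomic counter keeps generated names
// unique across threads.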
void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable* var) {
  return var->IsType<LoDTensor>() || var->IsType<SelectedRows>();
}

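// Extracts the underlying Tensor from a variable, which must hold either a
// LoDTensor or a SelectedRows; any other type throws.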
static const Tensor* GetTensorFromVar(const Variable* var) {
  const Tensor* t = nullptr;
  if (var->IsType<LoDTensor>()) {
    t = &(var->Get<LoDTensor>());
  } else if (var->IsType<SelectedRows>()) {
    t = &(var->Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
  return t;
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  Tensor* t = nullptr;
  if (var->IsType<LoDTensor>()) {
    t = var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    t = var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
  return t;
}

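// Specializations that unwrap variables into plain Tensor pointers, so
// kernels can call ExecutionContext::Input/Output<Tensor> uniformly for both
// LoDTensor and SelectedRows arguments.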
template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr : GetTensorFromVar(var);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}

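// An op supports GPU if any of its registered kernels targets a GPU place;
// ops without registered kernels (e.g. control-flow ops) are assumed to
// support every place.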
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU.
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

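// InferShapeContext implementation backed by a live Scope: shapes are read
// from and written to the actual variables, hence IsRuntime() returns true.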
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    auto& ins = Inputs(name);
    size_t length = ins.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Input %s should contain only one variable.", name);
    auto ipt = ins[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    auto& outs = Outputs(name);
    size_t length = outs.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Output %s should contain only one variable.", name);
    auto ipt = outs[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  DDim GetInputDim(const std::string& name) const override {
    return GetDim(op_.Input(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    SetDim(op_.Output(name), dim);
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

    // TODO(dzhwinter) : reuse ShareLoD in most operators.
    // Need to call ShareLayout explicitly in sequence related ops.
    // Shall we have a better method to share info between in/out Tensors?
    out_tensor->set_layout(in_tensor.layout());
  }

  void ShareLayout(const std::string& in, const std::string& out, size_t i = 0,
                   size_t j = 0) const {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  proto::VarDesc::VarType GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

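// Runs the operator: infers shapes, picks a kernel via kKernelPriority and
// GetExpectedKernelType(), transforms any input whose per-variable kernel
// type differs from the expected one, and finally invokes the kernel on the
// device named by the chosen kernel key.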
void OperatorWithKernel::Run(const Scope& scope,
                             const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW("No kernel is registered for operator %s.", type_);
  }

  ExecutionContext ctx(*this, scope, *dev_ctx);
  auto expected_kernel_key = this->GetExpectedKernelType(ctx);

  OpKernelMap& kernels = kernels_iter->second;

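  // Walk kKernelPriority and switch to the first candidate that either
  // matches the expected kernel key exactly or has a registered kernel.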
  for (auto& candidate : kKernelPriority) {
    auto candidate_key =
        OpKernelType(expected_kernel_key.data_type_, std::get<0>(candidate),
                     expected_kernel_key.data_layout_, std::get<1>(candidate));

    if ((candidate_key == expected_kernel_key) ||
        (kernels.count(candidate_key))) {
      expected_kernel_key = candidate_key;
      break;
    }
  }

  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

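  // For every input whose per-variable kernel type differs from the expected
  // kernel type, put a transformed copy into new_scope so the kernel sees
  // data with the place/layout it expects.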
  Scope& new_scope = scope.NewScope();

  for (auto& var_name_item : this->Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      if (var && VarIsTensor(var)) {
        auto* tensor_in = GetTensorFromVar(var);
        if (tensor_in->IsInitialized()) {
          auto kernel_type_for_var = this->GetKernelTypeForVar(
              var_name_item.first, *tensor_in, expected_kernel_key);
          if (kernel_type_for_var != expected_kernel_key) {
            auto out_var_names = OutputVars(true);
            if (std::find(out_var_names.begin(), out_var_names.end(),
                          var_name) != out_var_names.end()) {
              PADDLE_THROW(
                  "var %s is both input and output, "
                  "does not support transform",
                  var_name);
            }
            VLOG(3) << "need to do transform for var " << var_name;
            auto* trans_var = new_scope.Var(var_name);
            auto* out = DataTransform(expected_kernel_key, kernel_type_for_var,
                                      *tensor_in);
            CopyVariableWithTensor(*var, *out, *trans_var);
          }
        }
      }
    }
  }

  auto kernel_iter = kernels.find(expected_kernel_key);
  PADDLE_ENFORCE(kernel_iter != kernels.end(),
                 "Operator %s does not have a kernel for the expected kernel "
                 "type.", type_);

  auto* new_dev_ctx = pool.Get(expected_kernel_key.place_);
  kernel_iter->second->Compute(
      ExecutionContext(*this, new_scope, *new_dev_ctx));

  /*For profiling/benchmark only*/
  if (FLAGS_op_sync) {
    new_dev_ctx->Wait();
  }
}

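// Scans all input tensors and returns their common data type, enforcing that
// every initialized input has the same type.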
proto::DataType OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(tmp == data_type || data_type == -1,
                         "DataType of Paddle Op %s must be the same.", Type());
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::DataType>(data_type);
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

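// By default a variable keeps the expected data type but reports the
// tensor's actual place; ops override this to request per-variable layout
// or place transforms.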
OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place());
}

}  // namespace framework
}  // namespace paddle