/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <glog/logging.h>

#include <algorithm>
#include <atomic>

#include "paddle/framework/data_transform.h"
#include "paddle/framework/executor.h"
#include "paddle/framework/lod_tensor_array.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/shape_inference.h"
#include "paddle/framework/var_type.h"

namespace paddle {
namespace framework {

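// kKernelPriority stores (place, library) pairs ordered from most to least
// preferred; each Use*() helper below pushes its backend to the front, and
// OperatorWithKernel::Run() walks the list front-to-back when it needs a
// fallback kernel.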
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;

void UseCPU() {
  kKernelPriority.clear();
  /*Plain CPU*/
  auto pair0 = std::make_tuple(platform::CPUPlace(), LibraryType::kPlain);
  kKernelPriority.insert(kKernelPriority.begin(), pair0);
}

void UseMKLDNN() {
  UseCPU();
#if PADDLE_WITH_MKLML
  {
    /*MKLDNN Kernel*/
    auto pair0 = std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN);
    kKernelPriority.insert(kKernelPriority.begin(), pair0);
  }
#endif
}

void UseCUDA() {
  UseMKLDNN();
#if PADDLE_WITH_CUDA
  /*Plain GPU*/
  auto pair0 = std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain);
  kKernelPriority.insert(kKernelPriority.begin(), pair0);
#endif
}

void UseCUDNN() {
  UseCUDA();
#if PADDLE_WITH_CUDA
  if (platform::dynload::HasCUDNN()) {
    /*CUDNN Kernel*/
    auto pair0 = std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN);
    kKernelPriority.insert(kKernelPriority.begin(), pair0);
  }
#endif
}

void UseALL() {
  UseCPU();
  UseMKLDNN();
  UseCUDA();
  UseCUDNN();
}
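
// NOTE: each Use*() helper first delegates to the weaker backend and then
// pushes its own (place, library) pair to the front, so UseCUDNN() alone
// already yields the CUDNN > plain CUDA > MKLDNN > plain CPU ordering and
// UseALL() produces the same final list. A minimal usage sketch (operator
// construction elided, names hypothetical):
//
//   framework::UseALL();    // enable every backend in this build
//   op->Run(scope, place);  // Run() consults kKernelPriority
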

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

std::string OperatorBase::DebugString() const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      ss << input.second[i];
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      ss << output.second[i];
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

void OperatorBase::Rename(const std::string& old_name,
                          const std::string& new_name) {
  for (auto& input : inputs_) {
    std::replace(input.second.begin(), input.second.end(), old_name, new_name);
  }
  for (auto& output : outputs_) {
    std::replace(output.second.begin(), output.second.end(), old_name,
                 new_name);
  }
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate outputs
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                   "Type %s's input %s is not set", Type(), in.name());
  }

  for (auto& out : op_info->Proto().outputs()) {
    PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                   "Type %s's output %s is not set", Type(), out.name());
  }
}

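// Replace each kTempVarName output placeholder with a unique name built by
// appending the op type and a process-wide atomic counter, so generated
// temporaries never collide.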
void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

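// Unwrap the Tensor held by a Variable: a LoDTensor is used directly, a
// SelectedRows contributes its value(); any other type throws.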
static const Tensor* GetTensorFromVar(const Variable* var) {
  const Tensor* t = nullptr;
  if (var->IsType<LoDTensor>()) {
    t = &(var->Get<LoDTensor>());
  } else if (var->IsType<SelectedRows>()) {
    t = &(var->Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
  return t;
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  Tensor* t = nullptr;
  if (var->IsType<LoDTensor>()) {
    t = var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    t = var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
  return t;
}

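// Specializations of ExecutionContext::Input/Output for plain Tensor: they
// resolve the variable(s) by name and unwrap them with the helpers above,
// yielding nullptr for variables missing from the scope.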
template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr : GetTensorFromVar(var);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}

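// An op type with no registered kernels at all (typically a control-flow
// op) is assumed to run on any device, so it reports GPU support.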
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

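// RuntimeInferShapeContext implements the InferShapeContext interface on
// top of a live Scope, so InferShape() can query and set real variable
// shapes during execution (IsRuntime() returns true).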
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    auto& ins = Inputs(name);
    size_t length = ins.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Input %s should contain only one variable.", name);
    auto ipt = ins[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    auto& outs = Outputs(name);
    size_t length = outs.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Output %s should contain only one variable.", name);
    auto out = outs[0];
    auto* var = out == kEmptyVarName ? nullptr : scope_.FindVar(out);
    return var != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  DDim GetInputDim(const std::string& name) const override {
    return GetDim(op_.Input(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    SetDim(op_.Output(name), dim);
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

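  // Share the LoD of the i-th variable of Input(in) with the j-th variable
  // of Output(out); a non-LoDTensor input is silently ignored, while a
  // non-LoDTensor output is an error.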
  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto& in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  proto::VarDesc::VarType GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

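// Choose the DeviceContext a data transform should run on: a transform
// crossing the CPU/GPU boundary is always executed on the GPU side.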
const platform::DeviceContext* GetDeviceContext(
    framework::KernelTypePair& kernel_pair) {
  auto& actual_kernel_key = kernel_pair.first;
  auto& expected_kernel_key = kernel_pair.second;
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();

  if (platform::is_gpu_place(actual_kernel_key.place_) &&
      platform::is_cpu_place(expected_kernel_key.place_)) {
    return pool.Get(actual_kernel_key.place_);
  } else if (platform::is_cpu_place(actual_kernel_key.place_) &&
             platform::is_gpu_place(expected_kernel_key.place_)) {
    return pool.Get(expected_kernel_key.place_);
  } else {
    PADDLE_THROW(
        "Currently, model parallelism is only supported between CPU and CUDA");
  }
}

const platform::DeviceContext* GetDeviceContext(
    const framework::OpKernelType& kernel) {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  return pool.Get(kernel.place_);
}

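// Run() proceeds in three steps:
//   1. infer output shapes through a RuntimeInferShapeContext;
//   2. pick a kernel: start from GetActualKernelType() and, when that
//      differs from GetExpectedKernelType(), walk kKernelPriority for the
//      best registered candidate with a data-transform function;
//   3. transform inputs that need it into "<var><kernel-type>" scope
//      variables, then invoke the chosen kernel's Compute().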
void OperatorWithKernel::Run(const Scope& scope,
                             const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto dev_ctx = pool.Get(place);

  // check if op[type] has a kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "No kernels are registered for the %s operator.", type_);
  }

  // check if op[type] has a kernel for kernel_key
  OpKernelMap& kernels = kernels_iter->second;

  ExecutionContext ctx(*this, scope, *dev_ctx);
  auto actual_kernel_key = GetActualKernelType(ctx);

  auto expected_kernel_key = GetExpectedKernelType(actual_kernel_key);

  if (actual_kernel_key == expected_kernel_key) {
    PADDLE_ENFORCE_EQ(actual_kernel_key.place_, expected_kernel_key.place_,
                      "Currently, model parallelism is only supported between "
                      "CPU and other devices. For example, multi-GPU model "
                      "parallelism will fail.");
  } else {
    // find the best key candidate
    const DataTransformFnMap& trans_map = DataTransformFnMap::Instance();
    for (auto& candidate : kKernelPriority) {
      auto candidate_key =
          OpKernelType(actual_kernel_key.data_type_, std::get<0>(candidate),
                       actual_kernel_key.data_layout_, std::get<1>(candidate));

      auto candidate_pair = std::make_pair(actual_kernel_key, candidate_key);
      if ((actual_kernel_key == candidate_key) ||
          (kernels.count(candidate_key) &&
           trans_map.GetNullable(candidate_pair))) {
        expected_kernel_key = candidate_key;
        break;
      }
    }

    auto kernel_pair = std::make_pair(actual_kernel_key, expected_kernel_key);
    const DataTransformFn* trans_fun = trans_map.GetNullable(kernel_pair);
    if (trans_fun) {
      auto input_vars = this->InputVars();
      // TODO(qijun) filter the input vars that do not need to be transformed

      // filter vars that has been transformed
      std::vector<std::string> need_trans;
      for (auto var_name : input_vars) {
        auto var_name_trans =
            var_name + framework::KernelTypeToString(expected_kernel_key);
        if (!scope.FindVar(var_name_trans)) {
          const_cast<Scope&>(scope).Var(var_name_trans);
          need_trans.push_back(var_name);
        }
      }

      if (!need_trans.empty()) {
        auto trans_dev_ctx = GetDeviceContext(kernel_pair);

        // Wait for earlier work on dev_ctx before starting the transform
        dev_ctx->Wait();

        for (auto var_name : need_trans) {
          (*trans_fun)(trans_dev_ctx, kernel_pair, *(scope.FindVar(var_name)),
                       scope.FindVar(var_name + framework::KernelTypeToString(
                                                    expected_kernel_key)));
        }
        // Wait for the data transform to finish
        trans_dev_ctx->Wait();
      }
    }
  }

  VLOG(10) << "Actual kernel: " << actual_kernel_key
           << ", expected kernel: " << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);

  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("The operator %s does not support %s", type_,
                 expected_kernel_key);
  }

  auto* expected_dev_ctx = GetDeviceContext(expected_kernel_key);
  ExecutionContext expected_ctx(*this, scope, *expected_dev_ctx);

  kernel_iter->second->Compute(expected_ctx);
}

OpKernelType OperatorWithKernel::GetActualKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const OpKernelType& actual_kernel_type) const {
  return actual_kernel_type;
}

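// Walk every input variable that holds a tensor and return their common
// data type; mixed input types (or no typed input at all) trigger an
// enforce failure.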
proto::DataType OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(tmp == data_type || data_type == -1,
                         "DataType of Paddle Op %s must be the same.", Type());
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::DataType>(data_type);
}

}  // namespace framework
}  // namespace paddle