/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <glog/logging.h>

#include <algorithm>

#include "paddle/framework/data_transform.h"
#include "paddle/framework/device_data_transform.h"
#include "paddle/framework/executor.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/shape_inference.h"
#include "paddle/framework/var_type.h"

namespace paddle {
namespace framework {

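// Global kernel-priority list consulted when choosing an op kernel. Each
// UseXXX() call below rebuilds the list so that the most recently requested
// (Place, Library) pair sits at the front and takes precedence.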
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;

void UseCPU() {
  kKernelPriority.clear();
  /*Plain CPU*/
  auto pair0 = std::make_tuple(platform::CPUPlace(), LibraryType::kPlain);
  kKernelPriority.insert(kKernelPriority.begin(), pair0);
}

void UseMKLDNN() {
  UseCPU();
#ifdef PADDLE_WITH_MKLML
  {
    /*MKLDNN Kernel*/
    auto pair0 = std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN);
    kKernelPriority.insert(kKernelPriority.begin(), pair0);
  }
#endif
}

void UseCUDA() {
  UseMKLDNN();
#ifdef PADDLE_WITH_CUDA
  /*Plain GPU*/
  auto pair0 = std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain);
  kKernelPriority.insert(kKernelPriority.begin(), pair0);
#endif
}

void UseCUDNN() {
  UseCUDA();
#ifdef PADDLE_WITH_CUDA
  if (platform::dynload::HasCUDNN()) {
    /*CUDNN Kernel*/
    auto pair0 = std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN);
    kKernelPriority.insert(kKernelPriority.begin(), pair0);
  }
#endif
}

void UseALL() {
  UseCPU();
  UseMKLDNN();
  UseCUDA();
  UseCUDNN();
}

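// Returns the dims of variable `name` in `scope`; DDim({-1}) is used as a
// sentinel for variables that are unset or of an unsupported type.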
static DDim GetDims(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  } else if (var->IsType<LoDTensor>()) {
    return var->Get<LoDTensor>().dims();
  } else if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().GetCompleteDims();
  } else {
    return DDim({-1});
  }
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

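// Builds a human-readable description of this operator. When a scope is
// given, the dims of each input/output variable are appended to its name.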
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      ss << input.second[i];
      if (scope) {
        ss << "(" << GetDims(*scope, input.second[i]) << ")";
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      ss << output.second[i];
      if (scope) {
        ss << "(" << GetDims(*scope, output.second[i]) << ")";
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

void OperatorBase::Rename(const std::string& old_name,
                          const std::string& new_name) {
  for (auto& input : inputs_) {
    std::replace(input.second.begin(), input.second.end(), old_name, new_name);
  }
  for (auto& output : outputs_) {
    std::replace(output.second.begin(), output.second.end(), old_name,
                 new_name);
  }
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                   "Type %s's input %s is not set", Type(), in.name());
  }

  for (auto& out : op_info->Proto().outputs()) {
    PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                   "Type %s's output %s is not set", Type(), out.name());
  }
}

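// Rewrites every output named kTempVarName into a unique temporary name of
// the form @TEMP@<op_type>@<counter>.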
void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

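// True if the variable holds tensor-backed data (LoDTensor or SelectedRows).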
static bool VarIsTensor(const Variable* var) {
  return var->IsType<LoDTensor>() || var->IsType<SelectedRows>();
}

static const Tensor* GetTensorFromVar(const Variable* var) {
  const Tensor* t = nullptr;
  if (var->IsType<LoDTensor>()) {
    t = &(var->Get<LoDTensor>());
  } else if (var->IsType<SelectedRows>()) {
    t = &(var->Get<SelectedRows>().value());
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
  return t;
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  Tensor* t = nullptr;
  if (var->IsType<LoDTensor>()) {
    t = var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    t = var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
  return t;
}

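// Specializations that resolve Tensor inputs/outputs through the scope,
// returning nullptr (rather than throwing) for variables that are not set.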
template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr : GetTensorFromVar(var);
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}

bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU.
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

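// InferShapeContext backed by a live Scope, used for shape inference at run
// time (as opposed to compile-time inference on the program description).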
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    auto& ins = Inputs(name);
    size_t length = ins.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL, "Input %s should have more than one inputs",
                      name);
    auto ipt = ins[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    auto& outs = Outputs(name);
    size_t length = outs.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL, "Output %s should have more than one inputs",
                      name);
    auto ipt = outs[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  DDim GetInputDim(const std::string& name) const override {
    return GetDim(op_.Input(name));
  }

  void SetOutputDim(const std::string& name, const DDim& dim) override {
    SetDim(op_.Output(name), dim);
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto& in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

    // TODO(dzhwinter) : reuse ShareLoD in most operators.
    // Need to call ShareLayout explicitly in sequence related ops.
    // Shall we have a better method to share info between in/out Tensors?
    out_tensor->set_layout(in_tensor.layout());
  }

  void ShareLayout(const std::string& in, const std::string& out, size_t i = 0,
                   size_t j = 0) const {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto& in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  proto::VarDesc::VarType GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

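// Runs the op with a kernel: infer shapes, choose the expected kernel type,
// transform any input tensors that do not match it, then invoke the kernel.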
void OperatorWithKernel::Run(const Scope& scope,
                             const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto dev_ctx = pool.Get(place);

  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW("No kernels are registered for the %s operator.", type_);
  }

  ExecutionContext ctx(*this, scope, *dev_ctx);
  auto expected_kernel_key = this->GetExpectedKernelType(ctx);

  Scope& new_scope = scope.NewScope();

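  // For each input tensor, compare the kernel type it is currently stored
  // with against the expected kernel type; if they differ, place a
  // transformed copy in new_scope under the same variable name.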
  for (auto& var_name_item : this->Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      if (var && VarIsTensor(var)) {
        auto* tensor_in = GetTensorFromVar(var);
        if (tensor_in->IsInitialized()) {
          auto kernel_type_for_var = this->GetKernelTypeForVar(
              var_name_item.first, *tensor_in, expected_kernel_key);
          if (kernel_type_for_var != expected_kernel_key) {
            auto out_var_names = OutputVars(true);
            if (std::find(out_var_names.begin(), out_var_names.end(),
                          var_name) != out_var_names.end()) {
              PADDLE_THROW(
                  "Var %s is both an input and an output; "
                  "transforming it is not supported.",
                  var_name);
            }
            VLOG(3) << "need to do transform for var " << var_name;
            auto* trans_var = new_scope.Var(var_name);
            auto* out = DataTransform(expected_kernel_key, kernel_type_for_var,
                                      *tensor_in);
            CopyVariableWithTensor(*var, *out, *trans_var);
          }
        }
      }
    }
  }

  OpKernelMap& kernels = kernels_iter->second;
  auto kernel_iter = kernels.find(expected_kernel_key);
  PADDLE_ENFORCE(kernel_iter != kernels.end(),
                 "The operator %s does not have a kernel matching the "
                 "expected kernel type.",
                 type_);

  kernel_iter->second->Compute(ExecutionContext(*this, new_scope, *dev_ctx));
}

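// Scans all inputs and returns their common data type, enforcing that every
// initialized input tensor of this op has the same type.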
proto::DataType OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(tmp == data_type || data_type == -1,
                         "DataType of Paddle Op %s must be the same.", Type());
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1,
                 "The data type of operator %s must be indicated by its "
                 "inputs.",
                 Type());
  return static_cast<proto::DataType>(data_type);
}

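// Default kernel selection: use the common input data type on the context's
// place; by default a variable's tensor is used wherever it currently lives.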
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place());
}

}  // namespace framework
}  // namespace paddle