/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);
namespace paddle {
namespace framework {

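// Candidate (place, library) pairs in descending priority, reserved for the
// kernel-fallback selection sketched in RunImpl below.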
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

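// Returns the element type of the tensor a variable holds; only LoDTensor
// and SelectedRows are supported. Example (with a hypothetical variable "X"):
//   auto dtype = GetDataTypeOfVar(scope.FindVar("X"));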
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

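// Reports the dimensions of a variable in `scope`; DDim({-1}) stands in for
// missing variables and non-tensor types.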
static DDim GetDims(const Scope& scope, const std::string& name,
                    bool get_actual_dim = false) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    return var->Get<LoDTensor>().dims();
  } else if (var->IsType<SelectedRows>()) {
    if (get_actual_dim) {
      return var->Get<SelectedRows>().value().dims();
    } else {
      return var->Get<SelectedRows>().GetCompleteDims();
    }
  } else {
    return DDim({-1});
  }
}

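// Returns the LoD of a LoDTensor variable; any other variable type (or a
// missing variable) yields an empty LoD.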
static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    return var->Get<LoDTensor>().lod();
  } else {
    return default_lod;
  }
}

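// Entry point shared by all operators: pins the current CUDA device for GPU
// places (or throws when CUDA is not compiled in), then delegates to RunImpl.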
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }
  RunImpl(scope, place);
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      ss << input.second[i];
      if (scope) {
        ss << "[" << GetDims(*scope, input.second[i], true) << "]";
        ss << "(" << GetLoD(*scope, input.second[i]) << ")";
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      ss << output.second[i];
      if (scope) {
        ss << "[" << GetDims(*scope, output.second[i], true) << "]";
        ss << "(" << GetLoD(*scope, output.second[i]) << ")";
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

void OperatorBase::Rename(const std::string& old_name,
                          const std::string& new_name) {
  for (auto& input : inputs_) {
    std::replace(input.second.begin(), input.second.end(), old_name, new_name);
  }
  for (auto& output : outputs_) {
    std::replace(output.second.begin(), output.second.end(), old_name,
                 new_name);
  }
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate output
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

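// Verifies that every input and output declared in the registered OpProto
// has been provided to this operator instance.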
void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                   "Type %s's input %s is not set", Type(), in.name());
  }

  for (auto& out : op_info->Proto().outputs()) {
    PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                   "Type %s's output %s is not set", Type(), out.name());
  }
}

void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

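// True when the variable holds tensor-backed data, i.e. a LoDTensor or a
// SelectedRows.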
static bool VarIsTensor(const Variable* var) {
  return var->IsType<LoDTensor>() || var->IsType<SelectedRows>();
}

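// Both helpers below view a variable's payload as a Tensor: a LoDTensor is
// returned directly, while a SelectedRows contributes its value tensor.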
static const Tensor* GetTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr
                        : GetTensorFromVar(const_cast<Variable*>(var));
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}

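// An operator supports GPU when any of its registered kernels targets a GPU
// place; operators with no kernels at all (e.g. control-flow ops) are assumed
// to support it.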
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU.
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

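// InferShapeContext implementation backed by a live Scope, used to infer
// shapes at run time from the actual variables an operator sees.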
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    auto& ins = Inputs(name);
    size_t length = ins.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Input %s should not contain more than one variable",
                      name);
    auto ipt = ins[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    auto& outs = Outputs(name);
    size_t length = outs.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Output %s should not contain more than one variable",
                      name);
    auto ipt = outs[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

    // TODO(dzhwinter) : reuse ShareLoD in most operators.
    // Need to call ShareLayout explicitly in sequence related ops.
    // Shall we have a better method to share info between in/out Tensors?
    out_tensor->set_layout(in_tensor.layout());
  }

  void ShareLayout(const std::string& in, const std::string& out, size_t i = 0,
                   size_t j = 0) const {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("GetRepeatedDims is only supported at compile time.");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
Y
                   name, var->Type().name());
473 474 475
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("SetRepeatedDims is only supported at compile time.");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

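// Kernel dispatch pipeline: infer shapes, look up the kernel matching the
// expected kernel type, transform mismatched inputs into a child scope,
// run the kernel there, and share transformed in-place variables back to
// the original scope.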
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // For profiling, don't move out of this function because that will result
  // in the failure of multi-GPU profiling.
  platform::RecordEvent record_event(Type(), dev_ctx);
  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels which are registered in the %s operator.", type_);
  }

  ExecutionContext ctx(*this, scope, *dev_ctx);

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key = this->GetExpectedKernelType(ctx);
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform
  Scope& new_scope = scope.NewScope();

  std::vector<std::string> inplace_vars;
  for (auto& var_name_item : this->Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      if (var && VarIsTensor(var)) {
        auto* tensor_in = GetTensorFromVar(var);
        if (tensor_in->IsInitialized()) {
          auto kernel_type_for_var = this->GetKernelTypeForVar(
              var_name_item.first, *tensor_in, expected_kernel_key);
          if (TransFromNeeded(kernel_type_for_var, expected_kernel_key)) {
            auto out_var_names = OutputVars(true);
            if (std::find(out_var_names.begin(), out_var_names.end(),
                          var_name) != out_var_names.end()) {
              inplace_vars.push_back(var_name);
            }
            VLOG(3) << "Transform Variable " << var_name << " from "
                    << kernel_type_for_var << " to " << expected_kernel_key;
            auto* trans_var = new_scope.Var(var_name);
            std::shared_ptr<Tensor> out(new Tensor);
            DataTransform(expected_kernel_key, kernel_type_for_var, *tensor_in,
                          out.get());
            CopyVariableWithTensor(*var, *(out.get()), *trans_var);
          }
        }
      }
    }
  }

  auto* new_dev_ctx = pool.Get(expected_kernel_key.place_);
  kernel_iter->second->Compute(
      ExecutionContext(*this, new_scope, *new_dev_ctx));

  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name));
    auto* transformed_tensor = GetTensorFromVar(new_scope.FindVar(var_name));
    original_tensor->ShareDataWith(*transformed_tensor);
  }

  /* For profiling/benchmark only */
  if (FLAGS_benchmark) {
    new_dev_ctx->Wait();
  }
}

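// Derives the operator's data type by scanning all initialized input
// tensors; every input must agree on a single type.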
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(tmp == data_type || data_type == -1,
                         "DataType of Paddle Op %s must be the same.", Type());
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

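// Default kernel-type decisions; individual operators override these to
// request a particular place, layout, or library for their kernels.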
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place());
}

}  // namespace framework
}  // namespace paddle