/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gflags/gflags.h>
#include <glog/logging.h>

#include <algorithm>

#include "paddle/fluid/framework/data_transform.h"
#include "paddle/fluid/framework/executor.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/fluid/framework/shape_inference.h"
#include "paddle/fluid/framework/var_type.h"
#include "paddle/fluid/platform/profiler.h"

DECLARE_bool(benchmark);

namespace paddle {
namespace framework {

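// Preferred (place, library) pairs for kernel selection, listed from most to
// least preferred; automatic selection against this list still appears to be
// a TODO (see the kernel-fallback note in OperatorWithKernel::RunImpl below).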
std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority = {
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kCUDNN),
    std::make_tuple(platform::CUDAPlace(0), LibraryType::kPlain),
    std::make_tuple(platform::CPUPlace(), LibraryType::kMKLDNN),
    std::make_tuple(platform::CPUPlace(), LibraryType::kPlain),
};

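// Returns the element type of the tensor held by `var`, which must be a
// LoDTensor or a SelectedRows.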
proto::VarType::Type GetDataTypeOfVar(const Variable* var) {
  if (var->IsType<framework::LoDTensor>()) {
    return framework::ToDataType(var->Get<framework::LoDTensor>().type());
  } else if (var->IsType<framework::SelectedRows>()) {
    return framework::ToDataType(
        var->Get<framework::SelectedRows>().value().type());
  } else {
    PADDLE_THROW("Var should be LoDTensor or SelectedRows");
  }
}

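// Debug helper: dims of the variable `name` in `scope`, or DDim({-1}) when
// the variable is absent or holds neither a LoDTensor nor a SelectedRows.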
static DDim GetDims(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  if (var == nullptr) {
    return DDim({-1});
  }

  if (var->IsType<LoDTensor>()) {
    return var->Get<LoDTensor>().dims();
  } else if (var->IsType<SelectedRows>()) {
    return var->Get<SelectedRows>().GetCompleteDims();
  } else {
    return DDim({-1});
  }
}

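// Debug helper: LoD of the variable, or an empty LoD when the variable is
// absent or not a LoDTensor.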
static LoD GetLoD(const Scope& scope, const std::string& name) {
  Variable* var = scope.FindVar(name);
  auto default_lod = LoD({{}});

  if (var == nullptr) {
    return default_lod;
  }

  if (var->IsType<LoDTensor>()) {
    return var->Get<LoDTensor>().lod();
  } else {
    return default_lod;
  }
}

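// Switches to the target CUDA device (when built with CUDA) before
// dispatching to RunImpl; running a GPU op in a CPU-only build throws.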
void OperatorBase::Run(const Scope& scope, const platform::Place& place) {
  if (platform::is_gpu_place(place)) {
#ifndef PADDLE_WITH_CUDA
    PADDLE_THROW("Cannot run operator on place %s", place);
#else
    auto dev_id = boost::get<platform::CUDAPlace>(place).device;
    platform::SetDeviceId(dev_id);
#endif
  }
  RunImpl(scope, place);
}

std::string OperatorBase::Input(const std::string& name) const {
  auto& ins = Inputs(name);
  PADDLE_ENFORCE_LE(ins.size(), 1UL,
                    "Operator %s's input %s should contain only one variable.",
                    type_, name);
  return ins.empty() ? kEmptyVarName : ins[0];
}

const std::vector<std::string>& OperatorBase::Inputs(
    const std::string& name) const {
  auto it = inputs_.find(name);
  PADDLE_ENFORCE(it != inputs_.end(), "Operator %s does not have the input %s.",
                 type_, name);
  return it->second;
}

std::string OperatorBase::Output(const std::string& name) const {
  auto& outs = Outputs(name);
  PADDLE_ENFORCE_LE(outs.size(), 1UL,
                    "Operator %s's output %s should contain only one variable.",
                    type_, name);
  return outs.empty() ? kEmptyVarName : outs[0];
}

const std::vector<std::string>& OperatorBase::Outputs(
    const std::string& name) const {
  auto it = outputs_.find(name);
  PADDLE_ENFORCE(it != outputs_.end(),
                 "Operator %s does not have an output called %s.", type_, name);
  return it->second;
}

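// Renders the op as "Op(type), inputs:{...}, outputs:{...}"; when a scope is
// passed, every variable name is annotated with its dims and LoD.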
std::string OperatorBase::DebugStringEx(const Scope* scope) const {
  std::stringstream ss;
  ss << "Op(" << type_ << "), inputs:{";
  for (auto it = inputs_.begin(); it != inputs_.end();) {
    auto& input = *it;
    ss << input.first << "[";
    for (size_t i = 0; i < input.second.size(); ++i) {
      ss << input.second[i];
      if (scope) {
        ss << "[" << GetDims(*scope, input.second[i]) << "]";
        ss << "(" << GetLoD(*scope, input.second[i]) << ")";
      }
      if (i != input.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != inputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}, outputs:{";
  for (auto it = outputs_.begin(); it != outputs_.end();) {
    auto& output = *it;
    ss << output.first << "[";
    for (size_t i = 0; i < output.second.size(); ++i) {
      ss << output.second[i];
      if (scope) {
        ss << "[" << GetDims(*scope, output.second[i]) << "]";
        ss << "(" << GetLoD(*scope, output.second[i]) << ")";
      }
      if (i != output.second.size() - 1) {
        ss << ", ";
      }
    }
    ss << "]";
    ++it;
    if (it != outputs_.end()) {
      ss << ", ";
    }
  }
  ss << "}.";
  return ss.str();
}

void OperatorBase::Rename(const std::string& old_name,
                          const std::string& new_name) {
  for (auto& input : inputs_) {
    std::replace(input.second.begin(), input.second.end(), old_name, new_name);
  }
  for (auto& output : outputs_) {
    std::replace(output.second.begin(), output.second.end(), old_name,
                 new_name);
  }
}

OperatorBase::OperatorBase(const std::string& type,
                           const VariableNameMap& inputs,
                           const VariableNameMap& outputs,
                           const AttributeMap& attrs)
    : type_(type), inputs_(inputs), outputs_(outputs), attrs_(attrs) {
  GenerateTemporaryNames();
  CheckAllInputOutputSet();
}

std::vector<std::string> OperatorBase::InputVars() const {
  std::vector<std::string> ret_val;
  for (auto& o : inputs_) {
    ret_val.reserve(ret_val.size() + o.second.size());
    ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
  }
  return ret_val;
}

std::vector<std::string> OperatorBase::OutputVars(bool has_intermediate) const {
  std::vector<std::string> ret_val;
  if (has_intermediate) {
    // push all outputs into ret_val
    for (auto& o : outputs_) {
      ret_val.reserve(ret_val.size() + o.second.size());
      ret_val.insert(ret_val.end(), o.second.begin(), o.second.end());
    }
    return ret_val;
  }
  auto& info = OpInfoMap::Instance().Get(Type());

  // get all OpProto::Var for outputs
  for (auto& o : info.Proto().outputs()) {
    // ignore all intermediate outputs
    if (o.intermediate()) continue;
    auto out = outputs_.find(o.name());
    if (out != outputs_.end()) {
      ret_val.reserve(ret_val.size() + out->second.size());
      ret_val.insert(ret_val.end(), out->second.begin(), out->second.end());
    }
  }
  return ret_val;
}

void OperatorBase::CheckAllInputOutputSet() const {
  auto& info_map = OpInfoMap::Instance();
  auto* op_info = info_map.GetNullable(Type());
  if (op_info == nullptr || op_info->proto_ == nullptr) return;

  for (auto& in : op_info->Proto().inputs()) {
    PADDLE_ENFORCE(inputs_.find(in.name()) != inputs_.end(),
                   "Type %s's input %s is not set", Type(), in.name());
  }

  for (auto& out : op_info->Proto().outputs()) {
    PADDLE_ENFORCE(outputs_.find(out.name()) != outputs_.end(),
                   "Type %s's output %s is not set", Type(), out.name());
  }
}

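// Gives each kTempVarName output a unique name of the form
// "<kTempVarName><type>@<id>" using a process-wide atomic counter.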
void OperatorBase::GenerateTemporaryNames() {
  static std::atomic<size_t> gUniqId(0UL);
  for (auto& output : outputs_) {
    for (auto& output_name : output.second) {
      if (output_name == kTempVarName) {
        output_name += type_;
        output_name += "@";
        output_name += std::to_string(gUniqId.fetch_add(1));
      }
    }
  }
}

static bool VarIsTensor(const Variable* var) {
  return var->IsType<LoDTensor>() || var->IsType<SelectedRows>();
}

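// The two helpers below expose a variable's payload as a Tensor: the
// LoDTensor itself, or the value tensor inside a SelectedRows.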
static const Tensor* GetTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

static Tensor* GetMutableTensorFromVar(Variable* var) {
  if (var->IsType<LoDTensor>()) {
    return var->GetMutable<LoDTensor>();
  } else if (var->IsType<SelectedRows>()) {
    return var->GetMutable<SelectedRows>()->mutable_value();
  } else {
    PADDLE_THROW("Variable type_id %s, expect LoDTensor/SelectedRows.",
                 var->Type().name());
  }
}

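// Tensor specializations of the ExecutionContext accessors, so kernels can
// fetch inputs/outputs as plain Tensors; absent variables yield nullptr.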
template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const {
  auto* var = InputVar(name);
  return var == nullptr ? nullptr
                        : GetTensorFromVar(const_cast<Variable*>(var));
}

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const {
  auto names = op().Inputs(name);
  std::vector<const Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr : GetTensorFromVar(var);
                 });
  return res;
}

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const {
  auto var = OutputVar(name);
  return var == nullptr ? nullptr : GetMutableTensorFromVar(var);
}

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const {
  auto names = op().Outputs(name);
  std::vector<Tensor*> res;
  res.reserve(names.size());
  std::transform(names.begin(), names.end(), std::back_inserter(res),
                 [&](const std::string& sub_name) {
                   auto var = scope_.FindVar(sub_name);
                   return var == nullptr ? nullptr
                                         : GetMutableTensorFromVar(var);
                 });
  return res;
}

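// An op supports GPU iff one of its registered kernels runs on a GPU place;
// ops with no registered kernels (control ops) are assumed to support it.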
bool OpSupportGPU(const std::string& op_type) {
  auto& all_kernels = OperatorWithKernel::AllOpKernels();
  auto it = all_kernels.find(op_type);
  if (it == all_kernels.end()) {
    // All control operators must support GPU
    return true;
  }
  for (auto& kern_pair : it->second) {
    if (platform::is_gpu_place(kern_pair.first.place_)) {
      return true;
    }
  }
  return false;
}

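// Run-time InferShapeContext: dims, LoD and variable types are read from and
// written to the concrete variables found in the scope.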
class RuntimeInferShapeContext : public InferShapeContext {
 public:
  RuntimeInferShapeContext(const OperatorBase& op, const Scope& scope)
      : op_(op), scope_(scope) {}

  bool HasInput(const std::string& name) const override {
    auto& ins = Inputs(name);
    size_t length = ins.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Input %s should not have more than one variable", name);
    auto ipt = ins[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasOutput(const std::string& name) const override {
    auto& outs = Outputs(name);
    size_t length = outs.size();
    if (length == 0) {
      return false;
    }
    PADDLE_ENFORCE_EQ(length, 1UL,
                      "Output %s should not have more than one variable", name);
    auto ipt = outs[0];
    auto* var = ipt == kEmptyVarName ? nullptr : scope_.FindVar(ipt);
    return var != nullptr;
  }

  bool HasInputs(const std::string& name) const override {
    auto inputs = op_.Inputs(name);
    if (inputs.empty()) {
      return false;
    }
    for (auto& input : inputs) {
      if (scope_.FindVar(input) == nullptr) {
        return false;
      }
    }
    return true;
  }

  bool HasOutputs(const std::string& name) const override {
    auto outputs = op_.Outputs(name);
    if (outputs.empty()) {
      return false;
    }
    for (auto& output : outputs) {
      if (scope_.FindVar(output) == nullptr) {
        return false;
      }
    }
    return true;
  }

  AttrReader Attrs() const override { return AttrReader(op_.Attrs()); }

  const std::vector<std::string>& Inputs(
      const std::string& name) const override {
    return op_.Inputs(name);
  }

  const std::vector<std::string>& Outputs(
      const std::string& name) const override {
    return op_.Outputs(name);
  }

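  // ShareLoD (and ShareLayout below) copy the LoD/layout of input in[i] to
  // output out[j]; both are no-ops when the input is not a LoDTensor.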
  void ShareLoD(const std::string& in, const std::string& out, size_t i = 0,
                size_t j = 0) const override {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_lod(in_tensor.lod());

    // TODO(dzhwinter) : reuse ShareLoD in most operators.
    // Need to call ShareLayout explicitly in sequence related ops.
    // Shall we have a better method to share info between in/out Tensor?
    out_tensor->set_layout(in_tensor.layout());
  }

  void ShareLayout(const std::string& in, const std::string& out, size_t i = 0,
                   size_t j = 0) const {
    PADDLE_ENFORCE_LT(i, Inputs(in).size());
    PADDLE_ENFORCE_LT(j, Outputs(out).size());
    Variable* in_var = scope_.FindVar(Inputs(in)[i]);
    Variable* out_var = scope_.FindVar(Outputs(out)[j]);
    if (!in_var->IsType<LoDTensor>()) return;
    PADDLE_ENFORCE(out_var->IsType<LoDTensor>(),
                   "The %d-th output of Output(%s) must be LoDTensor.", j, out);
    auto in_tensor = in_var->Get<LoDTensor>();
    auto* out_tensor = out_var->GetMutable<LoDTensor>();
    out_tensor->set_layout(in_tensor.layout());
  }

  bool IsRuntime() const override { return true; }

 protected:
  DDim GetDim(const std::string& name) const override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      return var->Get<LoDTensor>().dims();
    } else if (var->IsType<SelectedRows>()) {
      return var->Get<SelectedRows>().GetCompleteDims();
    } else {
      PADDLE_THROW(
          "Only LoDTensor/SelectedRows support 'GetDim', but Variable %s's "
          "type_id is %s.",
          name, var->Type().name());
    }
  }

  std::vector<DDim> GetRepeatedDims(const std::string& name) const override {
    PADDLE_THROW("Only compile time supports this method");
  }

  void SetDim(const std::string& name, const DDim& dim) override {
    Variable* var = scope_.FindVar(name);
    if (var->IsType<LoDTensor>()) {
      var->GetMutable<LoDTensor>()->Resize(dim);
    } else if (var->IsType<SelectedRows>()) {
      var->GetMutable<SelectedRows>()->set_height(dim[0]);
    } else {
      PADDLE_THROW("Variable %s type_id %s, expect LoDTensor/SelectedRows.",
                   name, var->Type().name());
    }
  }

  void SetRepeatedDims(const std::string& name,
                       const std::vector<DDim>& dims) override {
    PADDLE_THROW("Only compile time supports this method");
  }

  proto::VarType::Type GetVarType(const std::string& name) const override {
    auto* var = scope_.FindVar(name);
    return ToVarType(var->Type());
  }

  InferShapeVarPtr GetVarPtr(const std::string& name) override {
    return scope_.FindVar(name);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
};

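// Runs shape inference, picks the kernel matching GetExpectedKernelType,
// transforms mismatched inputs into a child scope, computes on the expected
// place, and finally shares transformed in/out variables back.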
void OperatorWithKernel::RunImpl(const Scope& scope,
                                 const platform::Place& place) const {
  RuntimeInferShapeContext infer_shape_ctx(*this, scope);
  this->InferShape(&infer_shape_ctx);
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

  // For profiling, don't move out of this function because that will result
  // in the failure of multi-GPU profiling.
  platform::RecordEvent record_event(Type(), dev_ctx);
  // check if op[type] has kernel registered.
  auto& all_op_kernels = AllOpKernels();
  auto kernels_iter = all_op_kernels.find(type_);
  if (kernels_iter == all_op_kernels.end()) {
    PADDLE_THROW(
        "There are no kernels registered for the %s operator.", type_);
  }

  ExecutionContext ctx(*this, scope, *dev_ctx);

  OpKernelMap& kernels = kernels_iter->second;

  // TODO(dzhwinter) : kernel fallback mechanism will be added when all the
  // transform functions are ready.

  // for (auto& candidate : kKernelPriority) {
  //   Do selection
  // }

  auto expected_kernel_key = this->GetExpectedKernelType(ctx);
  VLOG(3) << "expected_kernel_key:" << expected_kernel_key;

  auto kernel_iter = kernels.find(expected_kernel_key);
  if (kernel_iter == kernels.end()) {
    PADDLE_THROW("op %s does not have a kernel for %s", type_,
                 KernelTypeToString(expected_kernel_key));
  }

  // do data transform
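  // Each input tensor whose layout/data type/place differs from what the
  // chosen kernel expects is transformed into a copy living in new_scope;
  // names that are also outputs are recorded in inplace_vars so the result
  // can be shared back to the original scope after Compute().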
  Scope& new_scope = scope.NewScope();

  std::vector<std::string> inplace_vars;
  for (auto& var_name_item : this->Inputs()) {
    for (auto& var_name : var_name_item.second) {
      auto* var = scope.FindVar(var_name);
      if (var && VarIsTensor(var)) {
        auto* tensor_in = GetTensorFromVar(var);
        if (tensor_in->IsInitialized()) {
          auto kernel_type_for_var = this->GetKernelTypeForVar(
              var_name_item.first, *tensor_in, expected_kernel_key);
          if (TransFromNeeded(kernel_type_for_var, expected_kernel_key)) {
            auto out_var_names = OutputVars(true);
            if (std::find(out_var_names.begin(), out_var_names.end(),
                          var_name) != out_var_names.end()) {
              inplace_vars.push_back(var_name);
            }
            VLOG(3) << "Transform Variable " << var_name << " from "
                    << kernel_type_for_var << " to " << expected_kernel_key;
            auto* trans_var = new_scope.Var(var_name);
            std::shared_ptr<Tensor> out(new Tensor);
            DataTransform(expected_kernel_key, kernel_type_for_var, *tensor_in,
                          out.get());
            CopyVariableWithTensor(*var, *(out.get()), *trans_var);
          }
        }
      }
    }
  }

  auto* new_dev_ctx = pool.Get(expected_kernel_key.place_);
  kernel_iter->second->Compute(
      ExecutionContext(*this, new_scope, *new_dev_ctx));

  for (auto& var_name : inplace_vars) {
    VLOG(3) << "share inplace var " + var_name + " back to its original scope";
    auto* original_tensor = GetMutableTensorFromVar(scope.FindVar(var_name));
    auto* transformed_tensor = GetTensorFromVar(new_scope.FindVar(var_name));
    original_tensor->ShareDataWith(*transformed_tensor);
  }

  /*For profiling/benchmark only*/
  if (FLAGS_benchmark) {
    new_dev_ctx->Wait();
  }
}

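// Infers the op's data type by scanning every initialized input tensor; all
// inputs must agree on one type, otherwise an enforce failure is raised.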
proto::VarType::Type OperatorWithKernel::IndicateDataType(
    const ExecutionContext& ctx) const {
  auto& scope = ctx.scope();
  int data_type = -1;
  for (auto& input : this->inputs_) {
    for (auto& ipt_name : input.second) {
      auto* var = scope.FindVar(ipt_name);
      if (var != nullptr) {
        const Tensor* t = nullptr;
        if (var->IsType<Tensor>()) {
          t = &var->Get<Tensor>();
        } else if (var->IsType<LoDTensor>()) {
          t = &var->Get<LoDTensor>();
        } else if (var->IsType<SelectedRows>()) {
          t = &(var->Get<SelectedRows>().value());
        }
        if (t != nullptr) {
          int tmp = static_cast<int>(ToDataType(t->type()));
          PADDLE_ENFORCE(tmp == data_type || data_type == -1,
                         "DataType of Paddle Op %s must be the same.", Type());
          data_type = tmp;
        }
      }
    }
  }
  PADDLE_ENFORCE(data_type != -1, "DataType should be indicated by input");
  return static_cast<proto::VarType::Type>(data_type);
}

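// Defaults: expect a plain kernel on the execution place using the data type
// inferred from the inputs, and keep each input tensor where it already is.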
OpKernelType OperatorWithKernel::GetExpectedKernelType(
    const ExecutionContext& ctx) const {
  return OpKernelType(IndicateDataType(ctx), ctx.GetPlace());
}

OpKernelType OperatorWithKernel::GetKernelTypeForVar(
    const std::string& var_name, const Tensor& tensor,
    const OpKernelType& expected_kernel_type) const {
  return OpKernelType(expected_kernel_type.data_type_, tensor.place());
}

}  // namespace framework
}  // namespace paddle