/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <atomic>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "glog/logging.h"  // For VLOG
#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/unused_var_check.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/variant.h"
#include "paddle/utils/flat_hash_map.h"

namespace paddle {
namespace framework {
class InferShapeContext;
class OpInfo;
class Scope;
class Variable;
}  // namespace framework
}  // namespace paddle

DECLARE_int32(inner_op_parallelism);

namespace paddle {
namespace framework {

/// If a variable is an empty variable, that name will be used.
constexpr char kEmptyVarName[] = "@EMPTY@";

/// If a variable is a temporary variable, its name will be set in Python,
/// but it will be converted to a unique name in the scope after OpCreator.
constexpr char kTempVarName[] = "@TEMP@";

/// If a variable's name has a certain suffix, it means that the
/// variable is the gradient of another variable.
/// e.g. Variable "x@GRAD" is the gradient of variable "x".
constexpr char kGradVarSuffix[] = "@GRAD";

constexpr size_t kGradVarSuffixSize = 5U;

/// Variables with this suffix are supposed to be filled up with zeros.
constexpr char kZeroVarSuffix[] = "@ZERO";

/// Variables with this suffix are the new Gradient.
constexpr char kNewGradSuffix[] = "@NEWGRAD@";

/// RuntimeContext is used to relate the input/output names of an Operator
/// with the corresponding variables in the name scope.
/// If an Op has the attribute kEnableCacheRuntimeContext, its input/output
/// names do not change within the same name scope during execution, so the
/// RuntimeContext can be created only at the first iteration of this Op's
/// execution to save elapsed time.
constexpr char kEnableCacheRuntimeContext[] = "@ENABLE_CACHE_RUNTIME_CONTEXT@";

/// If an Op has this attribute, all its kernels should calculate the output
/// variable's shape in the corresponding Compute() function, and
/// OperatorWithKernel::RunImpl() will skip calling this Op's InferShape()
/// at runtime for speedup.
/// TODO(luotao): Note that this temporary attribute would be deleted after all
/// ops contain it.
constexpr char kAllKernelsMustComputeRuntimeShape[] =
    "@ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE@";

/* Define the kernel priority: the fallback order of multiple kernel types. */
extern std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;

inline std::string GradVarName(const std::string& var_name) {
  std::string result;
  result.reserve(var_name.size() + kGradVarSuffixSize);
  result += var_name;
  result += kGradVarSuffix;
  return result;
}

inline std::string GradOriginalVarName(const std::string& grad_var_name) {
  std::size_t pos = grad_var_name.rfind(kGradVarSuffix);
  if (pos == std::string::npos) {
    return grad_var_name;
  } else {
    return grad_var_name.substr(0, pos);
  }
}
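
// Illustrative behavior of the two helpers above:
//   GradVarName("x")              == "x@GRAD"
//   GradOriginalVarName("x@GRAD") == "x"
//   GradOriginalVarName("x")      == "x"   // no @GRAD suffix: unchanged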

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var);
Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var);

class ExecutionContext;
class OperatorBase;

class RuntimeContext {
 public:
  RuntimeContext(const VariableNameMap& innames,
                 const VariableNameMap& outnames, const Scope& scope);

  RuntimeContext(const VariableValueMap& invars,
                 const VariableValueMap& outvars)
      : inputs(invars), outputs(outvars) {}

  VariableValueMap inputs;
  VariableValueMap outputs;
};

/**
 * OperatorBase has the basic elements that Net will call to do computation.
 * Only OpRegistry should create an Operator directly. Users should always
 * construct a proto message OpDesc and call OpRegistry::CreateOp(op_desc)
 * to get an Operator instance.
 */
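// A hedged usage sketch (assuming an OpDesc `op_desc` built elsewhere;
// OpRegistry::CreateOp is declared in op_registry.h):
//   std::unique_ptr<OperatorBase> op = OpRegistry::CreateOp(op_desc);
//   op->Run(scope, place);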
class OperatorBase {
 public:
  OperatorBase(const std::string& type, const VariableNameMap& inputs,
               const VariableNameMap& outputs, const AttributeMap& attrs);

  virtual ~OperatorBase() {}

  /// Executor will call this interface function to Run an op.
  //  The implementation should be written in RunImpl.
  void Run(const Scope& scope, const platform::Place& place);

  // FIXME(typhoonzero): this is only used for recv_op to stop event_loop.
  virtual void Stop() {}

  /// If scope is not null, also show the dimensions of arguments.
  virtual std::string DebugStringEx(const Scope* scope) const;
  std::string DebugString() const { return DebugStringEx(nullptr); }

  virtual bool SupportGPU() const { return false; }
  virtual bool SupportNPU() const { return false; }

  const std::string& Type() const { return type_; }

  bool HasAttr(const std::string& name) const { return attrs_.count(name); }
  template <typename T>
  inline const T& Attr(const std::string& name) const {
    PADDLE_ENFORCE_NE(
        attrs_.find(name), attrs_.end(),
        platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
    return BOOST_GET_CONST(T, attrs_.at(name));
  }
  void SetAttr(const std::string& name, const Attribute& v) {
    PADDLE_ENFORCE_EQ(
        HasAttr(name), true,
        platform::errors::NotFound(
            "The attribute %s is not found in operator %s", name, Type()));

    attrs_[name] = v;
  }
  const AttributeMap& Attrs() const { return attrs_; }
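  // Attribute access sketch: for an op whose AttributeMap holds, say, a
  // hypothetical float attribute "scale", Attr<float>("scale") returns its
  // value and HasAttr("scale") returns true.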

  const VariableNameMap& Inputs() const { return inputs_; }
  const VariableNameMap& Outputs() const { return outputs_; }

  const OpInfo& Info() const {
    PADDLE_ENFORCE_NOT_NULL(
        info_, platform::errors::NotFound(
                   "OpInfo of operator (%s) is not found.", type_));
    return *info_;
  }

  bool HasInputs(const std::string& name) const;
  //! Get an input with the argument's name described in `op_proto`.
  std::string Input(const std::string& name) const;
  //! Get an input which has multiple variables.
  const std::vector<std::string>& Inputs(const std::string& name) const;
  //! Get all input variable names.
  std::vector<std::string> InputVars() const;

  bool HasOutputs(const std::string& name) const;
  //! Get an output with the argument's name described in `op_proto`.
  std::string Output(const std::string& name) const;
  //! Get an output which has multiple variables.
  //! TODO: add a vector_view to prevent memory copy.
  const std::vector<std::string>& Outputs(const std::string& name) const;
  //! Get all output variable names.
  virtual std::vector<std::string> OutputVars(bool has_intermediate) const;

  void SetIsCalledByExecutor(bool x) { run_by_executor_ = x; }

  virtual void RuntimeInferShape(const Scope& scope,
                                 const platform::Place& place,
                                 const RuntimeContext& ctx) const {}

  virtual platform::Place GetExecutionPlace(
      const platform::Place& place) const {
    return place;
  }

 protected:
  std::string type_;
  // NOTE: in case of OpGrad, inputs_ contains:
  // I (Inputs)
  // O (Outputs)
  // OG (Output Gradients)
  VariableNameMap inputs_;

  // NOTE: in case of OpGrad, outputs_ contains
  // IG (Inputs Gradients)
  VariableNameMap outputs_;
  AttributeMap attrs_;

  // OpInfo
  const OpInfo* info_;

  // Whether this operator executes in an Executor.
  bool run_by_executor_{true};

 private:
  void GenerateTemporaryNames();
  void CheckAllInputOutputSet() const;
  virtual void RunImpl(const Scope& scope,
                       const platform::Place& place) const = 0;
};

class ExecutionContext {
 public:
  ExecutionContext(const OperatorBase& op, const Scope& scope,
                   const platform::DeviceContext& device_context,
                   const RuntimeContext& ctx)
      : op_(op), scope_(scope), device_context_(device_context), ctx_(ctx) {}
  virtual ~ExecutionContext() {}

  virtual std::string InputName(const std::string& name) const {
    return op_.Input(name);
  }
  virtual std::vector<std::string> InputNames(const std::string& name) const {
    return op_.Inputs(name);
  }
  virtual std::string OutputName(const std::string& name) const {
    return op_.Output(name);
  }

  virtual std::vector<std::string> OutputNames(const std::string& name) const {
    return op_.Outputs(name);
  }

  virtual bool HasAttr(const std::string& name) const {
    return op_.HasAttr(name);
  }
  virtual const AttributeMap& Attrs() const { return op_.Attrs(); }

  const std::string& Type() const { return op_.Type(); }

  const Scope& scope() const { return scope_; }

  template <typename T>
  inline const T& Attr(const std::string& name) const {
    return BOOST_GET_CONST(T, GetAttr(name));
  }

  virtual const Attribute& GetAttr(const std::string& name) const {
    return op_.Attrs().at(name);
  }

  virtual bool HasInput(const std::string& name) const;

  virtual bool HasOutput(const std::string& name) const;

  virtual size_t InputSize(const std::string& name) const {
    return op_.Inputs(name).size();
  }

  virtual size_t OutputSize(const std::string& name) const {
    return op_.Outputs(name).size();
  }

  virtual const Variable* InputVar(const std::string& name) const;

  virtual Variable* OutputVar(const std::string& name) const;

  virtual const std::vector<Variable*> MultiInputVar(
      const std::string& name) const {
    LogVarUsageIfUnusedVarCheckEnabled(name);

    auto it = ctx_.inputs.find(name);
    if (it == ctx_.inputs.end()) {
      return {};
    }
    return {it->second.begin(), it->second.end()};
  }

  virtual std::vector<Variable*> MultiOutputVar(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    if (it == ctx_.outputs.end()) {
      return {};
    }
    return it->second;
  }

  virtual std::vector<std::string> InNameList() const {
    std::vector<std::string> vec_temp;
    vec_temp.reserve(ctx_.inputs.size());

    for (auto& input : ctx_.inputs) {
      vec_temp.push_back(input.first);
    }

    return vec_temp;
  }

  template <typename T>
  const T* Input(const std::string& name) const {
    auto* var = InputVar(name);
    return var == nullptr ? nullptr : &var->Get<T>();
  }

  template <typename T>
  T* Output(const std::string& name) const {
    auto var = OutputVar(name);
    return var == nullptr ? nullptr : var->GetMutable<T>();
  }

  template <typename T>
  const std::vector<const T*> MultiInput(const std::string& name) const {
    LogVarUsageIfUnusedVarCheckEnabled(name);

    auto vars = MultiInputVar(name);
    if (vars.size() == 0) {
      return {};
    }
    std::vector<const T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](const Variable* var) -> const T* {
                     return var == nullptr ? nullptr : &var->Get<T>();
                   });
    return res;
  }

  template <typename T>
  std::vector<T*> MultiOutput(const std::string& name) const {
    auto vars = MultiOutputVar(name);

    if (vars.size() == 0) {
      return {};
    }

    std::vector<T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](Variable* var) -> T* {
                     return var == nullptr ? nullptr : var->GetMutable<T>();
                   });

    return res;
  }

  platform::Place GetPlace() const { return device_context_.GetPlace(); }

  template <typename DeviceContextType>
  const DeviceContextType& device_context() const {
    return *reinterpret_cast<const DeviceContextType*>(&device_context_);
  }
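
  // For example, a CUDA kernel might fetch its typed context as (sketch):
  //   auto& dev_ctx = ctx.device_context<platform::CUDADeviceContext>();
  //   auto stream = dev_ctx.stream();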

  const platform::DeviceContext& device_context() const {
    return device_context_;
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  const inline platform::CUDADeviceContext& cuda_device_context() const {
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true,
                      platform::errors::PreconditionNotMet(
                          "Current device context place is not GPUPlace."));
    return *reinterpret_cast<const platform::CUDADeviceContext*>(
        &device_context_);
  }
#endif

  template <typename T, typename DevContext>
  Tensor AllocateTmpTensor(const framework::DDim& dim,
                           const DevContext& dev_ctx) const {
    auto tmp_allocation_ptr = memory::Alloc(dev_ctx, product(dim) * sizeof(T));
    auto& deleter = tmp_allocation_ptr.get_deleter();
    auto* allocation_ptr = tmp_allocation_ptr.release();
    auto shared_allocation = std::shared_ptr<memory::allocation::Allocation>(
        allocation_ptr, deleter);

    PADDLE_ENFORCE_GE(
        allocation_ptr->size(), framework::product(dim) * sizeof(T),
        platform::errors::PreconditionNotMet(
            "The data memory size(%d) is less than the tensor needed memory "
            "size(%d).",
            allocation_ptr->size(), framework::product(dim) * sizeof(T)));

    paddle::framework::Tensor temp_tensor(
        framework::ToDataType(std::type_index(typeid(T))));
    temp_tensor.Resize(dim);
    temp_tensor.ResetHolder(std::move(shared_allocation));
    return temp_tensor;
  }
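
  // Usage sketch (hypothetical, inside a kernel's Compute with an
  // ExecutionContext `ctx` and a typed device context `dev_ctx`; the
  // dimension names are made up):
  //   Tensor tmp = ctx.AllocateTmpTensor<float, platform::CPUDeviceContext>(
  //       framework::make_ddim({batch_size, hidden_dim}), dev_ctx);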

  const RuntimeContext Context() const { return ctx_; }

  std::string DebugString() const { return op_.DebugString(); }
  const OperatorBase& GetOp() const { return op_; }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
  const platform::DeviceContext& device_context_;
  const RuntimeContext& ctx_;
};

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const;

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const;

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const;

class OpKernelBase {
 public:
  /**
   * ExecutionContext is the only parameter of the kernel's Compute function.
   * Compute will get input/output variables, state such as momentum, and
   * device resources such as the CUDA stream and cublas handle from the
   * ExecutionContext. Users should construct it before running the Operator.
   */

  virtual void Compute(const ExecutionContext& context) const = 0;

  virtual ~OpKernelBase() = default;
};

template <typename T>
class OpKernel : public OpKernelBase {
 public:
  using ELEMENT_TYPE = T;
};
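
// A minimal kernel sketch (illustrative only; the kernel name and the
// "X"/"Out" argument names are hypothetical, not part of this header):
//   template <typename T>
//   class ReluKernel : public OpKernel<T> {
//    public:
//     void Compute(const ExecutionContext& ctx) const override {
//       auto* x = ctx.Input<Tensor>("X");
//       auto* out = ctx.Output<Tensor>("Out");
//       out->Resize(x->dims());
//       // ... fill `out` with data placed on ctx.GetPlace() ...
//     }
//   };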

class OperatorWithKernel : public OperatorBase {
 public:
  using OpKernelFunc = std::function<void(const ExecutionContext&)>;
  using OpKernelMap =
      std::unordered_map<OpKernelType, OpKernelFunc, OpKernelType::Hash>;

  OperatorWithKernel(const std::string& type, const VariableNameMap& inputs,
                     const VariableNameMap& outputs, const AttributeMap& attrs)
      : OperatorBase(type, inputs, outputs, attrs) {}

  static paddle::flat_hash_map<std::string /* op_type */, OpKernelMap>&
  AllOpKernels() {
    static paddle::flat_hash_map<std::string, OpKernelMap> g_all_op_kernels;
    return g_all_op_kernels;
  }

  bool IsMKLDNNType() const {
    return ((this->kernel_type_) && (this->kernel_type_->data_layout_ ==
                                     framework::DataLayout::kMKLDNN));
  }

  bool SupportGPU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_gpu_place(kern_pair.first.place_);
                       });
  }
  bool SupportNPU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_npu_place(kern_pair.first.place_);
                       });
  }
  bool SupportsMKLDNN(proto::VarType::Type data_type) const;

  bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
                       proto::VarType::Type data_type) const;

  virtual void InferShape(InferShapeContext* ctx) const = 0;

  void RuntimeInferShape(const Scope& scope, const platform::Place& place,
                         const RuntimeContext& ctx) const override;

  proto::VarType::Type IndicateVarDataType(const ExecutionContext& ctx,
                                           const std::string& name) const;

  proto::VarType::Type IndicateOrPromoteVarDataTypes(
      const ExecutionContext& ctx, const std::string& name1,
      const std::string& name2) const;

  virtual OpKernelType GetExpectedKernelType(const ExecutionContext& ctx) const;

  // This is public so that in dygraph mode we can call it to check whether we
  // need to transform data.
  virtual OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const OpKernelType& expected_kernel_type) const;

  platform::Place GetExecutionPlace(
      const platform::Place& platform) const override {
    return kernel_type_->place_;
  }

 private:
  void RunImpl(const Scope& scope, const platform::Place& place) const final;
  void RunImpl(const Scope& scope, const platform::Place& place,
               RuntimeContext* runtime_ctx) const;

  /**
   * Transfer data from the scope to a transferred scope. If there is no data
   * that needs to be transferred, it returns nullptr.
   *
   * transfered_inplace_vars is an output vector.
   */
  Scope* PrepareData(const Scope& scope,
                     const OpKernelType& expected_kernel_key,
                     std::vector<std::string>* transfered_inplace_vars,
                     RuntimeContext* ctx) const;

  void TransferInplaceVarsBack(const Scope& scope,
                               const std::vector<std::string>& inplace_vars,
                               const Scope& exec_scope) const;

  void ChooseKernel(const RuntimeContext& ctx, const Scope& scope,
                    const platform::Place& place) const;

  void HandleComplexGradToRealGrad(const Scope& scope,
                                   RuntimeContext* ctx) const;

  /* Inner assist methods */
  // Indicate the kernel DataType by input data. By default, all input data
  // must be of the same type.
  proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
  // Used for IndicateDataType
  void ParseInputDataType(const ExecutionContext& ctx, const std::string& name,
                          proto::VarType::Type* type) const;
  // Used for IndicateOrPromoteVarDataTypes
  Tensor* GetTensorFormInputSafely(const ExecutionContext& ctx,
                                   const std::string& name) const;
 protected:
  mutable std::unique_ptr<OpKernelType> kernel_type_;
  mutable std::unique_ptr<OpKernelFunc> kernel_func_;
  mutable std::unique_ptr<RuntimeContext> runtime_ctx_;
  mutable const Scope* pre_scope_ = nullptr;
  mutable bool need_prepare_data_ = true;
  mutable bool enable_cache_runtime_context_ = false;
  mutable bool all_kernels_must_compute_runtime_shape_ = false;
  mutable std::mutex cache_update_mutex_;
  mutable bool enable_cache_transfer_scope_ = false;
};

extern bool OpSupportGPU(const std::string& op_type);

}  // namespace framework
}  // namespace paddle