/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <atomic>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "glog/logging.h"  // For VLOG
#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/unused_var_check.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/variant.h"

namespace paddle {
namespace framework {
class InferShapeContext;
class OpInfo;
class Scope;
class Variable;
}  // namespace framework
}  // namespace paddle

DECLARE_int32(inner_op_parallelism);

namespace paddle {
namespace framework {

/// If a variable is an empty variable, that name will be used.
constexpr char kEmptyVarName[] = "@EMPTY@";

/// If a variable is a temporary variable, its name will be set in Python,
/// but it will be converted to a unique name in the scope after OpCreator.
constexpr char kTempVarName[] = "@TEMP@";

/// If a variable's name has a certain suffix, it means that the
/// variable is the gradient of another variable.
/// e.g. Variable "x@GRAD" is the gradient of variable "x".
constexpr char kGradVarSuffix[] = "@GRAD";

constexpr size_t kGradVarSuffixSize = 5U;

/// Variables with this suffix are supposed to be filled up with zeros.
constexpr char kZeroVarSuffix[] = "@ZERO";

/// Variables with this suffix are the new Gradient.
constexpr char kNewGradSuffix[] = "@NEWGRAD@";

/// RuntimeContext is used to relate the input/output names of an Operator with
/// the corresponding variables in the name scope.
/// If an Op has the attribute kEnableCacheRuntimeContext, then within the same
/// name scope the Op's input/output names do not change across executions, so
/// its RuntimeContext can be created only at the first iteration of the Op's
/// execution to save elapsed time.
constexpr char kEnableCacheRuntimeContext[] = "@ENABLE_CACHE_RUNTIME_CONTEXT@";

/// If an Op has this attribute, all its kernels should calculate the output
/// variable's shape in the corresponding Compute() function, and
/// OperatorWithKernel::RunImpl() will skip calling this Op's InferShape()
/// function at runtime for speedup.
/// TODO(luotao): Note that this temporary attribute will be deleted once all
/// ops contain it.
constexpr char kAllKernelsMustComputeRuntimeShape[] =
    "@ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE@";

/* Define the kernel priority: the fallback order over multiple kernel types. */
extern std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;
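// (The actual list lives in the corresponding .cc file; a plausible ordering,
//  e.g. {CUDAPlace + kCUDNN, CUDAPlace + kPlain, CPUPlace + kMKLDNN,
//  CPUPlace + kPlain}, is tried from first to last when falling back.)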

inline std::string GradVarName(const std::string& var_name) {
  std::string result;
  result.reserve(var_name.size() + kGradVarSuffixSize);
  result += var_name;
  result += kGradVarSuffix;
  return result;
}

inline std::string GradOriginalVarName(const std::string& grad_var_name) {
  std::size_t pos = grad_var_name.rfind(kGradVarSuffix);
  if (pos == std::string::npos) {
    return grad_var_name;
  } else {
    return grad_var_name.substr(0, pos);
  }
}
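
// A short illustration of the two helpers above (the variable names are
// hypothetical examples):
//
//   GradVarName("x");               // -> "x@GRAD"
//   GradOriginalVarName("x@GRAD");  // -> "x"
//   GradOriginalVarName("x");       // no "@GRAD" suffix -> returns "x" as-is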

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var);
Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var);

class ExecutionContext;
class OperatorBase;

class RuntimeContext {
 public:
  RuntimeContext(const VariableNameMap& innames,
                 const VariableNameMap& outnames, const Scope& scope);

  RuntimeContext(const VariableValueMap& invars,
                 const VariableValueMap& outvars)
      : inputs(invars), outputs(outvars) {}

  VariableValueMap inputs;
  VariableValueMap outputs;
};
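
// Sketch of how a RuntimeContext is typically built (illustrative only): the
// first constructor resolves every input/output name against the scope, e.g.
//
//   RuntimeContext rt_ctx(op.Inputs(), op.Outputs(), scope);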

/**
 * OperatorBase has the basic elements that Net will call to do computation.
 * Only CreateOperator from OpRegistry creates an Operator directly. Users
 * should always construct a proto message OpDesc and call
 * OpRegistry::CreateOp(op_desc) to get an Operator instance.
 */
class OperatorBase {
 public:
  OperatorBase(const std::string& type, const VariableNameMap& inputs,
               const VariableNameMap& outputs, const AttributeMap& attrs);

  virtual ~OperatorBase() {}

  /// Executor will call this interface function to Run an op.
  /// The implementation should be written in RunImpl.
  void Run(const Scope& scope, const platform::Place& place);

  // FIXME(typhoonzero): this is only used for recv_op to stop event_loop.
  virtual void Stop() {}

  /// if scope is not null, also show dimensions of arguments
  virtual std::string DebugStringEx(const Scope* scope) const;
  std::string DebugString() const { return DebugStringEx(nullptr); }

  virtual bool SupportGPU() const { return false; }
  virtual bool SupportNPU() const { return false; }

  const std::string& Type() const { return type_; }

  bool HasAttr(const std::string& name) const { return attrs_.count(name); }
  template <typename T>
  inline const T& Attr(const std::string& name) const {
    PADDLE_ENFORCE_NE(
        attrs_.find(name), attrs_.end(),
        platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
    return BOOST_GET_CONST(T, attrs_.at(name));
  }
  void SetAttr(const std::string& name, const Attribute& v) {
    PADDLE_ENFORCE_EQ(
        HasAttr(name), true,
        platform::errors::NotFound(
            "The attribute %s is not found in operator %s", name, Type()));

    attrs_[name] = v;
  }
  const AttributeMap& Attrs() const { return attrs_; }
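
  // Hypothetical usage of the attribute accessors above:
  //
  //   if (op.HasAttr("axis")) {
  //     int axis = op.Attr<int>("axis");
  //     op.SetAttr("axis", axis + 1);
  //   }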

  const VariableNameMap& Inputs() const { return inputs_; }
  const VariableNameMap& Outputs() const { return outputs_; }

  const OpInfo& Info() const {
    PADDLE_ENFORCE_NOT_NULL(
        info_, platform::errors::NotFound(
                   "OpInfo of operator (%s) is not found.", type_));
    return *info_;
  }

  bool HasInputs(const std::string& name) const;
  //! Get an input with the argument's name described in `op_proto`
  std::string Input(const std::string& name) const;
  //! Get an input which has multiple variables.
  const std::vector<std::string>& Inputs(const std::string& name) const;
  //! Get all input variable names
  std::vector<std::string> InputVars() const;

  bool HasOutputs(const std::string& name) const;
  //! Get an output with the argument's name described in `op_proto`
  std::string Output(const std::string& name) const;
  //! Get an output which has multiple variables.
  //! TODO: add a vector_view to prevent memory copy.
  const std::vector<std::string>& Outputs(const std::string& name) const;
  //! Get all output variable names
  virtual std::vector<std::string> OutputVars(bool has_intermediate) const;

  void SetIsCalledByExecutor(bool x) { run_by_executor_ = x; }

  virtual void RuntimeInferShape(const Scope& scope,
                                 const platform::Place& place,
                                 const RuntimeContext& ctx) const {}

  virtual platform::Place GetExecutionPlace(
      const platform::Place& place) const {
    return place;
  }

 protected:
  std::string type_;
  // NOTE: in case of OpGrad, inputs_ contains:
  // I (Inputs)
  // O (Outputs)
  // OG (Output Gradients)
  VariableNameMap inputs_;

  // NOTE: in case of OpGrad, outputs_ contains
  // IG (Inputs Gradients)
  VariableNameMap outputs_;
  AttributeMap attrs_;

  // OpInfo
  const OpInfo* info_;

  // Whether this operator executes in an Executor.
  bool run_by_executor_{true};

 private:
  void GenerateTemporaryNames();
  void CheckAllInputOutputSet() const;
  virtual void RunImpl(const Scope& scope,
                       const platform::Place& place) const = 0;
};

class ExecutionContext {
 public:
  ExecutionContext(const OperatorBase& op, const Scope& scope,
                   const platform::DeviceContext& device_context,
                   const RuntimeContext& ctx)
      : op_(op), scope_(scope), device_context_(device_context), ctx_(ctx) {}
  virtual ~ExecutionContext() {}

  virtual std::string InputName(const std::string& name) const {
    return op_.Input(name);
  }
  virtual std::vector<std::string> InputNames(const std::string& name) const {
    return op_.Inputs(name);
  }
  virtual std::string OutputName(const std::string& name) const {
    return op_.Output(name);
  }

  virtual std::vector<std::string> OutputNames(const std::string& name) const {
    return op_.Outputs(name);
  }

  virtual bool HasAttr(const std::string& name) const {
    return op_.HasAttr(name);
  }
  virtual const AttributeMap& Attrs() const { return op_.Attrs(); }

  const std::string& Type() const { return op_.Type(); }

  const Scope& scope() const { return scope_; }

  template <typename T>
  inline const T& Attr(const std::string& name) const {
    return BOOST_GET_CONST(T, GetAttr(name));
  }

  virtual const Attribute& GetAttr(const std::string& name) const {
    return op_.Attrs().at(name);
  }

  virtual bool HasInput(const std::string& name) const;

  virtual bool HasOutput(const std::string& name) const;

  virtual size_t InputSize(const std::string& name) const {
    return op_.Inputs(name).size();
  }

  virtual size_t OutputSize(const std::string& name) const {
    return op_.Outputs(name).size();
  }

  virtual const Variable* InputVar(const std::string& name) const;

  virtual Variable* OutputVar(const std::string& name) const;

  virtual const std::vector<Variable*> MultiInputVar(
      const std::string& name) const {
    LogVarUsageIfUnusedVarCheckEnabled(name);

    auto it = ctx_.inputs.find(name);
    if (it == ctx_.inputs.end()) {
      return {};
    }
    return {it->second.begin(), it->second.end()};
  }

  virtual std::vector<Variable*> MultiOutputVar(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    if (it == ctx_.outputs.end()) {
      return {};
    }
    return it->second;
  }

  virtual std::vector<std::string> InNameList() const {
    std::vector<std::string> vec_temp;
    vec_temp.reserve(ctx_.inputs.size());

    for (auto& input : ctx_.inputs) {
      vec_temp.push_back(input.first);
    }

    return vec_temp;
  }

  template <typename T>
  const T* Input(const std::string& name) const {
    auto* var = InputVar(name);
    return var == nullptr ? nullptr : &var->Get<T>();
  }

  template <typename T>
  T* Output(const std::string& name) const {
    auto var = OutputVar(name);
    return var == nullptr ? nullptr : var->GetMutable<T>();
  }

  template <typename T>
  const std::vector<const T*> MultiInput(const std::string& name) const {
    LogVarUsageIfUnusedVarCheckEnabled(name);

    auto vars = MultiInputVar(name);
    if (vars.size() == 0) {
      return {};
    }
    std::vector<const T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](const Variable* var) -> const T* {
                     return var == nullptr ? nullptr : &var->Get<T>();
                   });
    return res;
  }

  template <typename T>
  std::vector<T*> MultiOutput(const std::string& name) const {
    auto vars = MultiOutputVar(name);

    if (vars.size() == 0) {
      return {};
    }

    std::vector<T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](Variable* var) -> T* {
                     return var == nullptr ? nullptr : var->GetMutable<T>();
                   });

    return res;
  }

  platform::Place GetPlace() const { return device_context_.GetPlace(); }

  template <typename DeviceContextType>
  const DeviceContextType& device_context() const {
    return *reinterpret_cast<const DeviceContextType*>(&device_context_);
  }

  const platform::DeviceContext& device_context() const {
    return device_context_;
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  const inline platform::CUDADeviceContext& cuda_device_context() const {
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true,
                      platform::errors::PreconditionNotMet(
                          "Current device context place is not GPUPlace."));
    return *reinterpret_cast<const platform::CUDADeviceContext*>(
        &device_context_);
  }
#endif

  template <typename T, typename DevContext>
  Tensor AllocateTmpTensor(const framework::DDim& dim,
                           const DevContext& dev_ctx) const {
    auto tmp_allocation_ptr = memory::Alloc(dev_ctx, product(dim) * sizeof(T));
    auto& deleter = tmp_allocation_ptr.get_deleter();
    auto* allocation_ptr = tmp_allocation_ptr.release();
    auto shared_allocation = std::shared_ptr<memory::allocation::Allocation>(
        allocation_ptr, deleter);

    PADDLE_ENFORCE_GE(
        allocation_ptr->size(), framework::product(dim) * sizeof(T),
        platform::errors::PreconditionNotMet(
            "The data memory size(%d) is less than the tensor needed memory "
            "size(%d).",
            allocation_ptr->size(), framework::product(dim) * sizeof(T)));

    paddle::framework::Tensor temp_tensor(
        framework::ToDataType(std::type_index(typeid(T))));
    temp_tensor.Resize(dim);
    temp_tensor.ResetHolder(std::move(shared_allocation));
    return temp_tensor;
  }
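
  // A minimal sketch of using AllocateTmpTensor inside a kernel (the element
  // type, shape, and device context here are hypothetical):
  //
  //   auto& dev_ctx = ctx.device_context<platform::CPUDeviceContext>();
  //   Tensor tmp = ctx.AllocateTmpTensor<float, platform::CPUDeviceContext>(
  //       framework::make_ddim({16, 16}), dev_ctx);
  //
  // The returned Tensor shares ownership of the allocation, so the memory is
  // released when the Tensor goes out of scope.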

  const RuntimeContext Context() const { return ctx_; }

  std::string DebugString() const { return op_.DebugString(); }
  const OperatorBase& GetOp() const { return op_; }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
  const platform::DeviceContext& device_context_;
  const RuntimeContext& ctx_;
};

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const;

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const;

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const;

class OpKernelBase {
 public:
  /**
   * ExecutionContext is the only parameter of a kernel's Compute function.
   * Compute will get input/output variables, state such as momentum, and
   * device resources such as the CUDA stream and cuBLAS handle from the
   * ExecutionContext. Users should construct it before running the Operator.
   */

  virtual void Compute(const ExecutionContext& context) const = 0;

  virtual ~OpKernelBase() = default;
};

template <typename T>
class OpKernel : public OpKernelBase {
 public:
  using ELEMENT_TYPE = T;
};
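
// A minimal sketch of a concrete kernel (the names are hypothetical; real
// kernels are also registered via the REGISTER_OP_*_KERNEL macros elsewhere):
//
//   template <typename DeviceContext, typename T>
//   class MyReluKernel : public OpKernel<T> {
//    public:
//     void Compute(const ExecutionContext& ctx) const override {
//       auto* x = ctx.Input<Tensor>("X");  // nullptr if "X" is not bound
//       auto* out = ctx.Output<Tensor>("Out");
//       out->mutable_data<T>(ctx.GetPlace());
//       auto& dev_ctx = ctx.template device_context<DeviceContext>();
//       // ... launch the actual computation on dev_ctx ...
//     }
//   };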

class OperatorWithKernel : public OperatorBase {
 public:
  using OpKernelFunc = std::function<void(const ExecutionContext&)>;
  using OpKernelMap =
      std::unordered_map<OpKernelType, OpKernelFunc, OpKernelType::Hash>;

  OperatorWithKernel(const std::string& type, const VariableNameMap& inputs,
                     const VariableNameMap& outputs, const AttributeMap& attrs)
      : OperatorBase(type, inputs, outputs, attrs) {}

  static std::unordered_map<std::string /* op_type */, OpKernelMap>&
  AllOpKernels() {
    static std::unordered_map<std::string, OpKernelMap> g_all_op_kernels;
    return g_all_op_kernels;
  }
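
  // Illustrative registry lookup (assumes an op type "relu" with registered
  // kernels; error handling omitted):
  //
  //   auto& kernels = OperatorWithKernel::AllOpKernels().at("relu");
  //   OpKernelType key(proto::VarType::FP32, platform::CPUPlace());
  //   auto it = kernels.find(key);  // it->second is the OpKernelFunc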

  bool IsMKLDNNType() const {
    return ((this->kernel_type_) && (this->kernel_type_->data_layout_ ==
                                     framework::DataLayout::kMKLDNN));
  }

  bool SupportGPU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_gpu_place(kern_pair.first.place_);
                       });
  }
  bool SupportNPU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_npu_place(kern_pair.first.place_);
                       });
  }
  bool SupportsMKLDNN(proto::VarType::Type data_type) const;

  bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
                       proto::VarType::Type data_type) const;

  virtual void InferShape(InferShapeContext* ctx) const = 0;

  void RuntimeInferShape(const Scope& scope, const platform::Place& place,
                         const RuntimeContext& ctx) const override;

  proto::VarType::Type IndicateVarDataType(const ExecutionContext& ctx,
                                           const std::string& name) const;

  proto::VarType::Type IndicateOrPromoteVarDataTypes(
      const ExecutionContext& ctx, const std::string& name1,
      const std::string& name2) const;

  virtual OpKernelType GetExpectedKernelType(const ExecutionContext& ctx) const;
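
  // A typical override in a concrete op might look like this (sketch; assumes
  // an input argument named "X"):
  //
  //   OpKernelType GetExpectedKernelType(const ExecutionContext& ctx) const {
  //     return OpKernelType(IndicateVarDataType(ctx, "X"), ctx.GetPlace());
  //   }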

  // This is public so that in dygraph mode it can be called to check whether
  // the data needs to be transformed.
  virtual OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const OpKernelType& expected_kernel_type) const;

  platform::Place GetExecutionPlace(
      const platform::Place& platform) const override {
    return kernel_type_->place_;
  }

 private:
  void RunImpl(const Scope& scope, const platform::Place& place) const final;
  void RunImpl(const Scope& scope, const platform::Place& place,
               RuntimeContext* runtime_ctx) const;

  /**
   * Transfer data from the scope to a transferred scope. If no data needs to
   * be transferred, it returns nullptr.
   *
   * * transfered_inplace_vars is an output vector.
   */
  Scope* PrepareData(const Scope& scope,
                     const OpKernelType& expected_kernel_key,
                     std::vector<std::string>* transfered_inplace_vars,
                     RuntimeContext* ctx) const;

  void TransferInplaceVarsBack(const Scope& scope,
                               const std::vector<std::string>& inplace_vars,
                               const Scope& exec_scope) const;

  void ChooseKernel(const RuntimeContext& ctx, const Scope& scope,
                    const platform::Place& place) const;

  void HandleComplexGradToRealGrad(const Scope& scope,
                                   RuntimeContext* ctx) const;

  /* Inner assist methods */
  // Indicate the kernel DataType by input data.
  // By default all input data must have the same type.
  proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
  // used for IndicateDataType
  void ParseInputDataType(const ExecutionContext& ctx, const std::string& name,
                          proto::VarType::Type* type) const;
  // used for IndicateOrPromoteVarDataTypes
  Tensor* GetTensorFormInputSafely(const ExecutionContext& ctx,
                                   const std::string& name) const;

 protected:
  mutable std::unique_ptr<OpKernelType> kernel_type_;
  mutable std::unique_ptr<OpKernelFunc> kernel_func_;
  mutable std::unique_ptr<RuntimeContext> runtime_ctx_;
  mutable const Scope* pre_scope_ = nullptr;
  mutable bool need_prepare_data_ = true;
  mutable bool enable_cache_runtime_context_ = false;
  mutable bool all_kernels_must_compute_runtime_shape_ = false;
  mutable std::mutex cache_update_mutex_;
  mutable bool enable_cache_transfer_scope_ = false;
};

extern bool OpSupportGPU(const std::string& op_type);

}  // namespace framework
}  // namespace paddle