/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <atomic>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "glog/logging.h"  // For VLOG
#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/operator_kernel_configs.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/variant.h"

DECLARE_int32(inner_op_parallelism);

namespace paddle {
namespace framework {

/// If a variable is an empty variable, that name will be used.
constexpr char kEmptyVarName[] = "@EMPTY@";

/// If a variable is a temporary variable, that name will be set in Python,
/// but it will be converted to a unique name in the scope after OpCreator.
constexpr char kTempVarName[] = "@TEMP@";

/// If a variable's name has a certain suffix, it means that the
/// variable is the gradient of another variable.
/// e.g. Variable "x@GRAD" is the gradient of variable "x".
constexpr char kGradVarSuffix[] = "@GRAD";

constexpr size_t kGradVarSuffixSize = 5U;

/// Variables with this suffix are supposed to be filled up with zeros.
constexpr char kZeroVarSuffix[] = "@ZERO";

/// Variables with this suffix hold the new gradient.
constexpr char kNewGradSuffix[] = "@NEWGRAD@";

/// RuntimeContext is used to relate the input/output names of an Operator
/// with the corresponding variables in the name scope.
/// If an Op has the attribute kEnableCacheRuntimeContext, its input/output
/// names do not change across executions within the same name scope, so the
/// RuntimeContext can be created once at the first iteration of this Op's
/// execution and reused afterwards to save the elapsed time.
constexpr char kEnableCacheRuntimeContext[] = "@ENABLE_CACHE_RUNTIME_CONTEXT@";

/// If an Op has this attribute, all its kernels should calculate the output
/// variables' shapes in the corresponding Compute() function, and
/// OperatorWithKernel::RunImpl() will skip calling this Op's InferShape()
/// function at runtime for speedup.
/// TODO(luotao): Note that this temporary attribute will be deleted after all
/// ops contain it.
constexpr char kAllKernelsMustComputeRuntimeShape[] =
    "@ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE@";

/* Define kernel priority: the fallback order across multiple kernel types. */
extern std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;

inline std::string GradVarName(const std::string& var_name) {
  std::string result;
  result.reserve(var_name.size() + kGradVarSuffixSize);
  result += var_name;
  result += kGradVarSuffix;
  return result;
}

inline std::string GradOriginalVarName(const std::string& grad_var_name) {
  std::size_t pos = grad_var_name.rfind(kGradVarSuffix);
  if (pos == std::string::npos) {
    return grad_var_name;
  } else {
    return grad_var_name.substr(0, pos);
  }
}
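
// Examples (these follow directly from kGradVarSuffix above):
//   GradVarName("x")               returns "x@GRAD"
//   GradOriginalVarName("x@GRAD")  returns "x"
//   GradOriginalVarName("x")       returns "x" (no suffix to strip)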

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var);
Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var);

class OperatorBase;
class ExecutionContext;

class RuntimeContext {
 public:
  RuntimeContext(const VariableNameMap& innames,
                 const VariableNameMap& outnames, const Scope& scope);

  RuntimeContext(const VariableValueMap& invars,
                 const VariableValueMap& outvars)
      : inputs(invars), outputs(outvars) {}

  VariableValueMap inputs;
  VariableValueMap outputs;
};

/**
 * OperatorBase has the basic elements that Net will call to do computation.
 * Only CreateOperator from OpRegistry should create an Operator directly.
 * Users should always construct a proto message OpDesc and call
 * OpRegistry::CreateOp(op_desc) to get an Operator instance.
 */
class OperatorBase {
 public:
  OperatorBase(const std::string& type, const VariableNameMap& inputs,
               const VariableNameMap& outputs, const AttributeMap& attrs);

  virtual ~OperatorBase() {}

  /// Executor will call this interface function to Run an op.
  //  The implementation should be written in RunImpl.
  void Run(const Scope& scope, const platform::Place& place);

  // FIXME(typhoonzero): this is only used for recv_op to stop event_loop.
  virtual void Stop() {}

  /// if scope is not null, also show dimensions of arguments
  virtual std::string DebugStringEx(const Scope* scope) const;
  std::string DebugString() const { return DebugStringEx(nullptr); }

  virtual bool SupportGPU() const { return false; }

  const std::string& Type() const { return type_; }

  bool HasAttr(const std::string& name) const { return attrs_.count(name); }
  template <typename T>
  inline const T& Attr(const std::string& name) const {
    PADDLE_ENFORCE(attrs_.find(name) != attrs_.end(),
                   "%s should be in AttributeMap", name);
    return boost::get<T>(attrs_.at(name));
  }
  const AttributeMap& Attrs() const { return attrs_; }

  const VariableNameMap& Inputs() const { return inputs_; }
  const VariableNameMap& Outputs() const { return outputs_; }

  const OpInfo& Info() const {
    PADDLE_ENFORCE_NOT_NULL(info_, "OpInfo of %s is not found", type_);
    return *info_;
  }

  bool HasInputs(const std::string& name) const;
  //! Get an input with the argument's name described in `op_proto`
  std::string Input(const std::string& name) const;
  //! Get an input which has multiple variables.
  const std::vector<std::string>& Inputs(const std::string& name) const;
  //! Get all input variable names
  std::vector<std::string> InputVars() const;

  bool HasOutputs(const std::string& name) const;
  //! Get an output with the argument's name described in `op_proto`
  std::string Output(const std::string& name) const;
  //! Get an output which has multiple variables.
  //! TODO: add a vector_view to prevent memory copies.
  const std::vector<std::string>& Outputs(const std::string& name) const;
  //! Get all output variable names
  virtual std::vector<std::string> OutputVars(bool has_intermediate) const;

  void SetIsCalledByExecutor(bool x) { run_by_executor_ = x; }
  virtual void RuntimeInferShape(const Scope& scope,
                                 const platform::Place& place,
                                 const RuntimeContext& ctx) const {}

 protected:
  std::string type_;
  // NOTE: in case of OpGrad, inputs_ contains:
  // I (Inputs)
  // O (Outputs)
  // OG (Output Gradients)
  VariableNameMap inputs_;

  // NOTE: in case of OpGrad, outputs_ contains
  // IG (Inputs Gradients)
  VariableNameMap outputs_;
  AttributeMap attrs_;

  // OpInfo
  const OpInfo* info_;

  // Whether this operator executes in an Executor.
  bool run_by_executor_{true};

 private:
  void GenerateTemporaryNames();
  void CheckAllInputOutputSet() const;
  virtual void RunImpl(const Scope& scope,
                       const platform::Place& place) const = 0;
};
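
// A minimal usage sketch (assumes "paddle/fluid/framework/op_registry.h" is
// included and that op_desc, scope, and place have been prepared by the
// caller):
//
//   std::unique_ptr<OperatorBase> op = OpRegistry::CreateOp(op_desc);
//   op->Run(scope, place);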

#ifdef PADDLE_WITH_CUDA
using KernelConfig = boost::variant<
    std::shared_ptr<AlgorithmsCache<cudnnConvolutionFwdAlgo_t>>,
    std::shared_ptr<AlgorithmsCache<cudnnConvolutionBwdDataAlgo_t>>,
    std::shared_ptr<AlgorithmsCache<cudnnConvolutionBwdFilterAlgo_t>>>;
#else
using KernelConfig = boost::variant<boost::blank>;
#endif

using OpKernelConfigsMap =
    std::unordered_map<OpKernelType, std::vector<KernelConfig>,
                       OpKernelType::Hash>;

class ExecutionContext {
 public:
  ExecutionContext(const OperatorBase& op, const Scope& scope,
                   const platform::DeviceContext& device_context,
                   const RuntimeContext& ctx,
                   std::vector<KernelConfig>* configs)
      : op_(op),
        scope_(scope),
        device_context_(device_context),
        ctx_(ctx),
        kernel_configs_(configs) {}
  virtual ~ExecutionContext() {}

  virtual std::string InputName(const std::string& name) const {
    return op_.Input(name);
  }
  virtual std::vector<std::string> InputNames(const std::string& name) const {
    return op_.Inputs(name);
  }
  virtual std::string OutputName(const std::string& name) const {
    return op_.Output(name);
  }

  virtual std::vector<std::string> OutputNames(const std::string& name) const {
    return op_.Outputs(name);
  }

  virtual bool HasAttr(const std::string& name) const {
    return op_.HasAttr(name);
  }
  virtual const AttributeMap& Attrs() const { return op_.Attrs(); }

  const std::string& Type() const { return op_.Type(); }

  const Scope& scope() const { return scope_; }

  template <typename T>
  inline const T& Attr(const std::string& name) const {
    return boost::get<T>(GetAttr(name));
  }

  virtual const Attribute& GetAttr(const std::string& name) const {
    return op_.Attrs().at(name);
  }

  virtual bool HasInput(const std::string& name) const;

  virtual bool HasOutput(const std::string& name) const;

  virtual size_t InputSize(const std::string& name) const {
    return op_.Inputs(name).size();
  }

  virtual size_t OutputSize(const std::string& name) const {
    return op_.Outputs(name).size();
  }

  virtual const Variable* InputVar(const std::string& name) const;

  virtual Variable* OutputVar(const std::string& name) const;

  virtual const std::vector<Variable*> MultiInputVar(
      const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    if (it == ctx_.inputs.end()) {
      return {};
    }
    return {it->second.begin(), it->second.end()};
  }

  virtual std::vector<Variable*> MultiOutputVar(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    if (it == ctx_.outputs.end()) {
      return {};
    }
    return it->second;
  }

  virtual std::vector<std::string> InNameList() const {
    std::vector<std::string> vec_temp;
    vec_temp.reserve(ctx_.inputs.size());

    for (auto& input : ctx_.inputs) {
      vec_temp.push_back(input.first);
    }

    return vec_temp;
  }

  template <typename T>
  const T* Input(const std::string& name) const {
    auto* var = InputVar(name);
    return var == nullptr ? nullptr : &var->Get<T>();
  }

  template <typename T>
  T* Output(const std::string& name) const {
    auto var = OutputVar(name);
    return var == nullptr ? nullptr : var->GetMutable<T>();
  }

  template <typename T>
  const std::vector<const T*> MultiInput(const std::string& name) const {
    auto vars = MultiInputVar(name);
    if (vars.size() == 0) {
      return {};
    }
    std::vector<const T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](const Variable* var) -> const T* {
                     return var == nullptr ? nullptr : &var->Get<T>();
                   });
    return res;
  }

  template <typename T>
  std::vector<T*> MultiOutput(const std::string& name) const {
    auto vars = MultiOutputVar(name);

    if (vars.size() == 0) {
      return {};
    }

    std::vector<T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](Variable* var) -> T* {
                     return var == nullptr ? nullptr : var->GetMutable<T>();
                   });

    return res;
  }

  platform::Place GetPlace() const { return device_context_.GetPlace(); }

  template <typename DeviceContextType>
  const DeviceContextType& device_context() const {
    return *reinterpret_cast<const DeviceContextType*>(&device_context_);
  }

  const platform::DeviceContext& device_context() const {
    return device_context_;
  }

#ifdef PADDLE_WITH_CUDA
  const inline platform::CUDADeviceContext& cuda_device_context() const {
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true);
    return *reinterpret_cast<const platform::CUDADeviceContext*>(
        &device_context_);
  }
#endif

  template <typename T, typename DevContext>
  Tensor AllocateTmpTensor(const framework::DDim& dim,
                           const DevContext& dev_ctx) const {
    auto tmp_allocation_ptr = memory::Alloc(dev_ctx, product(dim) * sizeof(T));
    auto& deleter = tmp_allocation_ptr.get_deleter();
    auto* allocation_ptr = tmp_allocation_ptr.release();
    auto shared_allocation = std::shared_ptr<memory::allocation::Allocation>(
        allocation_ptr, deleter);

    PADDLE_ENFORCE_GE(allocation_ptr->size(),
                      framework::product(dim) * sizeof(T));

    paddle::framework::Tensor temp_tensor(
        framework::ToDataType(std::type_index(typeid(T))));
    temp_tensor.Resize(dim);
    temp_tensor.ResetHolder(std::move(shared_allocation));
    return temp_tensor;
  }
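
  // Illustrative use inside a kernel (a sketch; "m", "n", and "cuda_dev_ctx"
  // are assumptions, not part of this header):
  //   auto tmp = ctx.AllocateTmpTensor<float, platform::CUDADeviceContext>(
  //       framework::make_ddim({m, n}), cuda_dev_ctx);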

  template <typename T>
  T& GetKernelConfig(size_t idx) const {
    PADDLE_ENFORCE(
        kernel_configs_ && kernel_configs_->size() > static_cast<size_t>(idx),
        "%s selected kernel doesn't have kernel config %lu <= %lu",
        op_.Type().c_str(), kernel_configs_->size(), idx);
    return *boost::get<std::shared_ptr<T>>((*kernel_configs_)[idx]);
  }

  const RuntimeContext Context() const { return ctx_; }

  std::string DebugString() const { return op_.DebugString(); }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
  const platform::DeviceContext& device_context_;
  const RuntimeContext& ctx_;
  mutable std::vector<KernelConfig>* kernel_configs_;
};
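
// Typical access patterns inside a kernel's Compute() (a sketch; the names
// "X", "W", "Out", and "scale" are illustrative assumptions):
//
//   auto* x = ctx.Input<Tensor>("X");        // single input
//   auto ws = ctx.MultiInput<Tensor>("W");   // list of inputs
//   auto* out = ctx.Output<Tensor>("Out");   // single output
//   float scale = ctx.Attr<float>("scale");  // attribute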

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const;

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const;

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const;

class OpKernelBase {
 public:
  /**
   * ExecutionContext is the only parameter of Kernel Run function.
   * Run will get input/output variables, state such as momentum and
   * device resource such as CUDA stream, cublas handle, etc. from
   * ExecutionContext. Users should construct it before running the Operator.
   */

  virtual void Compute(const ExecutionContext& context) const = 0;

  virtual ~OpKernelBase() = default;
};

template <typename T>
class OpKernel : public OpKernelBase {
 public:
  using ELEMENT_TYPE = T;
};
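
// A minimal kernel sketch (illustrative; "MyKernel" and the variable names
// "X"/"Out" are assumptions, not part of this header):
//
//   template <typename T>
//   class MyKernel : public OpKernel<T> {
//    public:
//     void Compute(const ExecutionContext& ctx) const override {
//       auto* x = ctx.Input<Tensor>("X");
//       auto* out = ctx.Output<Tensor>("Out");
//       out->Resize(x->dims());
//       out->mutable_data<T>(ctx.GetPlace());
//       // ... compute *out from *x ...
//     }
//   };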

class OperatorWithKernel : public OperatorBase {
 public:
  using OpKernelFunc = std::function<void(const ExecutionContext&)>;
  using OpKernelMap =
      std::unordered_map<OpKernelType, OpKernelFunc, OpKernelType::Hash>;

  OperatorWithKernel(const std::string& type, const VariableNameMap& inputs,
                     const VariableNameMap& outputs, const AttributeMap& attrs)
      : OperatorBase(type, inputs, outputs, attrs) {}

  static std::unordered_map<std::string /* op_type */, OpKernelMap>&
  AllOpKernels() {
    static std::unordered_map<std::string, OpKernelMap> g_all_op_kernels;
    return g_all_op_kernels;
  }

  bool IsMKLDNNType() const {
    return ((this->kernel_type_) && (this->kernel_type_->data_layout_ ==
                                     framework::DataLayout::kMKLDNN));
  }

  bool SupportGPU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_gpu_place(kern_pair.first.place_);
                       });
  }

  virtual void InferShape(InferShapeContext* ctx) const {
    Info().infer_shape_(ctx);
  }

  void RuntimeInferShape(const Scope& scope, const platform::Place& place,
                         const RuntimeContext& ctx) const override;

  proto::VarType::Type IndicateVarDataType(const ExecutionContext& ctx,
                                           const std::string& name) const;

  virtual OpKernelType GetExpectedKernelType(const ExecutionContext& ctx) const;

  std::vector<KernelConfig>* GetKernelConfig(const OpKernelType& key) const;

  // change this to public so that in dygraph mode we can call it to check if we
  // need transform data
  virtual OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const OpKernelType& expected_kernel_type) const;

 private:
  void ParseInputDataType(const ExecutionContext& ctx, const std::string& name,
                          proto::VarType::Type* type) const;
  // Indicate the kernel DataType by input data. By default all input data
  // must be the same.
  proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
  void RunImpl(const Scope& scope, const platform::Place& place) const final;
  void RunImpl(const Scope& scope, const platform::Place& place,
               RuntimeContext* runtime_ctx) const;

  /**
   * Transfer data from the scope to a transferred scope. If no data needs to
   * be transferred, this returns nullptr.
   *
   * transfered_inplace_vars is an output vector.
   */
  Scope* PrepareData(const Scope& scope,
                     const OpKernelType& expected_kernel_key,
                     std::vector<std::string>* transfered_inplace_vars,
                     RuntimeContext* ctx) const;

  void TransferInplaceVarsBack(const Scope& scope,
                               const std::vector<std::string>& inplace_vars,
                               const Scope& exec_scope) const;

  void ChooseKernel(const RuntimeContext& ctx, const Scope& scope,
                    const platform::Place& place) const;

 protected:
  mutable OpKernelConfigsMap kernel_configs_map_;
  mutable std::unique_ptr<OpKernelType> kernel_type_;
  mutable std::unique_ptr<OpKernelFunc> kernel_func_;
  mutable std::unique_ptr<RuntimeContext> runtime_ctx_;
  mutable const Scope* pre_scope_ = nullptr;
  mutable bool enable_cache_runtime_context_ = false;
  mutable bool all_kernels_must_compute_runtime_shape_ = false;
  mutable std::mutex cache_update_mutex_;
  mutable bool enable_cache_transfer_scope_ = false;
};
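
// A sketch of how a concrete op plugs into this machinery (illustrative;
// "MyOp", "MyKernel", and "my_op" are assumptions, and the REGISTER_* macros
// come from "paddle/fluid/framework/op_registry.h"):
//
//   class MyOp : public OperatorWithKernel {
//    public:
//     using OperatorWithKernel::OperatorWithKernel;
//     OpKernelType GetExpectedKernelType(
//         const ExecutionContext& ctx) const override {
//       return OpKernelType(IndicateVarDataType(ctx, "X"), ctx.GetPlace());
//     }
//   };
//   REGISTER_OP_CPU_KERNEL(my_op,
//                          MyKernel<platform::CPUDeviceContext, float>);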

extern bool OpSupportGPU(const std::string& op_type);

}  // namespace framework
}  // namespace paddle