/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <atomic>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "glog/logging.h"  // For VLOG
#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/unused_var_check.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/variant.h"

DECLARE_int32(inner_op_parallelism);

namespace paddle {
namespace framework {

/// If a variable is an empty variable, that name will be used.
constexpr char kEmptyVarName[] = "@EMPTY@";

/// If a variable is a temporary variable, its name will be set in Python,
/// but it will be converted to a unique name in the scope after OpCreator.
constexpr char kTempVarName[] = "@TEMP@";

/// If a variable's name has a certain suffix, it means that the
/// variable is the gradient of another variable.
/// e.g. Variable "x@GRAD" is the gradient of variable "x".
constexpr char kGradVarSuffix[] = "@GRAD";

constexpr size_t kGradVarSuffixSize = 5U;

/// Variables with this suffix are supposed to be filled up with zeros.
constexpr char kZeroVarSuffix[] = "@ZERO";

/// Variables with this suffix are the new Gradient.
constexpr char kNewGradSuffix[] = "@NEWGRAD@";

/// Variables with this suffix are loaded from a pre-trained model.
constexpr char kLoadedVarSuffix[] = "@LOADED";

/// RuntimeContext is used to relate the input/output names of an Operator
/// with the corresponding variables in the name scope.
/// If an Op has the attribute kEnableCacheRuntimeContext, its input/output
/// names do not change within the same name scope during execution, so the
/// RuntimeContext can be created only at the first iteration of this Op's
/// execution to save the elapsed time.
constexpr char kEnableCacheRuntimeContext[] = "@ENABLE_CACHE_RUNTIME_CONTEXT@";
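
// An illustrative sketch (hypothetical wiring; how the attribute is actually
// attached depends on the op definition): opting an op into caching.
//
//   AttributeMap attrs;
//   attrs[kEnableCacheRuntimeContext] = true;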

/// If an Op has this attribute, all its kernels should calculate the output
/// variable's shape in the corresponding Compute() function, and
/// OperatorWithKernel::RunImpl() will skip calling this Op's InferShape()
/// function at runtime for speedup.
/// TODO(luotao): Note that this temporary attribute will be deleted after all
/// ops contain it.
constexpr char kAllKernelsMustComputeRuntimeShape[] =
    "@ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE@";

// Define the fallback order among multiple kernel types.
extern std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;

inline std::string GradVarName(const std::string& var_name) {
  std::string result;
  result.reserve(var_name.size() + kGradVarSuffixSize);
  result += var_name;
  result += kGradVarSuffix;
  return result;
}

inline std::string GradOriginalVarName(const std::string& grad_var_name) {
  std::size_t pos = grad_var_name.rfind(kGradVarSuffix);
  if (pos == std::string::npos) {
    return grad_var_name;
  } else {
    return grad_var_name.substr(0, pos);
  }
}
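
// Illustrative behavior of the helpers above (a sketch, not executed code):
//
//   GradVarName("x");               // "x@GRAD"
//   GradOriginalVarName("x@GRAD");  // "x"
//   GradOriginalVarName("x");       // "x" (no suffix to strip)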

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var);
Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var);

class OperatorBase;
class ExecutionContext;

class RuntimeContext {
 public:
  RuntimeContext(const VariableNameMap& innames,
                 const VariableNameMap& outnames, const Scope& scope);

  RuntimeContext(const VariableValueMap& invars,
                 const VariableValueMap& outvars)
      : inputs(invars), outputs(outvars) {}

  VariableValueMap inputs;
  VariableValueMap outputs;
};

/**
 * OperatorBase has the basic elements that Net will call to do computation.
 * Only OpRegistry::CreateOp creates an Operator directly. Users should
 * always construct a proto message OpDesc and call
 * OpRegistry::CreateOp(op_desc) to get an Operator instance.
 */
class OperatorBase {
 public:
  OperatorBase(const std::string& type, const VariableNameMap& inputs,
               const VariableNameMap& outputs, const AttributeMap& attrs);

  virtual ~OperatorBase() {}

  /// Executor will call this interface function to Run an op.
  //  The actual implementation should be written in RunImpl.
  void Run(const Scope& scope, const platform::Place& place);

  // FIXME(typhoonzero): this is only used for recv_op to stop event_loop.
  virtual void Stop() {}

  /// If scope is not null, also show the dimensions of arguments.
  virtual std::string DebugStringEx(const Scope* scope) const;
  std::string DebugString() const { return DebugStringEx(nullptr); }

  virtual bool SupportGPU() const { return false; }

  const std::string& Type() const { return type_; }

  bool HasAttr(const std::string& name) const { return attrs_.count(name); }
  template <typename T>
  inline const T& Attr(const std::string& name) const {
    PADDLE_ENFORCE(attrs_.find(name) != attrs_.end(),
                   "%s should be in AttributeMap", name);
    return boost::get<T>(attrs_.at(name));
  }
  const AttributeMap& Attrs() const { return attrs_; }
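
  // Illustrative usage (a sketch; "scale" is a hypothetical attribute name):
  //
  //   if (op.HasAttr("scale")) {
  //     float scale = op.Attr<float>("scale");
  //   }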

  const VariableNameMap& Inputs() const { return inputs_; }
  const VariableNameMap& Outputs() const { return outputs_; }

  const OpInfo& Info() const {
    PADDLE_ENFORCE_NOT_NULL(info_, "OpInfo of %s is not found", type_);
    return *info_;
  }

  bool HasInputs(const std::string& name) const;
  //! Get an input with the argument's name described in `op_proto`.
  std::string Input(const std::string& name) const;
  //! Get an input which has multiple variables.
  const std::vector<std::string>& Inputs(const std::string& name) const;
  //! Get all input variable names.
  std::vector<std::string> InputVars() const;

  bool HasOutputs(const std::string& name) const;
  //! Get an output with the argument's name described in `op_proto`.
  std::string Output(const std::string& name) const;
  //! Get an output which has multiple variables.
  //! TODO: add a vector_view to prevent memory copy.
  const std::vector<std::string>& Outputs(const std::string& name) const;
  //! Get all output variable names.
  virtual std::vector<std::string> OutputVars(bool has_intermediate) const;

  void SetIsCalledByExecutor(bool x) { run_by_executor_ = x; }

  virtual void RuntimeInferShape(const Scope& scope,
                                 const platform::Place& place,
                                 const RuntimeContext& ctx) const {}

 protected:
  std::string type_;
  // NOTE: in case of OpGrad, inputs_ contains:
  // I (Inputs)
  // O (Outputs)
  // OG (Output Gradients)
  VariableNameMap inputs_;

  // NOTE: in case of OpGrad, outputs_ contains
  // IG (Inputs Gradients)
  VariableNameMap outputs_;
  AttributeMap attrs_;

  // OpInfo
  const OpInfo* info_;

  // Whether this operator executes in an Executor.
  bool run_by_executor_{true};

 private:
  void GenerateTemporaryNames();
  void CheckAllInputOutputSet() const;
  virtual void RunImpl(const Scope& scope,
                       const platform::Place& place) const = 0;
};

class ExecutionContext {
 public:
  ExecutionContext(const OperatorBase& op, const Scope& scope,
                   const platform::DeviceContext& device_context,
                   const RuntimeContext& ctx)
      : op_(op), scope_(scope), device_context_(device_context), ctx_(ctx) {}
  virtual ~ExecutionContext() {}

  virtual std::string InputName(const std::string& name) const {
    return op_.Input(name);
  }
  virtual std::vector<std::string> InputNames(const std::string& name) const {
    return op_.Inputs(name);
  }
  virtual std::string OutputName(const std::string& name) const {
    return op_.Output(name);
  }

  virtual std::vector<std::string> OutputNames(const std::string& name) const {
    return op_.Outputs(name);
  }

  virtual bool HasAttr(const std::string& name) const {
    return op_.HasAttr(name);
  }
  virtual const AttributeMap& Attrs() const { return op_.Attrs(); }

  const std::string& Type() const { return op_.Type(); }

  const Scope& scope() const { return scope_; }

  template <typename T>
  inline const T& Attr(const std::string& name) const {
    return boost::get<T>(GetAttr(name));
  }

  virtual const Attribute& GetAttr(const std::string& name) const {
    return op_.Attrs().at(name);
  }

  virtual bool HasInput(const std::string& name) const;

  virtual bool HasOutput(const std::string& name) const;

  virtual size_t InputSize(const std::string& name) const {
    return op_.Inputs(name).size();
  }

  virtual size_t OutputSize(const std::string& name) const {
    return op_.Outputs(name).size();
  }

  virtual const Variable* InputVar(const std::string& name) const;

  virtual Variable* OutputVar(const std::string& name) const;

  virtual const std::vector<Variable*> MultiInputVar(
      const std::string& name) const {
    LogVarUsageIfUnusedVarCheckEnabled(name);

    auto it = ctx_.inputs.find(name);
    if (it == ctx_.inputs.end()) {
      return {};
    }
    return {it->second.begin(), it->second.end()};
  }

  virtual std::vector<Variable*> MultiOutputVar(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    if (it == ctx_.outputs.end()) {
      return {};
    }
    return it->second;
  }

  virtual std::vector<std::string> InNameList() const {
    std::vector<std::string> vec_temp;
    vec_temp.reserve(ctx_.inputs.size());

    for (auto& input : ctx_.inputs) {
      vec_temp.push_back(input.first);
    }

    return vec_temp;
  }

  template <typename T>
  const T* Input(const std::string& name) const {
    auto* var = InputVar(name);
    return var == nullptr ? nullptr : &var->Get<T>();
  }

  template <typename T>
  T* Output(const std::string& name) const {
    auto var = OutputVar(name);
    return var == nullptr ? nullptr : var->GetMutable<T>();
  }

  template <typename T>
  const std::vector<const T*> MultiInput(const std::string& name) const {
    LogVarUsageIfUnusedVarCheckEnabled(name);

    auto vars = MultiInputVar(name);
    if (vars.size() == 0) {
      return {};
    }
    std::vector<const T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](const Variable* var) -> const T* {
                     return var == nullptr ? nullptr : &var->Get<T>();
                   });
    return res;
  }

  template <typename T>
  std::vector<T*> MultiOutput(const std::string& name) const {
    auto vars = MultiOutputVar(name);

    if (vars.size() == 0) {
      return {};
    }

    std::vector<T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](Variable* var) -> T* {
                     return var == nullptr ? nullptr : var->GetMutable<T>();
                   });

    return res;
  }
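
  // Illustrative usage from inside a kernel (a sketch; "X" and "Out" are
  // hypothetical argument names):
  //
  //   auto* x = ctx.Input<Tensor>("X");
  //   auto* out = ctx.Output<Tensor>("Out");
  //   auto xs = ctx.MultiInput<Tensor>("X");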

  platform::Place GetPlace() const { return device_context_.GetPlace(); }

  template <typename DeviceContextType>
  const DeviceContextType& device_context() const {
    return *reinterpret_cast<const DeviceContextType*>(&device_context_);
  }

  const platform::DeviceContext& device_context() const {
    return device_context_;
  }

#ifdef PADDLE_WITH_CUDA
  const inline platform::CUDADeviceContext& cuda_device_context() const {
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true);
    return *reinterpret_cast<const platform::CUDADeviceContext*>(
        &device_context_);
  }
#endif

  template <typename T, typename DevContext>
  Tensor AllocateTmpTensor(const framework::DDim& dim,
                           const DevContext& dev_ctx) const {
    auto tmp_allocation_ptr = memory::Alloc(dev_ctx, product(dim) * sizeof(T));
    auto& deleter = tmp_allocation_ptr.get_deleter();
    auto* allocation_ptr = tmp_allocation_ptr.release();
    auto shared_allocation = std::shared_ptr<memory::allocation::Allocation>(
        allocation_ptr, deleter);

    PADDLE_ENFORCE_GE(allocation_ptr->size(),
                      framework::product(dim) * sizeof(T));

    paddle::framework::Tensor temp_tensor(
        framework::ToDataType(std::type_index(typeid(T))));
    temp_tensor.Resize(dim);
    temp_tensor.ResetHolder(std::move(shared_allocation));
    return temp_tensor;
  }
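
  // Illustrative sketch: allocating a temporary float tensor on the kernel's
  // device (the device-context type here is just an example):
  //
  //   auto& dev_ctx = ctx.device_context<platform::CPUDeviceContext>();
  //   Tensor tmp = ctx.AllocateTmpTensor<float>(dim, dev_ctx);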

  const RuntimeContext Context() const { return ctx_; }

  std::string DebugString() const { return op_.DebugString(); }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
  const platform::DeviceContext& device_context_;
  const RuntimeContext& ctx_;
};

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const;

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const;

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const;

class OpKernelBase {
 public:
  /**
   * ExecutionContext is the only parameter of the kernel's Compute function.
   * Compute will get input/output variables, state such as momentum, and
   * device resources such as CUDA streams and cuBLAS handles from the
   * ExecutionContext. Users should construct it before running the Operator.
   */

  virtual void Compute(const ExecutionContext& context) const = 0;

  virtual ~OpKernelBase() = default;
};

template <typename T>
class OpKernel : public OpKernelBase {
 public:
  using ELEMENT_TYPE = T;
};
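
// A minimal sketch of a concrete kernel (illustrative only; MulKernel and the
// argument names "X"/"Out" are hypothetical):
//
//   template <typename T>
//   class MulKernel : public OpKernel<T> {
//    public:
//     void Compute(const ExecutionContext& ctx) const override {
//       auto* x = ctx.Input<Tensor>("X");
//       auto* out = ctx.Output<Tensor>("Out");
//       // ... fill *out from x using ctx.device_context() ...
//     }
//   };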

class OperatorWithKernel : public OperatorBase {
 public:
  using OpKernelFunc = std::function<void(const ExecutionContext&)>;
  using OpKernelMap =
      std::unordered_map<OpKernelType, OpKernelFunc, OpKernelType::Hash>;

  OperatorWithKernel(const std::string& type, const VariableNameMap& inputs,
                     const VariableNameMap& outputs, const AttributeMap& attrs)
      : OperatorBase(type, inputs, outputs, attrs) {}

  static std::unordered_map<std::string /* op_type */, OpKernelMap>&
  AllOpKernels() {
    static std::unordered_map<std::string, OpKernelMap> g_all_op_kernels;
    return g_all_op_kernels;
  }

  bool IsMKLDNNType() const {
    return ((this->kernel_type_) && (this->kernel_type_->data_layout_ ==
                                     framework::DataLayout::kMKLDNN));
  }

  bool SupportGPU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_gpu_place(kern_pair.first.place_);
                       });
  }

  virtual void InferShape(InferShapeContext* ctx) const = 0;

  void RuntimeInferShape(const Scope& scope, const platform::Place& place,
                         const RuntimeContext& ctx) const override;

  proto::VarType::Type IndicateVarDataType(const ExecutionContext& ctx,
                                           const std::string& name) const;

  virtual OpKernelType GetExpectedKernelType(const ExecutionContext& ctx) const;

  // NOTE: changed to public so that in dygraph mode we can call it to check
  // whether we need to transform data.
  virtual OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const OpKernelType& expected_kernel_type) const;

 private:
  void ParseInputDataType(const ExecutionContext& ctx, const std::string& name,
                          proto::VarType::Type* type) const;
  // Indicate the kernel DataType by input data. By default all input data
  // must have the same type.
  proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
  void RunImpl(const Scope& scope, const platform::Place& place) const final;
  void RunImpl(const Scope& scope, const platform::Place& place,
               RuntimeContext* runtime_ctx) const;

  /**
   * Transfer data from the scope to a transferred scope. If there is no data
   * that needs to be transferred, it returns nullptr.
   *
   * transfered_inplace_vars is an output vector.
   */
  Scope* PrepareData(const Scope& scope,
                     const OpKernelType& expected_kernel_key,
                     std::vector<std::string>* transfered_inplace_vars,
                     RuntimeContext* ctx) const;

  void TransferInplaceVarsBack(const Scope& scope,
                               const std::vector<std::string>& inplace_vars,
                               const Scope& exec_scope) const;

  void ChooseKernel(const RuntimeContext& ctx, const Scope& scope,
                    const platform::Place& place) const;

 protected:
  mutable std::unique_ptr<OpKernelType> kernel_type_;
  mutable std::unique_ptr<OpKernelFunc> kernel_func_;
  mutable std::unique_ptr<RuntimeContext> runtime_ctx_;
  mutable const Scope* pre_scope_ = nullptr;
  mutable bool need_prepare_data_ = true;
  mutable bool enable_cache_runtime_context_ = false;
  mutable bool all_kernels_must_compute_runtime_shape_ = false;
  mutable std::mutex cache_update_mutex_;
  mutable bool enable_cache_transfer_scope_ = false;
};

extern bool OpSupportGPU(const std::string& op_type);

}  // namespace framework
}  // namespace paddle