/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <atomic>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "glog/logging.h"  // For VLOG
#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/framework.pb.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/operator_kernel_configs.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/variant.h"

DECLARE_int32(inner_op_parallelism);

namespace paddle {
namespace framework {

/// If a variable is an empty variable, this name will be used.
constexpr char kEmptyVarName[] = "@EMPTY@";

/// If a variable is a temporary variable, its name will be set in Python,
/// but it will be converted to a unique name in scope after OpCreator.
constexpr char kTempVarName[] = "@TEMP@";

/// If a variable's name has a certain suffix, it means that the
/// variable is the gradient of another variable.
/// e.g. Variable "x@GRAD" is the gradient of variable "x".
constexpr char kGradVarSuffix[] = "@GRAD";

constexpr size_t kGradVarSuffixSize = 5U;

/// Variables with this suffix are supposed to be filled up with zeros.
constexpr char kZeroVarSuffix[] = "@ZERO";

/// Variables with this suffix are the new Gradient.
constexpr char kNewGradSuffix[] = "@NEWGRAD@";

/// RuntimeContext is used to relate the input/output names of an Operator
/// with the corresponding variables in scope.
/// If an Op has the attribute kEnableCacheRuntimeContext, then within the
/// same scope, since the input/output names of this Op do not change during
/// execution, the RuntimeContext can be created only at the first iteration
/// of this Op's execution to save the elapsed time.
constexpr char kEnableCacheRuntimeContext[] = "@ENABLE_CACHE_RUNTIME_CONTEXT@";

/// If an Op has this attribute, all its kernels should calculate output
/// variable's shape in the corresponding Compute() function. And
/// OperatorWithKernel::RunImpl() would skip calling this Op's InferShape()
/// function at runtime for speedup.
/// TODO(luotao): Note that this temporary attribute would be deleted after all
/// ops contain it.
constexpr char kAllKernelsMustComputeRuntimeShape[] =
    "@ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE@";

// Kernel priority: defines the fallback order among multiple kernel types.
extern std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;

inline std::string GradVarName(const std::string& var_name) {
  std::string result;
  result.reserve(var_name.size() + kGradVarSuffixSize);
  result += var_name;
  result += kGradVarSuffix;
  return result;
}

inline std::string GradOriginalVarName(const std::string& grad_var_name) {
  std::size_t pos = grad_var_name.rfind(kGradVarSuffix);
  if (pos == std::string::npos) {
    return grad_var_name;
  } else {
    return grad_var_name.substr(0, pos);
  }
}
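// Illustrative behavior (a sketch; "x" is a hypothetical variable name):
//   GradVarName("x")              -> "x@GRAD"
//   GradOriginalVarName("x@GRAD") -> "x"
//   GradOriginalVarName("x")      -> "x"   (no @GRAD suffix to strip)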

proto::VarType::Type GetDataTypeOfVar(const Variable* var);
const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var);
Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var);

class OperatorBase;
class ExecutionContext;

class RuntimeContext {
 public:
  RuntimeContext(const VariableNameMap& innames,
                 const VariableNameMap& outnames, const Scope& scope);

  RuntimeContext(const VariableValueMap& invars,
                 const VariableValueMap& outvars)
      : inputs(invars), outputs(outvars) {}

  VariableValueMap inputs;
  VariableValueMap outputs;
};
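// For example (illustrative): if an op's input argument "X" is bound to two
// variables x0 and x1, then `inputs["X"]` holds their two Variable* in order.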

/**
 * OperatorBase has the basic elements that Net will call to do computation.
 * Only CreateOperator from OpRegistry will new an Operator directly. Users
 * should always construct a proto message OpDesc and call
 * OpRegistry::CreateOp(op_desc) to get an Operator instance.
 */
class OperatorBase {
 public:
  OperatorBase(const std::string& type, const VariableNameMap& inputs,
               const VariableNameMap& outputs, const AttributeMap& attrs);

  virtual ~OperatorBase() {}

  /// Executor will call this interface function to Run an op.
  //  The implementation should be written in RunImpl.
  void Run(const Scope& scope, const platform::Place& place);

  // FIXME(typhoonzero): this is only used for recv_op to stop event_loop.
  virtual void Stop() {}

  /// If scope is not null, also show dimensions of arguments.
  virtual std::string DebugStringEx(const Scope* scope) const;
  std::string DebugString() const { return DebugStringEx(nullptr); }

  virtual bool SupportGPU() const { return false; }

  const std::string& Type() const { return type_; }

  bool HasAttr(const std::string& name) const { return attrs_.count(name); }
  template <typename T>
  inline const T& Attr(const std::string& name) const {
    PADDLE_ENFORCE(attrs_.find(name) != attrs_.end(),
                   "%s should be in AttributeMap", name);
    return boost::get<T>(attrs_.at(name));
  }
  const AttributeMap& Attrs() const { return attrs_; }

  const VariableNameMap& Inputs() const { return inputs_; }
  const VariableNameMap& Outputs() const { return outputs_; }

  const OpInfo& Info() const {
    PADDLE_ENFORCE_NOT_NULL(info_, "OpInfo of %s is not found", type_);
    return *info_;
  }

  bool HasInputs(const std::string& name) const;
  //! Get an input with the argument name described in `op_proto`.
  std::string Input(const std::string& name) const;
  //! Get an input which has multiple variables.
  const std::vector<std::string>& Inputs(const std::string& name) const;
  //! Get all input variable names.
  std::vector<std::string> InputVars() const;

  bool HasOutputs(const std::string& name) const;
  //! Get an output with the argument name described in `op_proto`.
  std::string Output(const std::string& name) const;
  //! Get an output which has multiple variables.
  //! TODO: add a vector_view to prevent memory copy.
  const std::vector<std::string>& Outputs(const std::string& name) const;
  //! Get all output variable names.
  virtual std::vector<std::string> OutputVars(bool has_intermediate) const;

  void SetIsCalledByExecutor(bool x) { run_by_executor_ = x; }
  virtual void RuntimeInferShape(const Scope& scope,
                                 const platform::Place& place,
                                 const RuntimeContext& ctx) const {}

 protected:
  std::string type_;
  // NOTE: in case of OpGrad, inputs_ contains:
  // I (Inputs)
  // O (Outputs)
  // OG (Output Gradients)
  VariableNameMap inputs_;

  // NOTE: in case of OpGrad, outputs_ contains
  // IG (Inputs Gradients)
  VariableNameMap outputs_;
  AttributeMap attrs_;

  // OpInfo
  const OpInfo* info_;

  // Whether this operator executes in an Executor.
  bool run_by_executor_{true};

 private:
  void GenerateTemporaryNames();
  void CheckAllInputOutputSet() const;
  virtual void RunImpl(const Scope& scope,
                       const platform::Place& place) const = 0;
};

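// Typical creation flow (an illustrative sketch; `op_desc` stands for an
// already-populated OpDesc of a registered op, and `scope`/`place` for an
// existing Scope and Place):
//   auto op = OpRegistry::CreateOp(op_desc);
//   op->Run(scope, place);  // dispatches to the concrete RunImpl
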
#ifdef PADDLE_WITH_CUDA
using KernelConfig = boost::variant<
    std::shared_ptr<AlgorithmsCache<cudnnConvolutionFwdAlgo_t>>,
    std::shared_ptr<AlgorithmsCache<cudnnConvolutionBwdDataAlgo_t>>,
    std::shared_ptr<AlgorithmsCache<cudnnConvolutionBwdFilterAlgo_t>>>;
#else
using KernelConfig = boost::variant<boost::blank>;
#endif

using OpKernelConfigsMap =
    std::unordered_map<OpKernelType, std::vector<KernelConfig>,
                       OpKernelType::Hash>;

class ExecutionContext {
 public:
  ExecutionContext(const OperatorBase& op, const Scope& scope,
                   const platform::DeviceContext& device_context,
                   const RuntimeContext& ctx,
                   std::vector<KernelConfig>* configs)
      : op_(op),
        scope_(scope),
        device_context_(device_context),
        ctx_(ctx),
        kernel_configs_(configs) {}

  const OperatorBase& op() const { return op_; }

  const Scope& scope() const { return scope_; }

  template <typename T>
  inline const T& Attr(const std::string& name) const {
    return op_.Attr<T>(name);
  }

  bool HasInput(const std::string& name) const;

  bool HasOutput(const std::string& name) const;

  size_t InputSize(const std::string& name) const {
    return op_.Inputs(name).size();
  }

  size_t OutputSize(const std::string& name) const {
    return op_.Outputs(name).size();
  }

  const Variable* InputVar(const std::string& name) const;

  Variable* OutputVar(const std::string& name) const;

  const std::vector<const Variable*> MultiInputVar(
      const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    if (it == ctx_.inputs.end()) {
      return {};
    }
    return {it->second.begin(), it->second.end()};
  }

  std::vector<Variable*> MultiOutputVar(const std::string& name) const {
    auto names = op_.Outputs(name);
    auto it = ctx_.outputs.find(name);
    if (it == ctx_.outputs.end()) {
      return {};
    }
    return it->second;
  }

  template <typename T>
  const T* Input(const std::string& name) const {
    auto* var = InputVar(name);
    return var == nullptr ? nullptr : &var->Get<T>();
  }

  template <typename T>
  T* Output(const std::string& name) const {
    auto var = OutputVar(name);
    return var == nullptr ? nullptr : var->GetMutable<T>();
  }

  template <typename T>
  const std::vector<const T*> MultiInput(const std::string& name) const {
    auto it = ctx_.inputs.find(name);
    if (it == ctx_.inputs.end()) {
      return {};
    }
    const std::vector<Variable*>& vars = it->second;
    std::vector<const T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](Variable* var) -> const T* {
                     return var == nullptr ? nullptr : &var->Get<T>();
                   });
    return res;
  }

  template <typename T>
  std::vector<T*> MultiOutput(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    if (it == ctx_.outputs.end()) {
      return {};
    }
    const std::vector<Variable*>& vars = it->second;
    std::vector<T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](Variable* var) -> T* {
                     return var == nullptr ? nullptr : var->GetMutable<T>();
                   });
    return res;
  }

  platform::Place GetPlace() const { return device_context_.GetPlace(); }

  template <typename DeviceContextType>
  const DeviceContextType& device_context() const {
    return *reinterpret_cast<const DeviceContextType*>(&device_context_);
  }

  const platform::DeviceContext& device_context() const {
    return device_context_;
  }

#ifdef PADDLE_WITH_CUDA
  const inline platform::CUDADeviceContext& cuda_device_context() const {
    PADDLE_ENFORCE(platform::is_gpu_place(device_context_.GetPlace()));
    return *reinterpret_cast<const platform::CUDADeviceContext*>(
        &device_context_);
  }
#endif

  //! Get the actual name vector for this input.
  const std::vector<std::string>& Inputs(const std::string& name) const {
    return op_.Inputs(name);
  }

  //! Get the actual name vector for this output.
  const std::vector<std::string>& Outputs(const std::string& name) const {
    return op_.Outputs(name);
  }

  template <typename T, typename DevContext>
  Tensor AllocateTmpTensor(const framework::DDim& dim,
                           const DevContext& dev_ctx) const {
    auto tmp_allocation_ptr = platform::DeviceTemporaryAllocator::Instance()
                                  .Get<DevContext>(dev_ctx)
                                  .Allocate(product(dim) * sizeof(T));
    auto& deleter = tmp_allocation_ptr.get_deleter();
    auto* allocation_ptr = tmp_allocation_ptr.release();
    auto shared_allocation = std::shared_ptr<memory::allocation::Allocation>(
        allocation_ptr, deleter);

    PADDLE_ENFORCE_GE(allocation_ptr->size(),
                      framework::product(dim) * sizeof(T));

    paddle::framework::Tensor temp_tensor(
        framework::ToDataType(std::type_index(typeid(T))));
    temp_tensor.Resize(dim);
    temp_tensor.ResetHolder(std::move(shared_allocation));
    return temp_tensor;
  }

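  // Illustrative use of AllocateTmpTensor from inside a kernel (a sketch;
  // `ctx` is the ExecutionContext, `dims` an assumed DDim, and `dev_ctx` a
  // device context already in scope):
  //   Tensor tmp = ctx.AllocateTmpTensor<float, platform::CPUDeviceContext>(
  //       dims, dev_ctx);
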
  template <typename T>
  T& GetKernelConfig(size_t idx) const {
    PADDLE_ENFORCE(
        kernel_configs_ && kernel_configs_->size() > static_cast<size_t>(idx),
        "%s selected kernel doesn't have kernel config %lu <= %lu",
        op_.Type().c_str(), kernel_configs_->size(), idx);
    return *boost::get<std::shared_ptr<T>>((*kernel_configs_)[idx]);
  }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
  const platform::DeviceContext& device_context_;
  const RuntimeContext& ctx_;
  mutable std::vector<KernelConfig>* kernel_configs_;
};

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const;

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const;

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const;

class OpKernelBase {
 public:
  /**
   * ExecutionContext is the only parameter of the kernel's Compute function.
   * Compute will get input/output variables, state such as momentum and
   * device resources such as CUDA stream, cublas handle, etc. from
   * ExecutionContext. User should construct it before running the Operator.
   */

  virtual void Compute(const ExecutionContext& context) const = 0;

  virtual ~OpKernelBase() = default;
};

template <typename T>
class OpKernel : public OpKernelBase {
 public:
  using ELEMENT_TYPE = T;
};

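// A minimal kernel sketch (illustrative only; ScaleKernelSketch and its
// "X"/"Out"/"scale" argument and attribute names are hypothetical):
//
//   template <typename T>
//   class ScaleKernelSketch : public OpKernel<T> {
//    public:
//     void Compute(const ExecutionContext& ctx) const override {
//       auto* x = ctx.Input<Tensor>("X");    // may be nullptr if optional
//       auto* out = ctx.Output<Tensor>("Out");
//       float scale = ctx.Attr<float>("scale");
//       // ... fill `out` with `scale * x` on ctx.GetPlace() ...
//     }
//   };
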
class OperatorWithKernel : public OperatorBase {
 public:
  using OpKernelFunc = std::function<void(const ExecutionContext&)>;
  using OpKernelMap =
      std::unordered_map<OpKernelType, OpKernelFunc, OpKernelType::Hash>;

  OperatorWithKernel(const std::string& type, const VariableNameMap& inputs,
                     const VariableNameMap& outputs, const AttributeMap& attrs)
      : OperatorBase(type, inputs, outputs, attrs) {}

  static std::unordered_map<std::string /* op_type */, OpKernelMap>&
  AllOpKernels() {
    static std::unordered_map<std::string, OpKernelMap> g_all_op_kernels;
    return g_all_op_kernels;
  }

  bool SupportGPU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_gpu_place(kern_pair.first.place_);
                       });
  }

  virtual void InferShape(InferShapeContext* ctx) const {
    Info().infer_shape_(ctx);
  }

  void RuntimeInferShape(const Scope& scope, const platform::Place& place,
                         const RuntimeContext& ctx) const override;

  virtual OpKernelType GetExpectedKernelType(const ExecutionContext& ctx) const;

  std::vector<KernelConfig>* GetKernelConfig(const OpKernelType& key) const;

 protected:
  virtual OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const OpKernelType& expected_kernel_type) const;

 private:
  // Indicate the kernel DataType by input data. By default all input data
  // must have the same type.
  proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
  void RunImpl(const Scope& scope, const platform::Place& place) const final;
  void RunImpl(const Scope& scope, const platform::Place& place,
               RuntimeContext* runtime_ctx) const;

  /**
   * Transfer data from scope to a transferred scope. If there is no data that
   * needs to be transferred, it returns nullptr.
   *
   * transfered_inplace_vars is an output vector.
   */
  Scope* PrepareData(const Scope& scope,
                     const OpKernelType& expected_kernel_key,
                     std::vector<std::string>* transfered_inplace_vars,
                     RuntimeContext* ctx) const;

  void TransferInplaceVarsBack(const Scope& scope,
                               const std::vector<std::string>& inplace_vars,
                               const Scope& exec_scope) const;

  void ChooseKernel(const RuntimeContext& ctx, const Scope& scope,
                    const platform::Place& place) const;

 protected:
  mutable OpKernelConfigsMap kernel_configs_map_;
  mutable std::unique_ptr<OpKernelType> kernel_type_;
  mutable std::unique_ptr<OpKernelFunc> kernel_func_;
  mutable std::unique_ptr<RuntimeContext> runtime_ctx_;
  mutable const Scope* pre_scope_ = nullptr;
  mutable bool enable_cache_runtime_context = false;
  mutable bool all_kernels_must_compute_runtime_shape = false;
  mutable std::mutex cache_update_mutex_;
};

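// Kernels are looked up in AllOpKernels() by op type string and OpKernelType.
// Registration sketch (hedged; "scale_sketch" and ScaleKernelSketch are the
// hypothetical names from the OpKernel example above):
//   REGISTER_OP_CPU_KERNEL(scale_sketch, ScaleKernelSketch<float>,
//                          ScaleKernelSketch<double>);
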
extern bool OpSupportGPU(const std::string& op_type);

}  // namespace framework
}  // namespace paddle