/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <atomic>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "glog/logging.h"  // For VLOG
#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/unused_var_check.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/variant.h"
#include "paddle/utils/flat_hash_map.h"

#include "paddle/pten/include/core.h"

namespace paddle {
namespace framework {
class InferShapeContext;
class OpInfo;
class Scope;
class Variable;
}  // namespace framework
}  // namespace paddle

DECLARE_int32(inner_op_parallelism);

namespace paddle {
namespace framework {

/// If a variable is an empty variable, this name will be used.
constexpr char kEmptyVarName[] = "@EMPTY@";

/// If a variable is a temporary variable, its name will be set in Python,
/// but it will be converted to a unique name in the scope after OpCreator.
constexpr char kTempVarName[] = "@TEMP@";

/// If a variable's name has a certain suffix, it means that the
/// variable is the gradient of another variable.
/// e.g. Variable "x@GRAD" is the gradient of variable "x".
constexpr char kGradVarSuffix[] = "@GRAD";

constexpr size_t kGradVarSuffixSize = 5U;

/// Variables with this suffix are supposed to be filled up with zeros.
constexpr char kZeroVarSuffix[] = "@ZERO";

/// Variables with this suffix are the new gradients.
constexpr char kNewGradSuffix[] = "@NEWGRAD@";

/// RuntimeContext is used to relate the input/output names of an Operator
/// with the corresponding variables in the name scope.
/// If an Op has the attribute kEnableCacheRuntimeContext, then within a given
/// name scope, since the input/output names of this Op do not change during
/// execution, the RuntimeContext can be created only at the first iteration
/// of this Op's execution to save elapsed time.
constexpr char kEnableCacheRuntimeContext[] = "@ENABLE_CACHE_RUNTIME_CONTEXT@";

/// If an Op has this attribute, all its kernels should calculate the output
/// variables' shapes in the corresponding Compute() function, and
/// OperatorWithKernel::RunImpl() will skip calling this Op's InferShape()
/// at runtime for speedup.
/// TODO(luotao): Note that this temporary attribute will be deleted after all
/// ops contain it.
constexpr char kAllKernelsMustComputeRuntimeShape[] =
    "@ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE@";

// Kernel priority: defines the fallback order among multiple kernel types.
extern std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;

inline std::string GradVarName(const std::string& var_name) {
  std::string result;
  result.reserve(var_name.size() + kGradVarSuffixSize);
  result += var_name;
  result += kGradVarSuffix;
  return result;
}

inline std::string GradOriginalVarName(const std::string& grad_var_name) {
  std::size_t pos = grad_var_name.rfind(kGradVarSuffix);
  if (pos == std::string::npos) {
    return grad_var_name;
  } else {
    return grad_var_name.substr(0, pos);
  }
}
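
// Illustrative examples (following the helpers above):
//   GradVarName("x")              -> "x@GRAD"
//   GradOriginalVarName("x@GRAD") -> "x"
//   GradOriginalVarName("x")      -> "x"  (no suffix, returned unchanged)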

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var);
Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var);

class ExecutionContext;
class OperatorBase;

class RuntimeContext {
 public:
  RuntimeContext(const VariableNameMap& innames,
                 const VariableNameMap& outnames, const Scope& scope);

  RuntimeContext(const VariableValueMap& invars,
                 const VariableValueMap& outvars)
      : inputs(invars), outputs(outvars) {}

  VariableValueMap inputs;
  VariableValueMap outputs;
};

/**
 * OperatorBase has the basic elements that Net will call to do computation.
 * Only OpRegistry's CreateOp will new an Operator directly. Users
 * should always construct a proto message OpDesc and call
 * OpRegistry::CreateOp(op_desc) to get an Operator instance.
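 *
 * A minimal usage sketch (assuming an OpDesc `op_desc` built by the caller):
 *
 *   auto op = OpRegistry::CreateOp(op_desc);
 *   op->Run(scope, place);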
 */
class OperatorBase {
 public:
  OperatorBase(const std::string& type, const VariableNameMap& inputs,
               const VariableNameMap& outputs, const AttributeMap& attrs);

  virtual ~OperatorBase() {}

  /// Executor will call this interface function to Run an op.
  /// The implementation should be written in RunImpl.
  void Run(const Scope& scope, const platform::Place& place);

  // FIXME(typhoonzero): this is only used for recv_op to stop event_loop.
  virtual void Stop() {}

  /// If scope is not null, also show the dimensions of the arguments.
  virtual std::string DebugStringEx(const ScopeBase* scope) const;
  std::string DebugString() const { return DebugStringEx(nullptr); }

  virtual bool SupportGPU() const { return false; }
  virtual bool SupportNPU() const { return false; }

  const std::string& Type() const { return type_; }

  bool HasAttr(const std::string& name) const { return attrs_.count(name); }
  template <typename T>
  inline const T& Attr(const std::string& name) const {
    PADDLE_ENFORCE_NE(
        attrs_.find(name), attrs_.end(),
        platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
    return BOOST_GET_CONST(T, attrs_.at(name));
  }
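  // Illustrative usage (hypothetical attribute name):
  //   int axis = op.Attr<int>("axis");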
  void SetAttr(const std::string& name, const Attribute& v) {
    PADDLE_ENFORCE_EQ(
        HasAttr(name), true,
        platform::errors::NotFound(
            "The attribute %s is not found in operator %s", name, Type()));

    attrs_[name] = v;
  }
  const AttributeMap& Attrs() const { return attrs_; }

  const VariableNameMap& Inputs() const { return inputs_; }
  const VariableNameMap& Outputs() const { return outputs_; }

  const OpInfo& Info() const {
    PADDLE_ENFORCE_NOT_NULL(
        info_, platform::errors::NotFound(
                   "OpInfo of operator (%s) is not found.", type_));
    return *info_;
  }

  bool HasInputs(const std::string& name) const;
  //! Get an input with the argument name described in `op_proto`.
  std::string Input(const std::string& name) const;
  //! Get an input which has multiple variables.
  const std::vector<std::string>& Inputs(const std::string& name) const;
  //! Get all input variable names.
  std::vector<std::string> InputVars() const;

  bool HasOutputs(const std::string& name) const;
  //! Get an output with the argument name described in `op_proto`.
  std::string Output(const std::string& name) const;
  //! Get an output which has multiple variables.
  //! TODO: add a vector_view to prevent memory copies.
  const std::vector<std::string>& Outputs(const std::string& name) const;
  //! Get all output variable names.
  virtual std::vector<std::string> OutputVars(bool has_intermediate) const;

  void SetIsCalledByExecutor(bool x) { run_by_executor_ = x; }

  virtual void RuntimeInferShape(const Scope& scope,
                                 const platform::Place& place,
                                 const RuntimeContext& ctx) const {}

  virtual platform::Place GetExecutionPlace(
      const platform::Place& place) const {
    return place;
  }

 protected:
  std::string type_;
  // NOTE: in case of OpGrad, inputs_ contains:
  // I (Inputs)
  // O (Outputs)
  // OG (Output Gradients)
  VariableNameMap inputs_;

  // NOTE: in case of OpGrad, outputs_ contains
  // IG (Inputs Gradients)
  VariableNameMap outputs_;
  AttributeMap attrs_;

  // OpInfo
  const OpInfo* info_;

  // Whether this operator executes in an Executor.
  bool run_by_executor_{true};

 private:
  void GenerateTemporaryNames();
  void CheckAllInputOutputSet() const;
  virtual void RunImpl(const Scope& scope,
                       const platform::Place& place) const = 0;
};

class ExecutionContext {
 public:
  ExecutionContext(const OperatorBase& op, const Scope& scope,
                   const platform::DeviceContext& device_context,
                   const RuntimeContext& ctx)
      : op_(op), scope_(scope), device_context_(device_context), ctx_(ctx) {}
  virtual ~ExecutionContext() {}

  virtual std::string InputName(const std::string& name) const {
    return op_.Input(name);
  }
  virtual std::vector<std::string> InputNames(const std::string& name) const {
    return op_.Inputs(name);
  }
  virtual std::string OutputName(const std::string& name) const {
    return op_.Output(name);
  }

  virtual std::vector<std::string> OutputNames(const std::string& name) const {
    return op_.Outputs(name);
  }

  virtual bool HasAttr(const std::string& name) const {
    return op_.HasAttr(name);
  }
  virtual const AttributeMap& Attrs() const { return op_.Attrs(); }

  const std::string& Type() const { return op_.Type(); }

  const Scope& scope() const { return scope_; }

  template <typename T>
  inline const T& Attr(const std::string& name) const {
    return BOOST_GET_CONST(T, GetAttr(name));
  }

  virtual const Attribute& GetAttr(const std::string& name) const {
    return op_.Attrs().at(name);
  }

  virtual bool HasInput(const std::string& name) const;

  virtual bool HasOutput(const std::string& name) const;

  virtual size_t InputSize(const std::string& name) const {
    return op_.Inputs(name).size();
  }

  virtual size_t OutputSize(const std::string& name) const {
    return op_.Outputs(name).size();
  }

  virtual const Variable* InputVar(const std::string& name) const;

  virtual Variable* OutputVar(const std::string& name) const;

  virtual const std::vector<Variable*> MultiInputVar(
      const std::string& name) const {
    LogVarUsageIfUnusedVarCheckEnabled(name);

    auto it = ctx_.inputs.find(name);
    if (it == ctx_.inputs.end()) {
      return {};
    }
    return {it->second.begin(), it->second.end()};
  }

  virtual std::vector<Variable*> MultiOutputVar(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    if (it == ctx_.outputs.end()) {
      return {};
    }
    return it->second;
  }

  virtual std::vector<std::string> InNameList() const {
    std::vector<std::string> vec_temp;
    vec_temp.reserve(ctx_.inputs.size());

    for (auto& input : ctx_.inputs) {
      vec_temp.push_back(input.first);
    }

    return vec_temp;
  }

  template <typename T>
  const T* Input(const std::string& name) const {
    auto* var = InputVar(name);
    return var == nullptr ? nullptr : &var->Get<T>();
  }

  template <typename T>
  T* Output(const std::string& name) const {
    auto var = OutputVar(name);
    return var == nullptr ? nullptr : var->GetMutable<T>();
  }

  template <typename T>
  const std::vector<const T*> MultiInput(const std::string& name) const {
    LogVarUsageIfUnusedVarCheckEnabled(name);

    auto vars = MultiInputVar(name);
    if (vars.size() == 0) {
      return {};
    }
    std::vector<const T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](const Variable* var) -> const T* {
                     return var == nullptr ? nullptr : &var->Get<T>();
                   });
    return res;
  }

  template <typename T>
  std::vector<T*> MultiOutput(const std::string& name) const {
    auto vars = MultiOutputVar(name);

    if (vars.size() == 0) {
X
Xin Pan 已提交
367 368
      return {};
    }

    std::vector<T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](Variable* var) -> T* {
                     return var == nullptr ? nullptr : var->GetMutable<T>();
                   });

    return res;
  }

  platform::Place GetPlace() const { return device_context_.GetPlace(); }

  template <typename DeviceContextType>
  const DeviceContextType& device_context() const {
    return *reinterpret_cast<const DeviceContextType*>(&device_context_);
  }

  const platform::DeviceContext& device_context() const {
    return device_context_;
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  const inline platform::CUDADeviceContext& cuda_device_context() const {
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true,
                      platform::errors::PreconditionNotMet(
                          "Current device context place is not GPUPlace."));
    return *reinterpret_cast<const platform::CUDADeviceContext*>(
        &device_context_);
  }
#endif

  template <typename T, typename DevContext>
  Tensor AllocateTmpTensor(const framework::DDim& dim,
                           const DevContext& dev_ctx) const {
    auto tmp_allocation_ptr = memory::Alloc(dev_ctx, product(dim) * sizeof(T));
    auto& deleter = tmp_allocation_ptr.get_deleter();
    auto* allocation_ptr = tmp_allocation_ptr.release();
    auto shared_allocation = std::shared_ptr<memory::allocation::Allocation>(
        allocation_ptr, deleter);

    PADDLE_ENFORCE_GE(
        allocation_ptr->size(), framework::product(dim) * sizeof(T),
        platform::errors::PreconditionNotMet(
            "The data memory size(%d) is less than the tensor needed memory "
            "size(%d).",
            allocation_ptr->size(), framework::product(dim) * sizeof(T)));

    paddle::framework::Tensor temp_tensor(
        framework::ToDataType(std::type_index(typeid(T))));
    temp_tensor.Resize(dim);
    temp_tensor.ResetHolder(std::move(shared_allocation));
    return temp_tensor;
  }
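
  // Illustrative usage inside a kernel (hypothetical shape and device):
  //   auto tmp = ctx.AllocateTmpTensor<float, platform::CUDADeviceContext>(
  //       framework::make_ddim({batch, hidden}), dev_ctx);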

  const RuntimeContext Context() const { return ctx_; }

  std::string DebugString() const { return op_.DebugString(); }
  const OperatorBase& GetOp() const { return op_; }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
  const platform::DeviceContext& device_context_;
  const RuntimeContext& ctx_;
};

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const;

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const;

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const;

class OpKernelBase {
 public:
  /**
   * ExecutionContext is the only parameter of the kernel's Compute function.
   * Compute gets input/output variables, state such as momentum, and
   * device resources such as the CUDA stream and cuBLAS handle from the
   * ExecutionContext. Users should construct it before running the Operator.
   */

  virtual void Compute(const ExecutionContext& context) const = 0;

  virtual ~OpKernelBase() = default;
};

template <typename T>
class OpKernel : public OpKernelBase {
 public:
  using ELEMENT_TYPE = T;
};
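
// A minimal illustrative sketch (hypothetical "MyRelu" kernel, not part of
// this header): concrete kernels derive from OpKernel<T> and read their
// arguments through the ExecutionContext, e.g.
//
//   template <typename T>
//   class MyReluKernel : public OpKernel<T> {
//    public:
//     void Compute(const ExecutionContext& ctx) const override {
//       auto* x = ctx.Input<Tensor>("X");
//       auto* out = ctx.Output<Tensor>("Out");
//       // ... compute out from x on ctx.GetPlace() ...
//     }
//   };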

class OperatorWithKernel : public OperatorBase {
 public:
  using OpKernelFunc = std::function<void(const ExecutionContext&)>;
  using OpKernelMap =
      std::unordered_map<OpKernelType, OpKernelFunc, OpKernelType::Hash>;

  OperatorWithKernel(const std::string& type, const VariableNameMap& inputs,
                     const VariableNameMap& outputs, const AttributeMap& attrs)
      : OperatorBase(type, inputs, outputs, attrs) {}

  static paddle::flat_hash_map<std::string /* op_type */, OpKernelMap>&
  AllOpKernels() {
    static paddle::flat_hash_map<std::string, OpKernelMap> g_all_op_kernels;
    return g_all_op_kernels;
  }

  bool IsMKLDNNType() const {
    return ((this->kernel_type_) && (this->kernel_type_->data_layout_ ==
                                     framework::DataLayout::kMKLDNN));
  }

  bool SupportGPU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_gpu_place(kern_pair.first.place_);
                       });
  }
  bool SupportNPU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_npu_place(kern_pair.first.place_);
                       });
  }
  bool SupportsMKLDNN(proto::VarType::Type data_type) const;

  bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
                       proto::VarType::Type data_type) const;

  virtual void InferShape(InferShapeContext* ctx) const = 0;

  void RuntimeInferShape(const Scope& scope, const platform::Place& place,
                         const RuntimeContext& ctx) const override;

  proto::VarType::Type IndicateVarDataType(const ExecutionContext& ctx,
                                           const std::string& name) const;

  proto::VarType::Type IndicateOrPromoteVarDataTypes(
      const ExecutionContext& ctx, const std::string& name1,
      const std::string& name2) const;

  virtual OpKernelType GetExpectedKernelType(const ExecutionContext& ctx) const;

  // Changed to public so that in dygraph mode we can call it to check whether
  // we need to transform data.
  virtual OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const OpKernelType& expected_kernel_type) const;

  platform::Place GetExecutionPlace(
      const platform::Place& platform) const override {
    return kernel_type_->place_;
  }

  /* member functions for adapting to pten lib */
  /** In the Tensor calculation library, the new Kernel adopts a clearer and
    * more streamlined design. The arguments of the Kernel and the input and
    * output arguments registered in the original OpMaker do not match in some
    * cases, so we use a map to record the arguments required by the kernel.
    * When selecting the Kernel during Op execution, the arguments of the
    * original Op are selected according to those returned by
    * GetExpectedPtenKernelArgs.
    */
  virtual KernelSignature GetExpectedPtenKernelArgs(
      const ExecutionContext& ctx) const;
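
  // A hedged sketch (hypothetical "my_scale" op, assuming KernelSignature can
  // be built from a kernel name plus input/attribute/output name lists): an
  // override maps the names registered in the OpMaker onto the new kernel's
  // argument sets, e.g.
  //
  //   KernelSignature GetExpectedPtenKernelArgs(
  //       const ExecutionContext& ctx) const override {
  //     return KernelSignature("my_scale", {"X"}, {"scale"}, {"Out"});
  //   }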

 private:
  void RunImpl(const Scope& scope, const platform::Place& place) const final;
  void RunImpl(const Scope& scope, const platform::Place& place,
               RuntimeContext* runtime_ctx) const;

  /**
   * Transfer data from the scope to a transferred scope. If no data needs to
   * be transferred, it returns nullptr.
   *
   * transfered_inplace_vars is an output vector.
   */
  Scope* PrepareData(const Scope& scope,
                     const OpKernelType& expected_kernel_key,
                     std::vector<std::string>* transfered_inplace_vars,
                     RuntimeContext* ctx) const;

  void TransferInplaceVarsBack(const Scope& scope,
                               const std::vector<std::string>& inplace_vars,
                               const Scope& exec_scope) const;

  OpKernelType InnerGetExpectedKernelType(const ExecutionContext& ctx) const;

  void ChooseKernel(const ExecutionContext& ctx) const;

  void HandleComplexGradToRealGrad(const Scope& scope,
                                   RuntimeContext* ctx) const;

  /* Inner assist methods */
  // Indicate the kernel DataType by input data.
  // By default, all input data must have the same type.
  proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
  // used for IndicateDataType
  void ParseInputDataType(const std::vector<Variable*>& vars,
                          const std::string& name,
                          proto::VarType::Type* data_type) const;
  // used for IndicateOrPromoteVarDataTypes
  Tensor* GetTensorFormInputSafely(const ExecutionContext& ctx,
                                   const std::string& name) const;

  /* member functions for adapting to pten lib */
  void ChoosePtenKernel(const ExecutionContext& ctx) const;

  void BuildPtenKernelContext(const RuntimeContext& ctx,
                              platform::DeviceContext* dev_ctx) const;

  void WriteBackToOutputs(RuntimeContext* ctx) const;

 protected:
  mutable std::unique_ptr<OpKernelType> kernel_type_;
  mutable std::unique_ptr<OpKernelFunc> kernel_func_;
  mutable std::unique_ptr<RuntimeContext> runtime_ctx_;
  mutable const Scope* pre_scope_ = nullptr;
  mutable bool need_prepare_data_ = true;
  mutable bool enable_cache_runtime_context_ = false;
  mutable bool all_kernels_must_compute_runtime_shape_ = false;
  mutable std::mutex cache_update_mutex_;
  mutable bool enable_cache_transfer_scope_ = false;
  // NOTE(chenweihang): The members below are used to adapt to the new pten
  // kernel; if there is a better design in the future, we may polish the
  // implementation here.
  mutable bool run_pten_kernel_ = false;
  mutable std::unique_ptr<KernelSignature> pt_kernel_signature_;
  mutable std::unique_ptr<pten::Kernel> pt_kernel_;
  // In order to reduce the performance overhead of the compatibility phase,
  // temporarily cache the KernelContext.
  mutable std::unique_ptr<pten::KernelContext> pt_kernel_context_;
};

extern bool OpSupportGPU(const std::string& op_type);

}  // namespace framework
}  // namespace paddle