/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <atomic>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "glog/logging.h"  // For VLOG
#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/pten_utils.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/selected_rows.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/unused_var_check.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device_context.h"
#include "paddle/fluid/platform/variant.h"
#include "paddle/utils/flat_hash_map.h"

#include "paddle/pten/core/arg_map_context.h"
#include "paddle/pten/include/core.h"

namespace paddle {
namespace framework {
class InferShapeContext;
class OpInfo;
class Scope;
class Variable;
}  // namespace framework
}  // namespace paddle

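// gflags declaration; the corresponding FLAGS_inner_op_parallelism flag is
// defined elsewhere and read by operators that support intra-op parallelism.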
DECLARE_int32(inner_op_parallelism);

namespace paddle {
namespace framework {

/// If a variable is an empty variable, that name will be used.
constexpr char kEmptyVarName[] = "@EMPTY@";

/// If a variable is a temporary variable, that name will be set in Python,
/// but it will be converted to a unique name in the scope after OpCreator.
constexpr char kTempVarName[] = "@TEMP@";

/// If a variable's name has a certain suffix, it means that the
/// variable is the gradient of another variable.
/// e.g. Variable "x@GRAD" is the gradient of variable "x".
constexpr char kGradVarSuffix[] = "@GRAD";

constexpr size_t kGradVarSuffixSize = 5U;

/// Variables with this suffix are supposed to be filled up with zeros.
constexpr char kZeroVarSuffix[] = "@ZERO";

/// Variables with this suffix are the new Gradient.
constexpr char kNewGradSuffix[] = "@NEWGRAD@";

/// RuntimeContext is used to relate the input/output names of an Operator
/// with the corresponding variables in the name scope.
/// If an Op has the attribute kEnableCacheRuntimeContext, its input/output
/// names do not change while executing in the same name scope, so the
/// RuntimeContext can be created only at the first iteration of the Op's
/// execution to save the elapsed time.
constexpr char kEnableCacheRuntimeContext[] = "@ENABLE_CACHE_RUNTIME_CONTEXT@";

/// If an Op has this attribute, all its kernels should calculate the output
/// variables' shapes in the corresponding Compute() function, and
/// OperatorWithKernel::RunImpl() will skip calling this Op's InferShape()
/// at runtime for speedup.
/// TODO(luotao): Note that this temporary attribute will be removed once all
/// ops contain it.
constexpr char kAllKernelsMustComputeRuntimeShape[] =
    "@ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE@";

// Define the kernel priority: the fallback order among multiple kernel types.
extern std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;

inline std::string GradVarName(const std::string& var_name) {
  std::string result;
  result.reserve(var_name.size() + kGradVarSuffixSize);
  result += var_name;
  result += kGradVarSuffix;
  return result;
}

inline std::string GradOriginalVarName(const std::string& grad_var_name) {
  std::size_t pos = grad_var_name.rfind(kGradVarSuffix);
  if (pos == std::string::npos) {
    return grad_var_name;
  } else {
    return grad_var_name.substr(0, pos);
  }
}
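
// Illustrative behavior, following directly from the definitions above:
//   GradVarName("w")              -> "w@GRAD"
//   GradOriginalVarName("w@GRAD") -> "w"
//   GradOriginalVarName("w")      -> "w"  (no suffix, returned unchanged)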

inline bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var);
Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var);

class ExecutionContext;
class OperatorBase;

class RuntimeContext {
 public:
  RuntimeContext(const VariableNameMap& innames,
                 const VariableNameMap& outnames, const Scope& scope);

  RuntimeContext(const VariableValueMap& invars,
                 const VariableValueMap& outvars)
      : inputs(invars), outputs(outvars) {}

  VariableValueMap inputs;
  VariableValueMap outputs;
};

/**
 * OperatorBase has the basic elements that Net will call to do computation.
 * Only CreateOperator from OpRegistry constructs an Operator directly. Users
 * should always build a proto message OpDesc and call
 * OpRegistry::CreateOp(op_desc) to get an Operator instance.
 */
class OperatorBase {
 public:
  OperatorBase(const std::string& type, const VariableNameMap& inputs,
               const VariableNameMap& outputs, const AttributeMap& attrs);

  virtual ~OperatorBase() {}

  /// The Executor calls this interface function to Run an op.
  /// The implementation should be written in RunImpl.
  void Run(const Scope& scope, const platform::Place& place);

  // FIXME(typhoonzero): this is only used for recv_op to stop event_loop.
  virtual void Stop() {}

  /// If scope is not null, also show the dimensions of arguments.
  virtual std::string DebugStringEx(const ScopeBase* scope) const;
  std::string DebugString() const { return DebugStringEx(nullptr); }

  virtual bool SupportGPU() const { return false; }
  virtual bool SupportNPU() const { return false; }
  virtual bool SupportMLU() const { return false; }

  const std::string& Type() const { return type_; }

  bool HasAttr(const std::string& name) const { return attrs_.count(name); }
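
  // Example use of Attr<T> (the attribute name here is hypothetical):
  //   bool use_mkldnn = op.Attr<bool>("use_mkldnn");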
  template <typename T>
  inline const T& Attr(const std::string& name) const {
    PADDLE_ENFORCE_NE(
        attrs_.find(name), attrs_.end(),
        platform::errors::NotFound("(%s) is not found in AttributeMap.", name));
    return BOOST_GET_CONST(T, attrs_.at(name));
  }
  void SetAttr(const std::string& name, const Attribute& v) {
    PADDLE_ENFORCE_EQ(
        HasAttr(name), true,
        platform::errors::NotFound(
            "The attribute %s is not found in operator %s", name, Type()));

    attrs_[name] = v;
  }
  const AttributeMap& Attrs() const { return attrs_; }

  const VariableNameMap& Inputs() const { return inputs_; }
  const VariableNameMap& Outputs() const { return outputs_; }

  const OpInfo& Info() const {
    PADDLE_ENFORCE_NOT_NULL(
        info_, platform::errors::NotFound(
                   "OpInfo of operator (%s) is not found.", type_));
    return *info_;
  }

  bool HasInputs(const std::string& name) const;
  //! Get an input with the argument name described in `op_proto`.
  std::string Input(const std::string& name) const;
  //! Get an input which has multiple variables.
  const std::vector<std::string>& Inputs(const std::string& name) const;
  //! Get all input variable names.
  std::vector<std::string> InputVars() const;

  bool HasOutputs(const std::string& name) const;
  //! Get an output with the argument name described in `op_proto`.
  std::string Output(const std::string& name) const;
  //! Get an output which has multiple variables.
  //! TODO: add a vector_view to prevent a memory copy.
  const std::vector<std::string>& Outputs(const std::string& name) const;
  //! Get all output variable names.
  virtual std::vector<std::string> OutputVars(bool has_intermediate) const;

  void SetIsCalledByExecutor(bool x) { run_by_executor_ = x; }

  virtual void RuntimeInferShape(const Scope& scope,
                                 const platform::Place& place,
                                 const RuntimeContext& ctx) const {}

  virtual platform::Place GetExecutionPlace(
      const platform::Place& place) const {
    return place;
  }

 protected:
  std::string type_;
  // NOTE: in case of OpGrad, inputs_ contains:
  // I (Inputs)
  // O (Outputs)
  // OG (Output Gradients)
  VariableNameMap inputs_;

  // NOTE: in case of OpGrad, outputs_ contains
  // IG (Inputs Gradients)
  VariableNameMap outputs_;
  AttributeMap attrs_;

  // OpInfo
  const OpInfo* info_;

  // Whether this operator executes in an Executor.
  bool run_by_executor_{true};

 private:
  void GenerateTemporaryNames();
  void CheckAllInputOutputSet() const;
  virtual void RunImpl(const Scope& scope,
                       const platform::Place& place) const = 0;
};

class ExecutionContext {
 public:
  ExecutionContext(const OperatorBase& op, const Scope& scope,
                   const platform::DeviceContext& device_context,
                   const RuntimeContext& ctx)
      : op_(op), scope_(scope), device_context_(device_context), ctx_(ctx) {}
  virtual ~ExecutionContext() {}

  virtual std::string InputName(const std::string& name) const {
    return op_.Input(name);
  }
  virtual std::vector<std::string> InputNames(const std::string& name) const {
    return op_.Inputs(name);
  }
  virtual std::string OutputName(const std::string& name) const {
    return op_.Output(name);
  }

  virtual std::vector<std::string> OutputNames(const std::string& name) const {
    return op_.Outputs(name);
  }

  virtual bool HasAttr(const std::string& name) const {
    return op_.HasAttr(name);
  }
  virtual const AttributeMap& Attrs() const { return op_.Attrs(); }

  const std::string& Type() const { return op_.Type(); }

  const Scope& scope() const { return scope_; }

  template <typename T>
  inline const T& Attr(const std::string& name) const {
    return BOOST_GET_CONST(T, GetAttr(name));
  }

  virtual const Attribute& GetAttr(const std::string& name) const {
    return op_.Attrs().at(name);
  }

  virtual bool HasInput(const std::string& name) const;

  virtual bool HasOutput(const std::string& name) const;

  virtual size_t InputSize(const std::string& name) const {
    return op_.Inputs(name).size();
  }

  virtual size_t OutputSize(const std::string& name) const {
    return op_.Outputs(name).size();
  }

  virtual const Variable* InputVar(const std::string& name) const;

  virtual Variable* OutputVar(const std::string& name) const;

  virtual const std::vector<Variable*> MultiInputVar(
      const std::string& name) const {
    LogVarUsageIfUnusedVarCheckEnabled(name);

    auto it = ctx_.inputs.find(name);
    if (it == ctx_.inputs.end()) {
      return {};
    }
    return {it->second.begin(), it->second.end()};
  }

  virtual std::vector<Variable*> MultiOutputVar(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    if (it == ctx_.outputs.end()) {
      return {};
    }
    return it->second;
  }

  virtual std::vector<std::string> InNameList() const {
    std::vector<std::string> vec_temp;
    vec_temp.reserve(ctx_.inputs.size());

    for (auto& input : ctx_.inputs) {
      vec_temp.push_back(input.first);
    }

    return vec_temp;
  }

  template <typename T>
  const T* Input(const std::string& name) const {
    auto* var = InputVar(name);
    return var == nullptr ? nullptr : &var->Get<T>();
  }

  template <typename T>
  T* Output(const std::string& name) const {
    auto var = OutputVar(name);
    return var == nullptr ? nullptr : var->GetMutable<T>();
  }

  template <typename T>
  const std::vector<const T*> MultiInput(const std::string& name) const {
    LogVarUsageIfUnusedVarCheckEnabled(name);

    auto vars = MultiInputVar(name);
    if (vars.size() == 0) {
      return {};
    }
    std::vector<const T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](const Variable* var) -> const T* {
                     return var == nullptr ? nullptr : &var->Get<T>();
                   });
    return res;
  }

  template <typename T>
  std::vector<T*> MultiOutput(const std::string& name) const {
    auto vars = MultiOutputVar(name);

    if (vars.size() == 0) {
      return {};
    }

    std::vector<T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(), vars.end(), std::back_inserter(res),
                   [&](Variable* var) -> T* {
                     return var == nullptr ? nullptr : var->GetMutable<T>();
                   });

    return res;
  }
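
  // Typical kernel-side usage, as a sketch ("X" and "Out" are example
  // argument names from an op definition, not names defined here):
  //   auto* x = ctx.Input<Tensor>("X");
  //   auto* out = ctx.Output<Tensor>("Out");
  //   auto xs = ctx.MultiInput<Tensor>("X");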

  platform::Place GetPlace() const { return device_context_.GetPlace(); }

  template <typename DeviceContextType>
  const DeviceContextType& device_context() const {
    return *reinterpret_cast<const DeviceContextType*>(&device_context_);
  }

  const platform::DeviceContext& device_context() const {
    return device_context_;
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  const inline platform::CUDADeviceContext& cuda_device_context() const {
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()), true,
                      platform::errors::PreconditionNotMet(
                          "Current device context place is not GPUPlace."));
    return *reinterpret_cast<const platform::CUDADeviceContext*>(
        &device_context_);
  }
#endif

  template <typename T, typename DevContext>
  Tensor AllocateTmpTensor(const framework::DDim& dim,
                           const DevContext& dev_ctx) const {
    auto tmp_allocation_ptr = memory::Alloc(dev_ctx, product(dim) * sizeof(T));
    auto& deleter = tmp_allocation_ptr.get_deleter();
    auto* allocation_ptr = tmp_allocation_ptr.release();
    auto shared_allocation =
        std::shared_ptr<pten::Allocation>(allocation_ptr, deleter);

    PADDLE_ENFORCE_GE(
        allocation_ptr->size(), framework::product(dim) * sizeof(T),
        platform::errors::PreconditionNotMet(
            "The data memory size(%d) is less than the tensor needed memory "
            "size(%d).",
            allocation_ptr->size(), framework::product(dim) * sizeof(T)));

    paddle::framework::Tensor temp_tensor(
        framework::ToDataType(std::type_index(typeid(T))));
    temp_tensor.Resize(dim);
    temp_tensor.ResetHolder(std::move(shared_allocation));
    return temp_tensor;
  }
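
  // Usage sketch (element type, dimensions, and device context type are
  // illustrative):
  //   Tensor tmp = ctx.AllocateTmpTensor<float, platform::CPUDeviceContext>(
  //       framework::make_ddim({m, n}), dev_ctx);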

  const RuntimeContext Context() const { return ctx_; }

  std::string DebugString() const { return op_.DebugString(); }
  const OperatorBase& GetOp() const { return op_; }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
  const platform::DeviceContext& device_context_;
  const RuntimeContext& ctx_;
};

// TODO(chenweihang): split impl based OpProto or Dygraph if needed
class ExecutionArgumentMappingContext : public pten::ArgumentMappingContext {
 public:
  explicit ExecutionArgumentMappingContext(const ExecutionContext& ctx)
      : ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    return ctx_.HasInput(name);
  }

  bool HasOutput(const std::string& name) const override {
    return ctx_.HasOutput(name);
  }

  bool HasAttr(const std::string& name) const override {
    return ctx_.HasAttr(name);
  }

  size_t InputSize(const std::string& name) const override {
    return ctx_.InputSize(name);
  }

  size_t OutputSize(const std::string& name) const override {
    return ctx_.OutputSize(name);
  }

  bool IsDenseTensorInput(const std::string& name) const override {
    return ctx_.InputVar(name)->IsType<framework::Tensor>() ||
           ctx_.InputVar(name)->IsType<framework::LoDTensor>();
  }

  bool IsSelectedRowsInput(const std::string& name) const override {
    return ctx_.InputVar(name)->IsType<framework::SelectedRows>();
  }

 private:
  const ExecutionContext& ctx_;
};

template <>
const Tensor* ExecutionContext::Input<Tensor>(const std::string& name) const;

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const;

template <>
Tensor* ExecutionContext::Output<Tensor>(const std::string& name) const;

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const;

class OpKernelBase {
 public:
  /**
   * ExecutionContext is the only parameter of a Kernel's Compute() function.
   * Compute will get input/output variables, state such as momentum, and
   * device resources such as the CUDA stream and cuBLAS handle from the
   * ExecutionContext. Users should construct it before running the Operator.
   */

  virtual void Compute(const ExecutionContext& context) const = 0;

  virtual ~OpKernelBase() = default;
};

template <typename T>
class OpKernel : public OpKernelBase {
 public:
  using ELEMENT_TYPE = T;
};
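
// A concrete kernel derives from OpKernel<T> and overrides Compute().
// Minimal sketch (the op and its "X"/"Out" arguments are illustrative):
//
//   template <typename T>
//   class MyReluKernel : public OpKernel<T> {
//    public:
//     void Compute(const ExecutionContext& ctx) const override {
//       auto* x = ctx.Input<Tensor>("X");
//       auto* out = ctx.Output<Tensor>("Out");
//       out->mutable_data<T>(ctx.GetPlace());
//       // ... elementwise relu from x into out ...
//     }
//   };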

class OperatorWithKernel : public OperatorBase {
 public:
  using OpKernelFunc = std::function<void(const ExecutionContext&)>;
  using OpKernelMap =
      std::unordered_map<OpKernelType, OpKernelFunc, OpKernelType::Hash>;

  OperatorWithKernel(const std::string& type, const VariableNameMap& inputs,
                     const VariableNameMap& outputs, const AttributeMap& attrs)
      : OperatorBase(type, inputs, outputs, attrs) {}

  static paddle::flat_hash_map<std::string /* op_type */, OpKernelMap>&
  AllOpKernels() {
    static paddle::flat_hash_map<std::string, OpKernelMap> g_all_op_kernels;
    return g_all_op_kernels;
  }

  bool SupportGPU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_gpu_place(kern_pair.first.place_);
                       });
  }
  bool SupportNPU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_npu_place(kern_pair.first.place_);
                       });
  }
  bool SupportMLU() const override {
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(), op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_mlu_place(kern_pair.first.place_);
                       });
  }
  bool SupportsMKLDNN(proto::VarType::Type data_type) const;

  bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
                       proto::VarType::Type data_type) const;

  virtual void InferShape(InferShapeContext* ctx) const = 0;

  void RuntimeInferShape(const Scope& scope, const platform::Place& place,
                         const RuntimeContext& ctx) const override;

  proto::VarType::Type IndicateVarDataType(const ExecutionContext& ctx,
                                           const std::string& name) const;

  proto::VarType::Type IndicateOrPromoteVarDataTypes(
      const ExecutionContext& ctx, const std::string& name1,
      const std::string& name2) const;

  virtual OpKernelType GetExpectedKernelType(const ExecutionContext& ctx) const;

  // Changed to public so that in dygraph mode we can call it to check
  // whether we need to transform data.
  virtual OpKernelType GetKernelTypeForVar(
      const std::string& var_name, const Tensor& tensor,
      const OpKernelType& expected_kernel_type) const;

  platform::Place GetExecutionPlace(
      const platform::Place& platform) const override {
    return kernel_type_->place_;
  }

  /* member functions for adapting to pten lib */
  /** In the Tensor calculation library, the new Kernel adopts a clearer and
    * more streamlined design. The arguments of the Kernel and the input and
    * output arguments registered in the original OpMaker do not match in some
    * cases, so we use a map to record the arguments required by the kernel.
    * When selecting a Kernel during Op execution, the original Op's arguments
    * are selected according to the arguments returned by
    * GetExpectedPtenKernelArgs.
    */
  virtual KernelSignature GetExpectedPtenKernelArgs(
      const ExecutionContext& ctx) const;
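
  // For example, an override might map the op's proto arguments onto the
  // pten kernel's expected argument order roughly like this (the op and its
  // argument names are illustrative):
  //   return KernelSignature("scale", {"X"}, {"scale", "bias"}, {"Out"});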

  /* member functions for adapting to pten lib */
  void ChoosePtenKernel(const ExecutionContext& ctx) const;

  void BuildPtenKernelContext(const RuntimeContext& ctx,
                              platform::DeviceContext* dev_ctx) const;

  void WriteBackToOutputs(RuntimeContext* ctx) const;

  pten::Kernel* PtenKernel() const { return pt_kernel_.get(); }

  pten::KernelContext* PtenKernelContext() const {
    return pt_kernel_context_.get();
  }

  const OpKernelType* kernel_type() const { return kernel_type_.get(); }

 private:
  void RunImpl(const Scope& scope, const platform::Place& place) const final;
  void RunImpl(const Scope& scope, const platform::Place& place,
               RuntimeContext* runtime_ctx) const;

  /**
   * Transfer data from the scope to a transferred scope. If there is no data
   * that needs to be transferred, it returns nullptr.
   *
   * transfered_inplace_vars is an output vector.
   */
  Scope* PrepareData(const Scope& scope,
                     const OpKernelType& expected_kernel_key,
                     std::vector<std::string>* transfered_inplace_vars,
                     RuntimeContext* ctx) const;

  void TransferInplaceVarsBack(const Scope& scope,
                               const std::vector<std::string>& inplace_vars,
                               const Scope& exec_scope) const;

  OpKernelType InnerGetExpectedKernelType(const ExecutionContext& ctx) const;

  void ChooseKernel(const ExecutionContext& ctx) const;

  void HandleComplexGradToRealGrad(const Scope& scope,
                                   RuntimeContext* ctx) const;

  /* Inner assist methods */
  // indicate kernel DataType by input data.
  // By default all input data must be same.
  proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
  // used for IndicateDataType
  void ParseInputDataType(const std::vector<Variable*>& vars,
                          const std::string& name,
                          proto::VarType::Type* data_type) const;
  // used for IndicateOrPromoteVarDataTypes
  Tensor* GetTensorFormInputSafely(const ExecutionContext& ctx,
                                   const std::string& name) const;

 protected:
  mutable std::unique_ptr<OpKernelType> kernel_type_;
  mutable std::unique_ptr<OpKernelFunc> kernel_func_;
  mutable std::unique_ptr<RuntimeContext> runtime_ctx_;
  mutable const Scope* pre_scope_ = nullptr;
  mutable bool need_prepare_data_ = true;
  mutable bool enable_cache_runtime_context_ = false;
  mutable bool all_kernels_must_compute_runtime_shape_ = false;
  mutable std::mutex cache_update_mutex_;
  mutable bool enable_cache_transfer_scope_ = false;
  // NOTE(chenweihang): Similar op members are used to adapt to
  // new pten kernel, if there is a better design in the future,
  // we may polish the implementation here
  mutable bool run_pten_kernel_ = false;
  mutable std::unique_ptr<KernelSignature> pt_kernel_signature_;
  mutable std::unique_ptr<pten::Kernel> pt_kernel_;
  // In order to reduce the compatibility phase
  // performance overhead, temporarily cache KernelContext
  mutable std::unique_ptr<pten::KernelContext> pt_kernel_context_;
};

extern bool OpSupportGPU(const std::string& op_type);

}  // namespace framework
}  // namespace paddle