/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include <algorithm>
#include <atomic>
#include <memory>
#include <mutex>  // NOLINT
#include <string>
#include <tuple>
#include <unordered_map>
#include <utility>
#include <vector>

#include "glog/logging.h"  // For VLOG
#include "paddle/fluid/framework/attribute.h"
#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/framework/op_info.h"
#include "paddle/fluid/framework/op_kernel_type.h"
#include "paddle/fluid/framework/phi_utils.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/selected_rows_utils.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/unused_var_check.h"
#include "paddle/fluid/memory/malloc.h"
#include "paddle/fluid/platform/device_context.h"

#include "paddle/phi/core/compat/arg_map_context.h"
#include "paddle/phi/core/compat/op_utils.h"
#include "paddle/phi/core/kernel_factory.h"
#include "paddle/utils/flat_hash_map.h"

namespace paddle {
namespace framework {
class InferShapeContext;
class OpInfo;
class Scope;
class Variable;
}  // namespace framework
}  // namespace paddle

namespace phi {
class KernelContext;
}

DECLARE_int32(inner_op_parallelism);

namespace paddle {
namespace framework {

/// If a variable is an empty variable, that name will be used.
constexpr char kEmptyVarName[] = "@EMPTY@";

/// If a variable is a temporary variable, its name will be set in Python,
/// but it will be converted to a unique name in the scope after OpCreator.
constexpr char kTempVarName[] = "@TEMP@";

/// If a variable's name has a certain suffix, it means that the
/// variable is the gradient of another variable.
/// e.g. Variable "x@GRAD" is the gradient of variable "x".
constexpr char kGradVarSuffix[] = "@GRAD";

constexpr size_t kGradVarSuffixSize = 5U;

/// Variables with this suffix are supposed to be filled up with zeros.
constexpr char kZeroVarSuffix[] = "@ZERO";

/// Variables with this suffix are the new Gradient.
constexpr char kNewGradSuffix[] = "@NEWGRAD@";

/// RuntimeContext is used to relate the input/output names of an Operator
/// with the corresponding variables in the name scope.
/// If an Op has the attribute kEnableCacheRuntimeContext, its input/output
/// names do not change during execution within the same name scope, so the
/// RuntimeContext can be created only at the first iteration of this Op's
/// execution to save the elapsed time.
constexpr char kEnableCacheRuntimeContext[] = "@ENABLE_CACHE_RUNTIME_CONTEXT@";

/// If an Op has this attribute, all its kernels should calculate the output
/// variable's shape in the corresponding Compute() function, and
/// OperatorWithKernel::RunImpl() will skip calling this Op's InferShape()
/// function at runtime for speedup.
/// TODO(luotao): Note that this temporary attribute will be deleted after
/// all ops contain it.
constexpr char kAllKernelsMustComputeRuntimeShape[] =
    "@ALL_KERNELS_MUST_COMPUTE_RUNTIME_SHAPE@";

// Defines the kernel priority: the fallback order among multiple kernel types.
extern std::vector<std::tuple<platform::Place, LibraryType>> kKernelPriority;

inline std::string GradVarName(const std::string& var_name) {
  std::string result;
  result.reserve(var_name.size() + kGradVarSuffixSize);
  result += var_name;
  result += kGradVarSuffix;
  return result;
}

inline std::string GradOriginalVarName(const std::string& grad_var_name) {
  std::size_t pos = grad_var_name.rfind(kGradVarSuffix);
  if (pos == std::string::npos) {
    return grad_var_name;
  } else {
    return grad_var_name.substr(0, pos);
  }
}
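
// Illustrative round-trip (grounded in kGradVarSuffix above):
//   GradVarName("x")              returns "x@GRAD"
//   GradOriginalVarName("x@GRAD") returns "x"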

inline bool VarIsTensor(const Variable& var) {
  return var.IsType<LoDTensor>() || var.IsType<phi::SelectedRows>();
}

const Tensor* GetLoDTensorOrSelectedRowsValueFromVar(const Variable& var);
Tensor* GetMutableLoDTensorOrSelectedRowsValueFromVar(Variable* var);

class ExecutionContext;
class OperatorBase;

class RuntimeContext {
 public:
  RuntimeContext(const VariableNameMap& innames,
                 const VariableNameMap& outnames,
                 const Scope& scope);

  RuntimeContext(const VariableValueMap& invars,
                 const VariableValueMap& outvars)
      : inputs(invars), outputs(outvars) {}

  VariableValueMap inputs;
  VariableValueMap outputs;
};

/**
 * OperatorBase has the basic elements that Net will call to do computation.
 * Only CreateOperator from OpRegistry can new an Operator directly. Users
 * should always construct a proto message OpDesc and call
 * OpRegistry::CreateOp(op_desc) to get an Operator instance.
 */
class OperatorBase {
 public:
  OperatorBase(const std::string& type,
               const VariableNameMap& inputs,
               const VariableNameMap& outputs,
               const AttributeMap& attrs);

  virtual ~OperatorBase() {}

  /// The Executor will call this interface function to run an op.
  /// The actual implementation should be written in RunImpl.
  void Run(const Scope& scope, const platform::Place& place);

  // FIXME(typhoonzero): this is only used for recv_op to stop event_loop.
  virtual void Stop() {}

  /// If scope is not null, also show the dimensions of the arguments.
  virtual std::string DebugStringEx(const ScopeBase* scope) const;
  std::string DebugString() const { return DebugStringEx(nullptr); }

  virtual bool SupportGPU() const { return false; }
  virtual bool SupportNPU() const { return false; }
  virtual bool SupportMLU() const { return false; }
  virtual bool SupportXPU() const { return false; }

  const std::string& Type() const { return type_; }

  bool HasAttr(const std::string& name) const {
    return attrs_.count(name) || runtime_attrs_.count(name);
  }
  template <typename T>
  inline const T& Attr(const std::string& name) const {
    auto it = attrs_.find(name);
    if (it == attrs_.end()) {
      it = runtime_attrs_.find(name);
      PADDLE_ENFORCE_NE(
          it,
          runtime_attrs_.end(),
          platform::errors::NotFound(
              "(%s) is not found in AttributeMap and RuntimeAttributeMap.",
              name));
    }
    return PADDLE_GET_CONST(T, it->second);
  }
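
  // Usage sketch (illustrative; "scale" is a hypothetical attribute name):
  //   float scale = op.Attr<float>("scale");
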
  void SetAttr(const std::string& name, const Attribute& v) {
    PADDLE_ENFORCE_EQ(
        HasAttr(name),
        true,
        platform::errors::NotFound(
            "The attribute %s is not found in operator %s", name, Type()));

    attrs_[name] = v;
  }
  const AttributeMap& Attrs() const { return attrs_; }
  const AttributeMap& RuntimeAttrs() const { return runtime_attrs_; }
  void SetRuntimeAttributeMap(const AttributeMap& runtime_attrs) {
    runtime_attrs_ = runtime_attrs;
  }

  const VariableNameMap& Inputs() const { return inputs_; }
  const VariableNameMap& Outputs() const { return outputs_; }
  VariableNameMap& Inputs() { return inputs_; }
  VariableNameMap& Outputs() { return outputs_; }

  const OpInfo& Info() const {
    PADDLE_ENFORCE_NOT_NULL(
        info_,
        platform::errors::NotFound("OpInfo of operator (%s) is not found.",
                                   type_));
    return *info_;
  }

  bool HasInputs(const std::string& name) const;
  //! Get an input with the argument name described in `op_proto`.
  std::string Input(const std::string& name) const;
  //! Get an input which has multiple variables.
  const std::vector<std::string>& Inputs(const std::string& name) const;
  //! Get all input variable names.
  std::vector<std::string> InputVars() const;

  bool HasOutputs(const std::string& name) const;
  //! Get an output with the argument name described in `op_proto`.
  std::string Output(const std::string& name) const;
  //! Get an output which has multiple variables.
  //! TODO: add a vector_view to prevent memory copies.
  const std::vector<std::string>& Outputs(const std::string& name) const;
  //! Get all output variable names.
  virtual std::vector<std::string> OutputVars(bool has_intermediate) const;

  void SetIsCalledByExecutor(bool x) { run_by_executor_ = x; }

  virtual void RuntimeInferShape(const Scope& scope,
                                 const platform::Place& place,
                                 const RuntimeContext& ctx) const {}

  virtual platform::Place GetExecutionPlace(
      const platform::Place& place) const {
    return place;
  }

 protected:
  std::string type_;
  // NOTE: in case of OpGrad, inputs_ contains:
  // I (Inputs)
  // O (Outputs)
  // OG (Output Gradients)
  VariableNameMap inputs_;

  // NOTE: in case of OpGrad, outputs_ contains
  // IG (Input Gradients)
  VariableNameMap outputs_;
  AttributeMap attrs_;
  // NOTE: runtime_attrs_ contains attributes that are used for dispatching
  // kernels (use_mkldnn, use_cudnn, ...) or for passing additional
  // configuration to special heterogeneous kernels (workspace_size_MB, ...).
  // The attributes in runtime_attrs_ are set by the framework (e.g. by
  // passes), not by the Python API.
  AttributeMap runtime_attrs_;

  // OpInfo
  const OpInfo* info_;

  // Whether this operator executes in an Executor.
  bool run_by_executor_{true};

 private:
  void GenerateTemporaryNames();
  void CheckAllInputOutputSet() const;
  virtual void RunImpl(const Scope& scope,
                       const platform::Place& place) const = 0;
};

class ExecutionContext {
 public:
  ExecutionContext(const OperatorBase& op,
                   const Scope& scope,
                   const platform::DeviceContext& device_context,
                   const RuntimeContext& ctx)
      : op_(op), scope_(scope), device_context_(device_context), ctx_(ctx) {}
  virtual ~ExecutionContext() {}

  virtual std::string InputName(const std::string& name) const {
    return op_.Input(name);
  }
  virtual std::vector<std::string> InputNames(const std::string& name) const {
    return op_.Inputs(name);
  }
  virtual std::string OutputName(const std::string& name) const {
    return op_.Output(name);
  }

  virtual std::vector<std::string> OutputNames(const std::string& name) const {
    return op_.Outputs(name);
  }

  virtual bool HasAttr(const std::string& name) const {
    return op_.HasAttr(name);
  }
  virtual const AttributeMap& Attrs() const { return op_.Attrs(); }

  const std::string& Type() const { return op_.Type(); }

  const Scope& scope() const { return scope_; }

  template <typename T>
  inline const T& Attr(const std::string& name) const {
    return PADDLE_GET_CONST(T, GetAttr(name));
  }

  virtual const Attribute& GetAttr(const std::string& name) const {
    auto iter = op_.Attrs().find(name);
    if (iter == op_.Attrs().end()) {
      return op_.RuntimeAttrs().at(name);
    } else {
      return iter->second;
    }
  }

  virtual bool HasInput(const std::string& name) const;

  virtual bool HasInputs(const std::string& name) const;

  virtual bool HasOutput(const std::string& name) const;

  virtual size_t InputSize(const std::string& name) const {
    return op_.Inputs(name).size();
  }

  virtual size_t OutputSize(const std::string& name) const {
    return op_.Outputs(name).size();
  }

  virtual const Variable* InputVar(const std::string& name) const;

  virtual Variable* OutputVar(const std::string& name) const;

  virtual const std::vector<Variable*> MultiInputVar(
      const std::string& name) const {
    LogVarUsageIfUnusedVarCheckEnabled(name);

    auto it = ctx_.inputs.find(name);
    if (it == ctx_.inputs.end()) {
      return {};
    }
    return {it->second.begin(), it->second.end()};
  }

  virtual std::vector<Variable*> MultiOutputVar(const std::string& name) const {
    auto it = ctx_.outputs.find(name);
    if (it == ctx_.outputs.end()) {
      return {};
    }
    return it->second;
  }

  virtual paddle::small_vector<const std::string*> InNameList() const {
    paddle::small_vector<const std::string*> vec_temp;
    vec_temp.reserve(ctx_.inputs.size());

    for (auto& input : ctx_.inputs) {
      vec_temp.push_back(&input.first);
    }

    return vec_temp;
  }

  template <typename T>
  const T* Input(const std::string& name) const {
    auto* var = InputVar(name);
    return var == nullptr ? nullptr : &var->Get<T>();
  }

  template <typename T>
  T* Output(const std::string& name) const {
    auto var = OutputVar(name);
    return var == nullptr ? nullptr : var->GetMutable<T>();
  }

  template <typename T>
  const std::vector<const T*> MultiInput(const std::string& name) const {
    LogVarUsageIfUnusedVarCheckEnabled(name);

    auto vars = MultiInputVar(name);
    if (vars.size() == 0) {
      return {};
    }
    std::vector<const T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(),
                   vars.end(),
                   std::back_inserter(res),
                   [&](const Variable* var) -> const T* {
                     return var == nullptr ? nullptr : &var->Get<T>();
                   });
    return res;
  }

  template <typename T>
  std::vector<T*> MultiOutput(const std::string& name) const {
    auto vars = MultiOutputVar(name);

    if (vars.size() == 0) {
      return {};
    }

    std::vector<T*> res;
    res.reserve(vars.size());
    std::transform(vars.begin(),
                   vars.end(),
                   std::back_inserter(res),
                   [&](Variable* var) -> T* {
                     return var == nullptr ? nullptr : var->GetMutable<T>();
                   });

    return res;
  }
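
  // Usage sketch (illustrative; "X" and "Out" are hypothetical argument names
  // registered in an OpMaker):
  //   auto* x = ctx.Input<LoDTensor>("X");        // single input
  //   auto xs = ctx.MultiInput<LoDTensor>("X");   // duplicable input
  //   auto* out = ctx.Output<LoDTensor>("Out");   // single output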

  platform::Place GetPlace() const { return device_context_.GetPlace(); }

  template <typename DeviceContextType>
  const DeviceContextType& device_context() const {
    return *reinterpret_cast<const DeviceContextType*>(&device_context_);
  }

  const platform::DeviceContext& device_context() const {
    return device_context_;
  }

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  const inline phi::GPUContext& cuda_device_context() const {
    PADDLE_ENFORCE_EQ(platform::is_gpu_place(device_context_.GetPlace()),
                      true,
                      platform::errors::PreconditionNotMet(
                          "Current device context place is not GPUPlace."));
    return *reinterpret_cast<const phi::GPUContext*>(&device_context_);
  }
#endif
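
  // Usage sketch (illustrative): a kernel can fetch a typed device context,
  //   auto& dev_ctx = ctx.device_context<phi::GPUContext>();
  // or, equivalently on CUDA/HIP builds:
  //   auto& gpu_ctx = ctx.cuda_device_context();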

  template <typename T, typename DevContext>
  Tensor AllocateTmpTensor(const framework::DDim& dim,
                           const DevContext& dev_ctx) const {
    phi::DenseTensor tmp;
    tmp.Resize(dim);
    dev_ctx.template Alloc<T>(&tmp);
    return tmp;
  }
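
  // Usage sketch (illustrative; the shape values are arbitrary):
  //   Tensor tmp = ctx.AllocateTmpTensor<float, phi::GPUContext>(
  //       {2, 3}, ctx.device_context<phi::GPUContext>());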

  const RuntimeContext Context() const { return ctx_; }

  std::string DebugString() const { return op_.DebugString(); }
  const OperatorBase& GetOp() const { return op_; }

 private:
  const OperatorBase& op_;
  const Scope& scope_;
  const platform::DeviceContext& device_context_;
  const RuntimeContext& ctx_;
};

// TODO(chenweihang): split the impl based on OpProto or Dygraph if needed
class ExecutionArgumentMappingContext : public phi::ArgumentMappingContext {
 public:
  explicit ExecutionArgumentMappingContext(const ExecutionContext& ctx)
      : ctx_(ctx) {}

  bool HasInput(const std::string& name) const override {
    return ctx_.HasInputs(name);
  }

  bool HasOutput(const std::string& name) const override {
    return ctx_.HasOutput(name);
  }

  bool HasAttr(const std::string& name) const override {
    return ctx_.HasAttr(name);
  }

  paddle::any Attr(const std::string& name) const override {
    auto& attr = ctx_.GetAttr(name);
    return GetAttrValue(attr);
  }

  size_t InputSize(const std::string& name) const override {
    return ctx_.MultiInputVar(name).size();
  }

  size_t OutputSize(const std::string& name) const override {
    return ctx_.MultiOutputVar(name).size();
  }

  bool IsDenseTensorInput(const std::string& name) const override {
    const auto* var = ctx_.InputVar(name);
    return var->IsType<phi::DenseTensor>();
  }

  bool IsDenseTensorInputs(const std::string& name) const override {
    auto vars = ctx_.MultiInputVar(name);
    return std::all_of(vars.begin(), vars.end(), [](const Variable* var) {
      return var->IsType<phi::DenseTensor>();
    });
  }

  bool IsSelectedRowsInputs(const std::string& name) const override {
    auto vars = ctx_.MultiInputVar(name);
    return std::all_of(vars.begin(), vars.end(), [](const Variable* var) {
      return var->IsType<phi::SelectedRows>();
    });
  }

  bool IsSelectedRowsInput(const std::string& name) const override {
    const auto* var = ctx_.InputVar(name);
    return var->IsType<phi::SelectedRows>();
  }

  bool IsDenseTensorVectorInput(const std::string& name) const override {
    auto vars = ctx_.MultiInputVar(name);
    return std::all_of(vars.begin(), vars.end(), [](const Variable* var) {
      return var->IsType<framework::LoDTensorArray>();
    });
  }

  bool IsSparseCooTensorInput(const std::string& name) const override {
    const auto* var = ctx_.InputVar(name);
    return var->IsType<phi::SparseCooTensor>();
  }

  bool IsSparseCsrTensorInput(const std::string& name) const override {
    const auto* var = ctx_.InputVar(name);
    return var->IsType<phi::SparseCsrTensor>();
  }

  bool IsDenseTensorOutput(const std::string& name) const override {
    auto vars = ctx_.MultiOutputVar(name);
    return std::all_of(vars.begin(), vars.end(), [](const Variable* var) {
      return var->IsType<phi::DenseTensor>();
    });
  }

  bool IsSelectedRowsOutput(const std::string& name) const override {
    auto vars = ctx_.MultiOutputVar(name);
    return std::all_of(vars.begin(), vars.end(), [](const Variable* var) {
      return var->IsType<phi::SelectedRows>();
    });
  }

  bool IsForInferShape() const override { return false; }

 private:
  const ExecutionContext& ctx_;
};

template <>
const std::vector<const Tensor*> ExecutionContext::MultiInput<Tensor>(
    const std::string& name) const;

template <>
std::vector<Tensor*> ExecutionContext::MultiOutput<Tensor>(
    const std::string& name) const;

class OpKernelBase {
 public:
  /**
   * ExecutionContext is the only parameter of a kernel's Compute function.
   * Compute gets the input/output variables, state such as momentum, and
   * device resources such as the CUDA stream and cuBLAS handle from the
   * ExecutionContext. The user should construct it before running the
   * Operator.
   */

  virtual void Compute(const ExecutionContext& context) const = 0;

  virtual ~OpKernelBase() = default;
};

template <typename T>
class OpKernel : public OpKernelBase {
 public:
  using ELEMENT_TYPE = T;
};
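
// A concrete kernel derives from OpKernel<T> and implements Compute().
// Illustrative sketch ("X"/"Out" are hypothetical argument names; real
// kernels are registered via the REGISTER_OP_*_KERNEL macros):
//
//   template <typename T>
//   class FooKernel : public OpKernel<T> {
//    public:
//     void Compute(const ExecutionContext& ctx) const override {
//       auto* x = ctx.Input<LoDTensor>("X");
//       auto* out = ctx.Output<LoDTensor>("Out");
//       // ... compute *out from *x on ctx.GetPlace() ...
//     }
//   };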

class OperatorWithKernel : public OperatorBase {
 public:
  using OpKernelFunc = std::function<void(const ExecutionContext&)>;
  using OpKernelMap =
      std::unordered_map<OpKernelType, OpKernelFunc, OpKernelType::Hash>;

  OperatorWithKernel(const std::string& type,
                     const VariableNameMap& inputs,
                     const VariableNameMap& outputs,
                     const AttributeMap& attrs)
      : OperatorBase(type, inputs, outputs, attrs) {}

  static paddle::flat_hash_map<std::string /* op_type */, OpKernelMap>&
  AllOpKernels() {
    static paddle::flat_hash_map<std::string, OpKernelMap> g_all_op_kernels;
    return g_all_op_kernels;
  }

  bool SupportGPU() const override;

  bool SupportNPU() const override;

  bool SupportMLU() const override {
    // TODO(zhiqiu): support phi if needed?
    auto& op_kernels = OperatorWithKernel::AllOpKernels().at(type_);
    return std::any_of(op_kernels.begin(),
                       op_kernels.end(),
                       [](OpKernelMap::const_reference kern_pair) {
                         return platform::is_mlu_place(kern_pair.first.place_);
                       });
  }

  bool SupportXPU() const override;

  bool SupportsMKLDNN(proto::VarType::Type data_type) const;

  bool SupportsKernelType(const OpKernelType& kernel_type) const;

  bool CanMKLDNNBeUsed(const framework::ExecutionContext& ctx,
                       proto::VarType::Type data_type) const;

  virtual void InferShape(InferShapeContext* ctx) const;

  void RuntimeInferShape(const Scope& scope,
                         const platform::Place& place,
                         const RuntimeContext& ctx) const override;

  proto::VarType::Type IndicateVarDataType(const ExecutionContext& ctx,
                                           const std::string& name) const;

  proto::VarType::Type IndicateOrPromoteVarDataTypes(
      const ExecutionContext& ctx,
      const std::string& name1,
      const std::string& name2) const;

  virtual OpKernelType GetExpectedKernelType(const ExecutionContext& ctx) const;
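
  // A typical override selects the kernel by an input's data type and the
  // execution place, e.g. (illustrative; "X" is a hypothetical input name):
  //   OpKernelType GetExpectedKernelType(
  //       const ExecutionContext& ctx) const override {
  //     return OpKernelType(IndicateVarDataType(ctx, "X"), ctx.GetPlace());
  //   }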

  // Changed to public so that in dygraph mode we can call it to check
  // whether we need to transform data.
  virtual OpKernelType GetKernelTypeForVar(
      const std::string& var_name,
      const Tensor& tensor,
      const OpKernelType& expected_kernel_type) const;

  platform::Place GetExecutionPlace(
      const platform::Place& platform) const override {
    return kernel_type_->place_;
  }

  /* member functions for adapting to phi lib */
  /** In the Tensor calculation library, the new Kernel adopts a clearer and
   * more streamlined design. The arguments of the Kernel and the input and
   * output arguments registered in the original OpMaker do not match in some
   * cases, so we use a map to record the arguments required by the kernel.
   * When selecting a Kernel during Op execution, the arguments of the
   * original Op are selected according to the arguments returned by
   * GetExpectedPhiKernelArgs.
   */
  phi::KernelSignature GetExpectedPhiKernelArgs(
      const ExecutionContext& ctx) const;

  /* member functions for adapting to phi lib */
  phi::KernelKey ChoosePhiKernel(const ExecutionContext& ctx) const;

  void ChooseKernel(const ExecutionContext& ctx) const;

  void BuildPhiKernelContext(const RuntimeContext& ctx,
                             platform::DeviceContext* dev_ctx,
                             phi::KernelContext* phi_kernel_context) const;

  phi::KernelSignature* PhiKernelSignature() const {
    return kernel_signature_.get();
  }

  phi::Kernel* PhiKernel() const { return phi_kernel_.get(); }

  void ResetPhiKernel(phi::Kernel* kernel) const {
    return phi_kernel_.reset(kernel);
  }

  const OpKernelType* kernel_type() const { return kernel_type_.get(); }
  const OpKernelFunc* kernel_func() const { return kernel_func_.get(); }

  void ResetKernelType(OpKernelType* kernel_type) {
    kernel_type_.reset(kernel_type);
  }

 private:
  void RunImpl(const Scope& scope, const platform::Place& place) const final;
  void RunImpl(const Scope& scope,
               const platform::Place& place,
               RuntimeContext* runtime_ctx) const;

  /**
   * Transfer data from the scope to a transferred scope. If no data needs
   * to be transferred, it returns nullptr.
   *
   * transfered_inplace_vars is an output vector.
   */
  Scope* PrepareData(const Scope& scope,
                     const OpKernelType& expected_kernel_key,
                     std::vector<std::string>* transfered_inplace_vars,
                     RuntimeContext* ctx) const;

  void TransferInplaceVarsBack(const Scope& scope,
                               const std::vector<std::string>& inplace_vars,
                               const Scope& exec_scope) const;

  OpKernelType InnerGetExpectedKernelType(const ExecutionContext& ctx) const;

  void HandleComplexGradToRealGrad(const Scope& scope,
                                   RuntimeContext* ctx) const;

  /* Inner assist methods */
  // Indicate the kernel DataType by input data.
  // By default, all input data must have the same type.
  proto::VarType::Type IndicateDataType(const ExecutionContext& ctx) const;
  // used for IndicateDataType
  void ParseInputDataType(const Variable* vars,
                          const std::string& name,
                          proto::VarType::Type* data_type) const;
  void ParseMultiInputDataType(const std::vector<Variable*>& vars,
                               const std::string& name,
                               proto::VarType::Type* data_type) const;
  // used for IndicateOrPromoteVarDataTypes
  Tensor* GetTensorFormInputSafely(const ExecutionContext& ctx,
                                   const std::string& name) const;

 protected:
  mutable std::unique_ptr<OpKernelType> kernel_type_;
  mutable std::unique_ptr<OpKernelFunc> kernel_func_;
  mutable std::unique_ptr<RuntimeContext> runtime_ctx_;
  mutable const Scope* pre_scope_ = nullptr;
  mutable bool need_prepare_data_ = true;
  mutable bool need_prepare_phi_data_ = false;
  mutable bool enable_cache_runtime_context_ = false;
  mutable bool all_kernels_must_compute_runtime_shape_ = false;
  mutable std::mutex cache_update_mutex_;
  mutable bool enable_cache_transfer_scope_ = false;
  // NOTE(chenweihang): The following members are used to adapt to the new
  // phi kernels; if there is a better design in the future, we may polish
  // the implementation here.
  mutable bool run_phi_kernel_ = false;
  mutable bool run_kp_kernel = false;
  mutable std::unique_ptr<phi::KernelSignature> kernel_signature_;
  mutable std::unique_ptr<phi::Kernel> phi_kernel_;
  mutable std::unique_ptr<phi::ArgumentMappingFn> arg_map_fn_;

  struct CacheImpl;
  mutable CacheImpl* impl_{nullptr};
};

extern bool OpSupportGPU(const std::string& op_type);

}  // namespace framework
}  // namespace paddle