prepared_operator.cc
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/prepared_operator.h"

#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/imperative/infer_shape_context.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/utils/small_vector.h"
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
#endif
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_op_list.h"
#endif
#include "paddle/fluid/framework/library_type.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/fluid/platform/profiler/supplement_tracing.h"

DECLARE_bool(check_nan_inf);
DECLARE_bool(benchmark);
DECLARE_bool(run_kp_kernel);

namespace paddle {
namespace imperative {
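
// Shared placeholders reused by every PreparedOp: dygraph execution does not
// build a per-op Scope or RuntimeContext, and a PreparedOp that runs a fluid
// kernel holds a reference to empty_kernel rather than a real phi kernel.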

static const phi::Kernel empty_kernel;
static const framework::RuntimeContext empty_ctx({}, {});
static const framework::Scope empty_scope;

const phi::KernelFactory& PreparedOp::phi_kernel_factory =
    phi::KernelFactory::Instance();
const phi::OpUtilsMap& PreparedOp::phi_op_utils_map =
    phi::OpUtilsMap::Instance();
const phi::DefaultKernelSignatureMap& PreparedOp::default_phi_kernel_sig_map =
    phi::DefaultKernelSignatureMap::Instance();

const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<paddle::imperative::VarBase>& var) {
  return var->SharedVar();
}

const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<VariableWrapper>& var) {
  return var;
}

const phi::DenseTensor* GetTensorFromVar(const framework::Variable& var) {
  if (var.IsType<phi::DenseTensor>()) {
    return &(var.Get<phi::DenseTensor>());
  } else if (var.IsType<phi::SelectedRows>()) {
    return &(var.Get<phi::SelectedRows>().value());
  } else {
    return nullptr;
  }
}

template <typename VarType>
void HandleComplexGradToRealGrad(const NameVarMap<VarType>& outs) {
  for (auto& pair : outs) {
    for (auto& var : pair.second) {
      if (var == nullptr) {
        continue;
      }
      if (var->ForwardDataType() ==
          static_cast<framework::proto::VarType::Type>(-1)) {
        VLOG(6) << "Var (" << var->Name()
                << ")'s forward data type is not set.";
        continue;
      }
      if (!framework::IsComplexType(var->DataType()) ||
          framework::IsComplexType(var->ForwardDataType())) {
        continue;
      }
      const auto* tensor = GetTensorFromVar(var->Var());
      if (tensor && tensor->IsInitialized()) {
        VLOG(6) << "Transform " << framework::DataTypeToString(var->DataType())
                << " var `" << var->Name() << "` to "
                << framework::DataTypeToString(var->ForwardDataType())
                << " real var in dynamic graph.";
        phi::DenseTensor out;
        framework::TransComplexToReal(
            var->ForwardDataType(), var->DataType(), *tensor, &out);
        SetTensorToVariable(var->Var(), out, var->MutableVar());
      }
    }
  }
}

template <>
void HandleComplexGradToRealGrad<egr::EagerVariable>(
    const NameVarMap<egr::EagerVariable>& outs) {
  // TODO(jiabin): Support Complex here.
}

void TestHandleComplexGradToRealGradEager(
    const NameVarMap<egr::EagerVariable>& outs) {
  HandleComplexGradToRealGrad<egr::EagerVariable>(outs);
}
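
// PreparedOp is constructed through one of two paths: the first constructor
// below stores a fluid OpKernelFunc, while the second stores a phi kernel
// (plus its argument-mapping signature) and sets run_phi_kernel_ = true so
// that Run() dispatches to the phi code path.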

PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const framework::OperatorWithKernel::OpKernelFunc& func,
                       const phi::ArgumentMappingFn* arg_map_fn,
                       const phi::KernelSignature* default_kernel_signature,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(func),
      dev_ctx_(dev_ctx),
      arg_map_fn_(arg_map_fn),
      default_kernel_signature_(default_kernel_signature),
      phi_kernel_(empty_kernel) {}

PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const phi::ArgumentMappingFn* arg_map_fn,
                       const phi::KernelSignature* default_kernel_signature,
                       phi::KernelSignature&& kernel_signature,
                       const phi::Kernel& phi_kernel,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(nullptr),
      dev_ctx_(dev_ctx),
      run_phi_kernel_(true),
      arg_map_fn_(arg_map_fn),
      default_kernel_signature_(default_kernel_signature),
      kernel_signature_(std::move(kernel_signature)),
      phi_kernel_(phi_kernel) {}

#ifdef PADDLE_WITH_MLU

static void tokenize(const std::string& ops,
                     char delim,
                     std::unordered_set<std::string>* op_set) {
  std::string::size_type beg = 0;
  for (uint64_t end = 0; (end = ops.find(delim, end)) != std::string::npos;
       ++end) {
    op_set->insert(ops.substr(beg, end - beg));
    beg = end + 1;
  }

  op_set->insert(ops.substr(beg));
}
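
// MLU_BLACK_LIST is parsed once as a comma-separated list of op names, e.g.
// `export MLU_BLACK_LIST="conv2d,pool2d"` (illustrative values); ops on the
// list are forced to fall back to CPU kernels in PrepareImpl below.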

static bool is_in_mlu_black_list(const std::string& op_name) {
  static bool inited = false;
  static std::unordered_set<std::string> mlu_black_list;
  static std::mutex s_mtx;
  if (!inited) {
    std::lock_guard<std::mutex> guard(s_mtx);
    if (!inited) {
      if (std::getenv("MLU_BLACK_LIST") != nullptr) {
        std::string ops(std::getenv("MLU_BLACK_LIST"));
        tokenize(ops, ',', &mlu_black_list);
      }
      inited = true;
      VLOG(3) << "MLU Black List: ";
      for (auto iter = mlu_black_list.begin(); iter != mlu_black_list.end();
           ++iter) {
        VLOG(3) << *iter << " ";
      }
    }
  }
  if (mlu_black_list.find(op_name) != mlu_black_list.end()) {
    return true;
  }
  return false;
}

#endif

template <typename VarType>
PreparedOp PrepareImpl(
    const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs,
    const framework::OperatorWithKernel& op,
    const platform::Place& place,
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs,
    const phi::KernelFactory& phi_kernel_factory,
    const phi::OpUtilsMap& phi_op_utils_map,
    const phi::DefaultKernelSignatureMap& default_phi_kernel_sig_map) {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

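  // The rest of this function selects the kernel: compute the expected kernel
  // key, prefer a matching phi kernel, otherwise look the key up in the fluid
  // op kernel registry, and as a last resort fall back to a CPU kernel when
  // the target device has no usable registration.
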
#ifdef PADDLE_WITH_MKLDNN
  // MKLDNN variant of code reads attributes in some of GetKernelTypeForVar and
  // GetKernelType functions, so we need to copy the attributes there.
  // Const qualifier of Attrs had to be discarded to overwrite it.
  if (FLAGS_use_mkldnn) {
    auto& mutable_op_attrs = const_cast<framework::AttributeMap&>(op.Attrs());
    mutable_op_attrs = default_attrs;
    for (auto& attr : attrs) {
      mutable_op_attrs[attr.first] = attr.second;
    }
  }
#endif
  // NOTE(zhiqiu): for kernels on a given device (for example, NPU), the order
  // of choice is:
  // phi npu kernel > fluid npu kernel > phi cpu kernel > fluid cpu kernel

  // 1. get expected kernel key
  auto dygraph_exe_ctx = DygraphExecutionContext<VarType>(
      op, empty_scope, *dev_ctx, empty_ctx, ins, outs, attrs, default_attrs);
  auto expected_kernel_key = op.GetExpectedKernelType(dygraph_exe_ctx);

  const phi::KernelSignature* default_kernel_signature = nullptr;
  phi::KernelSignature kernel_signature;
  phi::KernelKey phi_kernel_key;
  std::string phi_kernel_name;

// NOTE(jiahongyu): Registered MKLDNN kernels have library_type =
// LibraryType::kMKLDNN and data_layout_ = DataLayout::kMKLDNN, but the default
// values are kPlain, so we need to modify the library_type and data_layout_
// here. The if condition checks three things:
// 1. Whether the mkldnn kernel falls back to a plain kernel;
// 2. Whether this op has a specific implementation;
// 3. Whether the mkldnn kernel can be used.
#ifdef PADDLE_WITH_MKLDNN
  if (!op.DnnFallback() && !paddle::platform::in_mkldnn_white_list(op.Type()) &&
      op.CanMKLDNNBeUsed(dygraph_exe_ctx, expected_kernel_key.data_type_)) {
    expected_kernel_key.library_type_ = framework::LibraryType::kMKLDNN;
    expected_kernel_key.data_layout_ = framework::DataLayout::kMKLDNN;
  }
#endif
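
// is_xpu_unsupport (computed below) marks ops that either have no XPU
// implementation for this kernel key or are explicitly black-listed for XPU;
// such ops are routed to CPU kernels later in this function.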

#if defined(PADDLE_WITH_XPU)
  bool is_xpu_unsupport =
      paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
          !paddle::platform::is_xpu_support_op(op.Type(),
                                               expected_kernel_key) ||
      paddle::platform::is_in_xpu_black_list(op.Type());
#endif

#ifdef PADDLE_WITH_MLU
  if (is_in_mlu_black_list(op.Type())) {
    expected_kernel_key.place_ = platform::CPUPlace();
  }
#endif

  bool has_phi_kernel = false;

  const auto* arg_map_fn = phi_op_utils_map.GetArgumentMappingFn(op.Type());

  if (arg_map_fn) {
    has_phi_kernel = true;
    kernel_signature = (*arg_map_fn)(
        framework::ExecutionArgumentMappingContext(dygraph_exe_ctx));
  } else {
    default_kernel_signature =
        default_phi_kernel_sig_map.GetNullable(op.Type());
    if (default_kernel_signature) {
      has_phi_kernel = true;
      kernel_signature = *default_kernel_signature;
    }
  }

  if (has_phi_kernel) {
    VLOG(6) << kernel_signature;
    phi_kernel_name = kernel_signature.name;
// NOTE(Liu-xiandong): Kernels registered for KP use library_type kKP, but the
// default library_type is Plain, so we need to modify the library_type here;
// otherwise the KP kernel can't be found.
#ifdef PADDLE_WITH_XPU_KP
    if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
      bool use_xpu_kp_kernel_rt =
          FLAGS_run_kp_kernel && paddle::platform::is_xpu_kp_support_op(
                                     op.Type(), expected_kernel_key);
      bool use_xpu_kp_kernel_debug =
          paddle::platform::is_in_xpu_kpwhite_list(op.Type());
      if (use_xpu_kp_kernel_rt) {
        VLOG(3) << "phi xpu_kp using rt mode ";
      }
      if (use_xpu_kp_kernel_debug) {
        VLOG(3) << "phi xpu_kp using debug mode ";
      }
      bool is_xpu_kp_support =
          (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
      if (is_xpu_kp_support) {
        auto expected_kernel_key_library_type =
            expected_kernel_key.library_type_;
        expected_kernel_key.library_type_ = paddle::framework::LibraryType::kKP;
        VLOG(3) << "modifying XPU KP kernel: " << phi_kernel_name
                << ", using_kernel_key:" << expected_kernel_key;

        phi::KernelKey try_phi_kernel_key =
            TransOpKernelTypeToPhiKernelKey(expected_kernel_key);
        if (!phi_kernel_factory.HasKernel(phi_kernel_name,
                                          try_phi_kernel_key)) {
          expected_kernel_key.library_type_ = expected_kernel_key_library_type;
          VLOG(3) << "modify XPU KP kernel: " << phi_kernel_name
                  << " in dynamic graph failed " << expected_kernel_key;
        } else {
          VLOG(3) << "modify XPU KP kernel: " << phi_kernel_name
                  << " in dynamic graph succeeded " << expected_kernel_key;
        }
      }
    }
#endif

    phi_kernel_key = TransOpKernelTypeToPhiKernelKey(expected_kernel_key);
    auto& phi_kernel =
        phi_kernel_factory.SelectKernel(phi_kernel_name, phi_kernel_key);

    if (phi_kernel.IsValid()
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
        && !is_xpu_unsupport
#endif
    ) {
      VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << phi_kernel_name
              << " | kernel key: " << phi_kernel_key
              << " | kernel: " << phi_kernel;

      if (expected_kernel_key.place_ != place) {
        dev_ctx = pool.Get(expected_kernel_key.place_);
      }

      return PreparedOp(op,
                        empty_ctx,
                        expected_kernel_key,
                        arg_map_fn,
                        default_kernel_signature,
                        std::move(kernel_signature),
                        phi_kernel,
                        dev_ctx);
    } else {
      VLOG(6) << "Dynamic mode ChoosePhiKernel - kernel `" << phi_kernel_name
              << "` not found.";
    }
  }

  // 2. check if op[type] has kernel registered.
  auto& all_op_kernels = op.AllOpKernels();
  auto kernels_iter = all_op_kernels.find(op.Type());

// NOTE(Liu-xiandong): If we can't find a heterogeneous kernel in phi, we need
// to select the heterogeneous kernel in fluid, but the kernels registered for
// KP use library_type kKP, so we need to modify it here.
#ifdef PADDLE_WITH_XPU_KP
  bool use_xpu_kp_kernel_rt =
      paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
      FLAGS_run_kp_kernel &&
      paddle::platform::is_xpu_kp_support_op(op.Type(), expected_kernel_key);
  bool use_xpu_kp_kernel_debug =
      paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
      paddle::platform::is_in_xpu_kpwhite_list(op.Type());
  bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
  if (is_xpu_kp_support) {
    expected_kernel_key.library_type_ = paddle::framework::LibraryType::kKP;
  }
#endif

  if ((kernels_iter == all_op_kernels.end() ||
       kernels_iter->second.find(expected_kernel_key) ==
           kernels_iter->second.end())
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
      || is_xpu_unsupport
#endif
#if defined(PADDLE_WITH_XPU_KP)
      || (is_xpu_unsupport && !is_xpu_kp_support)
#endif
  ) {
    if (has_phi_kernel) {
      auto phi_cpu_kernel_key =
          FallBackToCpu(expected_kernel_key, phi_kernel_key, op);
      auto& phi_cpu_kernel =
          phi_kernel_factory.SelectKernel(phi_kernel_name, phi_cpu_kernel_key);
      if (phi_cpu_kernel.IsValid()) {
        VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << phi_kernel_name
                << " | kernel key: " << phi_cpu_kernel_key
                << " | kernel: " << phi_cpu_kernel;
        auto* cpu_ctx = pool.Get(paddle::platform::CPUPlace());
        return PreparedOp(
            op,
            empty_ctx,
            framework::TransPhiKernelKeyToOpKernelType(phi_cpu_kernel_key),
            arg_map_fn,
            default_kernel_signature,
            std::move(kernel_signature),
            phi_cpu_kernel,
            cpu_ctx);
      }
    }
  }

  PADDLE_ENFORCE_NE(
      kernels_iter,
      all_op_kernels.end(),
      platform::errors::NotFound(
          "There are no kernels which are registered in the %s operator.",
          op.Type()));

  auto& kernels = kernels_iter->second;
  auto kernel_iter = kernels.find(expected_kernel_key);

#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
  if (paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
      (kernel_iter == kernels.end() || is_xpu_unsupport)) {
    VLOG(3) << "fluid missing XPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#ifdef PADDLE_WITH_XPU_KP
  if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
    if (use_xpu_kp_kernel_rt) {
      VLOG(3) << "fluid xpu_kp using rt mode ";
    }
    if (use_xpu_kp_kernel_debug) {
      VLOG(3) << "fluid xpu_kp using debug mode ";
    }
    if (is_xpu_kp_support) {
      expected_kernel_key.library_type_ = paddle::framework::LibraryType::kKP;
      kernel_iter = kernels.find(expected_kernel_key);
      VLOG(3) << "using fluid XPU KP kernel: " << op.Type()
              << ", using_kernel_key:" << expected_kernel_key;
    }
    if (!is_xpu_kp_support &&
        (kernel_iter == kernels.end() || is_xpu_unsupport)) {
      VLOG(3) << "fluid missing XPU kernel: " << op.Type()
              << ", expected_kernel_key:" << expected_kernel_key
              << ", falling back to CPU one!";
      expected_kernel_key.place_ = platform::CPUPlace();
      kernel_iter = kernels.find(expected_kernel_key);
    }
  }
#endif

#ifdef PADDLE_WITH_ASCEND_CL
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_npu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing NPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_IPU
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_ipu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing IPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_MLU
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_mlu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing MLU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_custom_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing " << place.GetDeviceType() << " kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  // TODO(jiabin): Add operator.cc's line 1000 part back when we need that
  // case
  PADDLE_ENFORCE_NE(
      kernel_iter,
      kernels.end(),
      platform::errors::NotFound("Operator %s does not have kernel for %s.",
                                 op.Type(),
                                 KernelTypeToString(expected_kernel_key)));

  if (!(expected_kernel_key.place_ == place)) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  return PreparedOp(op,
                    empty_ctx,
                    expected_kernel_key,
                    kernel_iter->second,
                    arg_map_fn,
                    default_kernel_signature,
                    dev_ctx);
}
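
// Illustrative usage (simplified sketch; the real call sites live in the
// dygraph tracer, e.g. imperative::Tracer::TraceOp):
//   auto prepared_op = PreparedOp::Prepare(ins, outs, op, place, attrs,
//                                          default_attrs);
//   prepared_op.Run(ins, outs, attrs, default_attrs);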

PreparedOp PreparedOp::Prepare(const NameVarMap<VarBase>& ins,
                               const NameVarMap<VarBase>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs) {
  return PrepareImpl<VarBase>(ins,
                              outs,
                              op,
                              place,
                              attrs,
                              default_attrs,
                              phi_kernel_factory,
                              phi_op_utils_map,
                              default_phi_kernel_sig_map);
}

PreparedOp PreparedOp::Prepare(const NameVarMap<VariableWrapper>& ins,
                               const NameVarMap<VariableWrapper>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs) {
  return PrepareImpl<VariableWrapper>(ins,
                                      outs,
                                      op,
                                      place,
                                      attrs,
                                      default_attrs,
                                      phi_kernel_factory,
                                      phi_op_utils_map,
                                      default_phi_kernel_sig_map);
}

PreparedOp PreparedOp::Prepare(const NameVarMap<egr::EagerVariable>& ins,
                               const NameVarMap<egr::EagerVariable>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs) {
  return PrepareImpl<egr::EagerVariable>(ins,
                                         outs,
                                         op,
                                         place,
                                         attrs,
                                         default_attrs,
                                         phi_kernel_factory,
                                         phi_op_utils_map,
                                         default_phi_kernel_sig_map);
}
template <typename VarType>
static void PreparedOpRunImpl(
    const framework::OperatorBase& op,
    const framework::RuntimeContext& ctx,
    const framework::OpKernelType& kernel_type,
    const framework::OperatorWithKernel::OpKernelFunc& func,
    const phi::ArgumentMappingFn* arg_map_fn,
    const phi::KernelSignature* default_kernel_signature,
    platform::DeviceContext* dev_ctx,
    const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs,
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  // TODO(zjl): remove scope in dygraph

  {
    platform::RecordEvent record_event("infer_shape",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    DygraphInferShapeContext<VarType> infer_shape_ctx(&ins,
                                                      &outs,
                                                      &attrs,
                                                      &default_attrs,
                                                      op.Type(),
                                                      &kernel_type,
                                                      arg_map_fn,
                                                      default_kernel_signature);
    op.Info().infer_shape_(&infer_shape_ctx);
    record_event.End();
    platform::RecordOpInfoSupplement(
        op.Type(), op.Attrs(), infer_shape_ctx, ctx);
  }

  {
    platform::RecordEvent record_event("compute",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);

    func(DygraphExecutionContext<VarType>(
        op, empty_scope, *dev_ctx, ctx, ins, outs, attrs, default_attrs));
  }

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInfInDygraph<VarType>(
        op.Type(), outs, dev_ctx->GetPlace());
  }

  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  /**
   * [ Why need handle complex gradient to real gradient? ]
   *
   * After the introduction of complex number calculations, Ops that support
   * complex number calculations generally support type promotion, such as
   * x(float32) + y(complex64) = out(complex64), then the type of the grad
   * tensor should be dout(complex64), dx(float32), dy (complex64).
   *
   * But because the dout is complex64, the dx is also complex64 after
   * grad op kernel executed, we need to recognize this situation and
   * convert dx back to float32. HandleComplexGradToRealGrad performs this
   * conversion.
   */
  if (framework::IsComplexType(kernel_type.data_type_)) {
    HandleComplexGradToRealGrad<VarType>(outs);
  }
}

template <typename VarType>
static void PreparedOpRunPtImpl(
    const framework::OperatorBase& op,
    const framework::OpKernelType& kernel_type,
    const phi::ArgumentMappingFn* arg_map_fn,
    const phi::KernelSignature* default_kernel_signature,
    const phi::KernelSignature& kernel_signature,
    const phi::Kernel& phi_kernel,
    platform::DeviceContext* dev_ctx,
    const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs,
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  {
    platform::RecordEvent record_event("infer_shape",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    DygraphInferShapeContext<VarType> infer_shape_ctx(&ins,
                                                      &outs,
                                                      &attrs,
                                                      &default_attrs,
                                                      op.Type(),
                                                      &kernel_type,
                                                      arg_map_fn,
                                                      default_kernel_signature);
    op.Info().infer_shape_(&infer_shape_ctx);
    record_event.End();
    platform::RecordOpInfoSupplement(
        op.Type(), op.Attrs(), infer_shape_ctx, kernel_signature);
  }

  {
    platform::RecordEvent record_event("compute",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
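
    // PreparePhiData transforms the inputs (e.g. place and dtype) to match
    // what the selected phi kernel declares, and BuildDygraphPhiKernelContext
    // below packs inputs, outputs and attributes into the phi::KernelContext
    // that the kernel is finally invoked with.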
    PreparePhiData<VarType>(phi_kernel, kernel_signature, ins);

    phi::KernelContext phi_kernel_context;
    BuildDygraphPhiKernelContext<VarType>(kernel_signature,
                                          phi_kernel,
                                          ins,
                                          outs,
                                          attrs,
                                          default_attrs,
                                          dev_ctx,
                                          &phi_kernel_context);

    phi_kernel(&phi_kernel_context);
  }

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInfInDygraph<VarType>(
        op.Type(), outs, dev_ctx->GetPlace());
  }

  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  if (framework::IsComplexType(kernel_type.data_type_)) {
    HandleComplexGradToRealGrad<VarType>(outs);
  }
}

void PreparedOp::Run(const NameVarMap<VarBase>& ins,
                     const NameVarMap<VarBase>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_phi_kernel_) {
    PreparedOpRunPtImpl<VarBase>(op_,
                                 kernel_type_,
                                 arg_map_fn_,
                                 default_kernel_signature_,
                                 kernel_signature_,
                                 phi_kernel_,
                                 dev_ctx_,
                                 ins,
                                 outs,
                                 attrs,
                                 default_attrs);
  } else {
    PreparedOpRunImpl<VarBase>(op_,
                               ctx_,
                               kernel_type_,
                               func_,
                               arg_map_fn_,
                               default_kernel_signature_,
                               dev_ctx_,
                               ins,
                               outs,
                               attrs,
                               default_attrs);
  }
}

void PreparedOp::Run(const NameVarMap<VariableWrapper>& ins,
                     const NameVarMap<VariableWrapper>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_phi_kernel_) {
    PreparedOpRunPtImpl<VariableWrapper>(op_,
                                         kernel_type_,
                                         arg_map_fn_,
                                         default_kernel_signature_,
                                         kernel_signature_,
                                         phi_kernel_,
                                         dev_ctx_,
                                         ins,
                                         outs,
                                         attrs,
                                         default_attrs);
  } else {
    PreparedOpRunImpl<VariableWrapper>(op_,
                                       ctx_,
                                       kernel_type_,
                                       func_,
                                       arg_map_fn_,
                                       default_kernel_signature_,
                                       dev_ctx_,
                                       ins,
                                       outs,
                                       attrs,
                                       default_attrs);
  }
}

void PreparedOp::Run(const NameVarMap<egr::EagerVariable>& ins,
                     const NameVarMap<egr::EagerVariable>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_phi_kernel_) {
    PreparedOpRunPtImpl<egr::EagerVariable>(op_,
                                            kernel_type_,
                                            arg_map_fn_,
                                            default_kernel_signature_,
                                            kernel_signature_,
                                            phi_kernel_,
                                            dev_ctx_,
                                            ins,
                                            outs,
                                            attrs,
                                            default_attrs);
  } else {
    PreparedOpRunImpl<egr::EagerVariable>(op_,
                                          ctx_,
                                          kernel_type_,
                                          func_,
                                          arg_map_fn_,
                                          default_kernel_signature_,
                                          dev_ctx_,
                                          ins,
                                          outs,
                                          attrs,
                                          default_attrs);
  }
}

}  // namespace imperative
}  // namespace paddle