// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/prepared_operator.h"

#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/imperative/infer_shape_context.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/utils/small_vector.h"
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
#endif
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_op_list.h"
#endif
#include "paddle/fluid/framework/library_type.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/fluid/platform/profiler/supplement_tracing.h"

DECLARE_bool(check_nan_inf);
DECLARE_bool(benchmark);
DECLARE_bool(run_kp_kernel);

namespace paddle {
namespace imperative {

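// Shared empty objects used as defaults: the fluid-kernel constructor stores
// empty_kernel as its phi kernel placeholder, and empty_ctx / empty_scope are
// passed wherever dygraph execution only needs an empty RuntimeContext or Scope.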
static const phi::Kernel empty_kernel;
static const framework::RuntimeContext empty_ctx({}, {});
static const framework::Scope empty_scope;

const phi::KernelFactory& PreparedOp::phi_kernel_factory =
    phi::KernelFactory::Instance();
const phi::OpUtilsMap& PreparedOp::phi_op_utils_map =
    phi::OpUtilsMap::Instance();
const phi::DefaultKernelSignatureMap& PreparedOp::default_phi_kernel_sig_map =
    phi::DefaultKernelSignatureMap::Instance();

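// These overloads let template code reach the underlying VariableWrapper
// uniformly, whether it is handed a VarBase or a VariableWrapper directly.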
const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<paddle::imperative::VarBase>& var) {
  return var->SharedVar();
}

const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<VariableWrapper>& var) {
  return var;
}

const phi::DenseTensor* GetTensorFromVar(const framework::Variable& var) {
  if (var.IsType<phi::DenseTensor>()) {
    return &(var.Get<phi::DenseTensor>());
  } else if (var.IsType<phi::SelectedRows>()) {
    return &(var.Get<phi::SelectedRows>().value());
  } else {
    return nullptr;
  }
}

template <typename VarType>
void HandleComplexGradToRealGrad(const NameVarMap<VarType>& outs) {
  for (auto& pair : outs) {
    for (auto& var : pair.second) {
      if (var == nullptr) {
        continue;
      }
      if (var->ForwardDataType() ==
          static_cast<framework::proto::VarType::Type>(-1)) {
        VLOG(6) << "Var (" << var->Name()
                << ")'s forward data type is not set.";
        continue;
      }
      if (!framework::IsComplexType(var->DataType()) ||
          framework::IsComplexType(var->ForwardDataType())) {
        continue;
      }
      const auto* tensor = GetTensorFromVar(var->Var());
      if (tensor && tensor->IsInitialized()) {
        VLOG(6) << "Transform " << framework::DataTypeToString(var->DataType())
                << " var `" << var->Name() << "` to "
                << framework::DataTypeToString(var->ForwardDataType())
                << " real var in dynamic graph.";
        phi::DenseTensor out;
        framework::TransComplexToReal(
            var->ForwardDataType(), var->DataType(), *tensor, &out);
        SetTensorToVariable(var->Var(), out, var->MutableVar());
      }
    }
  }
}

template <>
void HandleComplexGradToRealGrad<egr::EagerVariable>(
    const NameVarMap<egr::EagerVariable>& outs) {
  // TODO(jiabin): Support Complex here.
}

void TestHandleComplexGradToRealGradEager(
    const NameVarMap<egr::EagerVariable>& outs) {
  HandleComplexGradToRealGrad<egr::EagerVariable>(outs);
}

PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const framework::OperatorWithKernel::OpKernelFunc& func,
                       const phi::ArgumentMappingFn* arg_map_fn,
                       const phi::KernelSignature* default_kernel_signature,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(func),
      dev_ctx_(dev_ctx),
      arg_map_fn_(arg_map_fn),
      default_kernel_signature_(default_kernel_signature),
      phi_kernel_(empty_kernel) {}

PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const phi::ArgumentMappingFn* arg_map_fn,
                       const phi::KernelSignature* default_kernel_signature,
                       phi::KernelSignature&& kernel_signature,
                       const phi::Kernel& phi_kernel,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(nullptr),
      dev_ctx_(dev_ctx),
      run_phi_kernel_(true),
      arg_map_fn_(arg_map_fn),
      default_kernel_signature_(default_kernel_signature),
      kernel_signature_(std::move(kernel_signature)),
      phi_kernel_(phi_kernel) {}

#ifdef PADDLE_WITH_MLU

static void tokenize(const std::string& ops,
                     char delim,
                     std::unordered_set<std::string>* op_set) {
  std::string::size_type beg = 0;
  for (uint64_t end = 0; (end = ops.find(delim, end)) != std::string::npos;
       ++end) {
    op_set->insert(ops.substr(beg, end - beg));
    beg = end + 1;
  }

  op_set->insert(ops.substr(beg));
}
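// For example, an MLU_BLACK_LIST value of "concat,split" is tokenized above
// into the set {"concat", "split"}; the op names are purely illustrative.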

static bool is_in_mlu_black_list(const std::string& op_name) {
  static bool inited = false;
  static std::unordered_set<std::string> mlu_black_list;
  static std::mutex s_mtx;
  if (!inited) {
    std::lock_guard<std::mutex> guard(s_mtx);
    if (!inited) {
      if (std::getenv("MLU_BLACK_LIST") != nullptr) {
        std::string ops(std::getenv("MLU_BLACK_LIST"));
        tokenize(ops, ',', &mlu_black_list);
      }
      inited = true;
      VLOG(3) << "MLU Black List: ";
      for (auto iter = mlu_black_list.begin(); iter != mlu_black_list.end();
           ++iter) {
        VLOG(3) << *iter << " ";
      }
    }
  }
  if (mlu_black_list.find(op_name) != mlu_black_list.end()) {
    return true;
  }
  return false;
}

#endif

template <typename VarType>
PreparedOp PrepareImpl(
    const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs,
    const framework::OperatorWithKernel& op,
    const platform::Place& place,
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs,
    const phi::KernelFactory& phi_kernel_factory,
    const phi::OpUtilsMap& phi_op_utils_map,
    const phi::DefaultKernelSignatureMap& default_phi_kernel_sig_map) {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

#ifdef PADDLE_WITH_MKLDNN
  // The MKLDNN variant of the code reads attributes in some GetKernelTypeForVar
  // and GetKernelType functions, so we need to copy the attributes there.
  // The const qualifier of Attrs() has to be cast away to overwrite them.
  if (FLAGS_use_mkldnn) {
    auto& mutable_op_attrs = const_cast<framework::AttributeMap&>(op.Attrs());
    mutable_op_attrs = default_attrs;
    for (auto& attr : attrs) {
      mutable_op_attrs[attr.first] = attr.second;
    }
  }
#endif
  // NOTE(zhiqiu): for kernels on a given device, for example NPU, the order of
  // choice is:
  // phi npu kernel > fluid npu kernel > phi cpu kernel > fluid cpu kernel

  // 1. get expected kernel key
  auto dygraph_exe_ctx = DygraphExecutionContext<VarType>(
      op, empty_scope, *dev_ctx, empty_ctx, ins, outs, attrs, default_attrs);
  auto expected_kernel_key = op.GetExpectedKernelType(dygraph_exe_ctx);

  const phi::KernelSignature* default_kernel_signature = nullptr;
  phi::KernelSignature kernel_signature;
  phi::KernelKey phi_kernel_key;
  std::string phi_kernel_name;

// NOTE(jiahongyu): Registered MKLDNN kernels have library_type =
// LibraryType::kMKLDNN and data_layout_ = DataLayout::ONEDNN, but the default
// values are kPlain, so we need to modify the library_type and data_layout_
// here. The if condition below checks three things:
// 1. whether the mkldnn kernel falls back to a plain kernel;
// 2. whether this op has a specific implementation;
// 3. whether the mkldnn kernel can be used.
#ifdef PADDLE_WITH_MKLDNN
  if (!op.DnnFallback() && !paddle::platform::in_mkldnn_white_list(op.Type()) &&
      op.CanMKLDNNBeUsed(dygraph_exe_ctx, expected_kernel_key.data_type_)) {
    expected_kernel_key.library_type_ = framework::LibraryType::kMKLDNN;
    expected_kernel_key.data_layout_ = framework::DataLayout::ONEDNN;
  }
#endif

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (op.CanCUDNNBeUsed(dygraph_exe_ctx, expected_kernel_key.data_type_)) {
    expected_kernel_key.library_type_ = framework::LibraryType::kCUDNN;
  }
#endif

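// Note: is_xpu_unsupport (computed below) is true when the op cannot use the
// expected XPU kernel key, either because XPU does not support the op for this
// key or because the op is on the XPU black list; it drives the CPU fallbacks
// later in this function.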
#if defined(PADDLE_WITH_XPU)
  bool is_xpu_unsupport =
      paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
          !paddle::platform::is_xpu_support_op(op.Type(),
                                               expected_kernel_key) ||
      paddle::platform::is_in_xpu_black_list(op.Type());
#endif

#ifdef PADDLE_WITH_MLU
  if (is_in_mlu_black_list(op.Type())) {
    expected_kernel_key.place_ = platform::CPUPlace();
  }
#endif

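  // A phi kernel signature can come from either the op's own argument mapping
  // function or, failing that, the default signature map; has_phi_kernel
  // records whether either lookup succeeded.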
  bool has_phi_kernel = false;

  const auto* arg_map_fn = phi_op_utils_map.GetArgumentMappingFn(op.Type());

  if (arg_map_fn) {
    has_phi_kernel = true;
    kernel_signature = (*arg_map_fn)(
        framework::ExecutionArgumentMappingContext(dygraph_exe_ctx));
  } else {
    default_kernel_signature =
        default_phi_kernel_sig_map.GetNullable(op.Type());
    if (default_kernel_signature) {
      has_phi_kernel = true;
      kernel_signature = *default_kernel_signature;
    }
  }

  if (has_phi_kernel) {
    VLOG(6) << kernel_signature;
    phi_kernel_name = kernel_signature.name;
// NOTE(Liu-xiandong): Kernels registered with KP have library_type[KP], but the
// default library_type is Plain, so we need to modify the library_type here,
// otherwise the kernel cannot be found.
#ifdef PADDLE_WITH_XPU_KP
    if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
      bool use_xpu_kp_kernel_rt =
          FLAGS_run_kp_kernel && paddle::platform::is_xpu_kp_support_op(
                                     op.Type(), expected_kernel_key);
      bool use_xpu_kp_kernel_debug =
          paddle::platform::is_in_xpu_kpwhite_list(op.Type());
      if (use_xpu_kp_kernel_rt) {
        VLOG(3) << "phi xpu_kp using rt mode ";
      }
      if (use_xpu_kp_kernel_debug) {
        VLOG(3) << "phi xpu_kp using debug mode ";
      }
      bool is_xpu_kp_support =
          (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
      if (is_xpu_kp_support) {
        auto expected_kernel_key_library_type =
            expected_kernel_key.library_type_;
        expected_kernel_key.library_type_ = paddle::framework::LibraryType::kKP;
        VLOG(3) << "modifying XPU KP kernel: " << phi_kernel_name
                << ", using_kernel_key:" << expected_kernel_key;

        phi::KernelKey try_phi_kernel_key =
            TransOpKernelTypeToPhiKernelKey(expected_kernel_key);
        if (!phi_kernel_factory.HasKernel(phi_kernel_name,
                                          try_phi_kernel_key)) {
          expected_kernel_key.library_type_ = expected_kernel_key_library_type;
          VLOG(3) << "modifying XPU KP kernel: " << phi_kernel_name
                  << " in dynamic graph failed " << expected_kernel_key;
        } else {
          VLOG(3) << "modifying XPU KP kernel: " << phi_kernel_name
                  << " in dynamic graph succeeded " << expected_kernel_key;
        }
      }
    }
#endif

    phi_kernel_key = TransOpKernelTypeToPhiKernelKey(expected_kernel_key);
    auto& phi_kernel =
        phi_kernel_factory.SelectKernel(phi_kernel_name, phi_kernel_key);

    if (phi_kernel.IsValid()
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
        && !is_xpu_unsupport
#endif
    ) {
      VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << phi_kernel_name
              << " | kernel key: " << phi_kernel_key
              << " | kernel: " << phi_kernel;

      if (expected_kernel_key.place_ != place) {
        dev_ctx = pool.Get(expected_kernel_key.place_);
      }

      return PreparedOp(op,
                        empty_ctx,
                        expected_kernel_key,
                        arg_map_fn,
                        default_kernel_signature,
                        std::move(kernel_signature),
                        phi_kernel,
                        dev_ctx);
    } else {
      VLOG(6) << "Dynamic mode ChoosePhiKernel - kernel `" << phi_kernel_name
              << "` not found.";
    }
  }

  // 2. check if op[type] has a kernel registered.
  auto& all_op_kernels = op.AllOpKernels();
  auto kernels_iter = all_op_kernels.find(op.Type());

// NOTE(Liu-xiandong): If we can't find a heterogeneous kernel in phi, we need
// to select the heterogeneous kernel in fluid; but kernels registered with KP
// use library_type[KP], so we need to modify it here.
#ifdef PADDLE_WITH_XPU_KP
  bool use_xpu_kp_kernel_rt =
      paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
      FLAGS_run_kp_kernel &&
      paddle::platform::is_xpu_kp_support_op(op.Type(), expected_kernel_key);
  bool use_xpu_kp_kernel_debug =
      paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
      paddle::platform::is_in_xpu_kpwhite_list(op.Type());
  bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
  if (is_xpu_kp_support) {
    expected_kernel_key.library_type_ = paddle::framework::LibraryType::kKP;
  }
#endif

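  // If no matching fluid kernel is registered (or the target device cannot run
  // it), first try falling back to the phi CPU kernel before giving up.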
  if ((kernels_iter == all_op_kernels.end() ||
       kernels_iter->second.find(expected_kernel_key) ==
           kernels_iter->second.end())
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
      || is_xpu_unsupport
#endif
#if defined(PADDLE_WITH_XPU_KP)
      || (is_xpu_unsupport && !is_xpu_kp_support)
#endif
  ) {
    if (has_phi_kernel) {
      auto phi_cpu_kernel_key =
          FallBackToCpu(expected_kernel_key, phi_kernel_key, op);
      auto& phi_cpu_kernel =
          phi_kernel_factory.SelectKernel(phi_kernel_name, phi_cpu_kernel_key);
      if (phi_cpu_kernel.IsValid()) {
        VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << phi_kernel_name
                << " | kernel key: " << phi_cpu_kernel_key
                << " | kernel: " << phi_cpu_kernel;
        auto* cpu_ctx = pool.Get(paddle::platform::CPUPlace());
        return PreparedOp(
            op,
            empty_ctx,
            framework::TransPhiKernelKeyToOpKernelType(phi_cpu_kernel_key),
            arg_map_fn,
            default_kernel_signature,
            std::move(kernel_signature),
            phi_cpu_kernel,
            cpu_ctx);
      }
    }
  }

  PADDLE_ENFORCE_NE(
      kernels_iter,
      all_op_kernels.end(),
      platform::errors::NotFound(
          "There are no kernels which are registered in the %s operator.",
          op.Type()));

  auto& kernels = kernels_iter->second;
  auto kernel_iter = kernels.find(expected_kernel_key);

#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
  if (paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
      (kernel_iter == kernels.end() || is_xpu_unsupport)) {
    VLOG(3) << "fluid missing XPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to the CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#ifdef PADDLE_WITH_XPU_KP
  if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
    if (use_xpu_kp_kernel_rt) {
      VLOG(3) << "fluid xpu_kp using rt mode ";
    }
    if (use_xpu_kp_kernel_debug) {
      VLOG(3) << "fluid xpu_kp using debug mode ";
    }
    if (is_xpu_kp_support) {
      expected_kernel_key.library_type_ = paddle::framework::LibraryType::kKP;
      kernel_iter = kernels.find(expected_kernel_key);
      VLOG(3) << "using fluid XPU KP kernel: " << op.Type()
              << ", using_kernel_key:" << expected_kernel_key;
    }
    if (!is_xpu_kp_support &&
        (kernel_iter == kernels.end() || is_xpu_unsupport)) {
      VLOG(3) << "fluid missing XPU kernel: " << op.Type()
              << ", expected_kernel_key:" << expected_kernel_key
              << ", falling back to the CPU one!";
      expected_kernel_key.place_ = platform::CPUPlace();
      kernel_iter = kernels.find(expected_kernel_key);
    }
  }
#endif

#ifdef PADDLE_WITH_ASCEND_CL
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_npu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing NPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", falling back to the CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_IPU
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_ipu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing IPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_MLU
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_mlu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing MLU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_custom_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing " << place.GetDeviceType() << " kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  // TODO(jiabin): Add operator.cc's line 1000 part back when we need that
  // case
  PADDLE_ENFORCE_NE(
      kernel_iter,
      kernels.end(),
      platform::errors::NotFound("Operator %s does not have kernel for %s.",
                                 op.Type(),
                                 KernelTypeToString(expected_kernel_key)));

  if (!(expected_kernel_key.place_ == place)) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  return PreparedOp(op,
                    empty_ctx,
                    expected_kernel_key,
                    kernel_iter->second,
                    arg_map_fn,
                    default_kernel_signature,
                    dev_ctx);
}

PreparedOp PreparedOp::Prepare(const NameVarMap<VarBase>& ins,
                               const NameVarMap<VarBase>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs) {
  return PrepareImpl<VarBase>(ins,
                              outs,
                              op,
                              place,
                              attrs,
                              default_attrs,
                              phi_kernel_factory,
                              phi_op_utils_map,
536
                              default_phi_kernel_sig_map);
537 538 539 540 541 542
}

PreparedOp PreparedOp::Prepare(const NameVarMap<VariableWrapper>& ins,
                               const NameVarMap<VariableWrapper>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs) {
  return PrepareImpl<VariableWrapper>(ins,
                                      outs,
                                      op,
                                      place,
                                      attrs,
                                      default_attrs,
                                      phi_kernel_factory,
                                      phi_op_utils_map,
                                      default_phi_kernel_sig_map);
}

PreparedOp PreparedOp::Prepare(const NameVarMap<egr::EagerVariable>& ins,
                               const NameVarMap<egr::EagerVariable>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs) {
  return PrepareImpl<egr::EagerVariable>(ins,
                                         outs,
                                         op,
                                         place,
                                         attrs,
                                         default_attrs,
                                         phi_kernel_factory,
                                         phi_op_utils_map,
                                         default_phi_kernel_sig_map);
}
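// Runs the op through the fluid kernel path: infer shape first, then the
// registered OpKernelFunc, followed by optional nan/inf checking, benchmark
// synchronization, and complex-to-real gradient handling.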
template <typename VarType>
static void PreparedOpRunImpl(
    const framework::OperatorBase& op,
    const framework::RuntimeContext& ctx,
    const framework::OpKernelType& kernel_type,
    const framework::OperatorWithKernel::OpKernelFunc& func,
    const phi::ArgumentMappingFn* arg_map_fn,
    const phi::KernelSignature* default_kernel_signature,
    platform::DeviceContext* dev_ctx,
    const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs,
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  // TODO(zjl): remove scope in dygraph

  {
    platform::RecordEvent record_event("infer_shape",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    DygraphInferShapeContext<VarType> infer_shape_ctx(&ins,
                                                      &outs,
                                                      &attrs,
                                                      &default_attrs,
                                                      op.Type(),
                                                      &kernel_type,
                                                      arg_map_fn,
                                                      default_kernel_signature);
    op.Info().infer_shape_(&infer_shape_ctx);
    record_event.End();
    platform::RecordOpInfoSupplement(
        op.Type(), op.Attrs(), infer_shape_ctx, ctx);
  }

  {
    platform::RecordEvent record_event("compute",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);

    func(DygraphExecutionContext<VarType>(
        op, empty_scope, *dev_ctx, ctx, ins, outs, attrs, default_attrs));
  }

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInfInDygraph<VarType>(
        op.Type(), outs, dev_ctx->GetPlace());
  }

  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  /**
   * [ Why do we need to convert complex gradients to real gradients? ]
   *
   * After the introduction of complex number calculations, ops that support
   * complex numbers generally support type promotion, such as
   * x(float32) + y(complex64) = out(complex64); the types of the grad
   * tensors should then be dout(complex64), dx(float32), dy(complex64).
   *
   * But because dout is complex64, dx is also complex64 after the
   * grad op kernel has executed, so we need to recognize this situation and
   * convert dx back to float32. HandleComplexGradToRealGrad does this.
   */
  if (framework::IsComplexType(kernel_type.data_type_)) {
    HandleComplexGradToRealGrad<VarType>(outs);
  }
}

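// Runs the op through the phi kernel path: infer shape, prepare the phi inputs,
// build a phi::KernelContext, and invoke the selected phi kernel, with the same
// nan/inf, benchmark, and complex-to-real handling as the fluid path.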
template <typename VarType>
static void PreparedOpRunPtImpl(
    const framework::OperatorBase& op,
    const framework::OpKernelType& kernel_type,
    const phi::ArgumentMappingFn* arg_map_fn,
    const phi::KernelSignature* default_kernel_signature,
    const phi::KernelSignature& kernel_signature,
    const phi::Kernel& phi_kernel,
    platform::DeviceContext* dev_ctx,
    const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs,
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  {
    platform::RecordEvent record_event("infer_shape",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    DygraphInferShapeContext<VarType> infer_shape_ctx(&ins,
                                                      &outs,
                                                      &attrs,
                                                      &default_attrs,
                                                      op.Type(),
                                                      &kernel_type,
                                                      arg_map_fn,
                                                      default_kernel_signature);
    op.Info().infer_shape_(&infer_shape_ctx);
    record_event.End();
    platform::RecordOpInfoSupplement(
        op.Type(), op.Attrs(), infer_shape_ctx, kernel_signature);
  }

  {
    platform::RecordEvent record_event("compute",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);

    PreparePhiData<VarType>(phi_kernel, kernel_signature, ins);

    phi::KernelContext phi_kernel_context;
    BuildDygraphPhiKernelContext<VarType>(kernel_signature,
                                          phi_kernel,
                                          ins,
                                          outs,
                                          attrs,
                                          default_attrs,
                                          dev_ctx,
                                          &phi_kernel_context);

    phi_kernel(&phi_kernel_context);
  }

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInfInDygraph<VarType>(
        op.Type(), outs, dev_ctx->GetPlace());
  }

  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  if (framework::IsComplexType(kernel_type.data_type_)) {
    HandleComplexGradToRealGrad<VarType>(outs);
  }
}

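// The Run() overloads below dispatch to the phi kernel path or to the legacy
// fluid kernel path, depending on whether PrepareImpl selected a valid phi
// kernel (run_phi_kernel_).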
void PreparedOp::Run(const NameVarMap<VarBase>& ins,
                     const NameVarMap<VarBase>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_phi_kernel_) {
    PreparedOpRunPtImpl<VarBase>(op_,
                                 kernel_type_,
                                 arg_map_fn_,
                                 default_kernel_signature_,
                                 kernel_signature_,
                                 phi_kernel_,
                                 dev_ctx_,
                                 ins,
                                 outs,
                                 attrs,
732
                                 default_attrs);
733
  } else {
734 735 736 737 738 739 740 741 742 743 744
    PreparedOpRunImpl<VarBase>(op_,
                               ctx_,
                               kernel_type_,
                               func_,
                               arg_map_fn_,
                               default_kernel_signature_,
                               dev_ctx_,
                               ins,
                               outs,
                               attrs,
                               default_attrs);
  }
}

void PreparedOp::Run(const NameVarMap<VariableWrapper>& ins,
                     const NameVarMap<VariableWrapper>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_phi_kernel_) {
    PreparedOpRunPtImpl<VariableWrapper>(op_,
                                         kernel_type_,
                                         arg_map_fn_,
                                         default_kernel_signature_,
                                         kernel_signature_,
                                         phi_kernel_,
                                         dev_ctx_,
                                         ins,
                                         outs,
                                         attrs,
                                         default_attrs);
  } else {
    PreparedOpRunImpl<VariableWrapper>(op_,
                                       ctx_,
                                       kernel_type_,
                                       func_,
                                       arg_map_fn_,
                                       default_kernel_signature_,
                                       dev_ctx_,
                                       ins,
                                       outs,
                                       attrs,
                                       default_attrs);
  }
}

void PreparedOp::Run(const NameVarMap<egr::EagerVariable>& ins,
                     const NameVarMap<egr::EagerVariable>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_phi_kernel_) {
    PreparedOpRunPtImpl<egr::EagerVariable>(op_,
                                            kernel_type_,
                                            arg_map_fn_,
                                            default_kernel_signature_,
                                            kernel_signature_,
                                            phi_kernel_,
                                            dev_ctx_,
                                            ins,
                                            outs,
                                            attrs,
                                            default_attrs);
  } else {
    PreparedOpRunImpl<egr::EagerVariable>(op_,
                                          ctx_,
                                          kernel_type_,
                                          func_,
                                          arg_map_fn_,
                                          default_kernel_signature_,
                                          dev_ctx_,
                                          ins,
                                          outs,
                                          attrs,
                                          default_attrs);
  }
}

}  // namespace imperative
}  // namespace paddle