prepared_operator.cc 30.0 KB
Newer Older
J
Jiabin Yang 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/prepared_operator.h"
16

J
Jiabin Yang 已提交
17
#include "paddle/fluid/eager/eager_tensor.h"
18
#include "paddle/fluid/framework/data_type_transform.h"
19
#include "paddle/fluid/framework/details/nan_inf_utils.h"
20
#include "paddle/fluid/imperative/infer_shape_context.h"
21
#include "paddle/fluid/imperative/tracer.h"
22
#include "paddle/phi/common/int_array.h"
23
#include "paddle/phi/common/scalar.h"
24
#include "paddle/utils/small_vector.h"
Q
QingshuChen 已提交
25
#ifdef PADDLE_WITH_XPU
26
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
Q
QingshuChen 已提交
27
#endif
28 29 30
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_op_list.h"
#endif
L
Liu-xiandong 已提交
31
#include "paddle/fluid/framework/library_type.h"
32
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
C
chenjian 已提交
33
#include "paddle/fluid/platform/profiler/event_tracing.h"
C
chenjian 已提交
34
#include "paddle/fluid/platform/profiler/supplement_tracing.h"
35

36
DECLARE_bool(check_nan_inf);
37
DECLARE_bool(benchmark);
F
Feng Xing 已提交
38
DECLARE_bool(run_kp_kernel);
39

J
Jiabin Yang 已提交
40 41 42
namespace paddle {
namespace imperative {

43
// File-local placeholder singletons. They are passed where the framework
// interfaces require a kernel / RuntimeContext / Scope reference even though
// dygraph execution does not actually use one (see the empty_ctx / empty_scope
// arguments threaded through PrepareImpl and PreparedOpRunImpl below).
static const phi::Kernel empty_kernel;
static const framework::RuntimeContext empty_ctx({}, {});
static const framework::Scope empty_scope;
46

47 48 49 50 51 52 53
// Cache references to the process-wide phi singletons once, at static
// initialization time, so every Prepare() call below can reuse them instead
// of re-fetching the Instance() on each op invocation.
const phi::KernelFactory& PreparedOp::phi_kernel_factory =
    phi::KernelFactory::Instance();
const phi::OpUtilsMap& PreparedOp::phi_op_utils_map =
    phi::OpUtilsMap::Instance();
const phi::DefaultKernelSignatureMap& PreparedOp::default_phi_kernel_sig_map =
    phi::DefaultKernelSignatureMap::Instance();

54 55 56 57 58 59 60 61 62 63
// Unwrap a VarBase to the VariableWrapper it shares internally.
const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<paddle::imperative::VarBase>& var) {
  return var->SharedVar();
}

// Identity overload: a VariableWrapper is already the wrapper itself, so the
// two overloads let generic code accept either variable representation.
const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<VariableWrapper>& var) {
  return var;
}

64
// Return a read-only view of the dense tensor held by `var`:
//   - the tensor itself for DenseTensor variables,
//   - the underlying value tensor for SelectedRows variables,
//   - nullptr for every other variable type.
const phi::DenseTensor* GetTensorFromVar(const framework::Variable& var) {
  if (var.IsType<phi::DenseTensor>()) {
    return &(var.Get<phi::DenseTensor>());
  }
  if (var.IsType<phi::SelectedRows>()) {
    return &(var.Get<phi::SelectedRows>().value());
  }
  return nullptr;
}

74
// After a grad op runs, a gradient may have been promoted to a complex dtype
// (e.g. dx is complex64) while the corresponding forward var was real
// (float32). This walks every output var and converts such complex gradients
// back to the forward var's real dtype, in place.
template <typename VarType>
void HandleComplexGradToRealGrad(const NameVarMap<VarType>& outs) {
  for (auto& pair : outs) {
    for (auto& var : pair.second) {
      if (var == nullptr) {
        continue;
      }
      // Skip vars whose forward dtype was never recorded (-1 sentinel).
      if (var->ForwardDataType() ==
          static_cast<framework::proto::VarType::Type>(-1)) {
        VLOG(6) << "Var (" << var->Name()
                << ")'s forward data type is not set.";
        continue;
      }
      // Only convert when the grad is complex but the forward var was real.
      if (!framework::IsComplexType(var->DataType()) ||
          framework::IsComplexType(var->ForwardDataType())) {
        continue;
      }
      const auto* tensor = GetTensorFromVar(var->Var());
      if (tensor && tensor->IsInitialized()) {
        VLOG(6) << "Transform " << framework::DataTypeToString(var->DataType())
                << " var `" << var->Name() << "` to "
                << framework::DataTypeToString(var->ForwardDataType())
                << " real var in dynamic graph.";
        // Convert into a temporary, then write the result back into the var.
        phi::DenseTensor out;
        framework::TransComplexToReal(
            var->ForwardDataType(), var->DataType(), *tensor, &out);
        SetTensorToVariable(var->Var(), out, var->MutableVar());
      }
    }
  }
}

J
Jiabin Yang 已提交
106
// Eager-mode specialization: complex-to-real grad conversion is not yet
// implemented for egr::EagerVariable, so this is intentionally a no-op.
template <>
void HandleComplexGradToRealGrad<egr::EagerVariable>(
    const NameVarMap<egr::EagerVariable>& outs) {
  // TODO(jiabin): Support Complex here.
}

112 113 114 115 116
// Test-only entry point: exposes the (currently no-op) eager specialization
// above so unit tests can link against an explicit, non-template symbol.
void TestHandleComplexGradToRealGradEager(
    const NameVarMap<egr::EagerVariable>& outs) {
  HandleComplexGradToRealGrad<egr::EagerVariable>(outs);
}

J
Jiabin Yang 已提交
117 118
// Constructor for the fluid-kernel path: stores the selected OpKernelFunc and
// leaves phi_kernel_ bound to the shared empty_kernel placeholder
// (run_phi_kernel_ keeps its default, so Run() dispatches to func_).
PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const framework::OperatorWithKernel::OpKernelFunc& func,
                       const phi::ArgumentMappingFn* arg_map_fn,
                       const phi::KernelSignature* default_kernel_signature,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(func),
      dev_ctx_(dev_ctx),
      arg_map_fn_(arg_map_fn),
      default_kernel_signature_(default_kernel_signature),
      phi_kernel_(empty_kernel) {}
132

133 134 135
// Constructor for the phi-kernel path: sets run_phi_kernel_ = true so Run()
// dispatches through the phi kernel; func_ is deliberately null and the
// kernel signature is moved in to avoid a copy.
PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const phi::ArgumentMappingFn* arg_map_fn,
                       const phi::KernelSignature* default_kernel_signature,
                       phi::KernelSignature&& kernel_signature,
                       const phi::Kernel& phi_kernel,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(nullptr),
      dev_ctx_(dev_ctx),
      run_phi_kernel_(true),
      arg_map_fn_(arg_map_fn),
      default_kernel_signature_(default_kernel_signature),
      kernel_signature_(std::move(kernel_signature)),
      phi_kernel_(phi_kernel) {}
151

152
// Core kernel-selection routine shared by all PreparedOp::Prepare overloads.
// Resolves the expected kernel key from the op and inputs, prefers a phi
// kernel when one matches, and otherwise falls back to a fluid kernel —
// applying device-specific fallbacks (XPU / XPU-KP / NPU / IPU / MLU /
// custom device) down to CPU when the target device has no matching kernel.
// Returns a PreparedOp bound to the chosen kernel and device context.
template <typename VarType>
PreparedOp PrepareImpl(
    const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs,
    const framework::OperatorWithKernel& op,
    const platform::Place& place,
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs,
    const phi::KernelFactory& phi_kernel_factory,
    const phi::OpUtilsMap& phi_op_utils_map,
    const phi::DefaultKernelSignatureMap& default_phi_kernel_sig_map) {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

#ifdef PADDLE_WITH_MKLDNN
  // MKLDNN variant of code reads attributes in some of GetKernelTypeForVar and
  // GetKernelType functions, so we need to copy the attributes there.
  // Const qualifier of Attrs had to be discarded to overwrite it.
  if (FLAGS_use_mkldnn) {
    auto& mutable_op_attrs = const_cast<framework::AttributeMap&>(op.Attrs());
    mutable_op_attrs = default_attrs;
    for (auto& attr : attrs) {
      mutable_op_attrs[attr.first] = attr.second;
    }
  }
#endif
  // NOTE(zhiqiu): for kernels on given device, for example NPU, the order to
  // choose is:
  // phi npu kernel > fluid npu kernel > phi cpu kernel > fluid cpu kernel

  // 1. get expected kernel key
  auto dygraph_exe_ctx = DygraphExecutionContext<VarType>(
      op, empty_scope, *dev_ctx, empty_ctx, ins, outs, attrs, default_attrs);
  auto expected_kernel_key = op.GetExpectedKernelType(dygraph_exe_ctx);

  const phi::KernelSignature* default_kernel_signature = nullptr;
  phi::KernelSignature kernel_signature;
  phi::KernelKey phi_kernel_key;
  std::string phi_kernel_name;

// NOTE(jiahongyu): The registered MKLDNN kernel have library_type =
// LibraryType::kMKLDNN and data_layout_ = DataLayout::kMKLDNN. But the default
// values are kPlain, so we need to modify the library_type and data_layout_
// here. There are three statements in if condition:
// 1. Whether mkldnn kernel fallbacks to plain kernel;
// 2. Whether this op has specific implementation;
// 3. Whether mkldnn kernel can be used.
#ifdef PADDLE_WITH_MKLDNN
  if (!op.DnnFallback() && !paddle::platform::in_mkldnn_white_list(op.Type()) &&
      op.CanMKLDNNBeUsed(dygraph_exe_ctx, expected_kernel_key.data_type_)) {
    expected_kernel_key.library_type_ = framework::LibraryType::kMKLDNN;
    expected_kernel_key.data_layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

#if defined(PADDLE_WITH_XPU)
  // True when the op targets XPU but is either unsupported there or
  // explicitly blacklisted (the && binds tighter than the trailing ||).
  bool is_xpu_unsupport =
      paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
          !paddle::platform::is_xpu_support_op(op.Type(),
                                               expected_kernel_key) ||
      paddle::platform::is_in_xpu_black_list(op.Type());
#endif

  bool has_phi_kernel = false;

  // Prefer the op's registered argument-mapping fn; otherwise fall back to
  // the auto-generated default signature, if any.
  const auto* arg_map_fn = phi_op_utils_map.GetArgumentMappingFn(op.Type());

  if (arg_map_fn) {
    has_phi_kernel = true;
    kernel_signature = (*arg_map_fn)(
        framework::ExecutionArgumentMappingContext(dygraph_exe_ctx));
  } else {
    default_kernel_signature =
        default_phi_kernel_sig_map.GetNullable(op.Type());
    if (default_kernel_signature) {
      has_phi_kernel = true;
      kernel_signature = *default_kernel_signature;
    }
  }

  if (has_phi_kernel) {
    VLOG(6) << kernel_signature;
    phi_kernel_name = kernel_signature.name;
// NOTE(Liu-xiandong): The register kernel used KP have library_type[KP],
// But the default library_type is Plain, so we need to modify the
// library_type here, otherwise it can't work.
#ifdef PADDLE_WITH_XPU_KP
    if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
      bool use_xpu_kp_kernel_rt =
          FLAGS_run_kp_kernel && paddle::platform::is_xpu_kp_support_op(
                                     op.Type(), expected_kernel_key);
      bool use_xpu_kp_kernel_debug =
          paddle::platform::is_in_xpu_kpwhite_list(op.Type());
      if (use_xpu_kp_kernel_rt) {
        VLOG(3) << "phi xpu_kp using rt mode ";
      }
      if (use_xpu_kp_kernel_debug) {
        VLOG(3) << "phi xpu_kp using debug mode ";
      }
      bool is_xpu_kp_support =
          (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
      if (is_xpu_kp_support) {
        // Tentatively switch to the KP library type; revert if no phi kernel
        // is actually registered under the KP key.
        auto expected_kernel_key_library_type =
            expected_kernel_key.library_type_;
        expected_kernel_key.library_type_ = paddle::framework::LibraryType::kKP;
        VLOG(3) << "modifing XPU KP kernel: " << phi_kernel_name
                << ", using_kernel_key:" << expected_kernel_key;

        phi::KernelKey try_phi_kernel_key =
            TransOpKernelTypeToPhiKernelKey(expected_kernel_key);
        if (!phi_kernel_factory.HasKernel(phi_kernel_name,
                                          try_phi_kernel_key)) {
          expected_kernel_key.library_type_ = expected_kernel_key_library_type;
          VLOG(3) << "modify XPU KP kernel: " << phi_kernel_name
                  << " in dynamic graph is failed " << expected_kernel_key;
        } else {
          VLOG(3) << "modify XPU KP kernel: " << phi_kernel_name
                  << " in dynamic graph is succeed " << expected_kernel_key;
        }
      }
    }
#endif

    phi_kernel_key = TransOpKernelTypeToPhiKernelKey(expected_kernel_key);
    auto& phi_kernel =
        phi_kernel_factory.SelectKernel(phi_kernel_name, phi_kernel_key);

    if (phi_kernel.IsValid()
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
        && !is_xpu_unsupport
#endif
    ) {
      VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << phi_kernel_name
              << " | kernel key: " << phi_kernel_key
              << " | kernel: " << phi_kernel;

      // The expected key may name a different device than the caller's
      // `place`; rebind the device context accordingly.
      if (expected_kernel_key.place_ != place) {
        dev_ctx = pool.Get(expected_kernel_key.place_);
      }

      return PreparedOp(op,
                        empty_ctx,
                        expected_kernel_key,
                        arg_map_fn,
                        default_kernel_signature,
                        std::move(kernel_signature),
                        phi_kernel,
                        dev_ctx);
    } else {
      VLOG(6) << "Dynamic mode ChoosePhiKernel - kernel `" << phi_kernel_name
              << "` not found.";
    }
  }

  // 2. check if op[type] has kernel registered.
  auto& all_op_kernels = op.AllOpKernels();
  auto kernels_iter = all_op_kernels.find(op.Type());

// NOTE(Liu-xiandong): If we can't find heterogeneous kernel in phi,
// we need to select the heterogeneous kernel in fluid, but the kernel
// registered in KP use library_type[KP], we need to modify it.
#ifdef PADDLE_WITH_XPU_KP
  bool use_xpu_kp_kernel_rt =
      paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
      FLAGS_run_kp_kernel &&
      paddle::platform::is_xpu_kp_support_op(op.Type(), expected_kernel_key);
  bool use_xpu_kp_kernel_debug =
      paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
      paddle::platform::is_in_xpu_kpwhite_list(op.Type());
  bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
  if (is_xpu_kp_support) {
    expected_kernel_key.library_type_ = paddle::framework::LibraryType::kKP;
  }
#endif

  // If there is no usable fluid kernel either, try the phi CPU kernel as a
  // last resort before giving up.
  if ((kernels_iter == all_op_kernels.end() ||
       kernels_iter->second.find(expected_kernel_key) ==
           kernels_iter->second.end())
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
      || is_xpu_unsupport
#endif
#if defined(PADDLE_WITH_XPU_KP)
      || (is_xpu_unsupport && !is_xpu_kp_support)
#endif
  ) {
    if (has_phi_kernel) {
      auto phi_cpu_kernel_key =
          FallBackToCpu(expected_kernel_key, phi_kernel_key, op);
      auto& phi_cpu_kernel =
          phi_kernel_factory.SelectKernel(phi_kernel_name, phi_cpu_kernel_key);
      if (phi_cpu_kernel.IsValid()) {
        VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << phi_kernel_name
                << " | kernel key: " << phi_cpu_kernel_key
                << " | kernel: " << phi_cpu_kernel;
        auto* cpu_ctx = pool.Get(paddle::platform::CPUPlace());
        return PreparedOp(
            op,
            empty_ctx,
            framework::TransPhiKernelKeyToOpKernelType(phi_cpu_kernel_key),
            arg_map_fn,
            default_kernel_signature,
            std::move(kernel_signature),
            phi_cpu_kernel,
            cpu_ctx);
      }
    }
  }

  PADDLE_ENFORCE_NE(
      kernels_iter,
      all_op_kernels.end(),
      platform::errors::NotFound(
          "There are no kernels which are registered in the %s operator.",
          op.Type()));

  auto& kernels = kernels_iter->second;
  auto kernel_iter = kernels.find(expected_kernel_key);

#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
  if (paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
      (kernel_iter == kernels.end() || is_xpu_unsupport)) {
    VLOG(3) << "fluid missing XPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#ifdef PADDLE_WITH_XPU_KP
  if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
    if (use_xpu_kp_kernel_rt) {
      VLOG(3) << "fluid xpu_kp using rt mode ";
    }
    if (use_xpu_kp_kernel_debug) {
      VLOG(3) << "fluid xpu_kp using debug mode ";
    }
    if (is_xpu_kp_support) {
      expected_kernel_key.library_type_ = paddle::framework::LibraryType::kKP;
      kernel_iter = kernels.find(expected_kernel_key);
      VLOG(3) << "using fluid XPU KP kernel: " << op.Type()
              << ", using_kernel_key:" << expected_kernel_key;
    }
    if (!is_xpu_kp_support &&
        (kernel_iter == kernels.end() || is_xpu_unsupport)) {
      VLOG(3) << "fluid missing XPU kernel: " << op.Type()
              << ", expected_kernel_key:" << expected_kernel_key
              << ", fallbacking to CPU one!";
      expected_kernel_key.place_ = platform::CPUPlace();
      kernel_iter = kernels.find(expected_kernel_key);
    }
  }
#endif

#ifdef PADDLE_WITH_ASCEND_CL
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_npu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing NPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_IPU
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_ipu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing IPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_MLU
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_mlu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing MLU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_custom_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing " << place.GetDeviceType() << " kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  // TODO(jiabin): Add operator.cc's line 1000 part back when we need that
  // case
  PADDLE_ENFORCE_NE(
      kernel_iter,
      kernels.end(),
      platform::errors::NotFound("Operator %s does not have kernel for %s.",
                                 op.Type(),
                                 KernelTypeToString(expected_kernel_key)));

  if (!(expected_kernel_key.place_ == place)) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  return PreparedOp(op,
                    empty_ctx,
                    expected_kernel_key,
                    kernel_iter->second,
                    arg_map_fn,
                    default_kernel_signature,
                    dev_ctx);
}

468 469 470 471
// Prepare overload for VarBase variables: forwards to PrepareImpl with the
// cached phi singletons held as static members of PreparedOp.
PreparedOp PreparedOp::Prepare(const NameVarMap<VarBase>& ins,
                               const NameVarMap<VarBase>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs) {
  return PrepareImpl<VarBase>(ins,
                              outs,
                              op,
                              place,
                              attrs,
                              default_attrs,
                              phi_kernel_factory,
                              phi_op_utils_map,
                              default_phi_kernel_sig_map);
}

// Prepare overload for VariableWrapper variables: same forwarding shape as
// the VarBase overload above.
PreparedOp PreparedOp::Prepare(const NameVarMap<VariableWrapper>& ins,
                               const NameVarMap<VariableWrapper>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs) {
  return PrepareImpl<VariableWrapper>(ins,
                                      outs,
                                      op,
                                      place,
                                      attrs,
                                      default_attrs,
                                      phi_kernel_factory,
                                      phi_op_utils_map,
                                      default_phi_kernel_sig_map);
}

502 503
// Prepare overload for eager-mode EagerVariable variables: same forwarding
// shape as the two overloads above.
PreparedOp PreparedOp::Prepare(const NameVarMap<egr::EagerVariable>& ins,
                               const NameVarMap<egr::EagerVariable>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs) {
  return PrepareImpl<egr::EagerVariable>(ins,
                                         outs,
                                         op,
                                         place,
                                         attrs,
                                         default_attrs,
                                         phi_kernel_factory,
                                         phi_op_utils_map,
                                         default_phi_kernel_sig_map);
}
518 519
// Executes an op through the fluid kernel path: runs infer-shape, invokes the
// selected OpKernelFunc, then applies the flag-guarded NaN/Inf check, the
// benchmark sync, and the complex->real gradient fix-up. Both the
// infer-shape and compute phases are wrapped in RecordEvent scopes for the
// profiler.
template <typename VarType>
static void PreparedOpRunImpl(
    const framework::OperatorBase& op,
    const framework::RuntimeContext& ctx,
    const framework::OpKernelType& kernel_type,
    const framework::OperatorWithKernel::OpKernelFunc& func,
    const phi::ArgumentMappingFn* arg_map_fn,
    const phi::KernelSignature* default_kernel_signature,
    platform::DeviceContext* dev_ctx,
    const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs,
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  // TODO(zjl): remove scope in dygraph

  {
    platform::RecordEvent record_event("infer_shape",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    DygraphInferShapeContext<VarType> infer_shape_ctx(&ins,
                                                      &outs,
                                                      &attrs,
                                                      &default_attrs,
                                                      op.Type(),
                                                      &kernel_type,
                                                      arg_map_fn,
                                                      default_kernel_signature);
    op.Info().infer_shape_(&infer_shape_ctx);
    record_event.End();
    platform::RecordOpInfoSupplement(
        op.Type(), op.Attrs(), infer_shape_ctx, ctx);
  }

  {
    platform::RecordEvent record_event("compute",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);

    // The execution context is built on the fly; dygraph passes the shared
    // empty_scope since no real Scope exists here.
    func(DygraphExecutionContext<VarType>(
        op, empty_scope, *dev_ctx, ctx, ins, outs, attrs, default_attrs));
  }

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInfInDygraph<VarType>(
        op.Type(), outs, dev_ctx->GetPlace());
  }

  // Benchmark mode: synchronize the device so per-op timing is accurate, and
  // surface any pending GPU error immediately.
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  /**
   * [ Why need handle complex gradient to real gradient? ]
   *
   * After the introduction of complex number calculations, Ops that support
   * complex number calculations generally support type promotion, such as
   * x(float32) + y(complex64) = out(complex64), then the type of the grad
   * tensor should be dout(complex64), dx(float32), dy (complex64).
   *
   * But because the dout is complex64, the dx is also complex64 after
   * grad op kernel executed, we need to recognize this situation and
   * convert dx to float32 type. HandleComplexGradToRealGrad does this thing.
   */
  if (framework::IsComplexType(kernel_type.data_type_)) {
    HandleComplexGradToRealGrad<VarType>(outs);
  }
}
H
hong 已提交
591

592 593 594
// Executes an op through the phi kernel path: runs infer-shape, prepares the
// input data for the phi kernel, builds a phi::KernelContext, and invokes the
// kernel — followed by the same flag-guarded NaN/Inf check, benchmark sync,
// and complex->real gradient fix-up as the fluid path above.
template <typename VarType>
static void PreparedOpRunPtImpl(
    const framework::OperatorBase& op,
    const framework::OpKernelType& kernel_type,
    const phi::ArgumentMappingFn* arg_map_fn,
    const phi::KernelSignature* default_kernel_signature,
    const phi::KernelSignature& kernel_signature,
    const phi::Kernel& phi_kernel,
    platform::DeviceContext* dev_ctx,
    const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs,
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  {
    platform::RecordEvent record_event("infer_shape",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);
    DygraphInferShapeContext<VarType> infer_shape_ctx(&ins,
                                                      &outs,
                                                      &attrs,
                                                      &default_attrs,
                                                      op.Type(),
                                                      &kernel_type,
                                                      arg_map_fn,
                                                      default_kernel_signature);
    op.Info().infer_shape_(&infer_shape_ctx);
    record_event.End();
    platform::RecordOpInfoSupplement(
        op.Type(), op.Attrs(), infer_shape_ctx, kernel_signature);
  }

  {
    platform::RecordEvent record_event("compute",
                                       platform::TracerEventType::OperatorInner,
                                       1,
                                       platform::EventRole::kInnerOp);

    PreparePhiData<VarType>(phi_kernel, kernel_signature, ins);

    // Translate the dygraph ins/outs/attrs into the phi kernel's argument
    // layout, then call the kernel.
    phi::KernelContext phi_kernel_context;
    BuildDygraphPhiKernelContext<VarType>(kernel_signature,
                                          phi_kernel,
                                          ins,
                                          outs,
                                          attrs,
                                          default_attrs,
                                          dev_ctx,
                                          &phi_kernel_context);

    phi_kernel(&phi_kernel_context);
  }

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInfInDygraph<VarType>(
        op.Type(), outs, dev_ctx->GetPlace());
  }

  // Benchmark mode: synchronize so per-op timing is accurate and surface any
  // pending GPU error immediately.
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  // See the rationale comment in PreparedOpRunImpl for why complex grads may
  // need converting back to the forward var's real dtype.
  if (framework::IsComplexType(kernel_type.data_type_)) {
    HandleComplexGradToRealGrad<VarType>(outs);
  }
}

663 664
// Run overload for VarBase: dispatches to the phi path when a phi kernel was
// selected during Prepare (run_phi_kernel_), else to the fluid kernel path.
void PreparedOp::Run(const NameVarMap<VarBase>& ins,
                     const NameVarMap<VarBase>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_phi_kernel_) {
    PreparedOpRunPtImpl<VarBase>(op_,
                                 kernel_type_,
                                 arg_map_fn_,
                                 default_kernel_signature_,
                                 kernel_signature_,
                                 phi_kernel_,
                                 dev_ctx_,
                                 ins,
                                 outs,
                                 attrs,
                                 default_attrs);
  } else {
    PreparedOpRunImpl<VarBase>(op_,
                               ctx_,
                               kernel_type_,
                               func_,
                               arg_map_fn_,
                               default_kernel_signature_,
                               dev_ctx_,
                               ins,
                               outs,
                               attrs,
                               default_attrs);
  }
}
H
hong 已提交
693

694 695
// Run overload for VariableWrapper: same phi-vs-fluid dispatch as the
// VarBase overload above.
void PreparedOp::Run(const NameVarMap<VariableWrapper>& ins,
                     const NameVarMap<VariableWrapper>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_phi_kernel_) {
    PreparedOpRunPtImpl<VariableWrapper>(op_,
                                         kernel_type_,
                                         arg_map_fn_,
                                         default_kernel_signature_,
                                         kernel_signature_,
                                         phi_kernel_,
                                         dev_ctx_,
                                         ins,
                                         outs,
                                         attrs,
                                         default_attrs);
  } else {
    PreparedOpRunImpl<VariableWrapper>(op_,
                                       ctx_,
                                       kernel_type_,
                                       func_,
                                       arg_map_fn_,
                                       default_kernel_signature_,
                                       dev_ctx_,
                                       ins,
                                       outs,
                                       attrs,
                                       default_attrs);
  }
}

725 726
// Run overload for eager-mode EagerVariable: same phi-vs-fluid dispatch as
// the two overloads above.
void PreparedOp::Run(const NameVarMap<egr::EagerVariable>& ins,
                     const NameVarMap<egr::EagerVariable>& outs,
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
  if (run_phi_kernel_) {
    PreparedOpRunPtImpl<egr::EagerVariable>(op_,
                                            kernel_type_,
                                            arg_map_fn_,
                                            default_kernel_signature_,
                                            kernel_signature_,
                                            phi_kernel_,
                                            dev_ctx_,
                                            ins,
                                            outs,
                                            attrs,
                                            default_attrs);
  } else {
    PreparedOpRunImpl<egr::EagerVariable>(op_,
                                          ctx_,
                                          kernel_type_,
                                          func_,
                                          arg_map_fn_,
                                          default_kernel_signature_,
                                          dev_ctx_,
                                          ins,
                                          outs,
                                          attrs,
                                          default_attrs);
  }
}

J
Jiabin Yang 已提交
756 757
}  // namespace imperative
}  // namespace paddle