// prepared_operator.cc
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/prepared_operator.h"

#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/framework/data_type_transform.h"
#include "paddle/fluid/framework/details/nan_inf_utils.h"
#include "paddle/fluid/imperative/infer_shape_context.h"
#include "paddle/fluid/imperative/tracer.h"
#include "paddle/phi/common/int_array.h"
#include "paddle/phi/common/scalar.h"
#include "paddle/utils/small_vector.h"
#ifdef PADDLE_WITH_XPU
#include "paddle/fluid/platform/device/xpu/xpu_op_list.h"
#endif
#include "paddle/fluid/framework/library_type.h"
#include "paddle/fluid/platform/device/gpu/gpu_info.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"

DECLARE_bool(check_nan_inf);
DECLARE_bool(benchmark);
DECLARE_bool(run_kp_kernel);
namespace paddle {
namespace imperative {

39
// Placeholder kernel bound to PreparedOp::phi_kernel_ when the op runs
// through the legacy fluid kernel path (no phi kernel was selected).
static const phi::Kernel empty_kernel;
// Dygraph execution has no RuntimeContext/Scope of its own; these shared
// empty instances satisfy the static-graph style interfaces below.
static const framework::RuntimeContext empty_ctx({}, {});
static const framework::Scope empty_scope;

// Bind PreparedOp's static references to the process-wide singletons once,
// so PrepareImpl does not repeat the Instance() lookups per call.
const phi::KernelFactory& PreparedOp::phi_kernel_factory =
    phi::KernelFactory::Instance();
const phi::OpUtilsMap& PreparedOp::phi_op_utils_map =
    phi::OpUtilsMap::Instance();
const phi::DefaultKernelSignatureMap& PreparedOp::default_phi_kernel_sig_map =
    phi::DefaultKernelSignatureMap::Instance();

50 51 52 53 54 55 56 57 58 59
// Returns the VariableWrapper held inside a VarBase.
const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<paddle::imperative::VarBase>& var) {
  return var->SharedVar();
}

// Identity overload: a VariableWrapper is already the wrapper itself.
const std::shared_ptr<VariableWrapper>& GetVariableWrapper(
    const std::shared_ptr<VariableWrapper>& var) {
  return var;
}

J
Jiabin Yang 已提交
60 61 62
// Extracts the dense tensor stored in `var`: the LoDTensor itself, or the
// value tensor of a SelectedRows. Returns nullptr for any other payload type.
const framework::Tensor* GetTensorFromVar(const framework::Variable& var) {
  if (var.IsType<framework::LoDTensor>()) {
    return &(var.Get<framework::LoDTensor>());
  }
  if (var.IsType<phi::SelectedRows>()) {
    return &(var.Get<phi::SelectedRows>().value());
  }
  return nullptr;
}

70
// Casts complex-typed gradient outputs back to the real dtype recorded as
// each var's forward data type (e.g. dx complex64 -> float32 when the
// forward x was float32). Called after kernel execution when the kernel's
// data type is complex; see PreparedOpRunImpl/PreparedOpRunPtImpl.
template <typename VarType>
void HandleComplexGradToRealGrad(const NameVarMap<VarType>& outs) {
  for (auto& pair : outs) {
    for (auto& var : pair.second) {
      if (var == nullptr) {
        continue;
      }
      // -1 means the forward dtype was never recorded; nothing to restore.
      if (var->ForwardDataType() ==
          static_cast<framework::proto::VarType::Type>(-1)) {
        VLOG(6) << "Var (" << var->Name()
                << ")'s forward data type is not set.";
        continue;
      }
      // Only convert grads that are complex while their forward counterpart
      // was a real type; anything else is left untouched.
      if (!framework::IsComplexType(var->DataType()) ||
          framework::IsComplexType(var->ForwardDataType())) {
        continue;
      }
      const auto* tensor = GetTensorFromVar(var->Var());
      if (tensor && tensor->IsInitialized()) {
        VLOG(6) << "Transform " << framework::DataTypeToString(var->DataType())
                << " var `" << var->Name() << "` to "
                << framework::DataTypeToString(var->ForwardDataType())
                << " real var in dynamic graph.";
        framework::Tensor out;
        framework::TransComplexToReal(var->ForwardDataType(), var->DataType(),
                                      *tensor, &out);
        // Overwrite the grad var in place with the real-typed tensor.
        SetTensorToVariable(var->Var(), out, var->MutableVar());
      }
    }
  }
}

J
Jiabin Yang 已提交
102
// Eager-mode specialization: complex-to-real grad conversion is not yet
// implemented for egr::EagerVariable, so this is intentionally a no-op.
template <>
void HandleComplexGradToRealGrad<egr::EagerVariable>(
    const NameVarMap<egr::EagerVariable>& outs) {
  // TODO(jiabin): Support Complex here.
}

108 109 110 111 112
// Exposes the egr::EagerVariable specialization above to external callers
// (presumably unit tests, given the name — confirm against callers).
void TestHandleComplexGradToRealGradEager(
    const NameVarMap<egr::EagerVariable>& outs) {
  HandleComplexGradToRealGrad<egr::EagerVariable>(outs);
}

J
Jiabin Yang 已提交
113 114
// Constructor for the legacy fluid kernel path: stores the chosen
// OpKernelFunc; phi_kernel_ is bound to the shared empty kernel and
// run_phi_kernel_ keeps its default (false), so Run() takes the fluid path.
PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const framework::OperatorWithKernel::OpKernelFunc& func,
                       const phi::ArgumentMappingFn* arg_map_fn,
                       const phi::KernelSignature* default_kernel_signature,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(func),
      dev_ctx_(dev_ctx),
      arg_map_fn_(arg_map_fn),
      default_kernel_signature_(default_kernel_signature),
      phi_kernel_(empty_kernel) {}
128

129 130 131
// Constructor for the phi kernel path: records the selected phi kernel and
// its (moved-in) signature, and sets run_phi_kernel_ so Run() dispatches to
// the phi implementation; func_ stays null on this path.
PreparedOp::PreparedOp(const framework::OperatorBase& op,
                       const framework::RuntimeContext& ctx,
                       const framework::OpKernelType& kernel_type,
                       const phi::ArgumentMappingFn* arg_map_fn,
                       const phi::KernelSignature* default_kernel_signature,
                       phi::KernelSignature&& kernel_signature,
                       const phi::Kernel& phi_kernel,
                       platform::DeviceContext* dev_ctx)
    : op_(op),
      ctx_(ctx),
      kernel_type_(kernel_type),
      func_(nullptr),
      dev_ctx_(dev_ctx),
      run_phi_kernel_(true),
      arg_map_fn_(arg_map_fn),
      default_kernel_signature_(default_kernel_signature),
      kernel_signature_(std::move(kernel_signature)),
      phi_kernel_(phi_kernel) {}
147

148
// Selects the kernel that will execute `op` in dygraph mode and packages it
// into a PreparedOp. Preference (see NOTE below): phi kernel on the expected
// device > fluid kernel on that device > phi CPU kernel > fluid CPU kernel.
// Device-specific fallbacks (XPU, XPU-KP, NPU, MLU, custom devices) apply
// under their respective build flags.
template <typename VarType>
PreparedOp PrepareImpl(
    const NameVarMap<VarType>& ins, const NameVarMap<VarType>& outs,
    const framework::OperatorWithKernel& op, const platform::Place& place,
    const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs,
    const phi::KernelFactory& phi_kernel_factory,
    const phi::OpUtilsMap& phi_op_utils_map,
    const phi::DefaultKernelSignatureMap& default_phi_kernel_sig_map) {
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(place);

#ifdef PADDLE_WITH_MKLDNN
  // MKLDNN variant of code reads attributes in some of GetKernelTypeForVar and
  // GetKernelType functions, so we need to copy the attributes there.
  // Const qualifier of Attrs had to be discarded to overwrite it.
  if (FLAGS_use_mkldnn) {
    auto& mutable_op_attrs = const_cast<framework::AttributeMap&>(op.Attrs());
    mutable_op_attrs = default_attrs;
    for (auto& attr : attrs) {
      mutable_op_attrs[attr.first] = attr.second;
    }
  }
#endif
  // NOTE(zhiqiu): for kernels on given device, for example NPU, the order to
  // choose is:
  // phi npu kernel > fluid npu kernel > phi cpu kernel > fluid cpu kernel

  // 1. get expected kernel key
  auto dygraph_exe_ctx = DygraphExecutionContext<VarType>(
      op, empty_scope, *dev_ctx, empty_ctx, ins, outs, attrs, default_attrs);
  auto expected_kernel_key = op.GetExpectedKernelType(dygraph_exe_ctx);

  const phi::KernelSignature* default_kernel_signature = nullptr;
  phi::KernelSignature kernel_signature;
  phi::KernelKey pt_kernel_key;
  std::string pt_kernel_name;
#if defined(PADDLE_WITH_XPU)
  // True when the op targets XPU but has no XPU support (or is blacklisted);
  // triggers the CPU fallbacks below.
  bool is_xpu_unsupport =
      paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
          !paddle::platform::is_xpu_support_op(op.Type(),
                                               expected_kernel_key) ||
      paddle::platform::is_in_xpu_black_list(op.Type());

#endif

  bool has_phi_kernel = false;

  // Prefer the op's registered argument-mapping fn; otherwise fall back to
  // the auto-generated default signature, if any.
  const auto* arg_map_fn = phi_op_utils_map.GetArgumentMappingFn(op.Type());

  if (arg_map_fn) {
    has_phi_kernel = true;
    kernel_signature = (*arg_map_fn)(
        framework::ExecutionArgumentMappingContext(dygraph_exe_ctx));
  } else {
    default_kernel_signature =
        default_phi_kernel_sig_map.GetNullable(op.Type());
    if (default_kernel_signature) {
      has_phi_kernel = true;
      kernel_signature = *default_kernel_signature;
    }
  }

  if (has_phi_kernel) {
    VLOG(6) << kernel_signature;
    pt_kernel_name = kernel_signature.name;
// NOTE(Liu-xiandong): The register kernel used KP have library_type[KP],
// But the default library_type is Plain, so we need to modify the
// library_type here, otherwise it can't work.
#ifdef PADDLE_WITH_XPU_KP
    if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
      bool use_xpu_kp_kernel_rt =
          FLAGS_run_kp_kernel && paddle::platform::is_xpu_kp_support_op(
                                     op.Type(), expected_kernel_key);
      bool use_xpu_kp_kernel_debug =
          paddle::platform::is_in_xpu_kpwhite_list(op.Type());
      if (use_xpu_kp_kernel_rt) {
        VLOG(3) << "phi xpu_kp using rt mode ";
      }
      if (use_xpu_kp_kernel_debug) {
        VLOG(3) << "phi xpu_kp using debug mode ";
      }
      bool is_xpu_kp_support =
          (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
      if (is_xpu_kp_support) {
        // Tentatively switch to the KP library type; restore the original
        // library type if no KP kernel is actually registered.
        auto expected_kernel_key_library_type =
            expected_kernel_key.library_type_;
        expected_kernel_key.library_type_ = paddle::framework::LibraryType::kKP;
        VLOG(3) << "modifing XPU KP kernel: " << pt_kernel_name
                << ", using_kernel_key:" << expected_kernel_key;

        phi::KernelKey try_pt_kernel_key =
            TransOpKernelTypeToPhiKernelKey(expected_kernel_key);
        if (!phi_kernel_factory.HasKernel(pt_kernel_name, try_pt_kernel_key)) {
          expected_kernel_key.library_type_ = expected_kernel_key_library_type;
          VLOG(3) << "modify XPU KP kernel: " << pt_kernel_name
                  << " in dynamic graph is failed " << expected_kernel_key;
        } else {
          VLOG(3) << "modify XPU KP kernel: " << pt_kernel_name
                  << " in dynamic graph is succeed " << expected_kernel_key;
        }
      }
    }
#endif

    pt_kernel_key = TransOpKernelTypeToPhiKernelKey(expected_kernel_key);
    auto& phi_kernel =
        phi_kernel_factory.SelectKernel(pt_kernel_name, pt_kernel_key);

    if (phi_kernel.IsValid()
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
        && !is_xpu_unsupport
#endif
    ) {
      VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << pt_kernel_name
              << " | kernel key: " << pt_kernel_key
              << " | kernel: " << phi_kernel;

      // The expected place may differ from the caller's place (e.g. after a
      // fallback); fetch the matching device context.
      if (expected_kernel_key.place_ != place) {
        dev_ctx = pool.Get(expected_kernel_key.place_);
      }

      return PreparedOp(op, empty_ctx, expected_kernel_key, arg_map_fn,
                        default_kernel_signature, std::move(kernel_signature),
                        phi_kernel, dev_ctx);
    } else {
      VLOG(6) << "Dynamic mode ChoosePhiKernel - kernel `" << pt_kernel_name
              << "` not found.";
    }
  }

  // 2. check if op[type] has kernel registered.
  auto& all_op_kernels = op.AllOpKernels();
  auto kernels_iter = all_op_kernels.find(op.Type());

// NOTE(Liu-xiandong): If we can't find heterogeneous kernel in phi,
// we need to select the heterogeneous kernel in fluid, but the kernel
// registered in KP use library_type[KP], we need to modify it.
#ifdef PADDLE_WITH_XPU_KP
  bool use_xpu_kp_kernel_rt =
      paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
      FLAGS_run_kp_kernel &&
      paddle::platform::is_xpu_kp_support_op(op.Type(), expected_kernel_key);
  bool use_xpu_kp_kernel_debug =
      paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
      paddle::platform::is_in_xpu_kpwhite_list(op.Type());
  bool is_xpu_kp_support = (use_xpu_kp_kernel_rt || use_xpu_kp_kernel_debug);
  if (is_xpu_kp_support) {
    expected_kernel_key.library_type_ = paddle::framework::LibraryType::kKP;
  }
#endif

  // No fluid kernel for the expected key (or device-unsupported): try the
  // phi CPU kernel before giving up.
  if ((kernels_iter == all_op_kernels.end() ||
       kernels_iter->second.find(expected_kernel_key) ==
           kernels_iter->second.end())
#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
      || is_xpu_unsupport
#endif
#if defined(PADDLE_WITH_XPU_KP)
      || (is_xpu_unsupport && !is_xpu_kp_support)
#endif
  ) {
    if (has_phi_kernel) {
      auto pt_cpu_kernel_key =
          FallBackToCpu(expected_kernel_key, pt_kernel_key, op);
      auto& pt_cpu_kernel =
          phi_kernel_factory.SelectKernel(pt_kernel_name, pt_cpu_kernel_key);
      if (pt_cpu_kernel.IsValid()) {
        VLOG(6) << "Dynamic mode PrepareImpl - kernel name: " << pt_kernel_name
                << " | kernel key: " << pt_cpu_kernel_key
                << " | kernel: " << pt_cpu_kernel;
        auto* cpu_ctx = pool.Get(paddle::platform::CPUPlace());
        return PreparedOp(
            op, empty_ctx,
            framework::TransPhiKernelKeyToOpKernelType(pt_cpu_kernel_key),
            arg_map_fn, default_kernel_signature, std::move(kernel_signature),
            pt_cpu_kernel, cpu_ctx);
      }
    }
  }

  PADDLE_ENFORCE_NE(
      kernels_iter, all_op_kernels.end(),
      platform::errors::NotFound(
          "There are no kernels which are registered in the %s operator.",
          op.Type()));

  auto& kernels = kernels_iter->second;
  auto kernel_iter = kernels.find(expected_kernel_key);

#if defined(PADDLE_WITH_XPU) && !defined(PADDLE_WITH_XPU_KP)
  // XPU build: fall back to the CPU fluid kernel when XPU can't serve the op.
  if (paddle::platform::is_xpu_place(expected_kernel_key.place_) &&
      (kernel_iter == kernels.end() || is_xpu_unsupport)) {
    VLOG(3) << "fluid missing XPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif

#ifdef PADDLE_WITH_XPU_KP
  // XPU-KP build: prefer the KP kernel when supported, otherwise fall back
  // to CPU just like the plain XPU path.
  if (paddle::platform::is_xpu_place(expected_kernel_key.place_)) {
    if (use_xpu_kp_kernel_rt) {
      VLOG(3) << "fluid xpu_kp using rt mode ";
    }
    if (use_xpu_kp_kernel_debug) {
      VLOG(3) << "fluid xpu_kp using debug mode ";
    }
    if (is_xpu_kp_support) {
      expected_kernel_key.library_type_ = paddle::framework::LibraryType::kKP;
      kernel_iter = kernels.find(expected_kernel_key);
      VLOG(3) << "using fluid XPU KP kernel: " << op.Type()
              << ", using_kernel_key:" << expected_kernel_key;
    }
    if (!is_xpu_kp_support &&
        (kernel_iter == kernels.end() || is_xpu_unsupport)) {
      VLOG(3) << "fluid missing XPU kernel: " << op.Type()
              << ", expected_kernel_key:" << expected_kernel_key
              << ", fallbacking to CPU one!";
      expected_kernel_key.place_ = platform::CPUPlace();
      kernel_iter = kernels.find(expected_kernel_key);
    }
  }
#endif

#ifdef PADDLE_WITH_ASCEND_CL
  // NPU build: fall back to CPU when no NPU kernel is registered.
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_npu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing NPU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_MLU
  // MLU build: same CPU fallback.
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_mlu_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing MLU kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
#ifdef PADDLE_WITH_CUSTOM_DEVICE
  // Custom-device build: same CPU fallback.
  if (kernel_iter == kernels.end() &&
      paddle::platform::is_custom_place(expected_kernel_key.place_)) {
    VLOG(3) << "missing " << place.GetDeviceType() << " kernel: " << op.Type()
            << ", expected_kernel_key:" << expected_kernel_key
            << ", fallbacking to CPU one!";
    expected_kernel_key.place_ = platform::CPUPlace();
    kernel_iter = kernels.find(expected_kernel_key);
  }
#endif
  // TODO(jiabin): Add operator.cc's line 1000 part back when we need that
  // case
  PADDLE_ENFORCE_NE(kernel_iter, kernels.end(),
                    platform::errors::NotFound(
                        "Operator %s does not have kernel for %s.", op.Type(),
                        KernelTypeToString(expected_kernel_key)));

  // Refresh dev_ctx if any fallback above changed the place.
  if (!(expected_kernel_key.place_ == place)) {
    dev_ctx = pool.Get(expected_kernel_key.place_);
  }

  return PreparedOp(op, empty_ctx, expected_kernel_key, kernel_iter->second,
                    arg_map_fn, default_kernel_signature, dev_ctx);
}

419 420 421 422
// Prepare() for the classic dygraph VarBase path; forwards to PrepareImpl
// with the cached phi singletons.
PreparedOp PreparedOp::Prepare(const NameVarMap<VarBase>& ins,
                               const NameVarMap<VarBase>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs) {
  return PrepareImpl<VarBase>(ins, outs, op, place, attrs, default_attrs,
                              phi_kernel_factory, phi_op_utils_map,
                              default_phi_kernel_sig_map);
}

// Prepare() overload for VariableWrapper inputs/outputs.
PreparedOp PreparedOp::Prepare(const NameVarMap<VariableWrapper>& ins,
                               const NameVarMap<VariableWrapper>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs) {
  return PrepareImpl<VariableWrapper>(
      ins, outs, op, place, attrs, default_attrs, phi_kernel_factory,
      phi_op_utils_map, default_phi_kernel_sig_map);
}

441 442
// Prepare() overload for eager-mode EagerVariable inputs/outputs.
PreparedOp PreparedOp::Prepare(const NameVarMap<egr::EagerVariable>& ins,
                               const NameVarMap<egr::EagerVariable>& outs,
                               const framework::OperatorWithKernel& op,
                               const platform::Place& place,
                               const framework::AttributeMap& attrs,
                               const framework::AttributeMap& default_attrs) {
  return PrepareImpl<egr::EagerVariable>(
      ins, outs, op, place, attrs, default_attrs, phi_kernel_factory,
      phi_op_utils_map, default_phi_kernel_sig_map);
}
451 452 453
// Executes a prepared op through the legacy fluid kernel path: runs
// InferShape, invokes the OpKernelFunc, then performs optional nan/inf
// checking, benchmark synchronization, and complex->real grad conversion.
template <typename VarType>
static void PreparedOpRunImpl(
    const framework::OperatorBase& op, const framework::RuntimeContext& ctx,
    const framework::OpKernelType& kernel_type,
    const framework::OperatorWithKernel::OpKernelFunc& func,
    const phi::ArgumentMappingFn* arg_map_fn,
    const phi::KernelSignature* default_kernel_signature,
    platform::DeviceContext* dev_ctx, const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  // TODO(zjl): remove scope in dygraph

  {
    // Scoped so the profiler event covers only shape inference.
    platform::RecordEvent record_event("infer_shape",
                                       platform::TracerEventType::OperatorInner,
                                       1, platform::EventRole::kInnerOp);
    DygraphInferShapeContext<VarType> infer_shape_ctx(
        &ins, &outs, &attrs, &default_attrs, op.Type(), &kernel_type,
        arg_map_fn, default_kernel_signature);
    op.Info().infer_shape_(&infer_shape_ctx);
  }

  {
    // Scoped so the profiler event covers only the kernel invocation.
    platform::RecordEvent record_event("compute",
                                       platform::TracerEventType::OperatorInner,
                                       1, platform::EventRole::kInnerOp);

    func(DygraphExecutionContext<VarType>(op, empty_scope, *dev_ctx, ctx, ins,
                                          outs, attrs, default_attrs));
  }

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInfInDygraph<VarType>(
        op.Type(), outs, dev_ctx->GetPlace());
  }

  // Benchmark mode synchronizes after every op for accurate timing.
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  /**
   * [ Why need handle complex gradient to real gradient? ]
   *
   * After the introduction of complex number calculations, Ops that support
   * complex number calculations generally support type promotion, such as
   * x(float32) + y(complex64) = out(complex64), then the type of the grad
   * tensor should be dout(complex64), dx(float32), dy (complex64).
   *
   * But because the dout is complex64, the dx is also complex64 after
   * grad op kernel executed, we need to recognize this situation and
   * convert dx to float32 type. HandleComplexGradToRealGrad does this thing.
   */
  if (framework::IsComplexType(kernel_type.data_type_)) {
    HandleComplexGradToRealGrad<VarType>(outs);
  }
}
H
hong 已提交
511

512 513 514
// Executes a prepared op through the phi kernel path: runs InferShape,
// prepares input data for the phi kernel, builds a phi::KernelContext and
// invokes the kernel, then performs the same optional nan/inf checking,
// benchmark synchronization, and complex->real grad conversion as the
// fluid path above.
template <typename VarType>
static void PreparedOpRunPtImpl(
    const framework::OperatorBase& op,
    const framework::OpKernelType& kernel_type,
    const phi::ArgumentMappingFn* arg_map_fn,
    const phi::KernelSignature* default_kernel_signature,
    const phi::KernelSignature& kernel_signature, const phi::Kernel& phi_kernel,
    platform::DeviceContext* dev_ctx, const NameVarMap<VarType>& ins,
    const NameVarMap<VarType>& outs, const framework::AttributeMap& attrs,
    const framework::AttributeMap& default_attrs) {
  {
    // Scoped so the profiler event covers only shape inference.
    platform::RecordEvent record_event("infer_shape",
                                       platform::TracerEventType::OperatorInner,
                                       1, platform::EventRole::kInnerOp);
    DygraphInferShapeContext<VarType> infer_shape_ctx(
        &ins, &outs, &attrs, &default_attrs, op.Type(), &kernel_type,
        arg_map_fn, default_kernel_signature);
    op.Info().infer_shape_(&infer_shape_ctx);
  }

  {
    // Scoped so the profiler event covers only the kernel invocation.
    platform::RecordEvent record_event("compute",
                                       platform::TracerEventType::OperatorInner,
                                       1, platform::EventRole::kInnerOp);

    PreparePhiData<VarType>(phi_kernel, kernel_signature, ins);

    phi::KernelContext pt_kernel_context;
    BuildDygraphPhiKernelContext<VarType>(kernel_signature, phi_kernel, ins,
                                          outs, attrs, default_attrs, dev_ctx,
                                          &pt_kernel_context);

    phi_kernel(&pt_kernel_context);
  }

  if (FLAGS_check_nan_inf) {
    framework::details::CheckOpHasNanOrInfInDygraph<VarType>(
        op.Type(), outs, dev_ctx->GetPlace());
  }

  // Benchmark mode synchronizes after every op for accurate timing.
  if (FLAGS_benchmark) {
    dev_ctx->Wait();
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
    PADDLE_ENFORCE_GPU_SUCCESS(platform::GpuGetLastError());
    VLOG(4) << "Operator(" << op.Type() << "): context wait and get last error";
#endif
  }

  // See the explanatory comment in PreparedOpRunImpl for why complex grads
  // may need to be cast back to the real forward dtype.
  if (framework::IsComplexType(kernel_type.data_type_)) {
    HandleComplexGradToRealGrad<VarType>(outs);
  }
}

565 566
void PreparedOp::Run(const NameVarMap<VarBase>& ins,
                     const NameVarMap<VarBase>& outs,
567 568
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
569
  if (run_phi_kernel_) {
570 571 572
    PreparedOpRunPtImpl<VarBase>(op_, kernel_type_, arg_map_fn_,
                                 default_kernel_signature_, kernel_signature_,
                                 phi_kernel_, dev_ctx_, ins, outs, attrs,
573
                                 default_attrs);
574
  } else {
575 576 577
    PreparedOpRunImpl<VarBase>(op_, ctx_, kernel_type_, func_, arg_map_fn_,
                               default_kernel_signature_, dev_ctx_, ins, outs,
                               attrs, default_attrs);
578
  }
579
}
H
hong 已提交
580

581 582
void PreparedOp::Run(const NameVarMap<VariableWrapper>& ins,
                     const NameVarMap<VariableWrapper>& outs,
583 584
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
585
  if (run_phi_kernel_) {
586
    PreparedOpRunPtImpl<VariableWrapper>(
587 588 589
        op_, kernel_type_, arg_map_fn_, default_kernel_signature_,
        kernel_signature_, phi_kernel_, dev_ctx_, ins, outs, attrs,
        default_attrs);
590
  } else {
591 592 593
    PreparedOpRunImpl<VariableWrapper>(
        op_, ctx_, kernel_type_, func_, arg_map_fn_, default_kernel_signature_,
        dev_ctx_, ins, outs, attrs, default_attrs);
594
  }
J
Jiabin Yang 已提交
595 596
}

597 598
void PreparedOp::Run(const NameVarMap<egr::EagerVariable>& ins,
                     const NameVarMap<egr::EagerVariable>& outs,
J
Jiabin Yang 已提交
599 600
                     const framework::AttributeMap& attrs,
                     const framework::AttributeMap& default_attrs) {
601
  if (run_phi_kernel_) {
602
    PreparedOpRunPtImpl<egr::EagerVariable>(
603 604 605
        op_, kernel_type_, arg_map_fn_, default_kernel_signature_,
        kernel_signature_, phi_kernel_, dev_ctx_, ins, outs, attrs,
        default_attrs);
J
Jiabin Yang 已提交
606
  } else {
607 608 609
    PreparedOpRunImpl<egr::EagerVariable>(
        op_, ctx_, kernel_type_, func_, arg_map_fn_, default_kernel_signature_,
        dev_ctx_, ins, outs, attrs, default_attrs);
J
Jiabin Yang 已提交
610 611 612
  }
}

J
Jiabin Yang 已提交
613 614
}  // namespace imperative
}  // namespace paddle