// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
2 3 4 5 6 7 8 9 10 11 12 13 14
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/imperative/tracer.h"
15

16
#include <map>
H
hong 已提交
17
#include <set>
M
minqiyang 已提交
18
#include <unordered_set>
19
#include <utility>
20

21
#include "paddle/fluid/framework/op_registry.h"
22
#include "paddle/fluid/imperative/amp_auto_cast.h"
23
#include "paddle/fluid/imperative/execution_context.h"
24
#include "paddle/fluid/imperative/layout_autotune.h"
25
#include "paddle/fluid/imperative/op_base.h"
26
#include "paddle/fluid/operators/ops_extra_info.h"
27
#include "paddle/fluid/platform/denormal.h"
28
#include "paddle/fluid/platform/device/device_wrapper.h"
C
chengduo 已提交
29
#include "paddle/fluid/platform/profiler.h"
30
#include "paddle/fluid/platform/profiler/event_tracing.h"
31
#include "paddle/fluid/string/string_helper.h"
32
#include "paddle/phi/common/place.h"
33

34
DECLARE_bool(use_mkldnn);
35 36
DECLARE_string(tracer_mkldnn_ops_on);
DECLARE_string(tracer_mkldnn_ops_off);
37

38
namespace paddle {
M
minqiyang 已提交
39 40
namespace imperative {

41 42
// Thread-local tracer state: each thread tracing ops keeps its own copy of
// these flags, so concurrent tracing threads do not interfere.

// When true, traced ops are also recorded into the ProgramDesc via
// program_desc_tracer_ (see TraceOpImpl).
thread_local bool Tracer::enable_program_desc_tracing_ = false;

// Default value for trace_backward used by the NameVarBaseMap TraceOp
// overload; controls whether grad op nodes are created.
thread_local bool Tracer::has_grad_ = true;

// Current auto-mixed-precision level (O0 = off); consulted in TraceOpImpl
// to decide whether inputs are auto-cast before running the op.
thread_local AmpLevel Tracer::amp_level_ = AmpLevel::O0;

// Target dtype for AMP casting (FLOAT16 or BFLOAT16 enable casting paths).
thread_local phi::DataType Tracer::amp_dtype_ = phi::DataType::FLOAT32;
48

49 50 51 52 53 54 55 56 57
// Process-wide current tracer. NOTE(review): this global is not thread_local,
// unlike the flags above — confirm whether concurrent SetCurrentTracer calls
// are expected.
static std::shared_ptr<Tracer> g_current_tracer(nullptr);

// Returns the tracer most recently installed via SetCurrentTracer (may hold
// nullptr if none was set).
const std::shared_ptr<Tracer>& GetCurrentTracer() { return g_current_tracer; }

// Installs `tracer` as the current global tracer and logs the stored pointer.
void SetCurrentTracer(const std::shared_ptr<Tracer>& tracer) {
  g_current_tracer = tracer;
  VLOG(6) << "Set current tracer: " << g_current_tracer;
}

58
void PassStopGradient(const NameVarBaseMap& outs, bool generate_grad) {
59 60 61 62 63 64 65 66 67 68 69 70
  for (const auto& pair : outs) {
    for (const auto& var : pair.second) {
      // NOTE(zhiqiu): this happends when None output are passed from python
      // side. For example, fake_quantize_dequantize_moving_average_abs_max may
      // pass None OutAccum in eval mode.
      // It can be refined by generate several different pybind interface for
      // one operator with different function signature.
      if (var == nullptr) {
        VLOG(4) << pair.first << " is NULL";
        continue;
      }
      VLOG(6) << "Set output: " << var->Name() << "'s OverridedStopGradient as "
71
              << generate_grad;
72
      var->InnerSetOverridedStopGradient(generate_grad);
73 74 75 76
    }
  }
}

77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102
// Keeps `var` alive until the async copy touching it has finished on device.
// Works by registering a no-op callback that captures the shared_ptr; the
// garbage collector releases the callback (and thus the reference) only after
// the kernels queued on the current stream of the chosen place complete.
void IncreaseVarbaseReferenceCountUntilCopyComplete(
    const std::shared_ptr<imperative::VarBase>& var,
    const platform::Place& place) {
  // Note(zhiqiu): Follow the logic of TensorCopy to determine the place that we
  // need to add callback, see tensor_utils.cc:245
  auto place_ = platform::is_gpu_place(place) ? place : var->Place();

  // NOTE(review): assumes a current tracer has been set; GetCurrentTracer()
  // returning null would crash here — confirm callers guarantee this.
  auto tracer = imperative::GetCurrentTracer();
  auto gc = tracer->MutableGarbageCollectorIfNotExists(place_);

  // Note(zhiqiu): This is an empty callback, the only way is to "reference"
  // var, so it will not be destructed until the kernels launched at current
  // stream of given place is finished.
  auto callback = [var, place_]() {
    VLOG(4) << "Run callback of var:" << var->Name() << " at place " << place_;
  };

  gc->DirectClearCallback(callback);
}

// Returns the garbage collector associated with `place`, lazily creating one
// on first use. The concrete GC type depends on the place kind and on which
// device backends this build was compiled with; requesting a place for a
// backend that was compiled out raises PermissionDenied.
paddle::framework::GarbageCollector* Tracer::MutableGarbageCollectorIfNotExists(
    const platform::Place& place) {
  // if not exists, create a new GarbageCollector at given place
  if (gcs_.count(place) == 0) {
    std::unique_ptr<framework::GarbageCollector> gc;
    if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      gc.reset(new framework::DefaultStreamGarbageCollector(place, 0));

      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use CUDA device since it's not compiled with CUDA,"
          "Please recompile or reinstall Paddle with GPU support."));
#endif
    } else if (platform::is_cuda_pinned_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      gc.reset(new framework::CUDAPinnedGarbageCollector(place, 0));

      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use CUDAPinned device since it's not compiled with "
          "CUDA,"
          "Please recompile or reinstall Paddle with GPU support."));
#endif
    } else if (platform::is_xpu_place(place)) {
#if defined(PADDLE_WITH_XPU)
      gc.reset(new framework::XPUGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use XPU device since it's not compiled with XPU,"
          "Please recompile or reinstall Paddle with XPU support."));
#endif
    } else if (platform::is_cpu_place(place)) {
      // CPU is always available — no compile-time guard needed.
      gc.reset(new framework::CPUGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
    } else if (platform::is_npu_place(place)) {
#if defined(PADDLE_WITH_ASCEND_CL)
      // TODO(zhiqiu): fix bugs and enable NPUDefaultStreamGarbageCollector.
      gc.reset(new framework::NPUUnsafeFastGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use NPU device since it's not compiled with NPU,"
          "Please recompile or reinstall Paddle with NPU support."));
#endif
    } else if (platform::is_ipu_place(place)) {
#if defined(PADDLE_WITH_IPU)
      gc.reset(new framework::IPUGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use IPU device since it's not compiled with IPU,"
          "Please recompile or reinstall Paddle with IPU support."));
#endif
    } else if (platform::is_mlu_place(place)) {
#if defined(PADDLE_WITH_MLU)
      gc.reset(new framework::MLUDefaultStreamGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use MLU device since it's not compiled with MLU,"
          "Please recompile or reinstall Paddle with MLU support."));
#endif
    } else if (platform::is_custom_place(place)) {
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
      // Custom devices pick the unsafe-fast GC when fast eager deletion is
      // enabled, otherwise the default-stream GC.
      if (framework::IsFastEagerDeletionModeEnabled()) {
        gc.reset(
            new framework::CustomDeviceUnsafeFastGarbageCollector(place, 0));
        VLOG(10) << "Created UnsafeFastGarbageCollector at " << place;
      } else {
        gc.reset(new framework::CustomDefaultStreamGarbageCollector(place, 0));
        VLOG(10) << "Created GarbageCollector at " << place;
      }
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use CustomDevice since it's not compiled with "
          "CustomDevice,"
          "Please recompile or reinstall Paddle with CustomDevice "
          "support."));
#endif
    } else {
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "Unsupported place for garbage collection"));
    }
    gcs_.emplace(place, std::move(gc));
  }

  return gcs_.at(place).get();
}

J
Jiabin Yang 已提交
190
// Public templated entry point for tracing one op; simply forwards all
// arguments to TraceOpImpl (which takes attrs by mutable reference — the
// by-value `attrs` here gives it a private copy to modify).
template <typename VarType>
void Tracer::TraceOp(const std::string& type,
                     const NameVarMap<VarType>& ins,
                     const NameVarMap<VarType>& outs,
                     framework::AttributeMap attrs,
                     const platform::Place& place,
                     bool trace_backward,
                     const std::map<std::string, std::string>& inplace_map,
                     paddle::framework::AttributeMap* passed_default_attrs_,
                     bool use_default_attr_map) {
  TraceOpImpl<VarType>(type,
                       ins,
                       outs,
                       attrs,
                       place,
                       trace_backward,
                       inplace_map,
                       passed_default_attrs_,
                       use_default_attr_map);
}

// Core tracing routine shared by every TraceOp overload.
//
// Pipeline, in order:
//   1. Record a profiler event and flush denormals for this scope.
//   2. If FLAGS_use_mkldnn, set the "use_mkldnn" attribute from the
//      tracer_mkldnn_ops_on/off flag lists (mutates `attrs`).
//   3. Create the op, run its attribute checker and any extra attr checkers.
//   4. Apply AMP auto-casting to inputs according to amp_level_/amp_dtype_.
//   5. On GPU, run layout auto-tuning over the (possibly cast) inputs.
//   6. Switch to the target device and run the op, translating any escaping
//      exception into a Paddle error with the op type attached.
//   7. Optionally record the op into the ProgramDesc, then create the grad
//      op node if gradients are required.
//
// `attrs` is taken by non-const reference and may be modified (steps 2-5).
// When `use_default_attr_map` is false, `passed_default_attrs_` must be
// non-null and supplies the default attrs; when true and non-null, it is
// filled with the checker's defaults as an output.
template <typename VarType>
void Tracer::TraceOpImpl(const std::string& type,
                         const NameVarMap<VarType>& ins,
                         const NameVarMap<VarType>& outs,
                         framework::AttributeMap& attrs,
                         const platform::Place& place,
                         bool trace_backward,
                         const std::map<std::string, std::string>& inplace_map,
                         paddle::framework::AttributeMap* passed_default_attrs_,
                         bool use_default_attr_map) {
  platform::RecordEvent op_type_record_event(
      type, platform::TracerEventType::Operator, 1);
  platform::ScopedFlushDenormal flush;
  VLOG(1) << "Trace Op: " << type;
  if (FLAGS_use_mkldnn) {
    // if both lists are empty all ops are enabled (default for
    // FLAGS_use_mkldnn=1)
    // if ops_on list is not empty only ops from that list are enabled
    if (!FLAGS_tracer_mkldnn_ops_on.empty()) {
      auto is_on = FLAGS_tracer_mkldnn_ops_on.find(type) != std::string::npos;
      attrs["use_mkldnn"] = is_on;
    } else {
      // if ops_on list is empty all ops are enabled except types from off_list
      auto is_off = FLAGS_tracer_mkldnn_ops_off.find(type) != std::string::npos;
      attrs["use_mkldnn"] = !is_off;
    }
  }
  auto op = framework::OpRegistry::CreateOp(type, {}, {}, {}, false);
  const auto& op_info = op->Info();
  auto* attr_checker = op_info.Checker();
  if (attr_checker) {
    attr_checker->Check(&attrs, true, /*only_check_exist_value=*/true);
  }
  // Extra (backend-specific) attribute checkers registered for this op type.
  const auto& extra_attr_checkers =
      operators::ExtraInfoUtils::Instance().GetExtraAttrsChecker(type);
  for (const auto& checker : extra_attr_checkers) {
    checker(&attrs, true);
  }

  static paddle::framework::AttributeMap empty_attrs_map = {};
  const paddle::framework::AttributeMap& default_attrs =
      attr_checker == nullptr ? empty_attrs_map
                              : attr_checker->GetDefaultAttrMap();

  // Holds AMP-cast (and/or layout-tuned) inputs; stays null when the inputs
  // are used as-is.
  std::unique_ptr<NameVarMap<VarType>> ins_amp = nullptr;
  if (amp_level_ == AmpLevel::O1) {
    if (amp_dtype_ == phi::DataType::FLOAT16) {
      VLOG(5) << "Float16 Auto Mixed Precision O1 run operator: " << type;
      ins_amp = std::make_unique<NameVarMap<VarType>>(
          AutoCastInputs<VarType>(type, ins));
    } else if (amp_dtype_ == phi::DataType::BFLOAT16) {
      VLOG(5) << "BFloat16 Auto Mixed Precision O1 run operator: " << type;
      ins_amp = std::make_unique<NameVarMap<VarType>>(
          AutoCastBF16Inputs<VarType>(type, ins));
    }
  } else if (amp_level_ == AmpLevel::O2) {
    if (amp_dtype_ == phi::DataType::FLOAT16) {
      VLOG(5) << "Float16 Auto Mixed Precision O2 run operator: " << type;
      ins_amp = std::make_unique<NameVarMap<VarType>>(
          CastPureFp16Inputs<VarType>(type, ins));
    } else if (amp_dtype_ == phi::DataType::BFLOAT16) {
      VLOG(5) << "BFloat16 Auto Mixed Precision O2 run operator: " << type;
      ins_amp = std::make_unique<NameVarMap<VarType>>(
          CastPureBf16Inputs<VarType>(type, ins));
    }
  }

  // Layout auto-tuning (GPU only) runs on top of the AMP result, replacing
  // ins_amp with the tuned inputs.
  if (platform::is_gpu_place(place)) {
    const auto& new_tmp = ins_amp == nullptr ? ins : *ins_amp;
    const auto& tracer = imperative::GetCurrentTracer();
    ins_amp = std::make_unique<NameVarMap<VarType>>(
        imperative::AutoTuneLayout<VarType>(
            type, new_tmp, outs, &attrs, tracer));
  }

  const auto& new_ins = ins_amp == nullptr ? ins : *ins_amp;

  try {
    // Make the target device current before running; builds without the
    // corresponding backend reject the place outright.
    if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      platform::SetDeviceId(place.device);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with GPU if use CUDAPlace."));
#endif
    } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
      platform::SetXPUDeviceId(place.device);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with XPU if use XPUPlace."));
#endif
    } else if (platform::is_npu_place(place)) {
#ifdef PADDLE_WITH_ASCEND_CL
      platform::SetNPUDeviceId(place.device);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with NPU if use NPUPlace."));
#endif
    } else if (platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_MLU
      platform::SetMLUDeviceId(place.device);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with MLU if use MLUPlace."));
#endif
    } else if (platform::is_custom_place(place)) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
      phi::DeviceManager::SetDevice(place);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with CustomDevice if use "
          "CustomPlace."));
#endif
    }
    if (!use_default_attr_map) {
      PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_,
                              paddle::platform::errors::PermissionDenied(
                                  "Detected default_attrs = nullptr."));
      VLOG(6) << "Use passed in default attrs";
      OpBase::Run(*op, new_ins, outs, attrs, (*passed_default_attrs_), place);
    } else {
      VLOG(6) << "Use Checker's default attrs";
      if (passed_default_attrs_) {
        // TODO(jiabin): Update this without copy
        *passed_default_attrs_ = default_attrs;
      }
      OpBase::Run(*op, new_ins, outs, attrs, default_attrs, place);
    }
  } catch (platform::EnforceNotMet& exception) {
    // Paddle's own errors get the failing op type appended, then re-thrown.
    framework::AppendErrorOpHint(type, &exception);
    throw std::move(exception);
  } catch (std::exception& ex) {
    PADDLE_THROW(
        platform::errors::Fatal("Operator %s raises an %s exception.\n"
                                "The exception content is\n:%s.",
                                type,
                                platform::demangle(typeid(ex).name()),
                                ex.what()));
  } catch (...) {
    // NOTE: this branch represents a very serious bug with
    // low probability of occurrence, and we can't get its
    // exception content here.
    PADDLE_THROW(platform::errors::Fatal(
        "Operator %s raises an unknown exception.", type));
  }

  if (enable_program_desc_tracing_) {
    VLOG(5) << "Trace op " << type << " into ProgramDesc";
    program_desc_tracer_->InsertOp(type, new_ins, outs, attrs);
  }

  {
    platform::RecordEvent node_creation_record_event(
        "grad_node_creation", platform::TracerEventType::OperatorInner, 1);

    if (ComputeRequiredGrad(new_ins, outs, trace_backward)) {
      PADDLE_ENFORCE_EQ(
          passed_default_attrs_,
          nullptr,
          paddle::platform::errors::PermissionDenied(
              "We expect passed_default_attrs_ is nullptr while "
              "use_default_attr_map is true, however we got not null "
              "passed_default_attrs_. Please check your usage of trace_op. "));
      CreateGradOpNode(
          *op, new_ins, outs, attrs, default_attrs, place, inplace_map);
    } else {
      VLOG(3) << "No Grad to track for Op: " << type;
    }
    VLOG(6) << "Finish Trace Op: " << type;
  }
}

J
Jiabin Yang 已提交
384
// Explicit instantiations of the templated TraceOp for the two variable
// kinds used by the imperative engine: legacy VarBase and eager-mode
// egr::EagerVariable.
template void Tracer::TraceOp<VarBase>(
    const std::string& type,
    const NameVarMap<VarBase>& ins,
    const NameVarMap<VarBase>& outs,
    framework::AttributeMap attrs,
    const platform::Place& place,
    bool trace_backward,
    const std::map<std::string, std::string>& inplace_map,
    paddle::framework::AttributeMap* default_attrs,
    bool use_default_attr_map);

template void Tracer::TraceOp<egr::EagerVariable>(
    const std::string& type,
    const NameVarMap<egr::EagerVariable>& ins,
    const NameVarMap<egr::EagerVariable>& outs,
    framework::AttributeMap attrs,
    const platform::Place& place,
    bool trace_backward,
    const std::map<std::string, std::string>& inplace_map_,
    paddle::framework::AttributeMap* default_attrs,
    bool use_default_attr_map);
J
Jiabin Yang 已提交
405

406 407 408 409
// Convenience overload for legacy VarBase tracing: uses the tracer's
// expected_place_ and the thread-local has_grad_ flag as trace_backward,
// with checker-provided default attrs.
void Tracer::TraceOp(const std::string& type,
                     const NameVarBaseMap& ins,
                     const NameVarBaseMap& outs,
                     framework::AttributeMap attrs,
                     const std::map<std::string, std::string>& inplace_map) {
  TraceOp<VarBase>(type,
                   ins,
                   outs,
                   std::move(attrs),
                   expected_place_,
                   has_grad_,
                   inplace_map);
}

420 421
// Eager-mode overload with an explicit place and caller-managed default
// attrs. trace_backward is hard-wired to false: grad bookkeeping for eager
// tensors is not done through this legacy path (see ComputeRequiredGrad for
// NameTensorMap, which always returns false).
void Tracer::TraceOp(const std::string& type,
                     const NameTensorMap& ins,
                     const NameTensorMap& outs,
                     paddle::framework::AttributeMap& attrs,
                     const paddle::platform::Place& place,
                     paddle::framework::AttributeMap* default_attrs,
                     bool use_default_attr_map,
                     const std::map<std::string, std::string>& inplace_map) {
  VLOG(6) << "Running On Eager TraceOp with use_default_attr_map: "
          << use_default_attr_map;
  TraceOpImpl<egr::EagerVariable>(type,
                                  ins,
                                  outs,
                                  attrs,
                                  place,
                                  false,
                                  inplace_map,
                                  default_attrs,
                                  use_default_attr_map);
}

441 442
void Tracer::TraceOp(const std::string& type,
                     const NameTensorMap& ins,
W
wanghuancoder 已提交
443 444 445
                     const NameTensorMap& outs,
                     paddle::framework::AttributeMap attrs) {
  VLOG(6) << "Running On Eager TraceOp(4 agrs): ";
446 447
  TraceOpImpl<egr::EagerVariable>(
      type, ins, outs, attrs, expected_place_, false, {}, nullptr, true);
J
Jiabin Yang 已提交
448 449
}

450 451
// Eager-mode overload with an inplace map but no explicit place or default
// attrs: runs at expected_place_, no backward tracing, checker defaults.
void Tracer::TraceOp(const std::string& type,
                     const NameTensorMap& ins,
                     const NameTensorMap& outs,
                     paddle::framework::AttributeMap& attrs,
                     const std::map<std::string, std::string>& inplace_map) {
  VLOG(6) << "Running On Eager TraceOp(less): ";
  TraceOpImpl<egr::EagerVariable>(type,
                                  ins,
                                  outs,
                                  attrs,
                                  expected_place_,
                                  false,
                                  inplace_map,
                                  nullptr,
                                  true);
}

W
WangXi 已提交
467 468 469 470
// Sets the default place used by the TraceOp overloads that do not take an
// explicit place argument.
void Tracer::SetExpectedPlace(platform::Place place) {
  expected_place_ = place;
}

J
Jiabin Yang 已提交
471
// Decides whether the traced op needs a grad node: true iff backward tracing
// is on and at least one input variable does not have stop-gradient set.
// On the first such input, the outputs' stop-gradient flags are set to false
// (via PassStopGradient) so gradients flow through them.
bool Tracer::ComputeRequiredGrad(const NameVarBaseMap& ins,
                                 const NameVarBaseMap& outs,
                                 bool trace_backward) {
  if (!trace_backward) {
    return false;
  }

  for (const auto& input_slot : ins) {
    for (const auto& input_var : input_slot.second) {
      if (input_var->OverridedStopGradient()) {
        continue;
      }
      VLOG(6) << "Find out input: " << input_var->Name()
              << "'s GeneratedGrad is True";
      // OverridedStopGradient() is false here, so outputs get
      // stop_gradient = false.
      PassStopGradient(outs, input_var->OverridedStopGradient());
      return true;
    }
  }
  return false;
}

J
Jiabin Yang 已提交
489 490 491 492 493 494
// Eager-tensor overload: always reports that no grad node is required here.
// NOTE(review): presumably eager mode creates grad nodes through its own
// autograd machinery rather than this tracer path — confirm against the
// eager engine before relying on this.
bool Tracer::ComputeRequiredGrad(const NameTensorMap& ins,
                                 const NameTensorMap& outs,
                                 bool trace_backward) {
  return false;
}

495
// Builds a throwaway op and a CPU execution context just to query the phi
// kernel signature the op would use for the given inputs/outputs/attrs.
// Throws InvalidArgument if the op type is not an OperatorWithKernel.
phi::KernelSignature Tracer::GetExpectedKernelSignature(
    const std::string& type,
    const NameTensorMap& ins,
    const NameTensorMap& outs,
    framework::AttributeMap attrs) const {
  auto op = framework::OpRegistry::CreateOp(type, {}, {}, {}, false);
  framework::RuntimeContext ctx({}, {});
  // A CPU device context suffices: only the signature is queried, nothing
  // is executed.
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(phi::CPUPlace());
  const auto& op_info = op->Info();
  auto* attr_checker = op_info.Checker();
  if (attr_checker) {
    attr_checker->Check(&attrs, true, /*only_check_exist_value=*/true);
  }
  static paddle::framework::AttributeMap empty_attrs_map = {};
  const paddle::framework::AttributeMap& default_attrs =
      attr_checker == nullptr ? empty_attrs_map
                              : attr_checker->GetDefaultAttrMap();
  auto dygraph_exe_ctx =
      imperative::DygraphExecutionContext<egr::EagerVariable>(
          *op,
          framework::Scope(),
          *dev_ctx,
          ctx,
          ins,
          outs,
          attrs,
          default_attrs);
  auto* opbase_with_kernel =
      dynamic_cast<framework::OperatorWithKernel*>(op.get());
  PADDLE_ENFORCE_NE(opbase_with_kernel,
                    nullptr,
                    platform::errors::InvalidArgument(
                        "This op type:`%s` is not a OperatorWithKernel, only "
                        "OperatorWithKernel can get KernelSignature",
                        type));
  return phi::KernelSignature(
      std::move(opbase_with_kernel->GetExpectedPhiKernelArgs(dygraph_exe_ctx)));
}

M
minqiyang 已提交
535
}  // namespace imperative
536
}  // namespace paddle