// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/imperative/tracer.h"

#include <map>
#include <set>
#include <unordered_set>
#include <utility>

#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/imperative/amp_auto_cast.h"
#include "paddle/fluid/imperative/execution_context.h"
#include "paddle/fluid/imperative/layout_autotune.h"
#include "paddle/fluid/imperative/op_base.h"
#include "paddle/fluid/platform/denormal.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/common/place.h"

DECLARE_bool(use_mkldnn);
DECLARE_string(tracer_mkldnn_ops_on);
DECLARE_string(tracer_mkldnn_ops_off);

namespace paddle {
namespace imperative {

thread_local bool Tracer::enable_program_desc_tracing_ = false;

thread_local bool Tracer::has_grad_ = true;

thread_local AmpLevel Tracer::amp_level_ = AmpLevel::O0;

thread_local phi::DataType Tracer::amp_dtype_ = phi::DataType::FLOAT32;

static std::shared_ptr<Tracer> g_current_tracer(nullptr);

const std::shared_ptr<Tracer>& GetCurrentTracer() { return g_current_tracer; }

void SetCurrentTracer(const std::shared_ptr<Tracer>& tracer) {
  g_current_tracer = tracer;
  VLOG(6) << "Set current tracer: " << g_current_tracer;
}

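// Sets OverridedStopGradient on every non-null output to `generate_grad`,
// propagating to the outputs whether a gradient should be generated for them.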
void PassStopGradient(const NameVarBaseMap& outs, bool generate_grad) {
  for (const auto& pair : outs) {
    for (const auto& var : pair.second) {
      // NOTE(zhiqiu): this happens when a None output is passed from the
      // Python side. For example, fake_quantize_dequantize_moving_average_abs_max
      // may pass a None OutAccum in eval mode.
      // It could be refined by generating several pybind interfaces for one
      // operator, one per function signature.
      if (var == nullptr) {
        VLOG(4) << pair.first << " is NULL";
        continue;
      }
      VLOG(6) << "Set output: " << var->Name() << "'s OverridedStopGradient as "
              << generate_grad;
      var->InnerSetOverridedStopGradient(generate_grad);
    }
  }
}

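// Keeps `var` alive until async work on `place` completes: an empty callback
// capturing the shared_ptr is registered with the garbage collector, so the
// variable cannot be destructed before the kernels already launched on that
// place's current stream have finished.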
void IncreaseVarbaseReferenceCountUntilCopyComplete(
    const std::shared_ptr<imperative::VarBase>& var,
    const platform::Place& place) {
  // Note(zhiqiu): Follow the logic of TensorCopy to determine the place where
  // we need to add the callback, see tensor_utils.cc:245
  auto place_ = platform::is_gpu_place(place) ? place : var->Place();

  auto tracer = imperative::GetCurrentTracer();
  auto gc = tracer->MutableGarbageCollectorIfNotExists(place_);

  // Note(zhiqiu): This is an empty callback whose only purpose is to
  // "reference" var, so var will not be destructed until the kernels launched
  // on the current stream of the given place have finished.
  auto callback = [var, place_]() {
    VLOG(4) << "Run callback of var:" << var->Name() << " at place " << place_;
  };

  gc->DirectClearCallback(callback);
}

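// Lazily creates one GarbageCollector per place and caches it in gcs_; the
// concrete collector type is chosen to match the device of `place`.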
paddle::framework::GarbageCollector* Tracer::MutableGarbageCollectorIfNotExists(
    const platform::Place& place) {
  // If none exists yet, create a new GarbageCollector at the given place.
  if (gcs_.count(place) == 0) {
    std::unique_ptr<framework::GarbageCollector> gc;
    if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      gc.reset(new framework::DefaultStreamGarbageCollector(place, 0));

      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use CUDA device since it's not compiled with CUDA,"
          "Please recompile or reinstall Paddle with GPU support."));
#endif
    } else if (platform::is_cuda_pinned_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      gc.reset(new framework::CUDAPinnedGarbageCollector(place, 0));

      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use CUDAPinned device since it's not compiled with "
          "CUDA,"
          "Please recompile or reinstall Paddle with GPU support."));
#endif
    } else if (platform::is_xpu_place(place)) {
#if defined(PADDLE_WITH_XPU)
      gc.reset(new framework::XPUGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use XPU device since it's not compiled with XPU,"
          "Please recompile or reinstall Paddle with XPU support."));
#endif
    } else if (platform::is_cpu_place(place)) {
      gc.reset(new framework::CPUGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
    } else if (platform::is_npu_place(place)) {
#if defined(PADDLE_WITH_ASCEND_CL)
      // TODO(zhiqiu): fix bugs and enable NPUDefaultStreamGarbageCollector.
      gc.reset(new framework::NPUUnsafeFastGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use NPU device since it's not compiled with NPU,"
          "Please recompile or reinstall Paddle with NPU support."));
#endif
    } else if (platform::is_mlu_place(place)) {
#if defined(PADDLE_WITH_MLU)
      gc.reset(new framework::MLUDefaultStreamGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use MLU device since it's not compiled with MLU,"
          "Please recompile or reinstall Paddle with MLU support."));
#endif
    } else if (platform::is_custom_place(place)) {
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
      gc.reset(new framework::CustomDefaultStreamGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use CustomDevice since it's not compiled with "
          "CustomDevice,"
          "Please recompile or reinstall Paddle with CustomDevice "
          "support."));
#endif
    } else {
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "Unsupported place for garbage collection"));
    }
    gcs_.emplace(place, std::move(gc));
  }

  return gcs_.at(place).get();
}

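// Main entry for tracing an op in dygraph mode: runs the forward op and, if
// gradients are required, records the corresponding grad op node.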
template <typename VarType>
void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
                     const NameVarMap<VarType>& outs,
                     framework::AttributeMap attrs,
                     const platform::Place& place, bool trace_backward,
                     const std::map<std::string, std::string>& inplace_map,
                     paddle::framework::AttributeMap* passed_default_attrs_,
                     bool use_default_attr_map) {
  TraceOpImpl<VarType>(type, ins, outs, attrs, place, trace_backward,
                       inplace_map, passed_default_attrs_,
                       use_default_attr_map);
}

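// Shared implementation behind all TraceOp overloads. Note that `attrs` is
// taken by non-const reference: the MKL-DNN switches, the attribute checker,
// and layout autotuning may all modify it before the op runs.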
template <typename VarType>
void Tracer::TraceOpImpl(const std::string& type,
                         const NameVarMap<VarType>& ins,
                         const NameVarMap<VarType>& outs,
                         framework::AttributeMap& attrs,
                         const platform::Place& place, bool trace_backward,
                         const std::map<std::string, std::string>& inplace_map,
                         paddle::framework::AttributeMap* passed_default_attrs_,
                         bool use_default_attr_map) {
  platform::RecordEvent op_type_record_event(
      type, platform::TracerEventType::Operator, 1);
  platform::ScopedFlushDenormal flush;
  VLOG(1) << "Trace Op: " << type;
  if (FLAGS_use_mkldnn) {
    // If both lists are empty, all ops are enabled (the default for
    // FLAGS_use_mkldnn=1). If the ops_on list is not empty, only the ops on
    // that list are enabled.
    if (!FLAGS_tracer_mkldnn_ops_on.empty()) {
      auto is_on = FLAGS_tracer_mkldnn_ops_on.find(type) != std::string::npos;
      attrs["use_mkldnn"] = is_on;
    } else {
      // If the ops_on list is empty, all ops are enabled except the types on
      // the off list.
      auto is_off = FLAGS_tracer_mkldnn_ops_off.find(type) != std::string::npos;
      attrs["use_mkldnn"] = !is_off;
    }
  }
  auto op = framework::OpRegistry::CreateOp(type, {}, {}, {}, false);
  const auto& op_info = op->Info();
  auto* attr_checker = op_info.Checker();
  if (attr_checker) {
    attr_checker->Check(&attrs, true, /*only_check_exist_value=*/true);
  }

  static paddle::framework::AttributeMap empty_attrs_map = {};
  const paddle::framework::AttributeMap& default_attrs =
      attr_checker == nullptr ? empty_attrs_map
                              : attr_checker->GetDefaultAttrMap();

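  // Auto mixed precision: at O1 the inputs are selectively auto-cast per op
  // (AutoCastInputs / AutoCastBF16Inputs); at O2 they are cast to pure
  // fp16 / bf16 (CastPureFp16Inputs / CastPureBf16Inputs).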
  std::unique_ptr<NameVarMap<VarType>> ins_amp = nullptr;
  if (amp_level_ == AmpLevel::O1) {
    if (amp_dtype_ == phi::DataType::FLOAT16) {
      const auto& tracer = imperative::GetCurrentTracer();
      VLOG(5) << "Float16 Auto Mixed Precision O1 run operator: " << type;
      ins_amp = std::make_unique<NameVarMap<VarType>>(
          AutoCastInputs<VarType>(type, imperative::AutoTuneLayout<VarType>(
                                            type, ins, outs, &attrs, tracer)));
    } else if (amp_dtype_ == phi::DataType::BFLOAT16) {
      VLOG(5) << "BFloat16 Auto Mixed Precision O1 run operator: " << type;
      ins_amp = std::make_unique<NameVarMap<VarType>>(
          AutoCastBF16Inputs<VarType>(type, ins));
    }
  } else if (amp_level_ == AmpLevel::O2) {
    if (amp_dtype_ == phi::DataType::FLOAT16) {
      const auto& tracer = imperative::GetCurrentTracer();
      VLOG(5) << "Float16 Auto Mixed Precision O2 run operator: " << type;
      ins_amp =
          std::make_unique<NameVarMap<VarType>>(CastPureFp16Inputs<VarType>(
              type, imperative::AutoTuneLayout<VarType>(type, ins, outs, &attrs,
                                                        tracer)));
    } else if (amp_dtype_ == phi::DataType::BFLOAT16) {
      VLOG(5) << "BFloat16 Auto Mixed Precision O2 run operator: " << type;
      ins_amp = std::make_unique<NameVarMap<VarType>>(
          CastPureBf16Inputs<VarType>(type, ins));
    }
  }
  const auto& new_ins = ins_amp == nullptr ? ins : *ins_amp;

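  // Bind the current thread to the target device before running the op; each
  // branch is only available when the matching compile-time macro is defined.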
  try {
    if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      platform::SetDeviceId(place.device);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with GPU if use CUDAPlace."));
#endif
    } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
      platform::SetXPUDeviceId(place.device);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with XPU if use XPUPlace."));
#endif
    } else if (platform::is_npu_place(place)) {
#ifdef PADDLE_WITH_ASCEND_CL
      platform::SetNPUDeviceId(place.device);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with NPU if use NPUPlace."));
#endif
    } else if (platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_MLU
      platform::SetMLUDeviceId(place.device);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with MLU if use MLUPlace."));
#endif
    } else if (platform::is_custom_place(place)) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
      phi::DeviceManager::SetDevice(place);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with CustomDevice if use "
          "CustomPlace."));
#endif
    }
    if (!use_default_attr_map) {
      PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_,
                              paddle::platform::errors::PermissionDenied(
                                  "Detected default_attrs = nullptr."));
      VLOG(6) << "Use passed in default attrs";
      OpBase::Run(*op, new_ins, outs, attrs, (*passed_default_attrs_), place);
    } else {
      VLOG(6) << "Use Checker's default attrs";
      if (passed_default_attrs_) {
        // TODO(jiabin): Update this without copy
        *passed_default_attrs_ = default_attrs;
      }
      OpBase::Run(*op, new_ins, outs, attrs, default_attrs, place);
    }
  } catch (platform::EnforceNotMet& exception) {
    framework::AppendErrorOpHint(type, &exception);
    throw std::move(exception);
  } catch (std::exception& ex) {
    PADDLE_THROW(platform::errors::Fatal(
        "Operator %s raises an %s exception.\n"
        "The exception content is\n:%s.",
        type, platform::demangle(typeid(ex).name()), ex.what()));
  } catch (...) {
    // NOTE: this branch represents a very serious bug with
    // low probability of occurrence, and we can't get its
    // exception content here.
    PADDLE_THROW(platform::errors::Fatal(
        "Operator %s raises an unknown exception.", type));
  }

  if (enable_program_desc_tracing_) {
    VLOG(5) << "Trace op " << type << " into ProgramDesc";
    program_desc_tracer_->InsertOp(type, new_ins, outs, attrs);
  }

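  // Record a grad op node only when some input requires a gradient; callers
  // that pass their own default attrs (passed_default_attrs_ != nullptr) must
  // not reach this path.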
  {
    platform::RecordEvent node_creation_record_event(
        "grad_node_creation", platform::TracerEventType::OperatorInner, 1);

    if (ComputeRequiredGrad(new_ins, outs, trace_backward)) {
      PADDLE_ENFORCE_EQ(
          passed_default_attrs_, nullptr,
          paddle::platform::errors::PermissionDenied(
              "We expect passed_default_attrs_ is nullptr while "
              "use_default_attr_map is true, however we got not null "
              "passed_default_attrs_. Please check your usage of trace_op. "));
      CreateGradOpNode(*op, new_ins, outs, attrs, default_attrs, place,
                       inplace_map);
    } else {
      VLOG(3) << "No Grad to track for Op: " << type;
    }
    VLOG(6) << "Finish Trace Op: " << type;
  }
}

template void Tracer::TraceOp<VarBase>(
    const std::string& type, const NameVarMap<VarBase>& ins,
    const NameVarMap<VarBase>& outs, framework::AttributeMap attrs,
    const platform::Place& place, bool trace_backward,
    const std::map<std::string, std::string>& inplace_map,
    paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map);

template void Tracer::TraceOp<egr::EagerVariable>(
    const std::string& type, const NameVarMap<egr::EagerVariable>& ins,
    const NameVarMap<egr::EagerVariable>& outs, framework::AttributeMap attrs,
    const platform::Place& place, bool trace_backward,
    const std::map<std::string, std::string>& inplace_map_,
    paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map);

void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
                     const NameVarBaseMap& outs, framework::AttributeMap attrs,
                     const std::map<std::string, std::string>& inplace_map) {
  TraceOp<VarBase>(type, ins, outs, std::move(attrs), expected_place_,
                   has_grad_, inplace_map);
}

void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
                     const NameTensorMap& outs,
                     paddle::framework::AttributeMap& attrs,
                     const paddle::platform::Place& place,
                     paddle::framework::AttributeMap* default_attrs,
                     bool use_default_attr_map,
                     const std::map<std::string, std::string>& inplace_map) {
  VLOG(6) << "Running On Eager TraceOp with use_default_attr_map: "
          << use_default_attr_map;
  TraceOpImpl<egr::EagerVariable>(type, ins, outs, attrs, place, false,
                                  inplace_map, default_attrs,
                                  use_default_attr_map);
}

void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
                     const NameTensorMap& outs,
                     paddle::framework::AttributeMap attrs) {
  VLOG(6) << "Running On Eager TraceOp(4 agrs): ";
  TraceOpImpl<egr::EagerVariable>(type, ins, outs, attrs, expected_place_,
                                  false, {}, nullptr, true);
}

void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
                     const NameTensorMap& outs,
                     paddle::framework::AttributeMap& attrs,
                     const std::map<std::string, std::string>& inplace_map) {
  VLOG(6) << "Running On Eager TraceOp(less): ";
  TraceOpImpl<egr::EagerVariable>(type, ins, outs, attrs, expected_place_,
                                  false, inplace_map, nullptr, true);
}

void Tracer::SetExpectedPlace(platform::Place place) {
  expected_place_ = place;
}

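// An op requires grad iff backward tracing is enabled and at least one input
// has stop_gradient == false; in that case the outputs are marked accordingly.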
bool Tracer::ComputeRequiredGrad(const NameVarBaseMap& ins,
                                 const NameVarBaseMap& outs,
                                 bool trace_backward) {
  if (!trace_backward) return false;

  for (const auto& name_pair : ins) {
    for (const auto& var_base : name_pair.second) {
      if (!var_base->OverridedStopGradient()) {
        VLOG(6) << "Find out input: " << var_base->Name()
                << "'s GeneratedGrad is True";
        PassStopGradient(outs, var_base->OverridedStopGradient());
        return true;
      }
    }
  }
  return false;
}

bool Tracer::ComputeRequiredGrad(const NameTensorMap& ins,
                                 const NameTensorMap& outs,
                                 bool trace_backward) {
  return false;
}

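// Builds a temporary op and a CPU execution context solely to query the
// expected phi kernel signature of `type`; no kernel is actually launched.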
phi::KernelSignature Tracer::GetExpectedKernelSignature(
    const std::string& type, const NameTensorMap& ins,
    const NameTensorMap& outs, framework::AttributeMap attrs) const {
  auto op = framework::OpRegistry::CreateOp(type, {}, {}, {}, false);
  framework::RuntimeContext ctx({}, {});
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(phi::CPUPlace());
  const auto& op_info = op->Info();
  auto* attr_checker = op_info.Checker();
  if (attr_checker) {
    attr_checker->Check(&attrs, true, /*only_check_exist_value=*/true);
  }
  static paddle::framework::AttributeMap empty_attrs_map = {};
  const paddle::framework::AttributeMap& default_attrs =
      attr_checker == nullptr ? empty_attrs_map
                              : attr_checker->GetDefaultAttrMap();
  auto dygraph_exe_ctx =
      imperative::DygraphExecutionContext<egr::EagerVariable>(
          *op, framework::Scope(), *dev_ctx, ctx, ins, outs, attrs,
          default_attrs);
  auto* opbase_with_kernel =
      dynamic_cast<framework::OperatorWithKernel*>(op.get());
  PADDLE_ENFORCE_NE(opbase_with_kernel, nullptr,
                    platform::errors::InvalidArgument(
                        "This op type:`%s` is not a OperatorWithKernel, only "
                        "OperatorWithKernel can get KernelSignature",
                        type));
  return phi::KernelSignature(
      std::move(opbase_with_kernel->GetExpectedPhiKernelArgs(dygraph_exe_ctx)));
}

}  // namespace imperative
}  // namespace paddle