// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/imperative/tracer.h"
#include <map>
#include <set>
#include <unordered_set>
#include <utility>
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/imperative/amp_auto_cast.h"
#include "paddle/fluid/imperative/execution_context.h"
#include "paddle/fluid/imperative/op_base.h"
#include "paddle/fluid/platform/denormal.h"
#include "paddle/fluid/platform/device/device_wrapper.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/fluid/platform/profiler/event_tracing.h"
#include "paddle/fluid/string/string_helper.h"
#include "paddle/phi/common/place.h"

DECLARE_bool(use_mkldnn);
DECLARE_string(tracer_mkldnn_ops_on);
DECLARE_string(tracer_mkldnn_ops_off);

namespace paddle {
namespace imperative {

thread_local bool Tracer::enable_program_desc_tracing_ = false;

thread_local bool Tracer::has_grad_ = true;

thread_local AmpLevel Tracer::amp_level_ = AmpLevel::O0;

thread_local phi::DataType Tracer::amp_dtype_ = phi::DataType::FLOAT32;

static std::shared_ptr<Tracer> g_current_tracer(nullptr);

const std::shared_ptr<Tracer>& GetCurrentTracer() { return g_current_tracer; }

void SetCurrentTracer(const std::shared_ptr<Tracer>& tracer) {
  g_current_tracer = tracer;
  VLOG(6) << "Set current tracer: " << g_current_tracer;
}
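
// Illustrative usage sketch (added; not part of the original file, and the
// setup around it is hypothetical): a tracer is installed once and then
// fetched wherever tracing is needed.
//
//   auto tracer = std::make_shared<imperative::Tracer>();
//   imperative::SetCurrentTracer(tracer);
//   imperative::GetCurrentTracer();  // returns the tracer set above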

void PassStopGradient(const NameVarBaseMap& outs, bool generate_grad) {
  for (const auto& pair : outs) {
    for (const auto& var : pair.second) {
      // NOTE(zhiqiu): this happens when a None output is passed from the
      // Python side. For example, fake_quantize_dequantize_moving_average_abs_max
      // may pass None for OutAccum in eval mode.
      // It could be refined by generating a separate pybind interface for
      // each function signature of an operator.
      if (var == nullptr) {
        VLOG(4) << pair.first << " is NULL";
        continue;
      }
      VLOG(6) << "Set output: " << var->Name() << "'s OverridedStopGradient as "
              << generate_grad;
      var->InnerSetOverridedStopGradient(generate_grad);
    }
  }
}

void IncreaseVarbaseReferenceCountUntilCopyComplete(
    const std::shared_ptr<imperative::VarBase>& var,
    const platform::Place& place) {
  // Note(zhiqiu): Follow the logic of TensorCopy to determine the place where
  // the callback needs to be added; see tensor_utils.cc:245.
  auto place_ = platform::is_gpu_place(place) ? place : var->Place();

  auto tracer = imperative::GetCurrentTracer();
  auto gc = tracer->MutableGarbageCollectorIfNotExists(place_);

  // Note(zhiqiu): This is an empty callback whose only purpose is to hold a
  // reference to var, so var will not be destructed until the kernels
  // launched on the current stream of the given place have finished.
  auto callback = [var, place_]() {
    VLOG(4) << "Run callback of var:" << var->Name() << " at place " << place_;
  };

  gc->DirectClearCallback(callback);
}
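
// Illustrative sketch of the mechanism above (added; not part of the build):
// capturing the shared_ptr by value in the lambda bumps its reference count,
// so the VarBase stays alive at least until the garbage collector has run
// and released the callback.
//
//   std::shared_ptr<imperative::VarBase> var = ...;
//   auto cb = [var]() {};  // var.use_count() stays raised while cb exists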

paddle::framework::GarbageCollector* Tracer::MutableGarbageCollectorIfNotExists(
    const platform::Place& place) {
  // If no GarbageCollector exists yet, create one at the given place.
  if (gcs_.count(place) == 0) {
    std::unique_ptr<framework::GarbageCollector> gc;
    if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      gc.reset(new framework::DefaultStreamGarbageCollector(place, 0));

      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use CUDA device since it's not compiled with CUDA,"
          "Please recompile or reinstall Paddle with GPU support."));
#endif
    } else if (platform::is_cuda_pinned_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      gc.reset(new framework::CUDAPinnedGarbageCollector(place, 0));

      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use CUDAPinned device since it's not compiled with "
          "CUDA,"
          "Please recompile or reinstall Paddle with GPU support."));
#endif
    } else if (platform::is_xpu_place(place)) {
#if defined(PADDLE_WITH_XPU)
      gc.reset(new framework::XPUGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use XPU device since it's not compiled with XPU,"
          "Please recompile or reinstall Paddle with XPU support."));
#endif
    } else if (platform::is_cpu_place(place)) {
      gc.reset(new framework::CPUGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
    } else if (platform::is_npu_place(place)) {
#if defined(PADDLE_WITH_ASCEND_CL)
      // TODO(zhiqiu): fix bugs and enable NPUDefaultStreamGarbageCollector.
      gc.reset(new framework::NPUUnsafeFastGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use NPU device since it's not compiled with NPU,"
          "Please recompile or reinstall Paddle with NPU support."));
#endif
    } else if (platform::is_mlu_place(place)) {
#if defined(PADDLE_WITH_MLU)
      gc.reset(new framework::MLUDefaultStreamGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use MLU device since it's not compiled with MLU,"
          "Please recompile or reinstall Paddle with MLU support."));
#endif
    } else if (platform::is_custom_place(place)) {
#if defined(PADDLE_WITH_CUSTOM_DEVICE)
      gc.reset(new framework::CustomDefaultStreamGarbageCollector(place, 0));
      VLOG(10) << "Created GarbageCollector at " << place;
#else
      PADDLE_THROW(platform::errors::PermissionDenied(
          "Paddle can't use CustomDevice since it's not compiled with "
          "CustomDevice,"
          "Please recompile or reinstall Paddle with CustomDevice "
          "support."));
#endif
    } else {
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "Unsupported place for garbage collection"));
    }
    gcs_.emplace(place, std::move(gc));
  }

  return gcs_.at(place).get();
}
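
// Added note: gcs_ acts as a per-place cache, so repeated calls with the same
// place reuse a single GarbageCollector rather than constructing a new one.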

template <typename VarType>
void Tracer::TraceOp(const std::string& type, const NameVarMap<VarType>& ins,
                     const NameVarMap<VarType>& outs,
                     framework::AttributeMap attrs,
                     const platform::Place& place, bool trace_backward,
                     const std::map<std::string, std::string>& inplace_map,
                     paddle::framework::AttributeMap* passed_default_attrs_,
                     bool use_default_attr_map) {
  platform::RecordEvent op_type_record_event(
      type, platform::TracerEventType::Operator, 1);
  platform::ScopedFlushDenormal flush;
  VLOG(1) << "Trace Op: " << type;
  if (FLAGS_use_mkldnn) {
    // If both lists are empty, all ops are enabled (the default for
    // FLAGS_use_mkldnn=1).
    // If the ops_on list is not empty, only ops from that list are enabled.
    if (!FLAGS_tracer_mkldnn_ops_on.empty()) {
      auto is_on = FLAGS_tracer_mkldnn_ops_on.find(type) != std::string::npos;
      attrs["use_mkldnn"] = is_on;
    } else {
      // If the ops_on list is empty, all ops are enabled except the types in
      // the off list.
      auto is_off = FLAGS_tracer_mkldnn_ops_off.find(type) != std::string::npos;
      attrs["use_mkldnn"] = !is_off;
    }
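    // Added observation: both checks above use std::string::find, i.e. a
    // substring match on the comma-separated flag value, so an op type that
    // is a substring of another listed type (e.g. "relu" vs. "prelu")
    // matches as well.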
  }
  auto op = framework::OpRegistry::CreateOp(type, {}, {}, {}, false);
  const auto& op_info = op->Info();
  auto* attr_checker = op_info.Checker();
  if (attr_checker) {
    attr_checker->Check(&attrs, true, /*only_check_exist_value=*/true);
  }

  static paddle::framework::AttributeMap empty_attrs_map = {};
  const paddle::framework::AttributeMap& default_attrs =
      attr_checker == nullptr ? empty_attrs_map
                              : attr_checker->GetDefaultAttrMap();

  NameVarMap<VarType> new_ins = ins;
  if (amp_level_ == AmpLevel::O1) {
    if (amp_dtype_ == phi::DataType::FLOAT16) {
      VLOG(5) << "Float16 Auto Mixed Precision O1 run operator: " << type;
      new_ins = AutoCastInputs<VarType>(type, ins);
    } else if (amp_dtype_ == phi::DataType::BFLOAT16) {
      VLOG(5) << "BFloat16 Auto Mixed Precision O1 run operator: " << type;
      new_ins = AutoCastBF16Inputs<VarType>(type, ins);
    }
  } else if (amp_level_ == AmpLevel::O2) {
    if (amp_dtype_ == phi::DataType::FLOAT16) {
      VLOG(5) << "Float16 Auto Mixed Precision O2 run operator: " << type;
      new_ins = CastPureFp16Inputs<VarType>(type, ins);
    } else if (amp_dtype_ == phi::DataType::BFLOAT16) {
      VLOG(5) << "BFloat16 Auto Mixed Precision O2 run operator: " << type;
      new_ins = CastPureBf16Inputs<VarType>(type, ins);
    }
  }
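
  // Added summary of the AMP branches above: AmpLevel::O0 leaves inputs
  // untouched, O1 auto-casts inputs to fp16/bf16 according to the AMP
  // allow/block lists, and O2 casts inputs to the pure low-precision dtype
  // wherever possible.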

  try {
    if (platform::is_gpu_place(place)) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
      platform::SetDeviceId(place.device);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with GPU if use CUDAPlace."));
#endif
    } else if (platform::is_xpu_place(place)) {
#ifdef PADDLE_WITH_XPU
      platform::SetXPUDeviceId(place.device);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with XPU if use XPUPlace."));
#endif
    } else if (platform::is_npu_place(place)) {
#ifdef PADDLE_WITH_ASCEND_CL
      platform::SetNPUDeviceId(place.device);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with NPU if use NPUPlace."));
#endif
    } else if (platform::is_mlu_place(place)) {
#ifdef PADDLE_WITH_MLU
      platform::SetMLUDeviceId(place.device);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with MLU if use MLUPlace."));
#endif
    } else if (platform::is_custom_place(place)) {
#ifdef PADDLE_WITH_CUSTOM_DEVICE
      phi::DeviceManager::SetDevice(place);
#else
      PADDLE_THROW(platform::errors::PreconditionNotMet(
          "PaddlePaddle should compile with CustomDevice if use "
          "CustomPlace."));
#endif
    }
    if (!use_default_attr_map) {
      PADDLE_ENFORCE_NOT_NULL(passed_default_attrs_,
                              paddle::platform::errors::PermissionDenied(
                                  "Detected default_attrs = nullptr."));
      VLOG(6) << "Use passed in default attrs";
      OpBase::Run(*op, new_ins, outs, attrs, (*passed_default_attrs_), place);
    } else {
      VLOG(6) << "Use Checker's default attrs";
      if (passed_default_attrs_) {
        // TODO(jiabin): Update this without copy
        *passed_default_attrs_ = default_attrs;
      }
      OpBase::Run(*op, new_ins, outs, attrs, default_attrs, place);
    }
  } catch (platform::EnforceNotMet& exception) {
    framework::AppendErrorOpHint(type, &exception);
    throw std::move(exception);
  } catch (std::exception& ex) {
    PADDLE_THROW(platform::errors::Fatal(
        "Operator %s raises an %s exception.\n"
        "The exception content is\n:%s.",
        type, platform::demangle(typeid(ex).name()), ex.what()));
  } catch (...) {
    // NOTE: this branch indicates a very serious, low-probability bug; its
    // exception content cannot be recovered here.
    PADDLE_THROW(platform::errors::Fatal(
        "Operator %s raises an unknown exception.", type));
  }

  if (enable_program_desc_tracing_) {
    VLOG(5) << "Trace op " << type << " into ProgramDesc";
    program_desc_tracer_->InsertOp(type, new_ins, outs, attrs);
  }

  if (ComputeRequiredGrad(new_ins, outs, trace_backward)) {
    PADDLE_ENFORCE_EQ(
        passed_default_attrs_, nullptr,
        paddle::platform::errors::PermissionDenied(
            "We expect passed_default_attrs_ is nullptr while "
            "use_default_attr_map is true, however we got not null "
            "passed_default_attrs_. Please check your usage of trace_op. "));
    CreateGradOpNode(*op, new_ins, outs, attrs, default_attrs, place,
                     inplace_map);
  } else {
    VLOG(3) << "No Grad to track for Op: " << type;
  }
  VLOG(6) << "Finish Trace Op: " << type;
}

template void Tracer::TraceOp<VarBase>(
    const std::string& type, const NameVarMap<VarBase>& ins,
    const NameVarMap<VarBase>& outs, framework::AttributeMap attrs,
    const platform::Place& place, bool trace_backward,
    const std::map<std::string, std::string>& inplace_map,
    paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map);

template void Tracer::TraceOp<egr::EagerVariable>(
    const std::string& type, const NameVarMap<egr::EagerVariable>& ins,
    const NameVarMap<egr::EagerVariable>& outs, framework::AttributeMap attrs,
    const platform::Place& place, bool trace_backward,
    const std::map<std::string, std::string>& inplace_map_,
    paddle::framework::AttributeMap* default_attrs, bool use_default_attr_map);

void Tracer::TraceOp(const std::string& type, const NameVarBaseMap& ins,
                     const NameVarBaseMap& outs, framework::AttributeMap attrs,
                     const std::map<std::string, std::string>& inplace_map) {
  TraceOp<VarBase>(type, ins, outs, std::move(attrs), expected_place_,
                   has_grad_, inplace_map);
}
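
// Illustrative call sketch (added; x, y, out and the attribute value are
// hypothetical): tracing an elementwise_add in dygraph mode, letting the
// overload above supply the thread-local expected_place_ and has_grad_.
//
//   imperative::NameVarBaseMap ins = {{"X", {x}}, {"Y", {y}}};
//   imperative::NameVarBaseMap outs = {{"Out", {out}}};
//   tracer->TraceOp("elementwise_add", ins, outs, {{"axis", -1}}, {});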

void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
                     const NameTensorMap& outs,
                     paddle::framework::AttributeMap attrs,
                     const paddle::platform::Place& place,
                     paddle::framework::AttributeMap* default_attrs,
                     bool use_default_attr_map,
                     const std::map<std::string, std::string>& inplace_map) {
  VLOG(6) << "Running On Eager TraceOp with use_default_attr_map: "
          << use_default_attr_map;
  TraceOp<egr::EagerVariable>(type, ins, outs, std::move(attrs), place, false,
                              inplace_map, default_attrs, use_default_attr_map);
}

void Tracer::TraceOp(const std::string& type, const NameTensorMap& ins,
                     const NameTensorMap& outs,
                     paddle::framework::AttributeMap attrs,
                     const std::map<std::string, std::string>& inplace_map) {
  VLOG(6) << "Running On Eager TraceOp(less): ";
  TraceOp<egr::EagerVariable>(type, ins, outs, std::move(attrs),
                              expected_place_, false, inplace_map, nullptr,
                              true);
}

void Tracer::SetExpectedPlace(platform::Place place) {
  expected_place_ = place;
}

bool Tracer::ComputeRequiredGrad(const NameVarBaseMap& ins,
                                 const NameVarBaseMap& outs,
                                 bool trace_backward) {
  if (!trace_backward) return false;

  for (const auto& name_pair : ins) {
    for (const auto& var_base : name_pair.second) {
      if (!var_base->OverridedStopGradient()) {
        VLOG(6) << "Find out input: " << var_base->Name()
                << "'s GeneratedGrad is True";
        PassStopGradient(outs, var_base->OverridedStopGradient());
        return true;
      }
    }
  }
  return false;
}

bool Tracer::ComputeRequiredGrad(const NameTensorMap& ins,
                                 const NameTensorMap& outs,
                                 bool trace_backward) {
  return false;
}
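
// Added note: the eager-mode overload above always returns false; presumably
// grad-requirement bookkeeping for eager tensors happens outside this legacy
// tracer path.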

phi::KernelSignature Tracer::GetExpectedKernelSignature(
    const std::string& type, const NameVarBaseMap& ins,
    const NameVarBaseMap& outs, framework::AttributeMap attrs) const {
  auto op = framework::OpRegistry::CreateOp(type, {}, {}, {}, false);
  framework::RuntimeContext ctx({}, {});
  platform::DeviceContextPool& pool = platform::DeviceContextPool::Instance();
  auto* dev_ctx = pool.Get(phi::CPUPlace());
  const auto& op_info = op->Info();
  auto* attr_checker = op_info.Checker();
  if (attr_checker) {
    attr_checker->Check(&attrs, true, /*only_check_exist_value=*/true);
  }
  static paddle::framework::AttributeMap empty_attrs_map = {};
  const paddle::framework::AttributeMap& default_attrs =
      attr_checker == nullptr ? empty_attrs_map
                              : attr_checker->GetDefaultAttrMap();
  auto dygraph_exe_ctx =
      imperative::DygraphExecutionContext<imperative::VarBase>(
          *op, framework::Scope(), *dev_ctx, ctx, ins, outs, attrs,
          default_attrs);
  auto* opbase_with_kernel =
      dynamic_cast<framework::OperatorWithKernel*>(op.get());
  PADDLE_ENFORCE_NE(opbase_with_kernel, nullptr,
                    platform::errors::InvalidArgument(
                        "This op type:`%s` is not a OperatorWithKernel, only "
                        "OperatorWithKernel can get KernelSignature",
                        type));
  return phi::KernelSignature(
      std::move(opbase_with_kernel->GetExpectedPhiKernelArgs(dygraph_exe_ctx)));
}
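
// Illustrative use (added; the op type and attrs are hypothetical): query
// which phi kernel arguments an op would bind to, without executing it.
//
//   auto sig = tracer->GetExpectedKernelSignature("matmul_v2", ins, outs,
//                                                 {{"trans_x", false}});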

}  // namespace imperative
}  // namespace paddle