amp_auto_cast.h
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <unordered_set>

#include "paddle/fluid/eager/api/utils/global_utils.h"
#include "paddle/fluid/eager/eager_tensor.h"
#include "paddle/fluid/eager/legacy/type_def.h"
#include "paddle/fluid/imperative/amp_auto_cast.h"

namespace egr {
namespace legacy {

class AmpOperators {
 public:
  ~AmpOperators();
  AmpOperators(const AmpOperators& o) = delete;
  const AmpOperators& operator=(const AmpOperators& o) = delete;

  static AmpOperators& Instance();

  std::shared_ptr<std::unordered_set<std::string>> GetMutableAllowOps();

  std::shared_ptr<std::unordered_set<std::string>> GetMutableBlockOps();

  std::shared_ptr<std::unordered_set<std::string>>
  GetMutableUnsupportedFp16Ops();

 private:
  AmpOperators();  // forbid calling default constructor

  // The set of ops that support fp16 calculation and are considered
  // numerically safe and performance-critical. These ops are always
  // converted to fp16.
  std::shared_ptr<std::unordered_set<std::string>> allow_ops_;

  // The set of ops that support fp16 calculation but are considered
  // numerically dangerous; their effects may also be observed in
  // downstream ops.
  std::shared_ptr<std::unordered_set<std::string>> block_ops_;

  // The set of ops that have no fp16 CUDA kernel.
  std::shared_ptr<std::unordered_set<std::string>> unsupported_fp16_ops_;
};

std::ostream& operator<<(std::ostream& os, AmpOperators& ops);
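
// Example (a sketch, not prescribed by this header): adjusting the op sets
// at runtime through the singleton. "my_custom_op" is a hypothetical op name
// used purely for illustration.
//
//   AmpOperators::Instance().GetMutableAllowOps()->insert("my_custom_op");
//   AmpOperators::Instance().GetMutableBlockOps()->erase("my_custom_op");
//   std::cout << AmpOperators::Instance();  // dumps all three op sets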

// NOTE(zhiqiu): AutoCastGuard is an RAII guard: it switches the AMP level on
// construction and restores the previous level on destruction.
class AutoCastGuard {
 public:
  explicit AutoCastGuard(paddle::imperative::AmpLevel guard_level) {
    pre_amp_level_ = Controller::Instance().GetAMPLevel();

    if (pre_amp_level_ != guard_level) {
      Controller::Instance().SetAMPLevel(guard_level);
    }
  }

  ~AutoCastGuard() { Controller::Instance().SetAMPLevel(pre_amp_level_); }

  // forbid copy and operator=
  AutoCastGuard(const AutoCastGuard& guard) = delete;
  AutoCastGuard& operator=(const AutoCastGuard& guard) = delete;

 private:
  paddle::imperative::AmpLevel pre_amp_level_;
};
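
// Usage sketch (assumes AmpLevel::O1 is defined by the imperative AMP header
// included above):
//
//   {
//     AutoCastGuard guard(paddle::imperative::AmpLevel::O1);
//     // ops traced in this scope run under AMP level O1
//   }  // previous AMP level restored here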

NameTensorMap AutoCastInputs(const std::string& op_type,
                             const NameTensorMap& ins);

NameTensorMap CastPureFp16Inputs(const std::string& op_type,
                                 const NameTensorMap& ins);
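
// Usage sketch: under AMP, an op's inputs are rewritten before tracing,
// according to the op sets above. "conv2d" and the variable names are
// illustrative only.
//
//   NameTensorMap ins = /* inputs gathered for the op */;
//   auto amp_ins = AutoCastInputs("conv2d", ins);       // O1-style casting
//   auto fp16_ins = CastPureFp16Inputs("conv2d", ins);  // pure-fp16 casting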

}  // namespace legacy
}  // namespace egr