// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once
#include <string>
#include "paddle/fluid/eager/api/utils/global_utils.h"
#include "paddle/fluid/imperative/amp_auto_cast.h"

namespace egr {

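// Returns the dtype the inputs of `op_name` should be promoted to. Starting
// from `amp_dtype`, the result widens to FLOAT32 whenever a relevant input
// tensor is FLOAT32. In float16 mode the norm ops only inspect their first
// input and the fused attention/feedforward ops skip a fixed set of input
// slots; moving_average_abs_max_scale follows the dtype of its input X.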
static inline paddle::experimental::DataType GetPromoteType(
    const std::string& op_name,
    const std::vector<std::vector<paddle::experimental::Tensor>>&
        amp_tensors_vector,
    const paddle::experimental::DataType& amp_dtype) {
  auto dst_type = amp_dtype;
  if (egr::Controller::Instance().GetCurrentTracer()->GetAmpDtype() ==
      "float16") {
    if (op_name == "batch_norm" || op_name == "layer_norm" ||
        op_name == "sync_batch_norm") {
      if (amp_tensors_vector[0][0].dtype() ==
          paddle::experimental::DataType::FLOAT32) {
        dst_type = paddle::experimental::DataType::FLOAT32;
      }
    } else if (op_name == "fused_attention") {
      for (size_t i = 0; i < amp_tensors_vector.size(); i++) {
        if (i != 3 && i != 4 && i != 9 && i != 10) {
          if (amp_tensors_vector[i][0].dtype() ==
              paddle::experimental::DataType::FLOAT32) {
            dst_type = paddle::experimental::DataType::FLOAT32;
            break;
          }
        }
      }
    } else if (op_name == "fused_feedforward") {
      for (size_t i = 0; i < amp_tensors_vector.size(); i++) {
        if (i != 7 && i != 8 && i != 9 && i != 10) {
          if (amp_tensors_vector[i][0].dtype() ==
              paddle::experimental::DataType::FLOAT32) {
            dst_type = paddle::experimental::DataType::FLOAT32;
            break;
          }
        }
      }
    } else {
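      // Generic float16 path: any FP32 input promotes the op to FP32.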
      for (const auto& tensors : amp_tensors_vector) {
        for (const auto& tensor : tensors) {
          if (tensor.dtype() == paddle::experimental::DataType::FLOAT32) {
            dst_type = tensor.dtype();
            break;
          }
        }
      }
    }
  } else {
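    // Non-float16 AMP dtypes: any FP32 input promotes the op to FP32.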
    for (const auto& tensors : amp_tensors_vector) {
      for (const auto& tensor : tensors) {
        if (tensor.dtype() == paddle::experimental::DataType::FLOAT32) {
          dst_type = tensor.dtype();
          break;
        }
      }
    }
  }
  // NOTE(juncai): moving_average_abs_max_scale only considers the dtype of
  // input(X)
  if (op_name == "moving_average_abs_max_scale") {
    if (amp_tensors_vector[0][0].dtype() ==
        paddle::experimental::DataType::FLOAT16) {
      dst_type = paddle::experimental::DataType::FLOAT16;
    }
  }
  return dst_type;
}

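// Decides the dtype an op should run in under the current AMP settings.
// At O1 the allow/block op lists are consulted and everything else is
// promoted via GetPromoteType(); at O2 the op is cast to the low-precision
// dtype unless it is blocked or unsupported. All other configurations fall
// back to FLOAT32.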
inline paddle::experimental::DataType GetAmpDestDtype(
    const std::string& op_name,
    const std::vector<std::vector<paddle::experimental::Tensor>>&
        amp_tensors_vector) {
  auto amp_dtype =
      egr::Controller::Instance().GetCurrentTracer()->GetAmpDtype();
  auto amp_level = egr::Controller::Instance().GetAMPLevel();
  VLOG(6) << "AMP GetAmpDestDtype:"
          << " op(" << op_name << ") amp_dtype(" << amp_dtype << ") amp_level("
          << static_cast<int>(amp_level) << ").";
  if (amp_dtype == "float16") {
    if (amp_level == paddle::imperative::AmpLevel::O1) {
      if (paddle::imperative::AmpOperators::Instance()
              .GetMutableAllowOps()
              ->count(op_name)) {
        return paddle::experimental::DataType::FLOAT16;
      } else if (paddle::imperative::AmpOperators::Instance()
                     .GetMutableBlockOps()
                     ->count(op_name)) {
        return paddle::experimental::DataType::FLOAT32;
      } else {
        auto dst_type = GetPromoteType(op_name, amp_tensors_vector,
                                       paddle::experimental::DataType::FLOAT16);
        if (dst_type == paddle::experimental::DataType::FLOAT16 &&
            paddle::imperative::AmpOperators::Instance()
                .GetMutableUnsupportedFp16Ops()
                ->count(op_name)) {
          dst_type = paddle::experimental::DataType::FLOAT32;
        }
        return dst_type;
      }
    } else if (amp_level == paddle::imperative::AmpLevel::O2) {
      auto dst_type = paddle::experimental::DataType::FLOAT16;
      if (paddle::imperative::AmpOperators::Instance()
              .GetMutableUnsupportedFp16Ops()
              ->count(op_name) ||
          paddle::imperative::AmpOperators::Instance()
              .GetMutableBlockOps()
              ->count(op_name)) {
        dst_type = paddle::experimental::DataType::FLOAT32;
      }
      return dst_type;
    }
  } else if (amp_dtype == "bfloat16") {
    if (amp_level == paddle::imperative::AmpLevel::O1) {
      if (paddle::imperative::AmpOperators::Instance()
              .GetMutableAllowOps()
              ->count(op_name)) {
        return paddle::experimental::DataType::BFLOAT16;
      } else if (paddle::imperative::AmpOperators::Instance()
                     .GetMutableBlockOps()
                     ->count(op_name)) {
        return paddle::experimental::DataType::FLOAT32;
      } else {
        auto dst_type =
            GetPromoteType(op_name, amp_tensors_vector,
                           paddle::experimental::DataType::BFLOAT16);
        if (dst_type == paddle::experimental::DataType::BFLOAT16 &&
            paddle::imperative::AmpOperators::Instance()
                .GetMutableUnsupportedBf16Ops()
                ->count(op_name)) {
          dst_type = paddle::experimental::DataType::FLOAT32;
        }
        return dst_type;
      }
    } else if (amp_level == paddle::imperative::AmpLevel::O2) {
      auto dst_type = paddle::experimental::DataType::BFLOAT16;
      if (paddle::imperative::AmpOperators::Instance()
              .GetMutableUnsupportedBf16Ops()
              ->count(op_name) ||
          paddle::imperative::AmpOperators::Instance()
              .GetMutableBlockOps()
              ->count(op_name)) {
        dst_type = paddle::experimental::DataType::FLOAT32;
      }
      return dst_type;
    }
  }
  return paddle::experimental::DataType::FLOAT32;
}
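// Illustrative use from a generated dygraph op wrapper (the cast helper name
// is an assumption, not part of this header):
//
//   auto dst_dtype = egr::GetAmpDestDtype("matmul_v2", amp_tensors_vector);
//   // ... each input is then cast to dst_dtype with an
//   // EagerAmpAutoCast-style helper before the kernel is dispatched.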

}  // namespace egr