// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/imperative/amp_auto_cast.h"

#include <memory>
#include <string>

#include "paddle/fluid/imperative/tracer.h"

namespace paddle {
namespace imperative {

class VarBase;

AmpOperators::AmpOperators()
    : allow_ops_(new std::unordered_set<std::string>()),
      block_ops_(new std::unordered_set<std::string>()),
      unsupported_fp16_ops_(new std::unordered_set<std::string>()) {
  auto& all_kernels = framework::OperatorWithKernel::AllOpKernels();
  auto fp16_dtype = framework::proto::VarType::FP16;
  for (auto it = all_kernels.begin(); it != all_kernels.end(); it++) {
    bool supported = false;
    for (auto& kernel_type : it->second) {
      if (platform::is_gpu_place(kernel_type.first.place_) &&
          kernel_type.first.data_type_ == fp16_dtype) {
        supported = true;
      }
    }
    if (!supported) {
      unsupported_fp16_ops_->insert(it->first);
    }
  }
}
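// The scan above visits every registered kernel once: an op is added to
// unsupported_fp16_ops_ unless it registers at least one GPU kernel whose
// data type is fp16. As an illustrative (hypothetical) example, an op whose
// only registration is
//   REGISTER_OP_CUDA_KERNEL(my_op, MyOpKernel<float>);
// has no fp16 GPU kernel, so AutoCastInputs() will later keep its inputs in
// fp32 even when the promote rule would otherwise pick fp16.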

AmpOperators::~AmpOperators() {}

AmpOperators& AmpOperators::Instance() {
  static AmpOperators instance;
  return instance;
}

std::shared_ptr<std::unordered_set<std::string>>
AmpOperators::GetMutableAllowOps() {
  return allow_ops_;
}

std::shared_ptr<std::unordered_set<std::string>>
AmpOperators::GetMutableBlockOps() {
  return block_ops_;
}

std::shared_ptr<std::unordered_set<std::string>>
AmpOperators::GetMutableUnsupportedFp16Ops() {
  return unsupported_fp16_ops_;
}

std::ostream& operator<<(std::ostream& os, AmpOperators& ops) {
  os << "allow ops: ";
  auto allow_ops = ops.GetMutableAllowOps();
  std::copy((*allow_ops).begin(), (*allow_ops).end(),
            std::ostream_iterator<std::string>(os, " "));
  os << "\n";
  os << "block ops: ";
  auto block_ops = ops.GetMutableBlockOps();
  std::copy((*block_ops).begin(), (*block_ops).end(),
            std::ostream_iterator<std::string>(os, " "));
  os << "\n";
  os << "unsupported fp16 ops: ";
  auto unsupported_fp16_ops = ops.GetMutableUnsupportedFp16Ops();
  std::copy((*unsupported_fp16_ops).begin(), (*unsupported_fp16_ops).end(),
            std::ostream_iterator<std::string>(os, " "));
  return os;
}
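// This printer is handy when debugging which ops ended up in each list, e.g.
// (illustrative):
//   VLOG(4) << AmpOperators::Instance();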

inline std::string GetDtypeStr(
    const std::shared_ptr<imperative::VarBase>& var) {
  return framework::DataTypeToString(var->DataType());
}

inline bool NeedCast(const std::shared_ptr<VarBase>& var) {
  if (platform::is_gpu_place(var->Place()) ||
      platform::is_cuda_pinned_place(var->Place())) {
    // CUDAPinnedPlace is added for VarBase instances created by DataLoader.
    if (var->DataType() == framework::proto::VarType::FP32 ||
        var->DataType() == framework::proto::VarType::FP16) {
      return true;
    }
  }
  return false;
}
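// In short: only fp32/fp16 VarBases on a GPU or CUDA-pinned place are
// candidates for casting; CPU tensors and integer/bool tensors pass through
// AutoCastInputs() unchanged.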

// NOTE: Trace a cast op, so that if a var is cast from fp32 to fp16, its grad
// var will be cast back from fp16 to fp32 during the backward phase.
static inline std::shared_ptr<imperative::VarBase> CastToType(
    const std::shared_ptr<VarBase>& var,
    const framework::proto::VarType::Type dst_type) {
  const auto& tracer = imperative::GetCurrentTracer();
  imperative::NameVarBaseMap ins = {{"X", {var}}};
  framework::AttributeMap attrs = {{"in_dtype", var->DataType()},
                                   {"out_dtype", dst_type}};
  auto out = std::shared_ptr<imperative::VarBase>(
      new imperative::VarBase(tracer->GenerateUniqueName()));
  imperative::NameVarBaseMap outs = {{"Out", {out}}};

  {
    AutoCastGuard guard(tracer, false);
    tracer->TraceOp("cast", ins, outs, std::move(attrs));
  }

  return out;
}
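// Because the cast is traced as a regular op rather than applied eagerly
// outside the graph, autograd sees it. A sketch of the traced program for a
// single fp32 input x (names illustrative):
//   forward:  tmp = cast(x, in_dtype=FP32, out_dtype=FP16); y = some_op(tmp)
//   backward: grad(x) = cast(grad(tmp), in_dtype=FP16, out_dtype=FP32)
// The AutoCastGuard temporarily disables auto-casting so that tracing the
// cast op itself does not trigger another round of casting.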

static inline std::shared_ptr<imperative::VarBase> CastToFP16(
    const std::shared_ptr<VarBase>& var) {
  auto dst_type = framework::proto::VarType::FP16;
  if (NeedCast(var) && (var->DataType() != dst_type)) {
    return CastToType(var, dst_type);
  }
  return var;
}

static inline std::shared_ptr<imperative::VarBase> CastToFP32(
    const std::shared_ptr<VarBase>& var) {
  auto dst_type = framework::proto::VarType::FP32;
  if (NeedCast(var) && (var->DataType() != dst_type)) {
    return CastToType(var, dst_type);
  }
  return var;
}

static inline framework::proto::VarType::Type GetPromoteType(
    const NameVarBaseMap& ins) {
  auto dst_type = framework::proto::VarType::FP16;
  for (const auto& pair : ins) {
    for (const auto& var : pair.second) {
      if (var->DataType() == framework::proto::VarType::FP32) {
        dst_type = var->DataType();
        break;
      }
    }
  }
  return dst_type;
}
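// Promotion in practice: dst_type starts at fp16 and widens to fp32 as soon
// as any input is fp32. For example, elementwise_add(X=fp16, Y=fp32) promotes
// to fp32, so X is cast up rather than Y being truncated to fp16.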

NameVarBaseMap AutoCastInputs(const std::string& op_type,
                              const NameVarBaseMap& ins) {
  NameVarBaseMap new_ins(ins);
  if (AmpOperators::Instance().GetMutableAllowOps()->count(op_type)) {
    for (auto& pair : new_ins) {
      // NOTE(zhiqiu): batch_norm and layer_norm support fp16 only for input X.
      if ((op_type == "batch_norm" || op_type == "layer_norm" ||
           op_type == "sync_batch_norm") &&
          pair.first != "X") {
        continue;
      }

      VLOG(5) << "Op(" << op_type << "): Cast " << pair.first << " from "
              << GetDtypeStr(*pair.second.cbegin()) << " to float16";
      for (auto& var : pair.second) {
        var = CastToFP16(var);
      }
    }
    return new_ins;
  } else if (AmpOperators::Instance().GetMutableBlockOps()->count(op_type)) {
    for (auto& pair : new_ins) {
      VLOG(5) << "Op(" << op_type << "): Cast " << pair.first << " from "
              << GetDtypeStr(*pair.second.cbegin()) << " to float";
      for (auto& var : pair.second) {
        var = CastToFP32(var);
      }
    }
    return new_ins;
  } else {
    auto dst_type = GetPromoteType(ins);
    // NOTE(zhiqiu): if the op has no fp16 kernel, fall back to fp32.
    if (dst_type == framework::proto::VarType::FP16 &&
        AmpOperators::Instance().GetMutableUnsupportedFp16Ops()->count(
            op_type)) {
      dst_type = framework::proto::VarType::FP32;
    }
    for (auto& pair : new_ins) {
      // NOTE(zhiqiu): batch_norm and layer_norm support fp16 only for input X.
      if ((op_type == "batch_norm" || op_type == "layer_norm" ||
           op_type == "sync_batch_norm") &&
          pair.first == "X" && dst_type == framework::proto::VarType::FP32) {
        continue;
      }
      VLOG(5) << "Op(" << op_type << "): Cast " << pair.first << " from "
              << GetDtypeStr(*pair.second.cbegin()) << " to "
              << framework::DataTypeToString(dst_type);
      for (auto& var : pair.second) {
        var = (dst_type == framework::proto::VarType::FP32 ? CastToFP32(var)
                                                           : CastToFP16(var));
      }
    }
    return new_ins;
  }
  return new_ins;
}
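
// A minimal usage sketch (hypothetical call site; in practice the tracer
// applies AutoCastInputs() itself when auto-cast is enabled):
//
//   auto& tracer = imperative::GetCurrentTracer();
//   imperative::NameVarBaseMap ins = {{"X", {x}}, {"Y", {y}}};
//   imperative::NameVarBaseMap outs = {{"Out", {out}}};
//   auto new_ins = AutoCastInputs("elementwise_add", ins);
//   tracer->TraceOp("elementwise_add", new_ins, outs, std::move(attrs));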

}  // namespace imperative
}  // namespace paddle