未验证 提交 ca72aa2a 编写于 作者: J jiangfan06 提交者: GitHub

Fix reduce_ops for mixed-precision FP16 support (#55573)

上级 14094aad
@@ -415,7 +415,8 @@ void AutoMixedPrecisionPass::GetOpPrecision() const {
        auto out_dtype = op_node->Op()->GetAttrIfExists<int>("out_dtype");
        support_low_precision =
            support_low_precision &&
            (IsFP32AndFP64(static_cast<VarType::Type>(out_dtype)) ||
             out_dtype == -1);
      }
      // If scale op's "scale" and "bias" attr value exceed the range of fp16
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册