From ca72aa2ac25c0427677ae1eb85a00993b2b6fd0c Mon Sep 17 00:00:00 2001
From: jiangfan06 <117341294+MuShangCC@users.noreply.github.com>
Date: Tue, 25 Jul 2023 10:28:33 +0800
Subject: [PATCH] Fix reduce_ops for mixed-precision FP16 support (#55573)

---
 paddle/fluid/framework/ir/auto_mixed_precision_pass.cc | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/paddle/fluid/framework/ir/auto_mixed_precision_pass.cc b/paddle/fluid/framework/ir/auto_mixed_precision_pass.cc
index 6e12cf00e90..6139ccb3137 100644
--- a/paddle/fluid/framework/ir/auto_mixed_precision_pass.cc
+++ b/paddle/fluid/framework/ir/auto_mixed_precision_pass.cc
@@ -415,7 +415,8 @@ void AutoMixedPrecisionPass::GetOpPrecision() const {
         auto out_dtype = op_node->Op()->GetAttrIfExists<int>("out_dtype");
         support_low_precision =
             support_low_precision &&
-            IsFP32AndFP64(static_cast<VarType::Type>(out_dtype));
+            (IsFP32AndFP64(static_cast<VarType::Type>(out_dtype)) ||
+             out_dtype == -1);
       }
 
       // If scale op's "scale" and "bias" attr value exceed the range of fp16
-- 
GitLab
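
For context: the one-line widening of the condition treats out_dtype == -1 as "attribute unset", in which case a reduce op's output dtype simply follows its input, so the op should remain eligible for FP16 execution. Below is a minimal standalone C++ sketch of the patched predicate; the VarType enum, its values, and the KeepsLowPrecision function name are simplified stand-ins assumed for illustration, not Paddle's actual proto::VarType definitions.

#include <iostream>

// Simplified stand-in for paddle::framework::proto::VarType::Type
// (illustrative values, assumed for this sketch).
enum class VarType : int { FP16 = 4, FP32 = 5, FP64 = 6 };

// Mirrors the pass's IsFP32AndFP64 helper: true when the dtype is a
// full-precision float that the pass may downcast.
bool IsFP32AndFP64(VarType t) {
  return t == VarType::FP32 || t == VarType::FP64;
}

// The patched condition: the op stays eligible for low precision when
// out_dtype is FP32/FP64, or when out_dtype == -1, i.e. the attribute
// was never set and the output dtype follows the input dtype.
bool KeepsLowPrecision(bool support_low_precision, int out_dtype) {
  return support_low_precision &&
         (IsFP32AndFP64(static_cast<VarType>(out_dtype)) || out_dtype == -1);
}

int main() {
  std::cout << KeepsLowPrecision(true, 5) << "\n";   // explicit FP32 -> 1
  std::cout << KeepsLowPrecision(true, -1) << "\n";  // unset (the fix) -> 1
  std::cout << KeepsLowPrecision(true, 4) << "\n";   // explicit FP16 -> 0
}

Before the patch, the -1 case fell through to IsFP32AndFP64 alone, which rejected it, so reduce ops that never set an explicit out_dtype were needlessly kept in FP32 by the auto mixed precision pass.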