Commit 40e778fb authored by Megvii Engine Team

fix(mgb): fix-deconv-io16c32-pass-fail

GitOrigin-RevId: 4b44cb48cb15206c4f5735f33927200407dbd283
Parent 9f4bffbd
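The diff adds a `replace_deconv_opr` rule to `ConvertF32ToF16Pass` and registers it for `opr::ConvolutionBackwardData`, so graphs containing a deconvolution no longer fail under the f16-io/f32-comp option. As a minimal sketch of how the pass is driven (the calls mirror the test added in this diff; `y` is assumed to be the output `SymbolVar` of a graph containing a deconv):

```cpp
// Enable f16 I/O with f32 accumulation, then run the inference optimizer.
SymbolVar y_opt;
auto options = gopt::OptimizeForInferenceOptions{};
options.enable_f16_io_f32_comp();
unpack_vector(gopt::optimize_for_inference({y}, options), y_opt);
```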
@@ -771,6 +771,29 @@ std::unique_ptr<ConvertF32ToF16Pass> ConvertF32ToF16Pass::make(
        return new_conv_opr.node()->owner_opr();
    };
    auto replace_deconv_opr = [use_f32_comp](OperatorNodeBase* opr,
                                             const VarNodeArray& new_inp) {
        mgb_assert(opr->input().size() == new_inp.size());
        auto& deconv_opr = opr->cast_final_safe<opr::ConvolutionBackwardData>();
        auto new_param = deconv_opr.param();
        if (use_f32_comp) {
            new_param.compute_mode =
                    megdnn::param::Convolution::ComputeMode::FLOAT32;
        }
        // Both inputs must already have been converted to f16 by the pass.
        mgb_assert(new_inp[0]->dtype() == dtype::Float16(),
                   "inp %s:%s, owner_opr:%s", new_inp[0]->dtype().name(),
                   new_inp[0]->name().c_str(),
                   new_inp[0]->owner_opr()->name().c_str());
        mgb_assert(new_inp[1]->dtype() == dtype::Float16(),
                   "inp %s:%s, owner_opr:%s", new_inp[1]->dtype().name(),
                   new_inp[1]->name().c_str(),
                   new_inp[1]->owner_opr()->name().c_str());
        // Rebuild the deconv on the converted inputs, keeping policy/config.
        auto new_deconv_opr = opr::ConvolutionBackwardData::make(
                new_inp[0], new_inp[1], new_param,
                deconv_opr.execution_policy(), deconv_opr.config());
        return new_deconv_opr.node()->owner_opr();
    };
    auto replace_convbias_opr = [use_f32_comp](OperatorNodeBase* opr,
                                               const VarNodeArray& new_inp) {
        auto& convbias_opr = opr->cast_final_safe<opr::ConvBiasForward>();
@@ -941,6 +964,7 @@ std::unique_ptr<ConvertF32ToF16Pass> ConvertF32ToF16Pass::make(
    replace_func[opr::Host2DeviceCopy::typeinfo()] = replace_h2d_opr;
    replace_func[opr::SharedDeviceTensor::typeinfo()] = replace_sdt_opr;
    replace_func[opr::Convolution::typeinfo()] = replace_conv_opr;
    replace_func[opr::ConvolutionBackwardData::typeinfo()] = replace_deconv_opr;
    replace_func[opr::ConvBias::typeinfo()] = replace_convbias_opr;
    replace_func[opr::MatrixMul::typeinfo()] = replace_matmul_opr;
    replace_func[opr::Reduce::typeinfo()] = replace_reduce_opr;
......
@@ -710,6 +710,33 @@ TEST(TestGoptInference, Float16IOFloat32Compute) {
    MGB_ASSERT_TENSOR_NEAR(host_y, host_y_opt, 1e-3);
}

TEST(TestGoptInference, Float16IOFloat32ComputeDeConv) {
    constexpr size_t INP_H = 10, INP_W = 10;
    HostTensorGenerator<> gen;
    auto graph = ComputingGraph::make();
    auto mkvar = [&](const char* name, const TensorShape& shp) {
        return opr::Host2DeviceCopy::make(*graph, gen(shp)).rename(name);
    };
    graph->options().graph_opt_level = 0;
    auto s0 = mkvar("s0", {5, 5, 3, 3}),
         s1 = mkvar("s1", {1, 5, INP_H, INP_W});
    auto y = opr::ConvolutionBackwardData::make(s0, s1, {}, {});
    SymbolVar y_opt;
    auto options = gopt::OptimizeForInferenceOptions{};
    options.enable_f16_io_f32_comp();
    unpack_vector(gopt::optimize_for_inference({y}, options), y_opt);
    // The converted deconv must carry f32 compute mode and an f32 output.
    ASSERT_EQ(find_opr<opr::ConvolutionBackwardData>(y_opt).param().compute_mode,
              opr::ConvolutionBackwardData::Param::ComputeMode::FLOAT32);
    ASSERT_EQ(y_opt.dtype(), dtype::Float32());
    HostTensorND host_y, host_y_opt;
    auto func = graph->compile({make_callback_copy(y, host_y),
                                make_callback_copy(y_opt, host_y_opt)});
    func->execute();
    // Looser tolerance than the pure-f32 test above, since I/O is f16.
    MGB_ASSERT_TENSOR_NEAR(host_y, host_y_opt, 1e-2);
}

TEST(TestGoptInference, Float16IOFloat32ComputeWarpPerspective) {
    constexpr size_t INP_H = 10, INP_W = 10, N = 2;
    HostTensorGenerator<> gen;
......
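For readers extending the pass to further operators, the commit follows a uniform pattern: a replacement functor rebuilds the operator from the dtype-converted inputs, and is registered against the operator's typeinfo. A hypothetical sketch of that pattern (`opr::Foo` and `replace_foo_opr` are placeholder names, not part of this commit):

```cpp
// Placeholder example following the commit's pattern; opr::Foo stands in
// for any single-input operator the pass needs to handle.
auto replace_foo_opr = [](OperatorNodeBase* opr, const VarNodeArray& new_inp) {
    mgb_assert(opr->input().size() == new_inp.size());
    auto& foo_opr = opr->cast_final_safe<opr::Foo>();
    // Rebuild with the converted inputs, keeping the original param/config.
    auto new_foo_opr = opr::Foo::make(new_inp[0], foo_opr.param(),
                                      foo_opr.config());
    return new_foo_opr.node()->owner_opr();
};
replace_func[opr::Foo::typeinfo()] = replace_foo_opr;
```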