Commit 16f052e9 authored by luzzyzhang, committed by Xinran Xu

fix(megdnn): change ver 60 to use cuda capability 50

fix(megdnn): if compute capability less than 60 skip fp16 test
Parent c4dfdbd2
@@ -20,4 +20,4 @@ if [[ "$1" == "cpu" || "$1" == "cuda" ]] ; then
 else
     echo "Argument must cpu or cuda"
     exit 1
-fi
\ No newline at end of file
+fi
@@ -17,6 +17,7 @@
 #include "test/common/convolution.h"
 #include "test/common/rng.h"
 #include "test/cuda/benchmark.h"
+#include "test/cuda/utils.h"
 #include "src/cuda/utils.h"
@@ -203,18 +204,20 @@ TEST_F(CUDA, CONVOLUTION_BACKWARD_DATA)
                 .set_epsilon(1e-3)
                 .set_param(arg.param)
                 .exec(TensorLayoutArray{filter, dst, src});
-        src.dtype = dst.dtype = filter.dtype = dtype::Float16();
-        checker.set_rng(0, &rng)
-                .set_rng(1, &rng)
-                .set_epsilon(1e-1)
-                .set_param(arg.param)
-                .exec(TensorLayoutArray{filter, dst, src});
-        arg.param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
-        checker.set_rng(0, &rng)
-                .set_rng(1, &rng)
-                .set_epsilon(1e-1)
-                .set_param(arg.param)
-                .exec(TensorLayoutArray{filter, dst, src});
+        if (!megdnn::test::check_compute_capability(6, 0)) {
+            src.dtype = dst.dtype = filter.dtype = dtype::Float16();
+            checker.set_rng(0, &rng)
+                    .set_rng(1, &rng)
+                    .set_epsilon(1e-1)
+                    .set_param(arg.param)
+                    .exec(TensorLayoutArray{filter, dst, src});
+            arg.param.compute_mode = param::Convolution::ComputeMode::FLOAT32;
+            checker.set_rng(0, &rng)
+                    .set_rng(1, &rng)
+                    .set_epsilon(1e-1)
+                    .set_param(arg.param)
+                    .exec(TensorLayoutArray{filter, dst, src});
+        }
     }
 }
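The guard above calls megdnn::test::check_compute_capability(6, 0), which is declared in the newly included test/cuda/utils.h; that header's implementation is not part of this diff. As a rough illustration only, a helper like this could be built on the CUDA runtime API. The sketch below is an assumption about its shape rather than MegDNN's actual code, and it assumes the helper reports whether the current device's compute capability is at least major.minor.

#include <cuda_runtime.h>

namespace megdnn {
namespace test {

// Hypothetical sketch (not the real test/cuda/utils.h): true iff the current
// CUDA device's compute capability is >= major.minor.
inline bool check_compute_capability(int major, int minor) {
    int dev = 0;
    if (cudaGetDevice(&dev) != cudaSuccess)
        return false;
    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, dev) != cudaSuccess)
        return false;
    return prop.major > major || (prop.major == major && prop.minor >= minor);
}

}  // namespace test
}  // namespace megdnn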
@@ -49,7 +49,7 @@ HalideCudaTargetTrait::FeatureSet HalideCudaTargetTrait::features(
         set.set(Target::CUDACapability32);
     } else if (in(35, 40)) {
         set.set(Target::CUDACapability35);
-    } else if (in(50, 60)) {
+    } else if (in(50, 61)) {
         set.set(Target::CUDACapability50);
     } else if (in(61, 70)) {
         set.set(Target::CUDACapability61);
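This hunk widens the third range so that compute capability 6.0 selects Halide's CUDACapability50 feature instead of falling past the [50, 60) bucket, which is what the first commit message describes. Below is a small, self-contained sketch of that bucketing, assuming in(lo, hi) tests a half-open range [lo, hi) over version = major * 10 + minor; the enum and function names here are illustrative, not MegEngine's.

#include <cstdio>

enum class CudaCap { None, Cap32, Cap35, Cap50, Cap61 };

// Map an sm version (major * 10 + minor) to a capability bucket, mirroring
// the else-if chain in the diff above.
CudaCap bucket(int version) {
    auto in = [version](int lo, int hi) { return version >= lo && version < hi; };
    if (in(32, 35))
        return CudaCap::Cap32;
    if (in(35, 40))
        return CudaCap::Cap35;
    if (in(50, 61))  // previously in(50, 60): version 60 matched none of these ranges
        return CudaCap::Cap50;
    if (in(61, 70))
        return CudaCap::Cap61;
    return CudaCap::None;
}

int main() {
    // With the widened range, an sm_60 device lands in the Cap50 bucket.
    std::printf("sm_60 -> Cap50: %d\n", bucket(60) == CudaCap::Cap50);
}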