提交 70209667 编写于 作者: M Megvii Engine Team

fix(dnn/test): fix some bug when force_deduce_layout is off

GitOrigin-RevId: d7ccc397dfdbd9e2c599a24dd072242f69d19027
上级 597a1e79
......@@ -16,7 +16,7 @@
namespace megdnn {
// Deduce the forward output layout for FakeQuant: the output mirrors the
// input layout (shape, strides and dtype are taken from `input` via the
// TensorLayout copy constructor).
//
// NOTE(review): the rendered diff showed two consecutive assignments to
// `output` (the pre-change `TensorLayout(input, input.dtype)` followed by
// the post-change `TensorLayout(input)`); the first was a dead store whose
// result was immediately overwritten, so only the surviving form is kept.
void FakeQuantBase::deduce_layout_fwd(const TensorLayout& input, TensorLayout& output) {
    output = TensorLayout(input);
}
void FakeQuantBase::check_layout_fwd(
......
......@@ -16,7 +16,7 @@
namespace megdnn {
// Deduce the forward output layout for LSQ: the output is a direct copy of
// the input layout (shape, strides and dtype all come from `input`).
//
// NOTE(review): the diff rendering merged the removed line
// `TensorLayout(input, input.dtype)` with the added `TensorLayout(input)`,
// producing a redundant first assignment that was immediately overwritten;
// the dead store is removed and the final (new) behavior is preserved.
void LSQBase::deduce_layout_fwd(const TensorLayout& input, TensorLayout& output) {
    output = TensorLayout(input);
}
void LSQBase::check_layout_fwd(
......
......@@ -16,7 +16,7 @@
namespace megdnn {
// Deduce the forward output layout for TQT: the output replicates the input
// layout wholesale via TensorLayout's copy constructor.
//
// NOTE(review): as rendered, this hunk contained both the pre-change
// assignment (`TensorLayout(input, input.dtype)`) and the post-change one
// (`TensorLayout(input)`) back to back — a dead store artifact of the diff
// view. Only the surviving assignment is kept; behavior is unchanged.
void TQTBase::deduce_layout_fwd(const TensorLayout& input, TensorLayout& output) {
    output = TensorLayout(input);
}
void TQTBase::check_layout_fwd(
......
......@@ -84,7 +84,7 @@ protected:
TensorsConstriant m_tensor_constraint;
bool m_no_naive_and_check = false;
bool m_stable_check = false;
bool m_force_deduce_dst = true;
bool m_force_deduce_dst = false;
bool m_allow_invalid_check = false;
/**
* the offset from the start of malloc memory
......
......@@ -756,6 +756,8 @@ DEF_TEST(all_modes) {
for (size_t i = 0; i < shapes.size() - 1; ++i) {
shapes[i] = {3, 9, 7};
}
//! NOTE: force set output layout to empty to trigger layout deduce
shapes[shapes.size() - 1] = {};
auto do_run = [&](DType dtype, float eps = 1e-3) {
// limit value ranges for some modes
if (mode == Mode::LOG || mode == Mode::LOG1P) {
......
......@@ -22,16 +22,17 @@ TEST_F(CUDA, CHECK_NON_FINITE_BASIC) {
const auto nan = std::numeric_limits<float>::quiet_NaN();
UniformFloatWithValueRNG rng(-1.0f, 1.0f, 0.1f, inf);
checker.set_rng(0, &rng);
checker.execs({{512 * 4}, {4}, {1}});
//! while deduce layout, dst tensor dtype will be set to Int32
checker.execs({{512 * 4}, {4}, {}});
rng = UniformFloatWithValueRNG(-1.0f, 1.0f, 1.f, inf);
checker.set_rng(0, &rng);
checker.execs({{4}, {512 * 4}, {1}});
checker.execs({{4}, {512 * 4}, {}});
rng = UniformFloatWithValueRNG(-1.0f, 1.0f, 1.f, nan);
checker.set_rng(0, &rng);
checker.execs({{32}, {256}, {1}});
checker.execs({{32}, {256}, {}});
rng = UniformFloatWithValueRNG(-1.0f, 1.0f, 0.f, nan);
checker.set_rng(0, &rng);
checker.execs({{16}, {16}, {2}, {1}});
checker.execs({{16}, {16}, {2}, {}});
}
} // namespace test
......
......@@ -26,10 +26,10 @@ TEST_F(CUDA, DIAG) {
checker.set_dtype(1, dtype);
size_t absk = static_cast<size_t>(std::abs(k));
checker.exec(TensorShapeArray{{8}, {8 + absk, 8 + absk}});
//! NOTE: diag for vector or matrix is a vector
auto oshape = [&](int n, int m) -> TensorShape {
size_t o = (k >= 0 ? std::min(n - k, m) : std::min(m + k, n));
return {o, o};
size_t o = (k >= 0 ? std::min(m - k, n) : std::min(n + k, m));
return {o};
};
checker.exec(TensorShapeArray{{8, 6}, oshape(8, 6)});
checker.exec(TensorShapeArray{{6, 8}, oshape(6, 8)});
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册