
fix(dnn/test): fix some bugs when force_deduce_layout is off

GitOrigin-RevId: d7ccc397df
release-1.10
Megvii Engine Team, 3 years ago
commit 70209667e8
7 changed files with 14 additions and 11 deletions
  1. dnn/src/common/fake_quant.cpp  (+1, -1)
  2. dnn/src/common/lsq.cpp  (+1, -1)
  3. dnn/src/common/tqt.cpp  (+1, -1)
  4. dnn/test/common/checker.h  (+1, -1)
  5. dnn/test/common/elemwise.cpp  (+2, -0)
  6. dnn/test/cuda/check_non_finite.cpp  (+5, -4)
  7. dnn/test/cuda/diag.cpp  (+3, -3)

dnn/src/common/fake_quant.cpp  (+1, -1)

@@ -16,7 +16,7 @@
 namespace megdnn {

 void FakeQuantBase::deduce_layout_fwd(const TensorLayout& input, TensorLayout& output) {
-    output = TensorLayout(input, input.dtype);
+    output = TensorLayout(input);
 }

 void FakeQuantBase::check_layout_fwd(


dnn/src/common/lsq.cpp  (+1, -1)

@@ -16,7 +16,7 @@
 namespace megdnn {

 void LSQBase::deduce_layout_fwd(const TensorLayout& input, TensorLayout& output) {
-    output = TensorLayout(input, input.dtype);
+    output = TensorLayout(input);
 }

 void LSQBase::check_layout_fwd(


dnn/src/common/tqt.cpp  (+1, -1)

@@ -16,7 +16,7 @@
 namespace megdnn {

 void TQTBase::deduce_layout_fwd(const TensorLayout& input, TensorLayout& output) {
-    output = TensorLayout(input, input.dtype);
+    output = TensorLayout(input);
 }

 void TQTBase::check_layout_fwd(
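The same one-line change is applied to FakeQuantBase, LSQBase and TQTBase: the deduced output now copies the input layout directly instead of going through the (shape, dtype) constructor. Assuming that constructor rebuilds contiguous strides while the plain copy keeps the input's strides (an assumption about MegDNN's TensorLayout, not stated in this diff), the two forms differ only for non-contiguous inputs. A minimal standalone sketch with hypothetical types, not MegDNN code:

// Hypothetical stand-in types: contrasts rebuilding a contiguous layout from
// (shape, dtype) with copying the source layout, strides included.
#include <vector>

struct Shape {
    std::vector<int> dims;
};

struct Layout : Shape {
    std::vector<int> strides;
    int dtype = 0;

    Layout() = default;
    // "(shape, dtype)"-style construction: contiguous strides are recomputed.
    Layout(const Shape& s, int dt) : Shape(s), dtype(dt) {
        strides.assign(dims.size(), 1);
        for (int i = int(dims.size()) - 2; i >= 0; --i)
            strides[i] = strides[i + 1] * dims[i + 1];
    }
    // The implicit copy constructor keeps whatever strides the source had.
};

Layout deduce_from_shape_dtype(const Layout& in) { return Layout(in, in.dtype); }  // contiguous rebuild
Layout deduce_copy(const Layout& in) { return in; }                                // verbatim copy

int main() {
    Layout in;
    in.dims = {4, 8};
    in.strides = {16, 1};  // non-contiguous: row stride padded from 8 to 16
    in.dtype = 1;
    Layout a = deduce_from_shape_dtype(in);  // strides recomputed -> {8, 1}
    Layout b = deduce_copy(in);              // strides copied     -> {16, 1}
    return (a.strides[0] == 8 && b.strides[0] == 16) ? 0 : 1;
}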


dnn/test/common/checker.h  (+1, -1)

@@ -84,7 +84,7 @@ protected:
     TensorsConstriant m_tensor_constraint;
     bool m_no_naive_and_check = false;
     bool m_stable_check = false;
-    bool m_force_deduce_dst = true;
+    bool m_force_deduce_dst = false;
     bool m_allow_invalid_check = false;
     /**
      * the offset from the start of malloc memory
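With the default flipped to false, the checker no longer forces deduction over a dst layout that the test spells out; only an empty dst shape still triggers deduction. That is why the test changes below switch shapes like {1} to {}. A minimal standalone sketch of that policy (hypothetical TestLayout and resolve_dst, not the actual Checker code):

// Hypothetical sketch of the dst-resolution policy the rest of this commit
// relies on. ndim == 0 stands for an empty shape supplied by the test.
#include <cstddef>

struct TestLayout {
    std::size_t ndim = 0;
};

TestLayout resolve_dst(
        bool force_deduce_dst, const TestLayout& given, const TestLayout& deduced) {
    if (force_deduce_dst || given.ndim == 0)
        return deduced;  // operator's deduce_layout picks shape and dtype
    return given;        // the shape written in the test is used verbatim
}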


dnn/test/common/elemwise.cpp  (+2, -0)

@@ -756,6 +756,8 @@ DEF_TEST(all_modes) {
     for (size_t i = 0; i < shapes.size() - 1; ++i) {
         shapes[i] = {3, 9, 7};
     }
+    //! NOTE: force set output layout to empty to trigger layout deduce
+    shapes[shapes.size() - 1] = {};
     auto do_run = [&](DType dtype, float eps = 1e-3) {
         // limit value ranges for some modes
         if (mode == Mode::LOG || mode == Mode::LOG1P) {


dnn/test/cuda/check_non_finite.cpp  (+5, -4)

@@ -22,16 +22,17 @@ TEST_F(CUDA, CHECK_NON_FINITE_BASIC) {
     const auto nan = std::numeric_limits<float>::quiet_NaN();
     UniformFloatWithValueRNG rng(-1.0f, 1.0f, 0.1f, inf);
     checker.set_rng(0, &rng);
-    checker.execs({{512 * 4}, {4}, {1}});
+    //! while deduce layout, dst tensor dtype will be set to Int32
+    checker.execs({{512 * 4}, {4}, {}});
     rng = UniformFloatWithValueRNG(-1.0f, 1.0f, 1.f, inf);
     checker.set_rng(0, &rng);
-    checker.execs({{4}, {512 * 4}, {1}});
+    checker.execs({{4}, {512 * 4}, {}});
     rng = UniformFloatWithValueRNG(-1.0f, 1.0f, 1.f, nan);
     checker.set_rng(0, &rng);
-    checker.execs({{32}, {256}, {1}});
+    checker.execs({{32}, {256}, {}});
     rng = UniformFloatWithValueRNG(-1.0f, 1.0f, 0.f, nan);
     checker.set_rng(0, &rng);
-    checker.execs({{16}, {16}, {2}, {1}});
+    checker.execs({{16}, {16}, {2}, {}});
 }

 } // namespace test


dnn/test/cuda/diag.cpp  (+3, -3)

@@ -26,10 +26,10 @@ TEST_F(CUDA, DIAG) {
     checker.set_dtype(1, dtype);
     size_t absk = static_cast<size_t>(std::abs(k));
     checker.exec(TensorShapeArray{{8}, {8 + absk, 8 + absk}});
+    //! NOTE: diag for vector or matrix is a vector
     auto oshape = [&](int n, int m) -> TensorShape {
-        size_t o = (k >= 0 ? std::min(n - k, m) : std::min(m + k, n));
-        return {o, o};
+        size_t o = (k >= 0 ? std::min(m - k, n) : std::min(n + k, m));
+        return {o};
     };
     checker.exec(TensorShapeArray{{8, 6}, oshape(8, 6)});
     checker.exec(TensorShapeArray{{6, 8}, oshape(6, 8)});
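For reference, the updated lambda computes the diagonal length of an n x m input at offset k: min(m - k, n) elements for k >= 0 and min(n + k, m) for k < 0. A small standalone check of a few values (separate from diag.cpp, not part of the test file):

// Standalone arithmetic check of the diagonal-length formula used above.
#include <algorithm>
#include <cassert>

int diag_len(int n, int m, int k) {
    return k >= 0 ? std::min(m - k, n) : std::min(n + k, m);
}

int main() {
    assert(diag_len(8, 6, 0) == 6);   // {8, 6} input, main diagonal
    assert(diag_len(8, 6, 2) == 4);   // shifted right by two columns
    assert(diag_len(6, 8, -3) == 3);  // {6, 8} input, shifted down by three rows
    return 0;
}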

