You cannot select more than 25 topics. Topics must start with a Chinese character, a letter, or a number; they can include dashes ('-') and can be up to 35 characters long.

cond_take.cpp 3.3 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293
  1. /**
  2. * \file dnn/test/common/cond_take.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include "./cond_take.h"
  12. #include "./rng.h"
  13. #include "./tensor.h"
  14. #include "./utils.h"
  15. using namespace megdnn;
  16. using namespace test;
  17. using Param = CondTake::Param;
  18. std::vector<CondTakeTestcase> CondTakeTestcase::make() {
  19. std::vector<CondTakeTestcase> ret;
  20. for (uint32_t mode = 0; mode < Param::MODE_NR_MEMBER; ++mode) {
  21. ret.push_back({
  22. Param{static_cast<Param::Mode>(mode), 0.1f, 0.1f},
  23. TensorLayout{{1}, dtype::Int8()},
  24. TensorLayout{{1}, dtype::Float32()},
  25. });
  26. ret.push_back({
  27. Param{static_cast<Param::Mode>(mode), 0.1f, 0.1f},
  28. TensorLayout{{2, 3}, dtype::Int8()},
  29. TensorLayout{{2, 3}, dtype::Float32()},
  30. });
  31. ret.push_back({
  32. Param{static_cast<Param::Mode>(mode), 100},
  33. TensorLayout{{1024}, dtype::Float32()},
  34. TensorLayout{{1024}, dtype::Int32()},
  35. });
  36. }
  37. NormalRNG data_rng;
  38. UniformIntRNG rng_byte(0, 255);
  39. auto fill_data = [&](TensorND data) {
  40. auto sz = data.layout.span().dist_byte(), szf = sz / sizeof(dt_float32);
  41. auto pf = static_cast<dt_float32*>(data.raw_ptr());
  42. data_rng.fill_fast_float32(pf, szf);
  43. auto prem = reinterpret_cast<uint8_t*>(pf + szf);
  44. size_t szrem = sz % sizeof(dt_float32);
  45. for (size_t i = 0; i < szrem; ++i) {
  46. prem[i] = rng_byte.gen_single_val();
  47. }
  48. };
  49. for (auto&& i : ret) {
  50. auto size0 = i.m_data.layout.span().dist_byte(),
  51. size1 = i.m_mask.layout.span().dist_byte();
  52. i.m_mem.reset(new uint8_t[size0 + size1]);
  53. i.m_data.reset_ptr(i.m_mem.get());
  54. i.m_mask.reset_ptr(i.m_mem.get() + size0);
  55. fill_data(i.m_data);
  56. auto mean = i.m_param.val;
  57. if (i.m_mask.layout.dtype == dtype::Int32()) {
  58. UniformIntRNG rng(mean - 10, mean + 10);
  59. rng.gen(i.m_mask);
  60. } else {
  61. megdnn_assert(i.m_mask.layout.dtype == dtype::Float32());
  62. NormalRNG rng(mean);
  63. rng.gen(i.m_mask);
  64. }
  65. }
  66. return ret;
  67. }
  68. CondTakeTestcase::Result CondTakeTestcase::run(CondTake* opr) {
  69. auto handle = opr->handle();
  70. auto data = make_tensor_h2d(handle, m_data), mask = make_tensor_h2d(handle, m_mask);
  71. opr->param() = m_param;
  72. DynOutMallocPolicyImpl malloc_policy(handle);
  73. auto workspace_size = opr->get_workspace_in_bytes(data->layout);
  74. auto workspace_ptr = malloc_policy.alloc_workspace(workspace_size, nullptr);
  75. auto result = opr->exec(
  76. *data, *mask, {(dt_byte*)workspace_ptr, workspace_size}, &malloc_policy);
  77. malloc_policy.free_workspace(workspace_ptr, nullptr);
  78. return {make_tensor_d2h(handle, result[0]), make_tensor_d2h(handle, result[1])};
  79. }
  80. // vim: syntax=cpp.doxygen