adaptive_pooling.cpp 3.6 kB

/**
 * \file dnn/test/cuda/adaptive_pooling.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */
#include "test/cuda/fixture.h"

#include "megdnn/tensor_iter.h"
#include "test/common/adaptive_pooling.h"
#include "test/common/checker.h"
#include "src/common/utils.h"
#include "test/cuda/utils.h"

#include <cudnn.h>

#include "test/cuda/benchmark.h"

namespace megdnn {
namespace test {
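
// Forward test: run the CUDA adaptive pooling kernel on each predefined test
// case and compare against the reference implementation (epsilon 1e-2).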
TEST_F(CUDA, ADAPTIVE_POOLING_FORWARD) {
    auto args = adaptive_pooling::get_args();
    using Format = param::AdaptivePooling::Format;
    DType dtype = dtype::Float32();
    for (auto&& arg : args) {
        auto param = arg.param;
        auto src = arg.ishape;
        auto dst = arg.oshape;
        param.format = Format::NCHW;
        Checker<AdaptivePooling> checker(handle_cuda());
        checker.set_epsilon(1e-2);
        checker.set_param(param).set_dtype(0, dtype).set_dtype(1, dtype).exec(
                TensorShapeArray{src, dst, {}});
    }
}
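
// Backward test: AdaptivePoolingBackward consumes the forward result, so a
// tensor constraint first computes it on device before the gradient check
// runs. The four tensors passed to exec are {src, dst, diff, grad}.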
TEST_F(CUDA, ADAPTIVE_POOLING_BACKWARD) {
    auto args = adaptive_pooling::get_args();
    for (auto&& arg : args) {
        Checker<AdaptivePoolingBackward> checker(handle_cuda());
        TensorLayout ilayout = TensorLayout(arg.ishape, dtype::Float32());
        TensorLayout olayout = TensorLayout(arg.oshape, dtype::Float32());
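
        // Fill tensors_orig[1] (the forward output): copy the input to the
        // device, run AdaptivePoolingForward there, then copy the result back.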
        auto constraint = [this,
                           arg](CheckerHelper::TensorValueArray& tensors_orig) {
            megdnn_assert(tensors_orig.size() == 4);
            auto opr = handle_cuda()->create_operator<AdaptivePoolingForward>();
            opr->param() = arg.param;

            auto tensors_cuda_storage = CheckerHelper::alloc_tensors(
                    handle_cuda(),
                    {tensors_orig[0].layout, tensors_orig[1].layout}, 0);
            auto&& tensors_cuda = *tensors_cuda_storage;

            auto span = tensors_cuda[0].layout.span();
            auto dst = static_cast<dt_byte*>(tensors_cuda[0].raw_ptr()) +
                       span.low_byte;
            auto src = static_cast<const dt_byte*>(tensors_orig[0].raw_ptr()) +
                       span.low_byte;
            megdnn_memcpy_H2D(handle_cuda(), dst, src, span.dist_byte());

            auto workspace_size = opr->get_workspace_in_bytes(
                    tensors_cuda[0].layout, tensors_cuda[1].layout);
            auto workspace_cuda = megdnn_malloc(handle_cuda(), workspace_size);
            Workspace workspace{static_cast<dt_byte*>(workspace_cuda),
                                workspace_size};
            opr->exec(tensors_cuda[0], tensors_cuda[1], workspace);
            megdnn_free(handle_cuda(), workspace_cuda);

            span = tensors_cuda[1].layout.span();
            dst = static_cast<dt_byte*>(tensors_orig[1].raw_ptr()) +
                  span.low_byte;
            src = static_cast<const dt_byte*>(tensors_cuda[1].raw_ptr()) +
                  span.low_byte;
            megdnn_memcpy_D2H(handle_cuda(), dst, src, span.dist_byte());
        };

        DType dtype = dtype::Float32();
        checker.set_tensors_constraint(constraint)
                .set_dtype(0, dtype)
                .set_dtype(1, dtype)
                .set_dtype(2, dtype)
                .set_dtype(3, dtype)
                .set_param(arg.param)
                .exec(TensorShapeArray{ilayout, olayout, olayout, ilayout});
    }
}

}  // namespace test
}  // namespace megdnn

// vim: syntax=cpp.doxygen