You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

resize.cpp 3.0 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778
  1. /**
  2. * \file dnn/test/naive/resize.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
#include "test/naive/fixture.h"

#include "megdnn/oprs/cv.h"
#include "test/common/checker.h"
#include "test/common/resize.h"

#include <cstdint>
#include <memory>
#include <vector>
  15. using namespace megdnn;
  16. using namespace test;
  17. TEST_F(NAIVE, RESIZE_NCHW4) {
  18. Checker<Resize> checker(handle());
  19. auto args = resize::get_nchw4_args();
  20. auto convert_true_format = [](const TensorLayout& layout) {
  21. return layout
  22. .reshape({layout[0], layout[1] / 4, layout[2], layout[3], 4})
  23. .dimshuffle({0, 1, 4, 2, 3});
  24. };
  25. for (auto&& arg : args) {
  26. auto extra_impl = [ this, param = arg.param, convert_true_format ](
  27. const TensorNDArray& tensors) {
  28. auto resize = handle()->create_operator<Resize>();
  29. resize->param().imode = param.imode;
  30. resize->param().format = Resize::Param::Format::NCHW;
  31. TensorNDArray nchw_tensors;
  32. for (size_t i = 0; i < tensors.size(); ++i) {
  33. auto layout = tensors[i].layout;
  34. layout = layout.reshape({layout[0], layout[1] * 4, layout[2],
  35. layout[3]});
  36. layout.dtype = dtype::Int8();
  37. nchw_tensors.emplace_back(malloc(layout.span().dist_byte()),
  38. layout);
  39. }
  40. TensorNDArray nchw4_tensors;
  41. for (size_t i = 0; i < tensors.size(); ++i) {
  42. auto layout = convert_true_format(nchw_tensors[i].layout);
  43. nchw4_tensors.emplace_back(tensors[i].raw_ptr,
  44. std::move(layout));
  45. }
  46. auto relayout = handle()->create_operator<RelayoutForward>();
  47. relayout->exec(nchw4_tensors[0], nchw_tensors[0]);
  48. auto workspace_size = resize->get_workspace_in_bytes(
  49. nchw_tensors[0].layout, nchw_tensors[1].layout);
  50. dt_byte* workspace_ptr =
  51. static_cast<dt_byte*>(malloc(workspace_size));
  52. Workspace workspace{workspace_ptr, workspace_size};
  53. resize->exec(nchw_tensors[0], nchw_tensors[1], workspace);
  54. relayout->exec(nchw_tensors[1], nchw4_tensors[1]);
  55. free(workspace_ptr);
  56. for (auto &&tensor : nchw_tensors) {
  57. free(tensor.raw_ptr);
  58. }
  59. };
  60. checker.set_extra_opr_impl(extra_impl);
  61. checker.set_param(arg.param)
  62. .set_dtype(0, dtype::QuantizedS8(0.1f))
  63. .set_dtype(1, dtype::QuantizedS8(0.1f))
  64. .set_epsilon(1 + 1e-3)
  65. .execs({arg.src, arg.dst});
  66. }
  67. }

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台

Contributors (1)