You cannot select more than 25 topics. A topic must start with a Chinese character, a letter, or a number; it may include dashes ('-') and can be up to 35 characters long.

remap.cpp 4.9 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119
  1. /**
  2. * \file dnn/src/common/remap.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
  10. * implied.
  11. */
  12. #include "megdnn/oprs.h"
  13. #include "src/common/cv/common.h"
  14. #include "src/common/cv/helper.h"
  15. #include "src/common/utils.h"
  16. namespace megdnn {
  17. void RemapBase::deduce_layout_fwd(const TensorLayout& src,
  18. const TensorLayout& map_xy,
  19. TensorLayout& dst) {
  20. dst.dtype = src.dtype;
  21. dst.ndim = src.ndim;
  22. dst.shape[0] = src.shape[0];
  23. size_t height_index, channel_index;
  24. if (param().format == param::Remap::Format::NHWC) {
  25. height_index = 1;
  26. channel_index = 3;
  27. } else {
  28. megdnn_assert(param().format == param::Remap::Format::NCHW);
  29. height_index = 2;
  30. channel_index = 1;
  31. }
  32. dst.shape[height_index] = map_xy.shape[1];
  33. dst.shape[height_index + 1] = map_xy.shape[2];
  34. dst.shape[channel_index] = src.shape[channel_index];
  35. }
/**
 * Validate the (src, map_xy, dst) layout triple for a forward remap.
 *
 * All three tensors must be 4-D; src and dst share dtype and batch size;
 * map_xy is float32, NHWC-shaped [N, H, W, 2] (one (x, y) coordinate pair
 * per output pixel), and dst's spatial dims must equal map_xy's H/W.
 */
void RemapBase::check_layout_fwd(const TensorLayout& src,
                                 const TensorLayout& map_xy,
                                 const TensorLayout& dst) {
    // Lazily builds a printable summary of all three layouts for the
    // assert failure messages below.
    auto errmsg = [&]() {
        return megdnn_layout_msg(src) + ", " + megdnn_layout_msg(map_xy) +
               ", " + megdnn_layout_msg(dst);
    };
    MEGDNN_MARK_USED_VAR(errmsg);
    // Every tensor is 4-dimensional.
    megdnn_assert(src.ndim == map_xy.ndim && src.ndim == dst.ndim &&
                  src.ndim == 4);
    megdnn_assert(dst.dtype == src.dtype);
    // Batch sizes must agree across src, map_xy and dst.
    megdnn_assert(dst.shape[0] == src.shape[0], "%s", errmsg().c_str());
    // Last axis of map_xy holds the (x, y) coordinate pair.
    megdnn_assert(map_xy.shape[3] == 2);
    megdnn_assert(map_xy.shape[0] == src.shape[0]);
    megdnn_assert_contiguous(src);
    // map_xy only supports the float32 type
    // map_xy is always in NHWC format
    megdnn_assert(map_xy.dtype.enumv() == DTypeEnum::Float32);
    // In the remap opr, dst's H, W are the same as H, W in map_xy.
    if (param().format == param::Remap::Format::NHWC) {
        // Channels last: dst[1]/dst[2] are H/W, dst[3] is C.
        megdnn_assert(src.shape[3] == dst.shape[3], "%s", errmsg().c_str());
        megdnn_assert(dst.shape[2] == map_xy.shape[2] &&
                              dst.shape[1] == map_xy.shape[1],
                      "%s", errmsg().c_str());
    } else if (param().format == param::Remap::Format::NCHW) {
        // Channels first: dst[1] is C, dst[2]/dst[3] are H/W.
        megdnn_assert(src.shape[1] == dst.shape[1], "%s", errmsg().c_str());
        megdnn_assert(dst.shape[2] == map_xy.shape[1] &&
                              dst.shape[3] == map_xy.shape[2],
                      "%s", errmsg().c_str());
    } else {
        megdnn_throw(
                "megdnn currently do not support other param.format except "
                "NHWC and NCHW");
    }
}
  71. void Remap::deduce_layout(const TensorLayout& src, const TensorLayout& map_xy,
  72. TensorLayout& dst) {
  73. deduce_layout_fwd(src, map_xy, dst);
  74. }
/**
 * Pre-execution check for forward remap: validate the layout triple and
 * verify the caller supplied at least the required workspace.
 */
void Remap::check_exec(const TensorLayout& src, const TensorLayout& map_xy,
                       const TensorLayout& dst, size_t workspace_in_bytes) {
    check_layout_fwd(src, map_xy, dst);
    // The provided workspace must cover what this opr reports it needs.
    auto required_workspace_in_bytes = get_workspace_in_bytes(src, map_xy, dst);
    megdnn_assert(workspace_in_bytes >= required_workspace_in_bytes);
}
/**
 * Pre-execution check for remap's data gradient.
 *
 * \p grad (d src) has the forward src's layout and \p diff (d dst) has the
 * forward dst's layout, so the forward layout check is reused with the
 * roles swapped.
 */
void RemapBackwardData::check_exec(const TensorLayout& map_xy,
                                   const TensorLayout& diff,
                                   const TensorLayout& grad,
                                   size_t workspace_in_bytes) {
    check_layout_fwd(grad, map_xy, diff);
    // BFloat16 is accepted only in builds with fp16 support compiled in
    // (the MEGDNN_INC_FLOAT16 macro drops its argument otherwise).
    megdnn_assert(grad.dtype == dtype::Float32() MEGDNN_INC_FLOAT16(
                          || grad.dtype == dtype::BFloat16()),
                  "Backward Remap only supports Float32/BFloat16.");
    auto required_workspace_in_bytes =
            get_workspace_in_bytes(map_xy, diff, grad);
    megdnn_assert(workspace_in_bytes >= required_workspace_in_bytes);
}
/**
 * Pre-execution check for remap's map_xy gradient.
 *
 * \p diff (d dst) has the forward dst's layout, and \p grad (d map_xy)
 * must match map_xy's layout exactly.
 */
void RemapBackwardMat::check_exec(const TensorLayout& src,
                                  const TensorLayout& map_xy,
                                  const TensorLayout& diff,
                                  const TensorLayout& grad,
                                  size_t workspace_in_bytes) {
    check_layout_fwd(src, map_xy, diff);
    // The gradient w.r.t. map_xy has map_xy's own layout.
    megdnn_assert_eq_layout(map_xy, grad);
    // BFloat16 is accepted only in builds with fp16 support compiled in
    // (the MEGDNN_INC_FLOAT16 macro drops its argument otherwise).
    megdnn_assert(grad.dtype == dtype::Float32() MEGDNN_INC_FLOAT16(
                          || grad.dtype == dtype::BFloat16()),
                  "Backward Remap only supports Float32/BFloat16.");
    auto required_workspace_in_bytes =
            get_workspace_in_bytes(src, map_xy, diff, grad);
    megdnn_assert(workspace_in_bytes >= required_workspace_in_bytes);
}
  107. } // namespace megdnn
  108. // vim: syntax=cpp.doxygen

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台