
relayout_format.cpp 4.5 kB

/**
 * \file dnn/src/cuda/relayout_format/relayout_format.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
 * or implied.
 */
#include "src/cuda/relayout_format/relayout_format.cuh"
#include "src/cuda/relayout_format/relayout_format.h"

using namespace megdnn;
using namespace cuda;

namespace {
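// Extract the quantization scale (and, for asymmetric dtypes, the zero
// point) from a tensor dtype; for any other dtype the outputs are left at
// their caller-provided defaults.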
inline void get_scale_zeropoint(const DType& tensor_dtype, float& scale,
                                uint8_t& zero_point) {
    if (tensor_dtype.enumv() == DTypeEnum::Quantized8Asymm) {
        zero_point = tensor_dtype.param<dtype::Quantized8Asymm>().zero_point;
        scale = tensor_dtype.param<dtype::Quantized8Asymm>().scale;
    } else if (tensor_dtype.enumv() == DTypeEnum::QuantizedS8) {
        scale = tensor_dtype.param<dtype::QuantizedS8>().scale;
    } else if (tensor_dtype.enumv() == DTypeEnum::QuantizedS4) {
        scale = tensor_dtype.param<dtype::QuantizedS4>().scale;
    } else if (tensor_dtype.enumv() == DTypeEnum::Quantized4Asymm) {
        zero_point = tensor_dtype.param<dtype::Quantized4Asymm>().zero_point;
        scale = tensor_dtype.param<dtype::Quantized4Asymm>().scale;
    }
}
}  // namespace
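// Whether the fast CUDA relayout-format kernels support this src/dst
// layout pair.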
bool relayout_format::RelayoutFormatFast::usable(
        const TensorLayout& src_layout, const TensorLayout& dst_layout) {
    return relayout_format_cuda_usable(src_layout, dst_layout);
}
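// Dispatch to the CUDA kernel that implements the requested layout
// conversion, forwarding the quantization parameters extracted from the
// src/dst dtypes.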
void relayout_format::RelayoutFormatFast::exec(const TensorND& src,
                                               const TensorND& dst,
                                               cudaStream_t stream,
                                               RelayoutFormat::Param::Mode mode,
                                               int group) {
    float src_scale = 1.f;
    float dst_scale = 1.f;
    uint8_t src_zero_point = 0;
    uint8_t dst_zero_point = 0;
    get_scale_zeropoint(src.layout.dtype, src_scale, src_zero_point);
    get_scale_zeropoint(dst.layout.dtype, dst_scale, dst_zero_point);
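    // Plain Uint8 carries no zero point in its dtype; treat it as data
    // centered at 128, the midpoint of [0, 255].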
    if (src.layout.dtype.enumv() == DTypeEnum::Uint8) {
        src_zero_point = 128;
    }
    if (mode == RelayoutFormat::Param::Mode::NCHW_NCHW4 ||
        mode == RelayoutFormat::Param::Mode::NCHW_NCHW64) {
        return relayout_format_cuda_nchw_nchwx(src, dst, stream, src_scale,
                                               dst_scale, src_zero_point,
                                               dst_zero_point, group);
    } else if (mode == RelayoutFormat::Param::Mode::NCHW64_NCHW) {
        megdnn_assert(group == 1,
                      "RelayoutFormat kernel only supports transforming "
                      "NCHW64 to NCHW with group = 1 (group:%d)",
                      group);
        return relayout_format_cuda_nchwx_nchw(src, dst, stream, src_scale,
                                               dst_scale, src_zero_point,
                                               dst_zero_point);
    } else if (mode == RelayoutFormat::Param::Mode::NCHW_NHWC) {
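        // The NCHW <-> NHWC fast paths are implemented for 4-bit quantized
        // dtypes only; reject anything else up front.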
#define CHECK(dt)                                                 \
    megdnn_assert(dt.enumv() == DTypeEnum::Quantized4Asymm ||     \
                  dt.enumv() == DTypeEnum::QuantizedS4)
        CHECK(src.layout.dtype);
        CHECK(dst.layout.dtype);
        return relayout_format_cuda_nchw_nhwc(src, dst, stream, src_scale,
                                              dst_scale, src_zero_point,
                                              dst_zero_point);
    } else if (mode == RelayoutFormat::Param::Mode::NHWC_NCHW) {
        CHECK(src.layout.dtype);
        CHECK(dst.layout.dtype);
        return relayout_format_cuda_nhwc_nchw(src, dst, stream, src_scale,
                                              dst_scale, src_zero_point,
                                              dst_zero_point);
#undef CHECK
    } else if (mode == RelayoutFormat::Param::Mode::NCHW_NCHW4_WEIGHT) {
        return relayout_format_cuda_nchw_nchw4_weight(src, dst, stream);
    } else if (mode == RelayoutFormat::Param::Mode::NCHW4_NCHW) {
        return relayout_format_cuda_nchw4_nchw(src, dst, stream, group);
    } else {
        megdnn_throw(
                "only support nchw_nchw64/nchw64_nchw/nchw_nchw4/nchw4_nchw "
                "layout_format");
    }
}

// vim: ft=cpp syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
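For context, here is a minimal sketch of how these entry points might be driven from other megdnn CUDA code. The shapes, dtype scale, device pointers, and stream are hypothetical placeholders, and the TensorND{ptr, layout} construction assumes the 2021-era megdnn basic types; this is an illustration, not part of the file above.

#include "megdnn/basic_types.h"
#include "megdnn/oprs.h"
#include "src/cuda/relayout_format/relayout_format.h"

using namespace megdnn;
using namespace cuda;

// Reorder a 1x4x32x32 QuantizedS8 tensor from NCHW to NCHW4 on `stream`,
// taking the fast path only when the kernel reports the layouts as usable.
void relayout_nchw_to_nchw4(void* src_dev, void* dst_dev, cudaStream_t stream) {
    TensorLayout src_layout({1, 4, 32, 32}, dtype::QuantizedS8(0.5f));
    TensorLayout dst_layout({1, 1, 32, 32, 4}, dtype::QuantizedS8(0.5f));
    if (relayout_format::RelayoutFormatFast::usable(src_layout, dst_layout)) {
        TensorND src{src_dev, src_layout};
        TensorND dst{dst_dev, dst_layout};
        relayout_format::RelayoutFormatFast::exec(
                src, dst, stream, RelayoutFormat::Param::Mode::NCHW_NCHW4,
                /* group */ 1);
    }
    // else: fall back to the generic relayout operator (not shown).
}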

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there are no separate CPU and GPU builds. To run GPU programs, make sure the machine has GPU hardware and that the driver is installed. If you would like to try deep learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.