You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-'), and can be up to 35 characters long.

group_conv.cpp 8.0 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205
  1. /**
  2. * \file dnn/src/cuda/conv_bias/group_conv.cpp
  3. * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
  4. *
  5. * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
  6. *
  7. * Unless required by applicable law or agreed to in writing,
  8. * software distributed under the License is distributed on an
  9. * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10. */
  11. #include <utility>
  12. #include "src/common/conv_bias.h"
  13. #include "src/cuda/conv_bias/algo.h"
  14. using namespace megdnn;
  15. using namespace cuda;
  16. using namespace conv_bias;
  17. namespace {
  18. std::pair<TensorLayoutArray, ConvBiasForwardImpl::Param> sub_opr_config(
  19. const ConvBiasForwardImpl::AlgoBase::SizeArgs& args) {
  20. TensorLayout src_pg = *args.src_layout;
  21. TensorLayout filter_pg = *args.filter_layout;
  22. TensorLayout bias_pg = *args.bias_layout;
  23. TensorLayout z_pg = *args.z_layout;
  24. TensorLayout dst_pg = *args.dst_layout;
  25. auto nr_grp = args.filter_meta.group;
  26. size_t c_pos;
  27. if (args.filter_meta.format == megdnn::param::ConvBias::Format::NCHW ||
  28. args.filter_meta.format == megdnn::param::ConvBias::Format::NCHW4) {
  29. c_pos = 1;
  30. } else {
  31. megdnn_assert(args.filter_meta.format ==
  32. megdnn::param::ConvBias::Format::NHWC,
  33. "invalid conv format");
  34. c_pos = 3;
  35. }
  36. filter_pg.remove_axis_inplace(0);
  37. src_pg.shape[c_pos] /= nr_grp;
  38. bias_pg.ndim = 0;
  39. dst_pg.shape[c_pos] /= nr_grp;
  40. megdnn::param::ConvBias param = args.opr->param();
  41. param.sparse = megdnn::param::ConvBias::Sparse::DENSE;
  42. param.nonlineMode =
  43. megdnn::param::ConvBias::NonlineMode::IDENTITY;
  44. std::pair<TensorLayoutArray, ConvBiasForwardImpl::Param> ret;
  45. ret.first = {src_pg, filter_pg, bias_pg, z_pg, dst_pg};
  46. ret.second = param;
  47. return ret;
  48. }
  49. std::pair<TensorLayoutArray, std::unique_ptr<ConvBiasForward>> prepare_sub_opr(
  50. const ConvBiasForwardImpl::AlgoBase::SizeArgs& args) {
  51. auto convbias_opr = args.handle->create_operator<ConvBias>();
  52. set_execution_policy<ConvBiasForward, ConvBiasForward*>(
  53. args.opr, convbias_opr.get());
  54. auto&& config = sub_opr_config(args);
  55. convbias_opr->param() = config.second;
  56. return {config.first, std::move(convbias_opr)};
  57. }
  58. } // namespace
  59. std::vector<Algorithm::SearchItem>
  60. ConvBiasForwardImpl::AlgoGroupConvGeneral::get_subopr_list(
  61. const TensorLayoutArray& layouts, const OperatorBase* opr) const {
  62. AlgoBase::SizeArgs args{static_cast<const ConvBiasForwardImpl*>(opr),
  63. layouts[0],
  64. layouts[1],
  65. layouts[2],
  66. layouts[3],
  67. layouts[4]};
  68. auto&& config = sub_opr_config(args);
  69. std::string param_str;
  70. Algorithm::serialize_write_pod(config.second, param_str);
  71. return {{Algorithm::OprType::CONVBIAS_FORWARD, param_str, config.first}};
  72. }
  73. bool ConvBiasForwardImpl::AlgoGroupConvGeneral::is_available(
  74. const SizeArgs& args) const {
  75. if (args.src_layout->dtype == args.filter_layout->dtype &&
  76. args.src_layout->dtype == dtype::BFloat16()) {
  77. return false;
  78. }
  79. if (args.z_layout->ndim > 0 || args.filter_meta.group <= 1)
  80. return false;
  81. auto&& param = args.opr->param();
  82. if (param.format == param::ConvBias::Format::NCHW8 ||
  83. param.format == param::ConvBias::Format::CHWN4 ||
  84. param.format == param::ConvBias::Format::NCHW32)
  85. return false;
  86. auto dst_layout = *args.dst_layout;
  87. if (dst_layout.dtype.enumv() != args.bias_layout->dtype.enumv()) {
  88. dst_layout.dtype = DType();
  89. args.opr->check_or_deduce_dtype_fwd(args.src_layout->dtype,
  90. args.filter_layout->dtype,
  91. dst_layout.dtype);
  92. }
  93. auto conv_args = args;
  94. conv_args.dst_layout = &dst_layout;
  95. auto config = prepare_sub_opr(conv_args);
  96. bool ret = has_available_algo<ConvBiasForwardImpl>(
  97. static_cast<ConvBiasForwardImpl*>(config.second.get()),
  98. config.first[0], config.first[1], config.first[2], config.first[3],
  99. config.first[4]);
  100. return ret;
  101. }
  102. WorkspaceBundle ConvBiasForwardImpl::AlgoGroupConvGeneral::get_workspace_bundle(
  103. void* ptr, const SizeArgs& args) const {
  104. auto dst_layout = *args.dst_layout;
  105. SmallVector<size_t> sizes;
  106. if (dst_layout.dtype.enumv() != args.bias_layout->dtype.enumv()) {
  107. dst_layout.dtype = DType();
  108. args.opr->check_or_deduce_dtype_fwd(args.src_layout->dtype,
  109. args.filter_layout->dtype,
  110. dst_layout.dtype);
  111. sizes.push_back(dst_layout.span().dist_byte());
  112. }
  113. auto conv_args = args;
  114. conv_args.dst_layout = &dst_layout;
  115. auto config = prepare_sub_opr(conv_args);
  116. size_t mm_ws = config.second->get_workspace_in_bytes(
  117. config.first[0], config.first[1], config.first[2],
  118. config.first[3], config.first[4], nullptr);
  119. sizes.insert(sizes.begin(), mm_ws);
  120. return {ptr, std::move(sizes)};
  121. }
// Total workspace requirement: sum of all bundle slots computed with a null
// base pointer.
size_t ConvBiasForwardImpl::AlgoGroupConvGeneral::get_workspace_in_bytes(
        const SizeArgs& args) const {
    return get_workspace_bundle(nullptr, args).total_size_in_bytes();
}
// Execute grouped convolution by running the dense sub-opr once per group,
// advancing the src/dst/filter pointers by a per-group byte stride between
// iterations, then applying bias and nonlinearity on the full output.
void ConvBiasForwardImpl::AlgoGroupConvGeneral::exec(
        const ExecArgs& args) const {
    auto bundle = get_workspace_bundle(args.workspace.raw_ptr, args);
    auto conv_dst_tensor = *args.dst_tensor;
    if (args.dst_layout->dtype.enumv() != args.bias_layout->dtype.enumv()) {
        // conv result dtype differs from final dst dtype: write the raw conv
        // output into the last workspace slot (see get_workspace_bundle)
        conv_dst_tensor.raw_ptr = bundle.get(bundle.nr_workspace() - 1);
        conv_dst_tensor.layout.dtype = DType();
        args.opr->check_or_deduce_dtype_fwd(args.src_layout->dtype,
                                            args.filter_layout->dtype,
                                            conv_dst_tensor.layout.dtype);
    }
    {
        auto sub_args = args;
        sub_args.dst_tensor = &conv_dst_tensor;
        sub_args.dst_layout = &conv_dst_tensor.layout;
        auto config = prepare_sub_opr(sub_args);
        // per-group views: layouts are the reduced (single-group) layouts,
        // pointers start at group 0 and are advanced below
        TensorND tsrc{args.src_tensor->raw_ptr, config.first[0]};
        TensorND tfilter{args.filter_tensor->raw_ptr, config.first[1]};
        TensorND tbias{args.bias_tensor->raw_ptr, config.first[2]};
        TensorND tz{args.z_tensor->raw_ptr, config.first[3]};
        TensorND tdst{conv_dst_tensor.raw_ptr, config.first[4]};
        // channel axis position depends on the tensor format
        size_t c_pos;
        if (args.filter_meta.format == Param::Format::NCHW ||
            args.filter_meta.format == Param::Format::NCHW4) {
            c_pos = 1;
        } else {
            megdnn_assert(args.filter_meta.format == Param::Format::NHWC,
                          "invalid conv format");
            c_pos = 3;
        }
        auto grp = args.filter_meta.group;
        auto&& fm = args.filter_meta;
        // byte strides between consecutive groups for src, dst and filter
        auto strd_src = tsrc.layout.stride[c_pos] * fm.icpg *
                        tsrc.layout.dtype.size(),
             strd_dst = tdst.layout.stride[c_pos] * fm.ocpg *
                        tdst.layout.dtype.size(),
             strd_flt = fm.icpg * fm.ocpg * fm.spatial[0] * fm.spatial[1] *
                        tfilter.layout.dtype.size();
        if (args.filter_meta.format == Param::Format::NCHW4) {
            // NCHW4 channel stride counts packs of 4; the icpg/ocpg product
            // above over-counts by 4, so divide the byte strides back down
            strd_src >>= 2;
            strd_dst >>= 2;
        }
        for (uint32_t g = 0; g < grp; ++g) {
            // bias is empty in the sub-opr config; z is unused (checked in
            // is_available), so only src/dst/filter pointers advance
            config.second->exec(tsrc, tfilter, tbias, tz, tdst, nullptr,
                                bundle.get_workspace(0));
            incr_voidp(tsrc.raw_ptr, strd_src);
            incr_voidp(tdst.raw_ptr, strd_dst);
            incr_voidp(tfilter.raw_ptr, strd_flt);
        }
    }
    // fuse bias add and activation, converting into the user's dst tensor
    handle_bias_and_nonlinear(args.handle, args.nonlinear_mode,
                              &conv_dst_tensor, args.dst_tensor,
                              args.bias_tensor);
}
  180. // vim: syntax=cpp.doxygen

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台