You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number; they can include dashes ('-') and can be up to 35 characters long.

algo.h 4.9 kB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135
/**
 * \file dnn/src/cuda/deformable_conv/bwd_data/algo.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#pragma once

#include "megdnn/oprs.h"

#include "src/common/algo_base.h"
#include "src/common/metahelper.h"
#include "src/common/utils.h"
#include "src/cuda/deformable_conv/opr_impl.h"
#include "src/cuda/handle.h"

#include <limits>
#include <string>
#include <unordered_map>
#include <vector>
  19. namespace megdnn {
  20. namespace cuda {
  21. class DeformableConvBackwardDataImpl::AlgoBase : public Algorithm {
  22. protected:
  23. ~AlgoBase() = default;
  24. public:
  25. enum class AlgoType : uint32_t {
  26. CUDA_MATMUL,
  27. };
  28. using Mapper = std::unordered_map<AlgorithmDesc, AlgoBase*>;
  29. AlgoBase() : Algorithm() { m_handle_type = Handle::HandleType::CUDA; }
  30. struct SizeArgs {
  31. DeformableConvBackwardDataImpl* opr;
  32. HandleImpl* handle;
  33. const TensorLayout& im_layout;
  34. CanonizedFilterMeta filter_meta;
  35. const TensorLayout& offset_layout;
  36. const TensorLayout& mask_layout;
  37. const TensorLayout& out_grad_layout;
  38. const TensorLayout& im_grad_layout;
  39. const TensorLayout& offset_grad_layout;
  40. const TensorLayout& mask_grad_layout;
  41. std::string to_string() const;
  42. SizeArgs(DeformableConvBackwardDataImpl* opr, const TensorLayout& im,
  43. const TensorLayout& filter, const TensorLayout& offset,
  44. const TensorLayout& mask, const TensorLayout& out_grad,
  45. const TensorLayout& im_grad, const TensorLayout& offset_grad,
  46. const TensorLayout& mask_grad);
  47. SizeArgs(DeformableConvBackwardDataImpl* opr, const TensorLayout& im,
  48. const CanonizedFilterMeta& filter, const TensorLayout& offset,
  49. const TensorLayout& mask, const TensorLayout& out_grad,
  50. const TensorLayout& im_grad, const TensorLayout& offset_grad,
  51. const TensorLayout& mask_grad);
  52. };
  53. struct ExecArgs : public SizeArgs {
  54. const TensorND im_tensor, filter_tensor, offset_tensor, mask_tensor,
  55. out_grad_tensor;
  56. TensorND im_grad_tensor, offset_grad_tensor, mask_grad_tensor;
  57. Workspace workspace;
  58. ExecArgs(DeformableConvBackwardDataImpl* opr, _megdnn_tensor_in im,
  59. _megdnn_tensor_in filter, _megdnn_tensor_in offset,
  60. _megdnn_tensor_in mask, _megdnn_tensor_in out_grad,
  61. _megdnn_tensor_out im_grad, _megdnn_tensor_out offset_grad,
  62. _megdnn_tensor_out mask_grad, _megdnn_workspace workspace);
  63. };
  64. virtual bool is_available(const SizeArgs& args) const = 0;
  65. virtual size_t get_workspace_in_bytes(const SizeArgs& args) const = 0;
  66. virtual void exec(const ExecArgs& args) const = 0;
  67. bool is_available_wk(const SizeArgs& args, size_t limit) {
  68. return is_available(args) && get_workspace_in_bytes(args) <= limit;
  69. }
  70. bool is_available_reproducible(
  71. const SizeArgs& args, bool reproducible = true,
  72. size_t limit = std::numeric_limits<size_t>::max()) {
  73. return (!reproducible || is_reproducible()) &&
  74. is_available_wk(args, limit);
  75. }
  76. AlgoBase& check_workspace(const SizeArgs& args,
  77. const Workspace& workspace) {
  78. auto req = get_workspace_in_bytes(args);
  79. megdnn_assert(
  80. req <= workspace.size,
  81. "deformable_conv bwd_data algo %s: required workspace %zu "
  82. "bytes, got %zu",
  83. name(), req, workspace.size);
  84. return *this;
  85. }
  86. };
/*!
 * \brief backward-data algorithm implemented via matrix multiplication
 *        (per the class name; kernels live in the corresponding .cpp/.cu)
 */
class DeformableConvBackwardDataImpl::AlgoMatmul final : public AlgoBase {
private:
    //! workspace bundle covering the intermediate buffers of exec()
    static WorkspaceBundle get_bundle(const SizeArgs& args);
    //! derive the matmul operand layouts (a * b -> c) from the conv args
    static void get_matmul_layout(const SizeArgs& args, TensorLayout& al,
                                  TensorLayout& bl, TensorLayout& cl);

public:
    AlgoMatmul() {}
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;

    //! matmul-based computation is deterministic, so always reproducible
    bool is_reproducible() const override { return true; }

    const char* name() const override { return "AlgoMatmul"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_MATMUL)
};
/*!
 * \brief registry holding every backward-data algorithm instance, plus a
 *        desc -> algo map for lookup
 */
class DeformableConvBackwardDataImpl::AlgoPack : NonCopyableObj {
    // populated by the AlgoPack() constructor (defined in the .cpp)
    AlgoBase::Mapper m_all_algos_map;

public:
    AlgoPack();
    AlgoMatmul algo_matmul;
    //! all algorithms
    std::vector<AlgoBase*> all_algos;
    const AlgoBase::Mapper& all_algos_map() const { return m_all_algos_map; }
};

}  // namespace cuda
}  // namespace megdnn

// vim: syntax=cpp.doxygen

MegEngine 安装包中集成了使用 GPU 运行代码所需的 CUDA 环境,不用区分 CPU 和 GPU 版。 如果想要运行 GPU 程序,请确保机器本身配有 GPU 硬件设备并安装好驱动。 如果你想体验在云端 GPU 算力平台进行深度学习开发的感觉,欢迎访问 MegStudio 平台