
algo.h 4.4 kB

/**
 * \file dnn/src/cuda/deformable_conv/fwd/algo.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#pragma once

#include "megdnn/oprs.h"
#include "src/common/algo_base.h"
#include "src/common/metahelper.h"
#include "src/cuda/deformable_conv/opr_impl.h"
#include "src/cuda/utils.h"

#include <unordered_map>

namespace megdnn {
namespace cuda {

//! base class of all deformable-conv forward algorithms on CUDA
class DeformableConvForwardImpl::AlgoBase : public Algorithm {
protected:
    ~AlgoBase() = default;

public:
    enum class AlgoType : uint32_t {
        CUDA_MATMUL,
    };
    using Mapper = std::unordered_map<AlgorithmDesc, AlgoBase*>;

    AlgoBase() : Algorithm() { m_handle_type = Handle::HandleType::CUDA; }

    //! layout-only arguments, used for availability and workspace queries
    struct SizeArgs {
        DeformableConvForwardImpl* opr;
        HandleImpl* handle;
        const TensorLayout& im_layout;
        CanonizedFilterMeta filter_meta;
        const TensorLayout& offset_layout;
        const TensorLayout& mask_layout;
        const TensorLayout& dst_layout;

        std::string to_string() const;

        SizeArgs(DeformableConvForwardImpl* opr, const TensorLayout& im,
                 const TensorLayout& filter, const TensorLayout& offset,
                 const TensorLayout& mask, const TensorLayout& dst);
        SizeArgs(DeformableConvForwardImpl* opr, const TensorLayout& im,
                 const CanonizedFilterMeta& filter, const TensorLayout& offset,
                 const TensorLayout& mask, const TensorLayout& dst);
    };

    //! SizeArgs plus the actual tensors and workspace used for execution
    struct ExecArgs : public SizeArgs {
        const TensorND &im_tensor, filter_tensor, offset_tensor, mask_tensor,
                dst_tensor;
        Workspace workspace;

        ExecArgs(DeformableConvForwardImpl* opr, _megdnn_tensor_in im,
                 _megdnn_tensor_in filter, _megdnn_tensor_in offset,
                 _megdnn_tensor_in mask, _megdnn_tensor_out dst,
                 _megdnn_workspace workspace);
    };

    virtual bool is_available(const SizeArgs& args) const = 0;
    virtual size_t get_workspace_in_bytes(const SizeArgs& args) const = 0;
    virtual void exec(const ExecArgs& args) const = 0;

    bool is_available_wk(const SizeArgs& args, size_t limit) {
        return is_available(args) && get_workspace_in_bytes(args) <= limit;
    }

    bool is_available_attribute(
            const SizeArgs& args,
            const AlgoAttribute& positive_attr = AlgoAttribute::REPRODUCIBLE,
            const AlgoAttribute& negative_attr = AlgoAttribute::DEFAULT,
            size_t limit = std::numeric_limits<size_t>::max()) {
        return contain_attribute_all(positive_attr) &&
               !contain_attribute_any(negative_attr) &&
               is_available_wk(args, limit);
    }

    AlgoBase& check_workspace(const SizeArgs& args,
                              const Workspace& workspace) {
        auto req = get_workspace_in_bytes(args);
        megdnn_assert(req <= workspace.size,
                      "deformable_conv fwd algo %s: required workspace %zu "
                      "bytes, got %zu",
                      name(), req, workspace.size);
        return *this;
    }
};

//! implementation based on batched matrix multiplication (im2col-style)
class DeformableConvForwardImpl::AlgoMatmul final : public AlgoBase {
private:
    static WorkspaceBundle get_bundle(const SizeArgs& args);

public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& args) const override;
    void exec(const ExecArgs& args) const override;

    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }

    std::vector<SearchItem> get_subopr_list(
            const TensorLayoutArray& layouts,
            const OperatorBase* opr) const override;

    const char* name() const override { return "MATMUL"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_MATMUL)
};

//! owns all algorithm instances and the desc-to-algorithm lookup map
class DeformableConvForwardImpl::AlgoPack : NonCopyableObj {
    AlgoBase::Mapper m_all_algos_map;

public:
    AlgoPack();

    AlgoMatmul algo_matmul;
    //! all algorithms
    std::vector<AlgoBase*> all_algos;
    const AlgoBase::Mapper& all_algos_map() const { return m_all_algos_map; }
};

}  // namespace cuda
}  // namespace megdnn

// vim: syntax=cpp.doxygen
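
The header above only declares the algorithm interface; the selection logic lives elsewhere in MegEngine. As a rough illustration of how the pieces fit together, here is a minimal sketch of picking a usable algorithm from the pack under a workspace limit. Only AlgoPack, AlgoBase, SizeArgs, all_algos and is_available_wk() come from the header; the helper pick_algo() and its first-fit policy are hypothetical, not the library's actual dispatch code.

// Hypothetical usage sketch -- not part of the MegEngine sources.
#include "src/cuda/deformable_conv/fwd/algo.h"

namespace megdnn {
namespace cuda {

// First-fit selection: return the first algorithm that supports the given
// layouts and whose workspace requirement fits within `workspace_limit`,
// or nullptr if none qualifies.
static DeformableConvForwardImpl::AlgoBase* pick_algo(
        const DeformableConvForwardImpl::AlgoPack& pack,
        const DeformableConvForwardImpl::AlgoBase::SizeArgs& args,
        size_t workspace_limit) {
    for (auto* algo : pack.all_algos) {
        if (algo->is_available_wk(args, workspace_limit))
            return algo;
    }
    return nullptr;
}

}  // namespace cuda
}  // namespace megdnn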

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build to choose between. To run GPU programs, make sure the machine actually has a GPU and that the driver is installed. If you would like to try deep-learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.