

/**
 * \file dnn/src/cuda/batched_matrix_mul/algo.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#pragma once
#include <cuda.h>
#include "megdnn/dtype.h"
#include "megdnn/oprs.h"
#include "src/common/metahelper.h"
#include "src/common/utils.h"
#include "src/cuda/batched_matrix_mul/opr_impl.h"
#include "src/cuda/matrix_mul/cublasLt_wrapper.h"
#if CUDA_VERSION >= 10010
#include <cublasLt.h>
#endif

namespace megdnn {
namespace cuda {

class BatchedMatrixMulForwardImpl::AlgoBase : public Algorithm {
protected:
    ~AlgoBase() = default;

public:
    enum class AlgoType : uint32_t {
        CUDA_BRUTE_FORCE,
        CUDA_CUBLAS,
        CUDA_CUBLASLT,
        CUDA_INT8X8X32,
    };
    using Mapper = std::unordered_map<AlgorithmDesc, AlgoBase*>;

    AlgoBase() : Algorithm() { m_handle_type = Handle::HandleType::CUDA; }

    struct SizeArgs {
        BatchedMatrixMulForwardImpl* opr;
        TensorLayout layout_a, layout_b, layout_c;
        std::string to_string() const;
        SizeArgs(BatchedMatrixMulForwardImpl* o, const TensorLayout& A,
                 const TensorLayout& B, const TensorLayout& C);
        bool can_be_treated_as_int8x8x32() const {
            return layout_a.dtype.enumv() == layout_b.dtype.enumv() &&
                   (layout_a.dtype.enumv() == DTypeEnum::Int8 ||
                    layout_a.dtype.enumv() == DTypeEnum::QuantizedS8) &&
                   (layout_c.dtype.enumv() == DTypeEnum::Int32 ||
                    layout_c.dtype.enumv() == DTypeEnum::QuantizedS32) &&
                   opr->param().format == param::MatrixMul::Format::DEFAULT;
        }
    };

    struct ExecArgs : public SizeArgs {
        TensorND tensor_a, tensor_b, tensor_c;
        Workspace workspace;
        ExecArgs(BatchedMatrixMulForwardImpl* o, _megdnn_tensor_in A,
                 _megdnn_tensor_in B, _megdnn_tensor_in C,
                 _megdnn_workspace workspace);
    };

    virtual bool is_available(const SizeArgs& args) const = 0;
    virtual size_t get_workspace_in_bytes(const SizeArgs& args) const = 0;
    virtual void exec(const ExecArgs& args) const = 0;
    virtual const char* name() const = 0;

    bool is_available_wk(const SizeArgs& args, size_t limit) {
        return is_available(args) && get_workspace_in_bytes(args) <= limit;
    }
    bool is_available_attribute(
            const SizeArgs& args,
            const AlgoAttribute& positive_attr = AlgoAttribute::REPRODUCIBLE,
            const AlgoAttribute& negative_attr = AlgoAttribute::DEFAULT,
            size_t limit = std::numeric_limits<size_t>::max()) {
        return contain_attribute_all(positive_attr) &&
               !contain_attribute_any(negative_attr) &&
               is_available_wk(args, limit);
    }
    AlgoBase& check_workspace(const SizeArgs& args,
                              const Workspace& workspace) {
        auto req = get_workspace_in_bytes(args);
        megdnn_assert(req <= workspace.size,
                      "batched matrix mul fwd algo %s: required workspace %zu "
                      "bytes, got %zu",
                      name(), req, workspace.size);
        return *this;
    }
};

class BatchedMatrixMulForwardImpl::AlgoBruteForce final
        : public BatchedMatrixMulForwardImpl::AlgoBase {
    using Param = MatrixMulForward::Param;

private:
    WorkspaceBundle get_workspace_bundle();

public:
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override;
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    const char* name() const override { return "BRUTE_FORCE"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_BRUTE_FORCE)
    std::vector<SearchItem> get_subopr_list(
            const TensorLayoutArray& layouts,
            const OperatorBase* opr) const override;
};

class BatchedMatrixMulForwardImpl::AlgoCublas final
        : public BatchedMatrixMulForwardImpl::AlgoBase {
public:
    AlgoCublas() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override;
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    const char* name() const override { return "CUBLAS"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUBLAS)
};

#if CUDA_VERSION >= 10010
class BatchedMatrixMulForwardImpl::AlgoCublasLt final : public AlgoBase {
public:
    AlgoCublasLt() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override;
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    const char* name() const override { return "CUBLAS_LT"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_CUBLASLT)
};
#endif

class BatchedMatrixMulForwardImpl::AlgoInt8x8x32 final
        : public BatchedMatrixMulForwardImpl::AlgoBase {
public:
    AlgoInt8x8x32() = default;
    bool is_available(const SizeArgs& args) const override;
    size_t get_workspace_in_bytes(const SizeArgs& /*args*/) const override;
    void exec(const ExecArgs& args) const final;
    AlgoAttribute attribute() const override {
        return AlgoAttribute::REPRODUCIBLE;
    }
    const char* name() const override { return "INT8x8x32"; }
    MEGDNN_DECL_ALGO_TYPE(CUDA_INT8X8X32)
};

class BatchedMatrixMulForwardImpl::AlgoPack : NonCopyableObj {
private:
    AlgoBase::Mapper m_all_algos_map;
    MatrixMulForwardImpl::AlgoPack mm_pack;

public:
    AlgoPack();
    AlgoCublas cublas;
#if CUDA_VERSION >= 10010
    AlgoCublasLt cublasLt;
#endif
    AlgoInt8x8x32 int8x8x32;
    std::vector<AlgoBase*> all_algos;
    AlgoBruteForce brute_force;
    const AlgoBase::Mapper& all_algos_map() const { return m_all_algos_map; }
};

}  // namespace cuda
}  // namespace megdnn
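The header above only declares the algorithm interface; the dispatch logic that uses it lives elsewhere in MegDNN. To illustrate the pattern these declarations support (an abstract algo with availability and workspace queries, a pack that owns the concrete algos, and a selection loop equivalent to is_available_wk()), here is a minimal, self-contained sketch. All names in it (AlgoGemmLibrary, the SizeArgs fields, etc.) are simplified stand-ins invented for illustration, not MegDNN API.

// Standalone illustration only -- simplified stand-ins for the classes above.
#include <cstddef>
#include <cstdio>
#include <vector>

struct SizeArgs {            // stand-in for AlgoBase::SizeArgs
    size_t batch, m, n, k;
};

class AlgoBase {
public:
    virtual ~AlgoBase() = default;
    virtual bool is_available(const SizeArgs&) const = 0;
    virtual size_t get_workspace_in_bytes(const SizeArgs&) const = 0;
    virtual const char* name() const = 0;

    // mirrors AlgoBase::is_available_wk: usable only if it fits the workspace limit
    bool is_available_wk(const SizeArgs& args, size_t limit) const {
        return is_available(args) && get_workspace_in_bytes(args) <= limit;
    }
};

class AlgoGemmLibrary final : public AlgoBase {   // e.g. a cuBLAS-like backend
public:
    bool is_available(const SizeArgs&) const override { return true; }
    size_t get_workspace_in_bytes(const SizeArgs&) const override { return 0; }
    const char* name() const override { return "GEMM_LIBRARY"; }
};

class AlgoBruteForce final : public AlgoBase {    // fallback needing scratch memory
public:
    bool is_available(const SizeArgs&) const override { return true; }
    size_t get_workspace_in_bytes(const SizeArgs& a) const override {
        return a.batch * a.m * a.n * sizeof(float);
    }
    const char* name() const override { return "BRUTE_FORCE"; }
};

// mirrors AlgoPack: owns the algorithm objects and exposes them in priority order
struct AlgoPack {
    AlgoGemmLibrary gemm;
    AlgoBruteForce brute_force;
    std::vector<AlgoBase*> all_algos{&gemm, &brute_force};
};

int main() {
    AlgoPack pack;
    SizeArgs args{32, 128, 128, 64};
    size_t workspace_limit = 1 << 20;  // 1 MiB

    // pick the first algorithm usable under the workspace limit, the same
    // policy that is_available_wk() supports in the header above
    for (AlgoBase* algo : pack.all_algos) {
        if (algo->is_available_wk(args, workspace_limit)) {
            std::printf("selected algo: %s (workspace %zu bytes)\n", algo->name(),
                        algo->get_workspace_in_bytes(args));
            break;
        }
    }
    return 0;
}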

The MegEngine installation package already bundles the CUDA environment needed to run code on GPU, so there is no separate CPU and GPU build. If you want to run GPU programs, make sure the machine has a GPU installed and its driver set up. If you would like to try deep learning development on a cloud GPU computing platform, you are welcome to visit the MegStudio platform.
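To check that the GPU and driver are actually visible before running GPU programs, a small CUDA runtime probe can be used. This sketch is independent of MegEngine and only assumes the CUDA toolkit is installed (compile with nvcc):

// Minimal CUDA device probe -- independent of MegEngine, assumes the CUDA
// toolkit headers and libraries are installed.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
    int count = 0;
    cudaError_t err = cudaGetDeviceCount(&count);
    if (err != cudaSuccess || count == 0) {
        std::printf("no usable CUDA device: %s\n", cudaGetErrorString(err));
        return 1;
    }
    for (int i = 0; i < count; ++i) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        std::printf("device %d: %s, compute capability %d.%d\n", i, prop.name,
                    prop.major, prop.minor);
    }
    return 0;
}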