

/**
 * \file dnn/test/common/indexing_multi_axis_vec.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#pragma once

#include "test/common/opr_proxy.h"

namespace megdnn {
namespace test {

struct OprProxyIndexingMultiAxisVecHelper {
    //! axes[i] is the axis indexed by the i-th index tensor (tensors[i + 2])
    size_t axes[TensorLayout::MAX_NDIM];

    /*!
     * \brief OprProxy for indexing multi-vec family oprs
     *
     * \param init_axes axes that are indexed
     */
    OprProxyIndexingMultiAxisVecHelper(std::initializer_list<size_t> init_axes = {}) {
        size_t i = 0;
        for (auto ax : init_axes)
            axes[i++] = ax;
    }

    OprProxyIndexingMultiAxisVecHelper(SmallVector<size_t> init_axes) {
        size_t i = 0;
        for (auto ax : init_axes)
            axes[i++] = ax;
    }

    IndexingMultiAxisVec::IndexDesc make_index_desc(
            const TensorNDArray& tensors) const {
        megdnn_assert(tensors.size() >= 3);
        IndexingMultiAxisVec::IndexDesc ret;
        ret.resize(tensors.size() - 2);
        for (size_t i = 2; i < tensors.size(); ++i) {
            ret[i - 2] = {axes[i - 2], tensors[i]};
        }
        return ret;
    }

    size_t get_index_ndim(const TensorNDArray& tensors) const {
        megdnn_assert(tensors.size() >= 3);
        size_t ndim = 0;
        for (size_t i = 2; i < tensors.size(); ++i) {
            ndim = std::max(tensors[i].layout.ndim, ndim);
        }
        return ndim;
    }

    IndexingMultiAxisVec::IndexDescLayoutOnly make_index_layout(
            const TensorLayoutArray& layouts) const {
        megdnn_assert(layouts.size() >= 3);
        IndexingMultiAxisVec::IndexDescLayoutOnly ret;
        ret.resize(layouts.size() - 2);
        for (size_t i = 2; i < layouts.size(); ++i) {
            ret[i - 2] = {axes[i - 2], layouts[i]};
        }
        return ret;
    }
};

template <>
struct OprProxy<IndexingMultiAxisVec> : public OprProxyIndexingMultiAxisVecHelper {
    using OprProxyIndexingMultiAxisVecHelper::OprProxyIndexingMultiAxisVecHelper;

    void exec(IndexingMultiAxisVec* opr, const TensorNDArray& tensors) const {
        WorkspaceWrapper W(
                opr->handle(), opr->get_workspace_in_bytes(
                                       tensors[1].layout, axes, tensors.size() - 2,
                                       get_index_ndim(tensors)));
        opr->exec(tensors[0], make_index_desc(tensors), tensors[1], W.workspace());
    }

    void deduce_layout(IndexingMultiAxisVec* opr, TensorLayoutArray& layouts) {
        opr->deduce_layout(layouts[0], make_index_layout(layouts), layouts[1]);
    }
};

template <>
struct OprProxy<IndexingIncrMultiAxisVec> : public OprProxyIndexingMultiAxisVecHelper {
    using OprProxyIndexingMultiAxisVecHelper::OprProxyIndexingMultiAxisVecHelper;

    void exec(IndexingIncrMultiAxisVec* opr, const TensorNDArray& tensors) const {
        WorkspaceWrapper W(
                opr->handle(), opr->get_workspace_in_bytes(
                                       tensors[1].layout, axes, tensors.size() - 2,
                                       get_index_ndim(tensors)));
        opr->exec(tensors[0], tensors[1], make_index_desc(tensors), W.workspace());
    }

    void deduce_layout(IndexingIncrMultiAxisVec*, TensorLayoutArray&) {}
};

template <>
struct OprProxy<IndexingSetMultiAxisVec> : public OprProxyIndexingMultiAxisVecHelper {
    using OprProxyIndexingMultiAxisVecHelper::OprProxyIndexingMultiAxisVecHelper;

    void exec(IndexingSetMultiAxisVec* opr, const TensorNDArray& tensors) const {
        WorkspaceWrapper W(
                opr->handle(), opr->get_workspace_in_bytes(
                                       tensors[1].layout, axes, tensors.size() - 2,
                                       get_index_ndim(tensors)));
        opr->exec(tensors[0], tensors[1], make_index_desc(tensors), W.workspace());
    }

    void deduce_layout(IndexingSetMultiAxisVec*, TensorLayoutArray&) {}
};

}  // namespace test
}  // namespace megdnn

// vim: syntax=cpp.doxygen
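
The helper above encodes one convention: the first two entries of the tensor (or layout) array are the operand tensors, and every following entry is an index tensor paired one-to-one with the axes passed to the proxy constructor. The snippet below is a minimal, self-contained sketch of that pairing rule using plain standard-library stand-ins rather than megdnn types; the tensor names and axis numbers are made up for illustration only.

// Standalone illustration of the axis / index-tensor pairing performed by
// make_index_desc. All types here are stand-ins, not megdnn types.
#include <cstddef>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
    // Entries 0 and 1 stand for the operand tensors; entries 2.. stand for
    // the index tensors, mirroring the TensorNDArray handed to the proxy.
    std::vector<std::string> tensors{"operand0", "operand1", "idx_a", "idx_b"};
    std::size_t axes[] = {0, 2};  // hypothetical axes given to the proxy ctor

    // Same rule as make_index_desc: index tensor i-2 is applied to axes[i-2].
    std::vector<std::pair<std::size_t, std::string>> index_desc(tensors.size() - 2);
    for (std::size_t i = 2; i < tensors.size(); ++i)
        index_desc[i - 2] = {axes[i - 2], tensors[i]};

    for (auto& entry : index_desc)
        std::cout << entry.second << " indexes axis " << entry.first << '\n';
    // Prints:
    //   idx_a indexes axis 0
    //   idx_b indexes axis 2
}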

The MegEngine package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build to choose between. To run GPU programs, make sure the machine has a GPU and that its driver is installed. If you would like to try deep-learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.