
indexing_multi_axis_vec.h 3.8 kB

/**
 * \file dnn/test/common/indexing_multi_axis_vec.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#pragma once

#include "test/common/opr_proxy.h"

namespace megdnn {
namespace test {

struct OprProxyIndexingMultiAxisVecHelper {
    //! axes indexed by the index tensors, in the order they appear in tensors[2..]
    size_t axes[TensorLayout::MAX_NDIM];

    /*!
     * \brief OprProxy for the indexing multi-axis-vec family of oprs
     *
     * \param init_axes axes that are indexed
     */
    OprProxyIndexingMultiAxisVecHelper(std::initializer_list<size_t> init_axes = {}) {
        size_t i = 0;
        for (auto ax : init_axes)
            axes[i++] = ax;
    }

    OprProxyIndexingMultiAxisVecHelper(SmallVector<size_t> init_axes) {
        size_t i = 0;
        for (auto ax : init_axes)
            axes[i++] = ax;
    }

    //! pair each index tensor tensors[i] (i >= 2) with axes[i - 2]
    IndexingMultiAxisVec::IndexDesc make_index_desc(
            const TensorNDArray& tensors) const {
        megdnn_assert(tensors.size() >= 3);
        IndexingMultiAxisVec::IndexDesc ret;
        ret.resize(tensors.size() - 2);
        for (size_t i = 2; i < tensors.size(); ++i) {
            ret[i - 2] = {axes[i - 2], tensors[i]};
        }
        return ret;
    }

    //! same as make_index_desc(), but operating on layouts only
    IndexingMultiAxisVec::IndexDescLayoutOnly make_index_layout(
            const TensorLayoutArray& layouts) const {
        megdnn_assert(layouts.size() >= 3);
        IndexingMultiAxisVec::IndexDescLayoutOnly ret;
        ret.resize(layouts.size() - 2);
        for (size_t i = 2; i < layouts.size(); ++i) {
            ret[i - 2] = {axes[i - 2], layouts[i]};
        }
        return ret;
    }
};

template <>
struct OprProxy<IndexingMultiAxisVec> : public OprProxyIndexingMultiAxisVecHelper {
    using OprProxyIndexingMultiAxisVecHelper::OprProxyIndexingMultiAxisVecHelper;

    //! tensors[0]: src, tensors[1]: dst, tensors[2..]: index tensors
    void exec(IndexingMultiAxisVec* opr, const TensorNDArray& tensors) const {
        WorkspaceWrapper W(
                opr->handle(),
                opr->get_workspace_in_bytes(
                        tensors[1].layout, axes, tensors.size() - 2));
        opr->exec(tensors[0], make_index_desc(tensors), tensors[1], W.workspace());
    }

    void deduce_layout(IndexingMultiAxisVec* opr, TensorLayoutArray& layouts) {
        opr->deduce_layout(layouts[0], make_index_layout(layouts), layouts[1]);
    }
};

template <>
struct OprProxy<IndexingIncrMultiAxisVec> : public OprProxyIndexingMultiAxisVecHelper {
    using OprProxyIndexingMultiAxisVecHelper::OprProxyIndexingMultiAxisVecHelper;

    //! tensors[0]: data (updated in place), tensors[1]: value, tensors[2..]: index tensors
    void exec(IndexingIncrMultiAxisVec* opr, const TensorNDArray& tensors) const {
        WorkspaceWrapper W(
                opr->handle(),
                opr->get_workspace_in_bytes(
                        tensors[1].layout, axes, tensors.size() - 2));
        opr->exec(tensors[0], tensors[1], make_index_desc(tensors), W.workspace());
    }

    void deduce_layout(IndexingIncrMultiAxisVec*, TensorLayoutArray&) {}
};

template <>
struct OprProxy<IndexingSetMultiAxisVec> : public OprProxyIndexingMultiAxisVecHelper {
    using OprProxyIndexingMultiAxisVecHelper::OprProxyIndexingMultiAxisVecHelper;

    //! tensors[0]: data (written in place), tensors[1]: value, tensors[2..]: index tensors
    void exec(IndexingSetMultiAxisVec* opr, const TensorNDArray& tensors) const {
        WorkspaceWrapper W(
                opr->handle(),
                opr->get_workspace_in_bytes(
                        tensors[1].layout, axes, tensors.size() - 2));
        opr->exec(tensors[0], tensors[1], make_index_desc(tensors), W.workspace());
    }

    void deduce_layout(IndexingSetMultiAxisVec*, TensorLayoutArray&) {}
};

}  // namespace test
}  // namespace megdnn

// vim: syntax=cpp.doxygen
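
For orientation, here is a minimal usage sketch; it is not part of this header. The ordering it relies on comes from the proxy above: slot 0 is the source, slot 1 the destination filled in by deduce_layout, and slots 2 onward the index tensors, each paired with the axis passed to the proxy constructor. The function name example, the concrete shapes, and the dtypes are illustrative assumptions, and the brace-initialization of TensorLayoutArray assumes the test SmallVector accepts an initializer list; treat this as a sketch rather than code from the repository.

    #include "test/common/indexing_multi_axis_vec.h"

    using namespace megdnn;
    using namespace megdnn::test;

    // Deduce the output layout of an IndexingMultiAxisVec that indexes
    // axes 1 and 2 of a (5, 6, 7) float32 source with two int32 index vectors.
    void example(Handle* handle) {
        OprProxy<IndexingMultiAxisVec> proxy({1, 2});
        auto opr = handle->create_operator<IndexingMultiAxisVec>();

        // layouts[0]: src; layouts[1]: dst, left empty and filled by deduce_layout;
        // layouts[2..]: one index vector per axis given to the proxy.
        TensorLayoutArray layouts{
                TensorLayout{{5, 6, 7}, dtype::Float32()},  // src
                TensorLayout{},                             // dst, deduced below
                TensorLayout{{3}, dtype::Int32()},          // indices along axis 1
                TensorLayout{{3}, dtype::Int32()},          // indices along axis 2
        };
        proxy.deduce_layout(opr.get(), layouts);
        // layouts[1] now holds the deduced output layout; exec() would be called
        // with a TensorNDArray of filled tensors in the same order.
    }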

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there are no separate CPU and GPU builds. If you want to run GPU programs, make sure the machine has a GPU and that the driver is properly installed. If you would like to try deep-learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.