
algos.h 1.9 kB

/**
 * \file dnn/src/aarch64/conv_bias/int8/algos.h
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#pragma once

#include "src/aarch64/conv_bias/opr_impl.h"
#include "src/fallback/conv_bias/opr_impl.h"
#include "src/common/opr_delegate.h"

namespace megdnn {
namespace aarch64 {

using FallbackConvBiasImpl = fallback::ConvBiasImpl;

class ConvBiasImpl::AlgoS8MatrixMul final : public AlgoBase {
    //! workspace layout required by the int8 matmul-based kernel
    static WorkspaceBundle get_bundle(const NCBKernSizeParam& param);
    //! kernel body; dispatched once per convolution group
    static void kimpl(const NCBKernParam& param, const NCBKernIndex& ncb_index);

public:
    bool is_reproducible() const override { return true; }
    const char* name() const override { return "S8MATMUL"; }

    bool usable(const NCBKernSizeParam& param,
                AlgoSelectionStrategy algo_selection_strategy) const override;

    size_t get_workspace(const NCBKernSizeParam& param) const override {
        return get_bundle(param).total_size_in_bytes();
    }

    SmallVector<NCBKern> dispatch_kerns(
            const NCBKernSizeParam& param) const override {
        size_t group = param.filter_meta.group;
        //! parallelize over groups only: {group, 1, 1}
        return {{kimpl, {group, 1_z, 1_z}}};
    }

    //! give matmul the highest selection preference when the delegated
    //! ConvBias operator reports that quantized matmul is preferred
    bool is_preferred(const NCBKernSizeParam& param) const override {
        static CpuOprDelegationStorage<1> storage;
        auto conv_bias_opr = storage.get<ConvBias, 0>();
        return static_cast<ConvBiasImpl*>(conv_bias_opr)
                ->is_matmul_quantized_prefer(param);
    }
};

}  // namespace aarch64
}  // namespace megdnn

// vim: syntax=cpp.doxygen

The MegEngine installation package already bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build to choose from. To run GPU programs, make sure the machine actually has a GPU device and that the driver is installed. If you would like to try deep-learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.
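For example, a minimal sketch of probing for a usable GPU from Python, assuming the public megengine package API (megengine.is_cuda_available and megengine.set_default_device are taken from the documented Python interface; the device names "gpu0"/"cpu0" are illustrative):

    import megengine as mge

    # Probe for a usable CUDA device; otherwise fall back to CPU execution.
    if mge.is_cuda_available():
        mge.set_default_device("gpu0")  # place subsequent tensors/ops on the first GPU
        print("CUDA device detected, running on GPU.")
    else:
        mge.set_default_device("cpu0")
        print("No CUDA device found, running on CPU.")

Because the same package serves both cases, this check is usually all that is needed to make a script run on machines with or without a GPU.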