
cumsum.cpp

/**
 * \file dnn/test/cuda/cumsum.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "test/cuda/fixture.h"

#include "megdnn/oprs.h"
#include "test/common/checker.h"

namespace megdnn {
namespace test {

TEST_F(CUDA, CUMSUM) {
    Checker<Cumsum> checker(handle_cuda());
    struct TestArg {
        param::Cumsum param;
        TensorShape shape;
        TestArg(param::Cumsum param, TensorShape shape) : param(param), shape(shape) {}
    };
    std::vector<TestArg> args, args_int32;
    for (auto shape : TensorShapeArray{
                 {10000}, {33000, 33}, {100, 100, 100}, {30, 30, 30, 30}}) {
        for (size_t axis = 0; axis < shape.ndim; ++axis) {
            args.emplace_back(param::Cumsum(axis, true, true), shape);
            args.emplace_back(param::Cumsum(axis, true, false), shape);
            args.emplace_back(param::Cumsum(axis, false, true), shape);
            args.emplace_back(param::Cumsum(axis, false, false), shape);
        }
    }
    for (auto shape : TensorShapeArray{{1}, {10}, {100}, {1000}, {10000}, {100000}}) {
        args.emplace_back(param::Cumsum(0, true, true), shape);
        args.emplace_back(param::Cumsum(0, true, false), shape);
        args.emplace_back(param::Cumsum(0, false, true), shape);
        args.emplace_back(param::Cumsum(0, false, false), shape);
    }
    for (auto shape : TensorShapeArray{
                 {1}, {10}, {100}, {1000}, {10000}, {100000}, {1000000}, {1050000},
                 {2100000}}) {
        args_int32.emplace_back(param::Cumsum(0, true, true), shape);
        args_int32.emplace_back(param::Cumsum(0, true, false), shape);
        args_int32.emplace_back(param::Cumsum(0, false, true), shape);
        args_int32.emplace_back(param::Cumsum(0, false, false), shape);
    }
    for (auto arg : args) {
        checker.set_param(arg.param);
        checker.set_epsilon(1e-2);
        checker.set_dtype(0, dtype::Float32()).execs({{arg.shape}, {}});
        checker.set_dtype(0, dtype::Int16()).execs({{arg.shape}, {}});
        checker.set_dtype(0, dtype::Int32()).execs({{arg.shape}, {}});
    }
    for (auto arg : args_int32) {
        checker.set_param(arg.param);
        checker.set_epsilon(1e-2);
        checker.set_dtype(0, dtype::Int32()).execs({{arg.shape}, {}});
    }
}

} // namespace test
} // namespace megdnn

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}

The MegEngine installation package already bundles the CUDA environment needed to run code on a GPU, so there are no separate CPU and GPU builds. To run GPU programs, make sure the machine has a GPU device and that the driver is installed. If you would like to try deep-learning development on a cloud GPU compute platform, you are welcome to visit the MegStudio platform.
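As a quick local sanity check, a minimal Python sketch along the following lines can confirm whether the GPU path is usable before running CUDA-dependent tests such as the one above. It assumes the package is imported as megengine and that is_cuda_available and functional.cumsum are exposed under these names in the installed version; consult your version's documentation if they differ.

import megengine as mge
import megengine.functional as F

# Report whether MegEngine can see a usable CUDA device.
if mge.is_cuda_available():
    x = mge.tensor([1.0, 2.0, 3.0, 4.0])
    # Cumulative sum along axis 0, the same operator the C++ test exercises.
    print(F.cumsum(x, axis=0).numpy())  # expected: [ 1.  3.  6. 10.]
else:
    print("No CUDA device detected; MegEngine will run on CPU.")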