
trace.cpp 3.0 kB

/**
 * \file imperative/python/src/trace.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

#include "./trace.h"
#include "./helper.h"
#include "megbrain/imperative/ops/autogen.h"

namespace py = pybind11;

namespace mgb::imperative::python {

// Apply the op directly on the underlying VarNodes and wrap the resulting
// VarNodes back into Tensors.
apply_result_t apply_tensor_on_var_node(ApplyContext& ctx) {
    apply_result_t outputs;

    cg::VarNodeArray vinputs(ctx.nargs);
    for (size_t i = 0; i < ctx.nargs; i++) {
        vinputs[i] = ctx.args[i]->m_var;
    }
    auto ovars = OpDef::apply_on_var_node(*ctx.op, vinputs);

    for (size_t i = 0; i < ovars.size(); i++) {
        outputs.emplace_back(std::make_shared<Tensor>(ovars[i]));
    }
    return outputs;
}

apply_result_t apply_trace(ApplyContext& ctx) {
    apply_result_t outputs;

    // An argument that carries a VarNode but no interpreter handle indicates
    // the op should be applied on the graph directly.
    bool run_apply_on_var_node = false;
    for (size_t i = 0; i < ctx.nargs; i++) {
        run_apply_on_var_node |=
                ((ctx.args[i]->m_handle.get() == nullptr) & (ctx.args[i]->m_var != nullptr));
    }

    if (ctx.backward) {
        // reach here when symbolic=True or compiled=True
        // call megbrain_graph.py apply(BackwardGraph, *args)
        auto args = py::tuple(ctx.nargs);
        for (size_t i = 0; i < ctx.nargs; i++) {
            args[i] = py::cast(ctx.args[i]->m_var);
        }
        py::object ret = cpp_apply_backward_varnode(py::cast(ctx.op), *args);
        if (!ret) {
            throw py::value_error("invalid py object call");
        }

        // assumption: python function always returns PyList
        auto tup = py::reinterpret_borrow<py::list>(ret);
        for (size_t i = 0; i < tup.size(); i++) {
            auto pitem = tup[i].cast<cg::VarNode*>();
            outputs.emplace_back(std::make_shared<Tensor>(pitem));
        }
        return outputs;
    }

    if (run_apply_on_var_node && !is_symbolic) {
        return apply_tensor_on_var_node(ctx);
    }

    // Pick the Python-side entry point for this step of the trace.
    py::object pyf;
    if (is_compiled) {
        // run apply in compiled mode, step 2, 3, etc
        pyf = cpp_apply_compiled_mode;
    } else {
        // run first step, both symbolic and non symbolic
        pyf = cpp_apply_with_tracing;
    }

    auto args = py::tuple(ctx.nargs);
    for (size_t i = 0; i < ctx.nargs; i++) {
        args[i] = TensorWrapper::make(std::move(std::shared_ptr<Tensor>(ctx.args[i]))).release();
    }
    auto ret = pyf(py::cast(ctx.op), *args);

    // assumption: python function always returns PyList
    auto tup = py::reinterpret_borrow<py::list>(ret);
    for (size_t i = 0; i < tup.size(); i++) {
        auto tw = TensorWrapper::cast_safe(tup[i].ptr());
        outputs.emplace_back(tw->m_tensor);
    }
    return outputs;
}

} // namespace mgb::imperative::python
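The symbolic/compiled branches above are driven from Python by MegEngine's tracing decorator. As a rough illustration only, the sketch below shows how a user-level trace with symbolic=True is typically set up; it assumes the public megengine.jit.trace API and does not claim that this exact call path is the only way apply_trace is reached.

# Minimal, hedged usage sketch (assumes megengine and numpy are installed).
import numpy as np
import megengine
from megengine.jit import trace

@trace(symbolic=True)          # symbolic=True corresponds to the "symbolic" mode in the comments above
def fwd(x):
    return x * 2 + 1           # simple elementwise computation recorded into the trace

x = megengine.tensor(np.ones((4,), dtype="float32"))
y = fwd(x)                     # first call records the graph; later calls reuse the compiled version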

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU or GPU build. To run GPU programs, make sure the machine has a GPU and that the driver is installed. If you would like to try deep learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.
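A quick way to confirm that the bundled CUDA environment actually sees a GPU is shown below; this is a minimal sketch assuming the megengine.is_cuda_available helper is available in your installed version.

import megengine

if megengine.is_cuda_available():
    print("GPU detected; CUDA execution is available.")
else:
    print("No GPU detected; MegEngine will run on the CPU.")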