refactor(mge/jit): remove is_compiled flag in cpp tensor

GitOrigin-RevId: 15f90af735
Branch: release-1.4
Author: Megvii Engine Team
Commit: 2b7da42287
6 changed files with 19 additions and 60 deletions:

  1. imperative/python/megengine/jit/__init__.py (+0, -6)
  2. imperative/python/megengine/jit/tracing.py (+10, -13)
  3. imperative/python/src/graph_rt.cpp (+0, -1)
  4. imperative/python/src/tensor.cpp (+5, -23)
  5. imperative/python/src/tensor.h (+1, -2)
  6. imperative/python/src/trace.cpp (+3, -15)
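
In short: before this commit the C++ layer kept a global is_compiled flag, toggled from Python via set_compiled()/unset_compiled(), and consulted it to choose between the *_with_tracing and *_compiled_mode Python callbacks. After it, the choice lives entirely in Python: the tracing callbacks simply test whether the active trace has already built its graph. A minimal sketch of the new dispatch, using the names from tracing.py and eliding the symbolic/eager branches:

    # sketch only; the real signature is apply_with_tracing(op: OpDef, *args: RawTensor)
    def apply_with_tracing(op, *args):
        if active_trace._graph:
            # a compiled graph exists, so this is the second or a later run;
            # route straight to the compiled-mode handler
            return apply_compiled_mode(op, *args)
        # first (untraced) run: record the op symbolically or eagerly
        ...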

imperative/python/megengine/jit/__init__.py (+0, -6)

@@ -7,15 +7,11 @@
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from ..core._imperative_rt.core2 import (
set_cpp_apply_compiled_mode,
set_cpp_apply_const_compiled_mode,
set_cpp_apply_const_with_tracing,
set_cpp_apply_with_tracing,
)
from .sublinear_memory_config import SublinearMemoryConfig
from .tracing import (
apply_compiled_mode,
apply_const_compiled_mode,
apply_const_with_tracing,
apply_with_tracing,
exclude_from_trace,
@@ -24,5 +20,3 @@ from .tracing import (

set_cpp_apply_with_tracing(apply_with_tracing)
set_cpp_apply_const_with_tracing(apply_const_with_tracing)
set_cpp_apply_compiled_mode(apply_compiled_mode)
set_cpp_apply_const_compiled_mode(apply_const_compiled_mode)

imperative/python/megengine/jit/tracing.py (+10, -13)

@@ -12,20 +12,16 @@ import functools
import itertools
import json
import os
import typing
import weakref

import numpy as np

from ..core._imperative_rt import GraphProfiler, common
from ..core._imperative_rt import GraphProfiler
from ..core._imperative_rt.core2 import Tensor as RawTensor
from ..core._imperative_rt.core2 import (
TensorWeakRef,
apply,
set_compiled,
set_tracing,
skip_tracing,
unset_compiled,
unset_tracing,
)
from ..core._imperative_rt.ops import (
@@ -394,7 +390,6 @@ class trace:
if self._untraced:
self._init_trace(self._symbolic)
else:
set_compiled()
if self._graph is None:
self._compile()
self._graph.execute()
@@ -442,7 +437,6 @@ class trace:
self._tensor_remaps = None
self._set_active(False)
set_symbolic_shape(self._save_symbolic_shape)
unset_compiled()
unset_tracing()

def do_exit():
@@ -989,11 +983,6 @@ class trace:
raise RuntimeError("trace is not set with profiling=True")
return json.loads(self._profiler.get())

def __del__(self):
for x in self._tinfo:
if getattr(x, "bound_data", None):
x.bound_data = None

def trace(self, *args, **kwargs):
raise NotImplementedError(
"trace is deemed unbeneficial with the new "
@@ -1148,6 +1137,9 @@ def apply_const_compiled_mode(value, dtype, device, is_const, no_cache, name):


def apply_with_tracing(op: OpDef, *args: RawTensor):
if active_trace._graph:
# if member _graph exists, then we are in compiled mode
return apply_compiled_mode(op, *args)
if hasattr(op, "scope"):
op.scope = AutoNaming.get_scope()
if active_trace._symbolic:
@@ -1162,11 +1154,16 @@ def apply_with_tracing(op: OpDef, *args: RawTensor):


def apply_const_with_tracing(value, dtype, device, is_const, no_cache, name):
if active_trace._graph:
return apply_const_compiled_mode(value, dtype, device, is_const, no_cache, name)
if active_trace._symbolic:
outputs = apply_const_symbolic_mode(value, dtype, device, name)
else:
unset_tracing()
outputs = (RawTensor(value, dtype, device, False, name),)
outputs = RawTensor(value, dtype, device, False, name)
if np.array(value).ndim == 0:
setscalar(outputs)
outputs = (outputs,)
set_tracing()
active_trace._record_const(outputs)
return list(outputs)
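
One detail in apply_const_with_tracing is worth spelling out: the eager branch calls unset_tracing() before constructing the RawTensor and set_tracing() afterwards. Without that bracketing, the constructor would see is_tracing set and, per the is_const && is_tracing branch in tensor.cpp below, re-enter this very callback. The same pattern could be written as a context manager; a hypothetical helper, not present in the codebase:

    import contextlib

    from megengine.core._imperative_rt.core2 import set_tracing, unset_tracing

    @contextlib.contextmanager
    def tracing_paused():
        # hypothetical: the real code inlines unset_tracing()/set_tracing()
        # around the RawTensor construction
        unset_tracing()
        try:
            yield
        finally:
            set_tracing()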

imperative/python/src/graph_rt.cpp (+0, -1)

@@ -23,7 +23,6 @@
#include "./common.h"
#include "./ops.h"
#include "megbrain/gopt/inference.h"
#include "megbrain/imperative/ops/utility.h"


namespace py = pybind11;


imperative/python/src/tensor.cpp (+5, -23)

@@ -36,27 +36,21 @@ namespace mgb::imperative::python {

interpreter::Interpreter::Channel* interpreter_for_py;

PyObject *cpp_apply_with_tracing, *cpp_apply_const_with_tracing,
*cpp_apply_compiled_mode, *cpp_apply_const_compiled_mode;

PyObject *cpp_apply_with_tracing, *cpp_apply_const_with_tracing;
PyObject *cpp_apply_backward_varnode;


#define REGISTE_APPLY_FUNC(mode) \
void set_##mode(py::object pyf) { \
mode = pyf.ptr(); \
}

REGISTE_APPLY_FUNC(cpp_apply_with_tracing)
REGISTE_APPLY_FUNC(cpp_apply_const_with_tracing)
REGISTE_APPLY_FUNC(cpp_apply_compiled_mode)
REGISTE_APPLY_FUNC(cpp_apply_const_compiled_mode)
REGISTE_APPLY_FUNC(cpp_apply_backward_varnode)

#undef REGISTE_APPLY_FUNC

bool is_tracing = false;
bool is_compiled = false;

#define SET_UNSET_PROP(mode) \
void set_##mode() { \
@@ -67,7 +61,6 @@ bool is_compiled = false;
} \

SET_UNSET_PROP(tracing)
SET_UNSET_PROP(compiled)

#undef SET_UNSET_PROP

@@ -263,14 +256,7 @@ TensorWrapper::TensorWrapper(PyObject* args, PyObject* kwargs) {

// const op
if (is_const && is_tracing) {
PyObject *pyf;
if (is_compiled) {
pyf = cpp_apply_const_compiled_mode;
} else {
pyf = cpp_apply_const_with_tracing;
}

auto py_ret = PyObject_Call(pyf, tup.ptr(), nullptr);
auto py_ret = PyObject_Call(cpp_apply_const_with_tracing, tup.ptr(), nullptr);
if (!py_ret) throw py::error_already_set();
auto py_list = py::reinterpret_steal<py::list>(py_ret);
if (auto* t = try_cast(py_list[0].ptr())) {
@@ -961,8 +947,6 @@ void init_tensor(py::module m) {

m.def("set_cpp_apply_with_tracing", &set_cpp_apply_with_tracing);
m.def("set_cpp_apply_const_with_tracing", &set_cpp_apply_const_with_tracing);
m.def("set_cpp_apply_compiled_mode", &set_cpp_apply_compiled_mode);
m.def("set_cpp_apply_const_compiled_mode", &set_cpp_apply_const_compiled_mode);
m.def("set_cpp_apply_backward_varnode", &set_cpp_apply_backward_varnode);

m.attr("skip_tracing") = &skip_tracing;
@@ -979,8 +963,6 @@ void init_tensor(py::module m) {

m.def("set_tracing", &set_tracing);
m.def("unset_tracing", &unset_tracing);
m.def("set_compiled", &set_compiled);
m.def("unset_compiled", &unset_compiled);
}

#undef MGE_PY_INTERFACE
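
With the compiled-mode setters gone, the Python-facing registration surface of this module shrinks to the tracing and backward hooks. The wiring that jit/__init__.py still performs, mirroring the first diff above, shown here as a standalone sketch:

    from megengine.core._imperative_rt.core2 import (
        set_cpp_apply_const_with_tracing,
        set_cpp_apply_with_tracing,
    )
    from megengine.jit.tracing import apply_const_with_tracing, apply_with_tracing

    # register the two remaining Python callbacks with the C++ dispatcher
    set_cpp_apply_with_tracing(apply_with_tracing)
    set_cpp_apply_const_with_tracing(apply_const_with_tracing)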


imperative/python/src/tensor.h (+1, -2)

@@ -237,7 +237,6 @@ template <typename... Args>
constexpr bool is_all_tensor_ptr = (... && std::is_same_v<decltype(resolve_arrow(std::declval<Args>())), Tensor*>);

extern bool is_tracing; // FIXME: should use ApplyContext::global_enable
extern bool is_compiled;

template <typename... Args, std::enable_if_t<is_all_tensor_ptr<Args...>, int> = 0>
apply_result_t apply(std::shared_ptr<OpDef> op, Args&&... args) {
@@ -282,7 +281,7 @@ inline auto apply(std::shared_ptr<OpDef> op, Tensor*const* args, size_t nargs) {

void init_tensor(pybind11::module);

extern PyObject *cpp_apply_with_tracing, *cpp_apply_compiled_mode;
extern PyObject *cpp_apply_with_tracing;
extern PyObject *cpp_apply_backward_varnode;

} // namespace mgb::imperative::python


imperative/python/src/trace.cpp (+3, -15)

@@ -22,7 +22,6 @@ apply_result_t apply_trace(ApplyContext& ctx) {
apply_result_t outputs;

if (ctx.backward) {
// reach here when compiled=True
// call megbrain_graph.py apply(BackwardGraph, *args)
auto args = py::tuple(ctx.nargs + 1);
args[0] = py::cast(ctx.op);
@@ -42,27 +41,16 @@ apply_result_t apply_trace(ApplyContext& ctx) {
return outputs;
}

PyObject* pyf;
if (is_compiled) {
// run apply in compiled mode, step 2, 3, etc
pyf = cpp_apply_compiled_mode;
} else {
// run first step, both symbolic and non symbolic
pyf = cpp_apply_with_tracing;
}

auto args = py::tuple(ctx.nargs + 1);
args[0] = py::cast(ctx.op);
for (size_t i = 0; i < ctx.nargs; i++) {
args[i + 1] = TensorWrapper::make(ctx.args[i]->shared_from_this());
}
auto pyout = PyObject_Call(pyf, args.ptr(), nullptr);
auto pyout = PyObject_Call(cpp_apply_with_tracing, args.ptr(), nullptr);
if (!pyout) throw py::error_already_set();
auto ret = py::reinterpret_steal<py::object>(pyout);

// assumption: python function always returns PyList
auto tup = py::reinterpret_borrow<py::list>(ret);
for (auto i = 0; i < tup.size(); i++) {
auto tup = py::reinterpret_steal<py::list>(pyout);
for (size_t i = 0; i < tup.size(); i++) {
auto tw = TensorWrapper::try_cast(tup[i].ptr());
outputs.emplace_back(tw->m_tensor);
}
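
Two small things about the last hunk: PyObject_Call returns a new reference, so stealing the result directly into a py::list (as the new code does) replaces the old two-step steal-into-ret-then-borrow without changing ownership, and the loop index becomes size_t to match tup.size(). The loop still leans on the commented assumption that the callback always returns a PyList; on the Python side both tracing callbacks uphold it by ending with return list(outputs). A toy illustration of that contract, with a hypothetical minimal callback:

    def minimal_tracing_callback(op, *tensors):
        # stand-in for the real computation; only the return shape matters here
        outputs = (t for t in tensors)
        # the C++ caller in trace.cpp expects a Python *list* of tensor
        # wrappers back, never a tuple or a bare tensor
        return list(outputs)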

