diff --git a/imperative/python/megengine/jit/tracing.py b/imperative/python/megengine/jit/tracing.py
index 73817a0a..925cb8ee 100644
--- a/imperative/python/megengine/jit/tracing.py
+++ b/imperative/python/megengine/jit/tracing.py
@@ -293,7 +293,9 @@ class trace:
             h = getattr(x, "_mixin_handle", -1)
             if h < 0 or (not self._capture_as_const and self._tinfo[h].exported):
                 h, info = self._new_handle()
-                name = auto_naming.get_scope() + "." + x.c_name if x.c_name else x._name
+                name = (
+                    auto_naming.get_scope() + "." + (x.c_name if x.c_name else x._name)
+                )
                 info.name = name
                 info.external = True
                 info.device = x.device
@@ -1123,11 +1125,11 @@ def apply_symbolic_mode(op: OpDef, *args: RawTensor):
     return outputs
 
 
-def apply_const_symbolic_mode(value, dtype, device):
+def apply_const_symbolic_mode(value, dtype, device, name):
     graph = active_trace._lazy_eval_graph
     # don't need to unset tracing
     # because varnode construction will ignore tracing flag
-    ret = RawTensor(graph.make_const(value, dtype=dtype, device=device))
+    ret = RawTensor(graph.make_const(value, dtype=dtype, device=device, name=name))
     if np.array(value).ndim == 0:
         setscalar(ret)
     return (ret,)
@@ -1175,7 +1177,7 @@ def apply_with_tracing(op: OpDef, *args: RawTensor):
 
 def apply_const_with_tracing(value, dtype, device, is_const, no_cache, name):
     if active_trace._symbolic:
-        outputs = apply_const_symbolic_mode(value, dtype, device)
+        outputs = apply_const_symbolic_mode(value, dtype, device, name)
     else:
         unset_tracing()
         outputs = (RawTensor(value, dtype, device, False, name),)
diff --git a/imperative/python/megengine/tensor.py b/imperative/python/megengine/tensor.py
index 9194c34b..5c941893 100644
--- a/imperative/python/megengine/tensor.py
+++ b/imperative/python/megengine/tensor.py
@@ -33,7 +33,7 @@ class Tensor(_Tensor, ArrayMethodMixin):
     _q_dict = None
 
     def __new__(
-        cls, data, dtype=None, device=None, is_const=False, no_cache=False, name=""
+        cls, data, dtype=None, device=None, is_const=False, no_cache=False, name=None
     ):
         if device is None:
             cn = get_default_device()
diff --git a/imperative/python/src/tensor.cpp b/imperative/python/src/tensor.cpp
index 1cc7657a..44c4099e 100644
--- a/imperative/python/src/tensor.cpp
+++ b/imperative/python/src/tensor.cpp
@@ -234,7 +234,8 @@ TensorWrapper::TensorWrapper(PyObject* args, PyObject* kwargs) {
             CompNode cn = tup[2].cast<CompNode>();
             bool is_const = tup[3].cast<bool>();
             bool no_cache = nargs == 6 ? tup[4].cast<bool>() : false;
-            std::string name = tup[nargs - 1].cast<std::string>();
+            std::string name;
+            if (tup[nargs - 1].ptr() != Py_None) name = tup[nargs - 1].cast<std::string>();
 
             // const op
             if (is_const && is_tracing) {
diff --git a/imperative/python/test/unit/functional/test_tensor.py b/imperative/python/test/unit/functional/test_tensor.py
index 6ad39a99..b2898c0f 100644
--- a/imperative/python/test/unit/functional/test_tensor.py
+++ b/imperative/python/test/unit/functional/test_tensor.py
@@ -408,6 +408,15 @@ def test_copy_d2d():
    copy_test("gpu0:0", "gpu0:1")
 
 
+def test_name():
+    x = tensor(0)
+    assert x.name == ""
+    x.name = "x"
+    assert x.name == "x"
+    x = tensor(0, name="x")
+    assert x.name == "x"
+
+
 def test_q_dict():
     x = tensor(1)
     assert x.q_dict["scale"] is None
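
A minimal sketch of the user-visible behavior after this patch, mirroring the new test_name test above (assumes megengine is installed and that tensor is the public alias of megengine.Tensor, as used in the test file):

    from megengine import tensor

    x = tensor(0)              # no name passed: the Python default is now None,
                               # which the C++ constructor maps to ""
    assert x.name == ""
    x.name = "x"               # names can be assigned after construction
    assert x.name == "x"
    y = tensor(0, name="y")    # or supplied as a constructor keyword
    assert y.name == "y"

Changing the Python-side default from name="" to name=None appears intended to let TensorWrapper distinguish "no name given" from an explicitly empty name, and apply_const_symbolic_mode now threads the name through to graph.make_const so that constants keep their names under symbolic tracing.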