
tensor.py 8.2 kB

# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
from typing import Union

import numpy as np

from .core._imperative_rt import CompNode
from .core._imperative_rt.core2 import Tensor as _Tensor
from .core._imperative_rt.core2 import apply
from .core._trace_option import use_symbolic_shape
from .core._wrap import device as as_device
from .core.ops.builtin import Copy, GetVarShape
from .core.tensor.array_method import ArrayMethodMixin
from .device import _valid_device, get_default_device
from .logger import get_logger
from .utils.deprecation import deprecated
from .utils.naming import AutoNaming

logger = get_logger(__name__)


class Tensor(_Tensor, ArrayMethodMixin):
    r"""
    A tensor object represents a multidimensional, homogeneous array of fixed-size items.

    :param data: the value of the returned Tensor.
    :param dtype: the dtype of the returned Tensor. Uses data's dtype if not specified.
    :param device: the desired device of the returned Tensor. Uses :func:`get_default_device` if not specified.
    :param is_const: whether to make it an ``ImmutableTensor`` in tracing mode.
    :param no_cache: whether to cache it for memory sharing.
    :param name: used for convenience when operating on the dumped model graph.
    """

    grad = None
    dmap_callback = None
    _qparams = None

    def __new__(
        cls,
        data: Union["Tensor", np.ndarray, list, "scalar"] = None,
        dtype: np.dtype = None,
        device: str = None,
        is_const: bool = False,
        no_cache: bool = False,
        name: str = None,
    ):
        if data is None:
            data = []
        # Resolve the target computing node from the ``device`` argument.
        if device is None:
            cn = get_default_device()
        elif isinstance(device, str):
            if cls.dmap_callback is not None:
                cn = CompNode(cls.dmap_callback(device))
            else:
                cn = CompNode(device)
        else:
            if isinstance(device, CompNode):
                cn = device
            else:
                cn = device._cn
        if isinstance(data, _Tensor):
            obj = _Tensor.__new__(cls, data)
        else:
            if isinstance(data, np.ndarray):
                if 0 in data.strides:
                    # Materialize broadcast (zero-stride) arrays before wrapping them.
                    data = data.squeeze().reshape(data.shape)
            obj = _Tensor.__new__(cls, data, dtype, cn, is_const, no_cache, name)
        return obj

    def __init__(
        self,
        data: Union["Tensor", np.ndarray, list, "scalar"],
        dtype: np.dtype = None,
        device: str = None,
        is_const: bool = False,
        no_cache: bool = False,
        name: str = None,
    ):
        pass

    @property
    def shape(self) -> Union[tuple, "Tensor"]:
        r"""
        Returns a :class:`tuple` or a :class:`~.Tensor` representing the tensor dimensions.

        .. note::

            The shape of a tensor is usually represented by a :class:`tuple`,
            but if a tensor is treated as a symbolic placeholder under tracing,
            its shape can also be a :class:`~.Tensor`. See :class:`~.trace` for more details.

        The shape property is usually used to get the current shape of a tensor,
        but may also be used to reshape the tensor in-place by assigning a tuple of tensor dimensions to it.
        As with :func:`~.reshape`, one of the new shape dimensions can be -1,
        in which case its value is inferred from the size of the tensor and the remaining dimensions.
        """
        shape = super().shape
        if shape == () or not use_symbolic_shape():
            return shape
        return apply(GetVarShape(), self)[0]
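
    # Illustrative sketch (added, not part of the original file): with symbolic
    # shape disabled (the default), ``shape`` is a plain tuple; under ``trace``
    # with symbolic shape enabled it is computed through ``GetVarShape`` instead.
    # Assumes ``import megengine as mge``.
    #
    #     x = mge.Tensor([[1.0, 2.0], [3.0, 4.0]])
    #     x.shape            # (2, 2)
    #     x.reshape(1, -1)   # the -1 dimension is inferred, giving shape (1, 4)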

    @property
    def _tuple_shape(self):
        return super().shape

    @property
    def device(self) -> CompNode:
        r"""
        Returns the device that a :class:`~.Tensor` is stored on.
        """
        return super().device

    @property
    def dtype(self) -> np.dtype:
        r"""
        Returns a :class:`numpy.dtype` object representing the data type of a :class:`~.Tensor`.
        """
        return super().dtype

    @property
    def qparams(self):
        r"""
        Returns a :class:`~.QParams` object containing quantization params of a :class:`~.Tensor`.
        """
        from .quantization.utils import create_qparams  # pylint: disable=all

        if self._qparams is None:
            self._qparams = create_qparams()
        return self._qparams

    def numpy(self) -> np.ndarray:
        r"""
        Returns this :class:`~.Tensor` as a :class:`numpy.ndarray`.
        """
        return super().numpy()

    def detach(self):
        r"""
        Returns a new :class:`~.Tensor`, detached from the current graph.
        """
        return super().detach()

    def _reset(self, other):
        if not isinstance(other, _Tensor):
            other = Tensor(other, dtype=self.dtype, device=self.device)
        super()._reset(other)

    def __repr__(self):
        piece = "{}(".format(self.__class__.__name__)
        with np.printoptions(precision=4, suppress=True):
            piece += "{}".format(str(self.numpy()))
        if self.dtype != np.float32:
            piece += ", dtype={}".format(np.dtype(self.dtype).name)
        piece += ", device={}".format(self.device) + ")"
        return piece

    @property
    def name(self):
        return self.c_name

    @name.setter
    def name(self, name):
        self.c_name = name
        AutoNaming.record_var_name(self._mixin_handle, name)

    @deprecated(version="1.0", reason="no need to reuse an existing tensor since 1.0")
    def set_value(self, value):
        self._reset(value)

    @deprecated(version="1.0", reason="use ``*= 0`` instead")
    def reset_zero(self):
        self *= 0

    def to(self, device):
        r"""
        Copies this :class:`~.Tensor` to the specified device. See :func:`~.copy`.
        """
        if isinstance(device, str) and not _valid_device(device):
            raise ValueError(
                "invalid device name {}. For the correct format of the device name, please refer to the instruction of megengine.device.set_default_device()".format(
                    device
                )
            )
        cn = as_device(device).to_c()
        return apply(Copy(comp_node=cn), self)[0]
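
    # Illustrative sketch (added, not part of the original file): ``to`` returns a
    # copy of the tensor on the given device; the device string follows the format
    # accepted by megengine.device.set_default_device(), e.g. "cpu0".
    # Assumes ``import megengine as mge``.
    #
    #     x = mge.Tensor([1, 2, 3])
    #     y = x.to("cpu0")   # copy of x stored on the first CPU device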

    @property
    def requires_grad(self):
        raise AttributeError("requires_grad is reserved for future use")

    @requires_grad.setter
    def requires_grad(self, value):
        raise AttributeError("requires_grad is reserved for future use")

    @requires_grad.deleter
    def requires_grad(self):
        raise AttributeError("requires_grad is reserved for future use")

    def __hash__(self):
        return id(self)

    def __getnewargs__(self):
        r"""__getnewargs__ will be called for pickle serialization or deep copy."""
        return (self.numpy(), self.dtype, self.device.logical_name)

    def __getstate__(self):
        r"""__getstate__ will be called for pickle serialization or deep copy."""
        state = {}
        if self._qparams is not None:
            state["qparams"] = self._qparams
        return state

    def __setstate__(self, state):
        # for compatibility with old versions not using fastcore
        if "data" in state:
            data = state.pop("data")
            device = state.pop("device")
            dtype = state.pop("dtype")
            self._reset(Tensor(data, dtype=dtype, device=device))
        # quantize-related state for deepcopy
        if "qdict" in state:
            qparams = state.pop("qdict")
            logger.warning(
                "Tensor's 'qdict' state is deprecated. Use 'qparams' instead"
            )
        elif "qparams" in state:
            qparams = state.pop("qparams")
        else:
            qparams = None
        self._qparams = qparams
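
    # Illustrative sketch (added, not part of the original file): because
    # __getnewargs__ and __getstate__/__setstate__ are defined, a Tensor can be
    # pickled and deep-copied; the value travels as a numpy array and the device
    # as its logical name. Assumes ``import megengine as mge``.
    #
    #     import copy, pickle
    #     x = mge.Tensor([1.0, 2.0])
    #     y = pickle.loads(pickle.dumps(x))
    #     z = copy.deepcopy(x)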

tensor = Tensor


class Parameter(Tensor):
    r"""
    A kind of Tensor that is to be considered a module parameter.
    """

The MegEngine installation package bundles the CUDA environment needed to run code on a GPU, so there is no separate CPU and GPU build. If you want to run GPU programs, make sure the machine has GPU hardware and a properly installed driver. If you would like to try deep-learning development on a cloud GPU platform, you are welcome to visit the MegStudio platform.
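
A quick way to check whether the GPU path is usable (a minimal sketch; it assumes the helpers megengine.is_cuda_available() and megengine.get_default_device() exposed by your MegEngine version):

    import megengine as mge

    # True only if a CUDA-capable GPU and a working driver were detected
    print(mge.is_cuda_available())
    # Shows the device that newly created tensors are placed on by default
    print(mge.get_default_device())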