
// tensor_utils.cpp

#include "megbrain/common.h"
#include "megbrain/dtype.h"
#include "megbrain/imperative/ops/autogen.h"
#include "megbrain/imperative/ops/backward_graph.h"
#include "megbrain/imperative/ops/utility.h"
#include "megbrain/imperative/profiler.h"
#include "megbrain/imperative/transformations/eval.h"
#include "megbrain/imperative/transformations/lazy.h"
#include "megbrain/imperative/transformations/scalar.h"
#include "megbrain/imperative/transformations/symbol.h"
#include "megbrain/imperative/transformations/trace.h"
#include "megbrain/imperative/utils/map.h"
#include "megbrain/opr/io.h"
#include "megbrain/plugin/profiler.h"

#include "./common.h"
#include "./grad.h"
#include "./graph_rt.h"
#include "./helper.h"
#include "./module_trace.h"
#include "./numpy_dtypes.h"
#include "./tensor.h"
#include "./tensor_utils.h"
#include "./transformation.h"

#include <object.h>
#include <pybind11/numpy.h>
#include <pybind11/operators.h>
#include <pybind11/pytypes.h>
#include <pyerrors.h>
#include <range/v3/all.hpp>

#include <string>
#include <unordered_map>

#include "../../src/impl/mgb_cg_impl.h"

namespace py = pybind11;
namespace views = ranges::views;

namespace mgb::imperative::python {

/* ============== convert inputs ============== */

// map numpy.dtype.kind to priority
inline uint8_t category_priority(char c) {
    switch (c) {
        case 'f':
            return 3;  // floating-point
        case 'i':
            return 2;  // signed integer
        case 'u':
            return 2;  // unsigned integer
        case 'b':
            return 1;  // boolean
        default:
            return 0;
    }
}

// Returns the maximum value of the priority of each type in the list `types`.
uint8_t max_priority(SmallVector<PyArray_Descr*> types) {
    if (types.size() == 0) {
        return 0;
    } else {
        uint8_t max_p = 0;
        for (auto&& desc : types) {
            max_p = std::max(max_p, category_priority(desc->kind));
        }
        return max_p;
    }
}

// Returns the data type with sufficient size to hold all types of
// category `cat` in the list `types`.
PyArray_Descr* promote_types(SmallVector<PyArray_Descr*> types, uint8_t cat) {
    // Return value: New reference
    SmallVector<PyArray_Descr*> used_types;
    for (auto&& desc : types) {
        auto&& v = category_priority(desc->kind);
        if (v == cat) {
            used_types.emplace_back(desc);
        }
    }
    mgb_assert(used_types.size() > 0, "size of used_types is 0");
    PyArray_Descr* res = used_types[0];
    Py_INCREF(res);
    for (size_t i = 1; i < used_types.size(); ++i) {
        PyArray_Descr* tmp = PyArray_PromoteTypes(used_types[i], res);
        Py_DECREF(res);
        res = tmp;
    }
    return res;
}

PyArray_Descr* scalar2dtype(PyObject* arg) {
    // Return value: New reference
    if (PyBool_Check(arg)) {
        auto&& descr = PyArray_DescrFromType(NPY_BOOL);
        return descr;
    }
    if (PyLong_CheckExact(arg)) {
        auto&& descr = PyArray_DescrFromType(NPY_INT32);
        return descr;
    }
    if (PyFloat_CheckExact(arg)) {
        auto&& descr = PyArray_DescrFromType(NPY_FLOAT32);
        return descr;
    }
    return nullptr;
}

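// Core of dtype_promotion(): numpy descriptors of tensor-like arguments and
// of plain Python scalars are collected separately, and the group with the
// higher category priority is promoted. For example, a Python float mixed
// with int32 tensors resolves to float32, while an int scalar mixed with
// float tensors leaves the tensor dtype in charge.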
PyArray_Descr* _dtype_promotion(PyObject* const* args, size_t nargs) {
    // Return value: New reference
    SmallVector<PyArray_Descr*> tensors;
    SmallVector<PyArray_Descr*> scalars;

    bool is_tuple = false;
    PyObject* tuple = nullptr;
    if (nargs == 1 && (PyTuple_Check(args[0]) || PyList_Check(args[0]))) {
        if (PyList_Check(args[0])) {
            tuple = PyList_AsTuple(args[0]);
        } else {
            tuple = args[0];
            Py_INCREF(tuple);
        }
        nargs = PyTuple_Size(tuple);
        is_tuple = true;
    }

    for (size_t i = 0; i < nargs; ++i) {
        PyObject* handle = is_tuple ? PyTuple_GetItem(tuple, i) : args[i];
        if (handle == Py_None)
            continue;
        TensorWrapper* tw = TensorWrapper::try_cast(handle);
        if (tw) {
            mgb::DType type = tw->m_tensor->dtype();
            auto&& descr = npy::dtype_mgb2np_descr(type);
            Py_INCREF(descr.get());
            tensors.emplace_back(descr.get());
        } else {
            if (PyArray_Check(handle) || PyArray_CheckScalar(handle)) {
                auto&& descr = PyArray_DescrFromObject(handle, nullptr);
                tensors.emplace_back(descr);
                continue;
            }

            PyArray_Descr* descr = scalar2dtype(handle);
            if (descr) {
                scalars.emplace_back(descr);
                continue;
            }
        }
    }

    auto max_pri_scalars = max_priority(scalars);
    auto max_pri_tensors = max_priority(tensors);

    if (max_pri_scalars <= 0 && max_pri_tensors <= 0) {
        throw py::value_error("invalid input, no dtype available");
    }

    PyArray_Descr* res;
    if (max_pri_scalars > max_pri_tensors) {
        res = promote_types(scalars, max_pri_scalars);
    } else {
        res = promote_types(tensors, max_pri_tensors);
    }
    for (auto* p : tensors) {
        Py_DECREF(p);
    }
    for (auto* p : scalars) {
        Py_DECREF(p);
    }
    Py_XDECREF(tuple);
    return res;
}

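// Resolves the computing device shared by the tensor arguments: the first
// tensor's comp_node becomes the reference, a later tensor on a different
// comp_node raises a value error, and the configured default device is used
// when no tensor argument is present.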
CompNode _get_device(PyObject* const* args, size_t nargs) {
    bool is_tuple = false;
    PyObject* tuple = nullptr;
    if (nargs == 1 && (PyTuple_Check(args[0]) || PyList_Check(args[0]))) {
        if (PyList_Check(args[0])) {
            tuple = PyList_AsTuple(args[0]);
        } else {
            tuple = args[0];
            Py_INCREF(tuple);
        }
        nargs = PyTuple_Size(tuple);
        is_tuple = true;
    }
    bool valid = false;
    CompNode cn;
    for (size_t i = 0; i < nargs; ++i) {
        PyObject* handle = is_tuple ? PyTuple_GetItem(tuple, i) : args[i];
        TensorWrapper* tw = TensorWrapper::try_cast(handle);
        if (tw) {
            if (!valid) {
                cn = tw->m_tensor->comp_node();
                valid = true;
            } else {
                CompNode cn1 = tw->m_tensor->comp_node();
                if (cn1 != cn) {
                    throw py::value_error(ssprintf(
                            "ambiguous device: %s (from %s) vs %s (from %s)",
                            cn.to_string().c_str(), cn.to_string_logical().c_str(),
                            cn1.to_string().c_str(), cn1.to_string_logical().c_str()));
                }
            }
        }
    }
    if (!valid) {
        // release the temporary tuple before taking the fallback path so the
        // reference acquired above does not leak
        Py_XDECREF(tuple);
        return CompNode::load(get_default_device());
    }
    Py_XDECREF(tuple);
    return cn;
}

// Returns the dtype that would result from performing an arithmetic
// operation on the provided input tensors and scalars.
PyObject* dtype_promotion(PyObject* self, PyObject* const* args, size_t nargs) {
    if (!nargs) {
        PyErr_SetString(PyExc_TypeError, "empty input is not allowed");
        return nullptr;
    }
    try {
        PyArray_Descr* res = _dtype_promotion(args, nargs);
        return py::cast(npy::dtype_np2mgb_descr(res)).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* get_device(PyObject* self, PyObject* const* args, size_t nargs) {
    if (!nargs) {
        PyErr_SetString(PyExc_TypeError, "empty input is not allowed");
        return nullptr;
    }
    try {
        CompNode cn = _get_device(args, nargs);
        return py::cast(cn).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

bool is_scalar(PyObject* tensor) {
    auto* tw = TensorWrapper::try_cast(tensor);
    if (tw) {
        return tw->m_tensor->is_scalar();
    }
    return PyArray_CheckAnyScalar(tensor);
}

bool is_bool_list(PyObject* arg) {
    if (!PyList_Check(arg)) {
        return false;
    }
    size_t sz = PyList_Size(arg);
    if (!sz) {
        return false;
    }
    for (size_t i = 0; i < sz; ++i) {
        PyObject* handle = PyList_GetItem(arg, i);
        if (!PyBool_Check(handle)) {
            return false;
        }
    }
    return true;
}

bool is_bool_dtype(PyObject* args) {
    if (!PyObject_HasAttrString(args, "dtype"))
        return false;
    PyObject* dobj = PyObject_GetAttrString(args, "dtype");
    PyArray_Descr* dtype;
    PyArray_DescrConverter(dobj, &dtype);
    bool ret = (dtype->kind == 'b');
    Py_XDECREF(dtype);
    Py_XDECREF(dobj);
    return ret;
}

py::object device2obj(py::handle device, bool mapping = false) {
    if (device.ptr() == Py_None) {
        return py::cast(CompNode::load(get_default_device()));
    } else if (py::isinstance<py::str>(device)) {
        if (mapping) {
            py::object dmap = getattr(
                    py::reinterpret_borrow<py::object>((PyObject*)py_tensor_type),
                    "dmap_callback");
            if (dmap.ptr() != Py_None) {
                return device2obj(dmap(device), false);
            }
        }
        return py::cast(CompNode::load(device.cast<std::string>()));
    } else if (py::isinstance<CompNode>(device)) {
        return py::reinterpret_borrow<py::object>(device);
    } else {
        return getattr(device, "_cn");
    }
}

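// Builds a constant Tensor from a Python value with the given dtype/device.
// ndarrays that carry zero strides (typically views created by numpy
// broadcasting) are round-tripped through squeeze()/reshape() first so that
// numpy materializes a regular array before conversion.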
py::object _Const(py::handle value, py::handle dtype, py::handle device) {
    py::object val = py::reinterpret_borrow<py::object>(value);
    if (PyArray_Check(value.ptr())) {
        py::tuple strides =
                py::reinterpret_borrow<py::tuple>(getattr(value, "strides"));
        bool need_squeeze = false;
        for (size_t i = 0; i < strides.size(); ++i) {
            if (strides[i].cast<ptrdiff_t>() == 0) {
                need_squeeze = true;
            }
        }
        if (need_squeeze) {
            val = py::reinterpret_borrow<py::array>(value);
            py::object orig_shp = val.attr("shape");
            val = val.attr("squeeze")();
            val = val.attr("reshape")(orig_shp);
        }
    }
    py::object device_obj = device2obj(device, true);
    py::tuple tup =
            py::make_tuple(val, dtype, device_obj, true, false, py::none(), py::none());
    return TensorWrapper::make(py_tensor_type, tup.ptr(), nullptr);
}

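// Flattens a shape-like object (Tensor, ndarray, list, tuple or scalar) into
// a plain Python tuple of ints; Tensor elements are evaluated via .numpy().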
py::tuple _make_shape_tuple(py::handle shape) {
    py::list orig;
    py::list ret(0);
    auto solve_one = [&](py::handle val) {
        if (TensorWrapper::try_cast(val.ptr())) {
            py::object np = getattr(val, "numpy")();
            PyArrayObject* arr = (PyArrayObject*)np.ptr();
            PyObject* maybe_list = PyArray_ToList(arr);
            if (PyList_Check(maybe_list)) {
                py::list may = py::reinterpret_steal<py::list>(maybe_list);
                for (size_t i = 0; i < may.size(); ++i) {
                    ret.append(may[i]);
                }
            } else {
                mgb_assert(PyLong_Check(maybe_list));
                ret.append(PyLong_AsLong(maybe_list));
                Py_XDECREF(maybe_list);
            }
        } else if (PyArray_Check(val.ptr())) {
            ret.append(PyArray_PyIntAsInt(val.ptr()));
        } else {
            ret.append(PyLong_AsLong(val.ptr()));
        }
    };
    if (PyArray_Check(shape.ptr()) && !PyArray_CheckAnyScalar(shape.ptr())) {
        orig = py::reinterpret_steal<py::list>(
                PyArray_ToList((PyArrayObject*)shape.ptr()));
        for (size_t i = 0; i < orig.size(); ++i) {
            solve_one(orig[i]);
        }
    } else if (PyList_Check(shape.ptr())) {
        orig = py::reinterpret_borrow<py::list>(shape);
        for (size_t i = 0; i < orig.size(); ++i) {
            solve_one(orig[i]);
        }
    } else if (PyTuple_Check(shape.ptr())) {
        py::tuple tup = py::reinterpret_borrow<py::tuple>(shape);
        for (size_t i = 0; i < tup.size(); ++i) {
            solve_one(tup[i]);
        }
    } else {
        solve_one(shape);
    }
    return py::reinterpret_steal<py::tuple>(PyList_AsTuple(ret.ptr()));
}

bool is_tensor(py::handle arg) {
    return bool(TensorWrapper::try_cast(arg.ptr()));
}

bool is_py_sequence(py::handle arg) {
    if (PyArray_Check(arg.ptr()) || TensorWrapper::try_cast(arg.ptr())) {
        return false;
    }
    return PySequence_Check(arg.ptr());
}

py::object get_res_by_refhdl(
        py::handle value, py::handle dtype, py::handle device, py::handle ref_hdl) {
    py::object res = _Const(value, dtype, device);
    py::object ref;
    if (py::isinstance<py::tuple>(ref_hdl)) {
        py::tuple tup = py::reinterpret_borrow<py::tuple>(ref_hdl);
        if (tup.size()) {
            ref = tup[0];
        } else {
            ref = py::none();
        }
    } else {
        ref = py::reinterpret_borrow<py::object>(ref_hdl);
    }
    if (PyObject_TypeCheck(ref.ptr(), py_varnode_type)) {
        auto temp = dtype.cast<mgb::DType>();
        ComputingGraph* graph = getattr(ref, "graph").cast<ComputingGraph*>();
        cg::VarNode* node = getattr(ref, "var").cast<cg::VarNode*>();
        CompNode cn;
        if (device.ptr() == Py_None) {
            cn = node->comp_node();
        } else {
            cn = device2obj(device).cast<CompNode>();
        }
        OperatorNodeConfig config(cn);
        auto hv = npy::np2tensor(
                value.ptr(), npy::Meth::borrow(cn), dtype.cast<mgb::DType>());
        auto typeobj = ref.get_type();
        return typeobj(opr::ImmutableTensor::make(*graph, hv, config).node());
    }
    return res;
}

mgb::DType _get_dtype(py::handle tensor) {
    auto tw = TensorWrapper::try_cast(tensor.ptr());
    return tw->m_tensor->dtype();
}

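// Casts `tensor` to the requested numpy dtype by applying a TypeCvt op, or
// returns the input unchanged when the dtype already matches.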
py::object _astype_cpp(py::handle tensor, py::handle dtype_hdl) {
    PyArray_Descr* descr;
    if (!PyArray_DescrConverter(dtype_hdl.ptr(), &descr)) {
        throw py::value_error(ssprintf(
                "can not convert to numpy.dtype from %s",
                dtype_hdl.ptr()->ob_type->tp_name));
    }
    PyArray_Descr* cur = npy::dtype_mgb2np_descr(_get_dtype(tensor)).get();
    if (!dtype_equal(cur, descr)) {
        std::shared_ptr<OpDef> op = TypeCvt::make(npy::dtype_np2mgb_descr(descr));
        py::object Op = py::cast(op);
        PyObject* p[2] = {Op.ptr(), tensor.ptr()};
        py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
        return ret[0];
    } else {
        return py::reinterpret_borrow<py::object>(tensor);
    }
}

py::object _convert_single_value_cpp(
        py::handle value, py::handle dtype, py::handle device) {
    if (is_tensor(value)) {
        if (_get_dtype(value).category() != DTypeCategory::QUANTIZED) {
            return _astype_cpp(value, dtype);
        }
    } else {
        return _Const(value, dtype, device);
    }
    return py::reinterpret_borrow<py::object>(value);
}

py::object _convert_inputs_cpp(
        PyObject* const* args, size_t nargs, py::object dtype, py::object device) {
    ComputingGraph* graph = nullptr;
    py::handle typeobj;
    py::list lis;
    for (size_t i = 0; i < nargs; ++i) {
        py::handle h = py::handle(args[i]);
        lis.append(h);
    }
    auto convert = [&](py::object value) {
        if (value.is_none()) {
            return value;
        }
        return _convert_single_value_cpp(value, dtype, device);
    };
    for (size_t i = 0; i < lis.size(); ++i) {
        lis[i] = convert(lis[i]);
    }
    return py::reinterpret_steal<py::tuple>(PyList_AsTuple(lis.ptr()));
}

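// Converts a scalar / sequence / Tensor into a 0-d or 1-d tensor, mainly for
// shape-like arguments. Sequences that already contain tensors are flattened
// and joined with a Concat op on the inferred device; anything else becomes a
// constant via get_res_by_refhdl().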
py::object _astensor1d_cpp(
        py::handle value, py::handle dtype, py::handle device, py::handle ref) {
    py::object ret;
    py::object device_obj = py::none();
    py::object ndim_obj = py::none();
    if (device.ptr() != Py_None) {
        device_obj = device2obj(device);
    }
    if (PyObject_TypeCheck(value.ptr(), py_varnode_type)) {
        try {
            getattr(value, "ndim");
        } catch (py::error_already_set& err) {
            if (dtype.ptr() != Py_None) {
                ret = _astype_cpp(value, dtype);
            } else {
                ret = py::reinterpret_borrow<py::object>(value);
            }
            if (device.ptr() != Py_None) {
                std::shared_ptr<OpDef> op = Copy::make(device_obj.cast<CompNode>());
                py::object Op = py::cast(op);
                PyObject* p[2] = {Op.ptr(), ret.ptr()};
                py::tuple copy_ret =
                        py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
                return copy_ret[0];
            }
            return ret;
        }
    }
    size_t ndim = 999;
    if (hasattr(value, "ndim")) {
        ndim = getattr(value, "ndim").cast<size_t>();
        if (ndim != 0 && ndim != 1) {
            throw py::value_error("ndim != 1 or 0, got: " + std::to_string(ndim));
        }
        if (!is_tensor(value)) {
            return get_res_by_refhdl(value, dtype, device, ref);
        } else {
            return py::reinterpret_borrow<py::object>(value);
        }
    }
    if (!is_py_sequence(value)) {
        throw py::type_error();
    }
    py::list lis = py::reinterpret_steal<py::list>(PySequence_List(value.ptr()));
    bool need_concat = false;
    for (size_t i = 0; i < lis.size(); ++i) {
        if (is_tensor(lis[i])) {
            need_concat = true;
            break;
        }
    }
    if (!need_concat) {
        return get_res_by_refhdl(value, dtype, device, ref);
    }
    if (lis.size() > 1) {
        py::list flat_list;
        for (auto item : lis) {
            if (!PyList_Check(item.ptr())) {
                flat_list.append(item);
            } else {
                py::list sub_lis =
                        py::reinterpret_steal<py::list>(PySequence_List(item.ptr()));
                for (auto sub_item : sub_lis) {
                    flat_list.append(sub_item);
                }
            }
        }
        std::vector<PyObject*> c_args(flat_list.size() + 1);
        for (size_t i = 0; i < flat_list.size(); ++i) {
            c_args[i] = flat_list[i].ptr();
        }
        c_args[flat_list.size()] = Py_None;
        py::tuple inp_tup = py::reinterpret_steal<py::tuple>(
                convert_inputs_cpp(NULL, c_args.data(), c_args.size()));
        if (device_obj.is_none()) {
            std::vector<PyObject*> inp(inp_tup.size());
            for (size_t i = 0; i < inp_tup.size(); ++i) {
                inp[i] = inp_tup[i].ptr();
            }
            device_obj = py::cast(_get_device(inp.data(), inp.size()));
        }
        std::shared_ptr<OpDef> op = Concat::make(0, device_obj.cast<CompNode>());
        py::object Op = py::cast(op);
        std::vector<PyObject*> p;
        p.resize(inp_tup.size() + 1);
        p[0] = Op.ptr();
        for (size_t i = 0; i < inp_tup.size(); ++i) {
            p[i + 1] = inp_tup[i].ptr();
        }
        py::tuple concat_ret =
                py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
        ret = concat_ret[0];
    } else {
        ret = lis[0];
    }
    if (dtype.ptr() != Py_None) {
        return _astype_cpp(ret, dtype);
    } else {
        return ret;
    }
}

py::object _get_index(py::object tensor, py::object src) {
    if (!TensorWrapper::try_cast(tensor.ptr())) {
        auto get_const = [&](mgb::DType dtype) -> py::object {
            return _Const(tensor, py::cast(dtype), src.attr("device"));
        };
        if (is_bool_list(tensor.ptr()) || is_bool_dtype(tensor.ptr())) {
            tensor = get_const(dtype::Bool());
        } else {
            tensor = get_const(dtype::Int32());
        }
        if (!is_bool_dtype(tensor.ptr())) {
            return tensor;
        }
    } else {
        if (!is_bool_dtype(tensor.ptr())) {
            return tensor;
        }
    }
    std::shared_ptr<OpDef> op = CondTake::make();
    py::object Op = py::cast(op);
    PyObject* p[3] = {Op.ptr(), tensor.ptr(), tensor.ptr()};
    py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 3));
    return ret[1];
}

py::tuple _try_cond_take(py::handle tensor, py::handle index) {
    if (!hasattr(index, "dtype") || !hasattr(index, "shape")) {
        return py::tuple();
    }
    if (!is_bool_dtype(index.ptr()) ||
        _make_shape_tuple(getattr(index, "shape"))
                .not_equal(_make_shape_tuple(getattr(tensor, "shape")))) {
        return py::tuple();
    }
    py::object iobj;
    if (PyArray_Check(index.ptr())) {
        iobj = _Const(
                index, py::cast((mgb::DType)dtype::Bool()), getattr(tensor, "device"));
    } else {
        iobj = py::reinterpret_borrow<py::object>(index);
    }
    std::shared_ptr<OpDef> op = CondTake::make();
    py::object Op = py::cast(op);
    PyObject* p[3] = {Op.ptr(), tensor.ptr(), iobj.ptr()};
    py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 3));
    return ret;
}

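// Expands a single Ellipsis inside an index tuple into the equivalent run of
// full slices, based on the tensor's ndim. Only one Ellipsis is accepted, and
// boolean indices of unknown ndim cannot be combined with it.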
py::tuple _remove_ellipsis(py::object tensor, py::tuple tuple_val) {
    size_t tuple_size = tuple_val.size();
    size_t ndim_sum = 0, cur_sum = 0;
    int pos = -1;
    bool has_unknown_ndim_bool_index = false;
    for (size_t i = 0; i < tuple_size; ++i) {
        py::object handle = tuple_val[i];
        if (handle.is_none()) {
            continue;
        } else if (handle.ptr() == Py_Ellipsis) {
            pos = static_cast<int>(i);
            for (size_t j = 0; j < i; ++j) {
                py::object t = tuple_val[j];
                if (t.ptr() == Py_Ellipsis) {
                    throw py::index_error("only one ellipsis is allowed.");
                }
            }
        } else {
            size_t ndim_incr = 1;
            if (hasattr(handle, "dtype") && is_bool_dtype(handle.ptr()) &&
                hasattr(handle, "ndim")) {
                py::object ndim;
                try {
                    ndim = getattr(handle, "ndim");
                } catch (py::error_already_set& err) {
                    has_unknown_ndim_bool_index = true;
                }
                // `ndim` stays null if the getattr above threw; guard before
                // inspecting it
                if (ndim.ptr() && PyLong_Check(ndim.ptr())) {
                    ndim_incr = PyLong_AsLong(ndim.ptr());
                } else {
                    has_unknown_ndim_bool_index = true;
                }
            }
            cur_sum += ndim_incr;
        }
    }
    if (pos == -1) {
        return tuple_val;
    } else {
        if (has_unknown_ndim_bool_index) {
            throw py::index_error(
                    "does not support bool index with unknown shape when using "
                    "Ellipsis.");
        }
        try {
            ndim_sum = getattr(tensor, "ndim").cast<size_t>();
        } catch (py::error_already_set& err) {
            throw py::index_error(
                    "does not support Ellipsis when tensor's ndim is unknown.");
        }
        py::tuple ret(ndim_sum - cur_sum + tuple_size - 1);
        size_t idx = 0;
        for (size_t i = 0; i < tuple_size; ++i) {
            if (i == static_cast<size_t>(pos)) {
                for (size_t j = cur_sum; j < ndim_sum; ++j) {
                    ret[idx++] = PySlice_New(NULL, NULL, NULL);
                }
            } else {
                ret[idx++] = tuple_val[i];
            }
        }
        return ret;
    }
}

py::object _reshape_cpp(py::handle inp_hdl, py::handle args);

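// Handles multi-dimensional boolean masks inside an index tuple: the masked
// dimensions of `tensor` are collapsed via reshape so the mask can act as a
// 1-d index, and the rewritten index tuple is returned together with the
// reshaped tensor.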
py::tuple _expand_bool_dim(py::object tensor, py::tuple tuple_val) {
    py::tuple cur_shape = _make_shape_tuple(py::handle(getattr(tensor, "shape")));
    py::list new_tuple_val(0);

    size_t offset = 0;
    size_t tdim = 0;
    size_t nonedim = 0;
    for (size_t i = 0; i < tuple_val.size(); ++i) {
        py::handle k = tuple_val[i];
        if (k.ptr() == Py_None) {
            nonedim++;
            new_tuple_val.append(k);
            continue;
        }
        if (is_bool_dtype(k.ptr())) {
            size_t ndim = getattr(k, "ndim").cast<size_t>();
            if (ndim > 1) {
                py::tuple ishape = _make_shape_tuple(py::handle(getattr(k, "shape")));
                for (size_t j = 0; j < ndim; ++j) {
                    if (cur_shape[tdim + j - offset].cast<size_t>() !=
                        ishape[j].cast<size_t>()) {
                        std::string msg =
                                "boolean index did not match tensor along "
                                "dimension " +
                                std::to_string(tdim + j) + "; dimension is " +
                                std::to_string(
                                        cur_shape[tdim + j - offset].cast<size_t>()) +
                                " but corresponding boolean dimension is " +
                                std::to_string(ishape[j].cast<size_t>());
                        throw py::index_error(msg.c_str());
                    }
                }
                py::object new_k = getattr(k, "reshape")(-1);
                py::object kshape = getattr(new_k, "shape");
                py::list new_shape(0);
                PyObject* sym = PyObject_CallObject(cpp_use_symbolic_shape, nullptr);
                bool is_sym = (sym == Py_True);
                Py_XDECREF(sym);
                if (is_sym) {
                    py::object tshape = getattr(tensor, "shape");
                    for (size_t j = 0; j < i - nonedim; ++j) {
                        new_shape.append(tshape[py::int_(j)]);
                    }
                    new_shape.append(kshape[py::int_(0)]);
                    for (size_t j = tdim + ndim - offset; j < cur_shape.size(); ++j) {
                        new_shape.append(cur_shape[j]);
                    }
                    py::object shape_tensor = _astensor1d_cpp(
                            new_shape, py::none(), py::none(), py::none());
                    tensor = _reshape_cpp(tensor, shape_tensor);
                    cur_shape = _make_shape_tuple(shape_tensor);
                } else {
                    for (size_t j = 0; j < i - nonedim; ++j) {
                        new_shape.append(cur_shape[j]);
                    }
                    new_shape.append(py::reinterpret_borrow<py::tuple>(kshape)[0]);
                    for (size_t j = tdim + ndim - offset; j < cur_shape.size(); ++j) {
                        new_shape.append(cur_shape[j]);
                    }
                    cur_shape = new_shape;
                    tensor = _reshape_cpp(tensor, cur_shape);
                }
                offset++;
                tdim += ndim;
            }
            new_tuple_val.append(k);
        } else {
            new_tuple_val.append(k);
            tdim++;
        }
    }
    return py::make_tuple(tensor, py::reinterpret_borrow<py::tuple>(new_tuple_val));
}

std::pair<size_t, bool> get_ndim_safe(py::handle tensor) {
    if (auto p = TensorWrapper::try_cast(tensor.ptr())) {
        return {p->m_tensor->shape()->ndim, true};
    }
    try {
        return {getattr(tensor, "ndim").cast<size_t>(), true};
    } catch (py::error_already_set& err) {
        return {0, false};
    }
}

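// Normalizes an arbitrary __getitem__ / __setitem__ index into the form the
// indexing ops expect: Ellipsis is expanded, multi-dim bool masks are
// flattened, None entries become AddAxis, and every remaining component is
// encoded as an (axis, has_start, has_stop, has_step, is_index) record plus
// the corresponding index tensors. Returns
// (tensor, index_tensors, items, use_subtensor, need_expand_bool_dim).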
py::tuple _unpack_indexes(py::handle inp_hdl, py::handle idx_hdl) {
    py::object inp = py::reinterpret_borrow<py::object>(inp_hdl);
    py::tuple tuple_val;
    if (py::isinstance<py::tuple>(idx_hdl)) {
        tuple_val = py::reinterpret_borrow<py::tuple>(idx_hdl);
    } else {
        tuple_val = py::make_tuple(idx_hdl);
    }

    bool use_subtensor = true;
    bool need_remove_ellipsis = false;
    bool need_expand_bool_dim = false;
    size_t idx_ndim = 0;
    for (size_t i = 0; i < tuple_val.size(); ++i) {
        py::object k = tuple_val[i];
        if (k.is_none()) {
            continue;
        } else if (k.ptr() == Py_Ellipsis) {
            need_remove_ellipsis = true;
        } else {
            if (is_bool_dtype(k.ptr()) && hasattr(k, "ndim")) {
                size_t ndim = get_ndim_safe(k).first;
                idx_ndim += ndim;
                if (ndim > 1) {
                    need_expand_bool_dim = true;
                }
            } else {
                idx_ndim++;
            }
        }
    }

    try {
        size_t inp_ndim = getattr(inp, "ndim").cast<size_t>();
        if (idx_ndim > inp_ndim) {
            std::string msg = "too many indices for tensor: tensor is " +
                              std::to_string(inp_ndim) + "-dimensional, but " +
                              std::to_string(idx_ndim) + " were indexed";
            throw py::index_error(msg.c_str());
        }
    } catch (py::error_already_set& err) {
        ;  // ignore
    }

    if (need_remove_ellipsis) {
        tuple_val = _remove_ellipsis(inp, tuple_val);
    }

    if (need_expand_bool_dim) {
        py::object shape = getattr(inp, "shape");
        if (shape.ptr() != Py_None) {
            py::tuple ret = _expand_bool_dim(inp, tuple_val);
            inp = ret[0];
            tuple_val = ret[1];
        }
    }

    std::vector<int32_t> axis;
    for (size_t i = 0; i < tuple_val.size(); ++i) {
        if (tuple_val[i].is_none()) {
            axis.push_back(i);
        }
    }
    if (axis.size()) {
        std::shared_ptr<OpDef> op = AddAxis::make(axis);
        py::object Op = py::cast(op);
        PyObject* p[2] = {Op.ptr(), inp.ptr()};
        py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
        inp = ret[0];
    }

    py::list items;
    py::list tensors;
    int cur_axis = -1;

    for (size_t i = 0; i < tuple_val.size(); ++i) {
        py::object handle = tuple_val[i];
        cur_axis++;
        if (handle.is_none()) {
            continue;
        }
        if (!is_scalar(handle.ptr()) && !PySlice_Check(handle.ptr())) {
            use_subtensor = false;
        }
        py::list item;
        item.append(cur_axis);
        auto push = [&](PyObject* v) {
            if (v == Py_None) {
                item.append(false);
            } else {
                item.append(true);
                tensors.append(_get_index(py::reinterpret_borrow<py::object>(v), inp));
            }
        };

        if (PySlice_Check(handle.ptr())) {
            PySliceObject* s = (PySliceObject*)handle.ptr();
            if (s->start == Py_None && s->stop == Py_None && s->step == Py_None) {
                continue;
            }
            push(s->start);
            push(s->stop);
            push(s->step);
            item.append(false);
        } else {
            for (size_t j = 0; j < 3; j++)
                item.append(false);
            push(handle.ptr());
        }
        items.append(item);
    }

    return py::make_tuple(inp, tensors, items, use_subtensor, need_expand_bool_dim);
}

py::object _expand_args(py::handle args) {
    if (!PyTuple_Check(args.ptr())) {
        return py::reinterpret_borrow<py::object>(args);
    }
    py::tuple args_tup = py::reinterpret_borrow<py::tuple>(args.ptr());
    if (args_tup.size() == 1 &&
        (PySequence_Check(args_tup[0].ptr()) || is_tensor(args_tup[0].ptr()))) {
        return py::reinterpret_borrow<py::object>(args_tup[0]);
    } else {
        return py::reinterpret_steal<py::list>(PySequence_List(args_tup.ptr()));
    }
}

std::tuple<std::vector<int32_t>, bool> tuple2vector(py::object shape) {
    std::vector<int32_t> shp;
    if (!PyTuple_Check(shape.ptr())) {
        return {shp, false};
    }
    py::tuple tup = py::reinterpret_borrow<py::tuple>(shape);
    for (size_t i = 0; i < tup.size(); ++i) {
        if (!PyLong_Check(tup[i].ptr())) {
            shp.clear();
            return {shp, false};
        } else {
            shp.push_back(tup[i].cast<int32_t>());
        }
    }
    return {shp, true};
}

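// The immediate-shape fast path is only taken when no tracing is active: it
// is disabled for VarNode inputs, while a Trace transformation is registered,
// and while module tracing is enabled.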
bool enable_fastpath(py::handle inp) {
    auto&& tm_tr = TransformationManager::get_instance()
                           .segments[TransformationManager::Segment::ModuleTrace];
    bool is_varnode = PyObject_TypeCheck(inp.ptr(), py_varnode_type);
    if (is_varnode ||
        TransformationManager::get_instance()
                        .segments[TransformationManager::Segment::Trace]
                        .size() > 0 ||
        (tm_tr.size() > 0 &&
         reinterpret_cast<ModuleTraceTransformation*>(tm_tr[0].get())->enabled())) {
        return false;
    }
    return true;
}

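// broadcast_to: `None` entries in the target shape are filled in from the
// input's own shape (counting from the right). If every entry is a
// non-negative immediate and the fast path is allowed, the shape is baked
// into the Broadcast op; otherwise it is passed as an extra 1-d tensor input.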
py::object _broadcast_cpp(py::handle input, py::handle args) {
    py::object shape = _expand_args(args);
    py::list dims;
    bool all_imm;
    if (PyList_Check(shape.ptr()) || PyTuple_Check(shape.ptr())) {
        dims = py::reinterpret_steal<py::list>(PySequence_List(shape.ptr()));
        mgb_assert(!dims.is_none());
        all_imm = true;
        py::object inp_shape = py::none();
        size_t inp_ndim;
        for (size_t i = 0; i < dims.size(); ++i) {
            py::object dim = dims[i];
            if (dim.is_none()) {
                ptrdiff_t right = (ptrdiff_t)i - dims.size();
                if (inp_shape.is_none()) {
                    inp_shape = input.attr("shape");
                    mgb_assert(!inp_shape.is_none());
                    inp_ndim = py::len(inp_shape);
                }
                if ((ptrdiff_t)inp_ndim + right < 0) {
                    throw py::value_error("size cannot be `None` for new axis");
                }
                dim = inp_shape.attr("__getitem__")(right);
                dims[i] = dim;
            }
            if (py::int_::check_(dim)) {
                if (dim.cast<long>() < 0) {
                    throw py::value_error(ssprintf(
                            "expect shape[%zu] >= 0 or use `None` to auto infer, got "
                            "%s",
                            i, py::repr(dims[i]).cast<std::string>().c_str()));
                }
            } else {
                all_imm = false;
            }
        }
        shape = dims;
    } else {
        all_imm = false;
    }
    bool fastpath = all_imm && enable_fastpath(input);
    if ((!fastpath) && (!is_tensor(shape))) {
        shape = _astensor1d_cpp(
                shape, py::cast((mgb::DType)dtype::Int32()), input.attr("device"),
                input);
    }
    std::shared_ptr<OpDef> op;
    SmallVector<PyObject*> p(2);
    if (fastpath) {
        std::vector<int32_t> shape_vec;
        for (auto&& dim : dims) {
            shape_vec.push_back(dim.cast<long>());
        }
        op = Broadcast::make(shape_vec);
    } else {
        op = Broadcast::make();
        p.push_back(shape.ptr());
    }
    py::object py_op = py::cast(op);
    p[0] = py_op.ptr();
    p[1] = input.ptr();
    py::tuple ret =
            py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
    return ret[0];
}

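// reshape: at most one -1 ("unspecified") axis is allowed. With an all-int
// target shape and the fast path enabled, the shape is baked into the Reshape
// op; otherwise the target shape is fed as an extra 1-d Int32 tensor input.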
py::object _reshape_cpp(py::handle inp_hdl, py::handle args) {
    py::object shape_hdl = _expand_args(args);
    py::object shape_tuple;
    try {
        shape_tuple = _make_shape_tuple(shape_hdl);
    } catch (py::error_already_set& err) {
        shape_tuple = py::reinterpret_borrow<py::object>(shape_hdl);
    }
    int32_t unspec_axis = -1;
    if (PyTuple_Check(shape_tuple.ptr())) {
        py::tuple tup = py::reinterpret_borrow<py::tuple>(shape_tuple);
        for (size_t i = 0; i < tup.size(); ++i) {
            py::object obj = py::reinterpret_borrow<py::object>(tup[i]);
            if (obj < py::int_(0)) {
                if (obj.not_equal(py::int_(-1))) {
                    throw py::value_error(
                            "expect shape [" + std::to_string(i) + "] >= -1, got " +
                            repr(obj).cast<std::string>());
                }
                if (unspec_axis >= 0) {
                    throw py::value_error(
                            "multiple -1 in shape: " + std::to_string(unspec_axis) +
                            " & " + std::to_string(i));
                }
                unspec_axis = i;
            }
        }
    }
    auto [shape, fastpath] = tuple2vector(shape_tuple);
    fastpath &= enable_fastpath(inp_hdl);
    std::shared_ptr<OpDef> op;
    std::vector<PyObject*> p;
    py::object shape_tensor;
    if (fastpath) {
        if (unspec_axis >= 0) {
            op = Reshape::make(unspec_axis, shape);
        } else {
            op = Reshape::make(::megdnn::param::OptionalAxisV1::INVALID_AXIS, shape);
        }
        p.resize(2);
    } else {
        shape.clear();
        if (unspec_axis >= 0) {
            op = Reshape::make(unspec_axis, shape);
        } else {
            op = Reshape::make();
        }
        shape_tensor = _astensor1d_cpp(
                shape_hdl, py::cast((mgb::DType)dtype::Int32()),
                getattr(inp_hdl, "device"), inp_hdl);
        p.resize(3);
        p[2] = shape_tensor.ptr();
    }
    py::object Op = py::cast(op);
    p[0] = Op.ptr();
    p[1] = inp_hdl.ptr();
    py::tuple ret =
            py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
    return ret[0];
}

py::object _adaptive_pool2d_cpp(
        py::handle inp_hdl, py::handle shape_val_hdl, py::handle pool_mode_hdl) {
    py::object shape_hdl = py::reinterpret_borrow<py::object>(shape_val_hdl);
    py::list shps(0);
    auto mode_string = pool_mode_hdl.cast<std::string>();
    ::megdnn::param::AdaptivePooling::Mode pool_mode =
            ::megdnn::param::AdaptivePooling::Mode::MAX;
    if (mode_string.compare(std::string("AVERAGE")) == 0) {
        pool_mode = ::megdnn::param::AdaptivePooling::Mode::AVERAGE;
    }
    std::shared_ptr<OpDef> op;
    std::vector<PyObject*> p;
    auto pool_format = ::megdnn::param::AdaptivePooling::Format::NCHW;
    auto inp_format = getattr(inp_hdl, "format").cast<std::string>();
    if (inp_format == "nhwc") {
        pool_format = ::megdnn::param::AdaptivePooling::Format::NHWC;
    }
    if (TensorWrapper::try_cast(shape_val_hdl.ptr())) {
        std::vector<int32_t> shp;
        op = AdaptivePooling::make(pool_mode, pool_format, shp);
        py::object Op = py::cast(op);
        p.resize(3);
        p[0] = Op.ptr();
        p[1] = inp_hdl.ptr();
        p[2] = shape_val_hdl.ptr();
        py::tuple ret =
                py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
        return ret[0];
    } else if (!PyTuple_Check(shape_val_hdl.ptr())) {
        shps.append(PyLong_AsLong(shape_val_hdl.ptr()));
        shps.append(PyLong_AsLong(shape_val_hdl.ptr()));
        shape_hdl = py::reinterpret_borrow<py::object>(shps);
    }
    py::object shape_tuple;
    try {
        shape_tuple = _make_shape_tuple(shape_hdl);
    } catch (py::error_already_set& err) {
        shape_tuple = py::reinterpret_borrow<py::object>(shape_hdl);
    }
    auto [shape, fastpath] = tuple2vector(shape_tuple);
    fastpath &= enable_fastpath(inp_hdl);
    py::object shape_tensor;
    op = AdaptivePooling::make(pool_mode, pool_format, shape);
    if (fastpath) {
        p.resize(2);
    } else {
        p.resize(3);
        shape_tensor = _astensor1d_cpp(
                shape_hdl, py::cast((mgb::DType)dtype::Int32()),
                getattr(inp_hdl, "device"), inp_hdl);
        p[2] = shape_tensor.ptr();
    }
    py::object Op = py::cast(op);
    p[0] = Op.ptr();
    p[1] = inp_hdl.ptr();
    py::tuple ret =
            py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
    return ret[0];
}

py::object _getitem_cpp(py::handle inp_hdl, py::handle idx_hdl) {
    py::tuple try_res = _try_cond_take(inp_hdl, idx_hdl);
    if (try_res.size() == 2) {
        return try_res[0];
    }
    py::tuple up = _unpack_indexes(inp_hdl, idx_hdl);
    py::object tensor = py::reinterpret_borrow<py::object>(up[0]);
    py::list tensors = py::reinterpret_borrow<py::list>(up[1]);
    py::list py_items = py::reinterpret_borrow<py::list>(up[2]);
    std::vector<std::tuple<int8_t, bool, bool, bool, bool>> cpp_items;
    for (size_t i = 0; i < py_items.size(); ++i) {
        py::list item = py::reinterpret_borrow<py::list>(py_items[i]);
        cpp_items.push_back(
                {item[0].cast<int8_t>(), item[1].cast<bool>(), item[2].cast<bool>(),
                 item[3].cast<bool>(), item[4].cast<bool>()});
    }
    std::shared_ptr<OpDef> op;
    if (up[3].cast<bool>()) {
        op = Subtensor::make(cpp_items);
    } else {
        op = IndexingMultiAxisVec::make(cpp_items);
    }
    std::vector<PyObject*> p;
    p.resize(tensors.size() + 2);
    py::object Op = py::cast(op);
    p[0] = Op.ptr();
    p[1] = tensor.ptr();
    for (size_t i = 0; i < tensors.size(); ++i) {
        p[i + 2] = tensors[i].ptr();
    }
    py::tuple ret =
            py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
    return ret[0];
}

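// __setitem__: reads the indexed region first (Subtensor /
// IndexingMultiAxisVec), checks that the value's trailing dimensions are
// broadcast-compatible with it, broadcasts the value, then writes it back
// with the matching SetSubtensor / IndexingSetMultiAxisVec op. If bool-mask
// expansion reshaped the input, the result is reshaped back at the end.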
py::object _setitem_cpp(py::handle inp_hdl, py::handle idx_hdl, py::handle val_hdl) {
    py::object org_shape = getattr(inp_hdl, "shape");
    py::object val = py::reinterpret_borrow<py::object>(val_hdl);
    if (!TensorWrapper::try_cast(val.ptr())) {
        val = _Const(val_hdl, getattr(inp_hdl, "dtype"), getattr(inp_hdl, "device"));
    }

    py::tuple up = _unpack_indexes(inp_hdl, idx_hdl);
    py::object tensor = py::reinterpret_borrow<py::object>(up[0]);
    py::list tensors = py::reinterpret_borrow<py::list>(up[1]);
    py::list py_items = py::reinterpret_borrow<py::list>(up[2]);
    std::vector<std::tuple<int8_t, bool, bool, bool, bool>> cpp_items;
    for (size_t i = 0; i < py_items.size(); ++i) {
        py::list item = py::reinterpret_borrow<py::list>(py_items[i]);
        cpp_items.push_back(
                {item[0].cast<int8_t>(), item[1].cast<bool>(), item[2].cast<bool>(),
                 item[3].cast<bool>(), item[4].cast<bool>()});
    }
    std::shared_ptr<OpDef> op, set_op;
    if (up[3].cast<bool>()) {
        op = Subtensor::make(cpp_items);
    } else {
        op = IndexingMultiAxisVec::make(cpp_items);
    }
    std::vector<PyObject*> p;
    p.resize(tensors.size() + 2);
    py::object Op = py::cast(op);
    p[0] = Op.ptr();
    p[1] = tensor.ptr();
    for (size_t i = 0; i < tensors.size(); ++i) {
        p[i + 2] = tensors[i].ptr();
    }
    py::tuple ret =
            py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
    py::object tmp_result = ret[0];

    try {
        py::tuple value_shape =
                py::reinterpret_borrow<py::tuple>(val.attr("_tuple_shape"));
        py::tuple tmp_result_shape =
                py::reinterpret_borrow<py::tuple>(tmp_result.attr("_tuple_shape"));
        for (size_t i = 0; i < value_shape.size() && i < tmp_result_shape.size(); ++i) {
            size_t vs = value_shape[value_shape.size() - i - 1].cast<size_t>();
            size_t ts =
                    tmp_result_shape[tmp_result_shape.size() - i - 1].cast<size_t>();
            if (vs != 1 && vs != ts) {
                std::string lhs = "", rhs = "";
                // insert the separator before each element after the first, so
                // the shapes read "(2,3,4)" rather than "(23,4,)"
                for (size_t j = 0; j < tmp_result_shape.size(); ++j) {
                    if (j)
                        lhs += ",";
                    lhs += std::to_string(tmp_result_shape[j].cast<size_t>());
                }
                for (size_t j = 0; j < value_shape.size(); ++j) {
                    if (j)
                        rhs += ",";
                    rhs += std::to_string(value_shape[j].cast<size_t>());
                }
                throw py::value_error(
                        "cannot copy tensor with shape (" + rhs +
                        ") to subtensor with shape (" + lhs + ")");
            }
        }
    } catch (py::error_already_set& err) {
        ;
    }

    val = _broadcast_cpp(val, getattr(tmp_result, "shape"));
    if (up[3].cast<bool>()) {
        set_op = SetSubtensor::make(cpp_items);
    } else {
        set_op = IndexingSetMultiAxisVec::make(cpp_items);
    }

    std::vector<PyObject*> q;
    q.resize(tensors.size() + 3);
    py::object Set_Op = py::cast(set_op);
    q[0] = Set_Op.ptr();
    q[1] = tensor.ptr();
    q[2] = val.ptr();
    for (size_t i = 0; i < tensors.size(); ++i) {
        q[i + 3] = tensors[i].ptr();
    }
    py::tuple result =
            py::reinterpret_steal<py::object>(py_apply(NULL, q.data(), q.size()));
    py::object res = result[0];

    if (up[4].cast<bool>()) {
        res = _reshape_cpp(res, org_shape);
    }

    return res;
}

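// split: `nsplits_or_sections` is either an int (split into that many equal
// sections) or a sequence of split points, which is converted into per-part
// sizes passed to the Split op as extra tensor inputs.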
py::object _split_cpp(
        py::handle inp_hdl, py::handle nsplits_or_sections_hdl, py::handle axis_hdl) {
    py::object shape_obj = getattr(inp_hdl, "shape");
    py::object n_total = shape_obj[axis_hdl];
    int ndim = shape_obj.attr("__len__")().cast<int>();
    int axis = axis_hdl.cast<int>();
    if (axis >= ndim) {
        throw py::value_error("Invalid axis " + std::to_string(axis));
    }
    int n_sections;
    bool is_array;
    if (is_py_sequence(nsplits_or_sections_hdl)) {
        n_sections = PySequence_Length(nsplits_or_sections_hdl.ptr()) + 1;
        is_array = true;
    } else {
        n_sections = getattr(nsplits_or_sections_hdl, "__int__")().cast<int>();
        is_array = false;
    }
    py::list partitions;
    std::shared_ptr<OpDef> op;
    std::vector<PyObject*> p;
    if (is_array) {
        py::list div_points;
        py::list sections = py::reinterpret_borrow<py::object>(nsplits_or_sections_hdl);
        div_points.append(0);
        for (size_t i = 0; i < sections.size(); ++i) {
            div_points.append(sections[i]);
        }
        div_points.append(n_total);
        for (size_t i = 1; i < div_points.size(); ++i) {
            if (div_points[i - 1] > div_points[i]) {
                throw py::value_error(
                        "Invalid nsplits_or_sections: " +
                        repr(nsplits_or_sections_hdl).cast<std::string>());
            }
            py::object pos = div_points[i] - div_points[i - 1];
            if (is_tensor(pos)) {
                partitions.append(pos);
            } else {
                partitions.append(
                        _Const(pos, py::cast((mgb::DType)dtype::Int32()),
                               getattr(inp_hdl, "device")));
            }
        }
        op = Split::make(axis, 0);
        p.resize(partitions.size() + 2);
        for (size_t i = 0; i < partitions.size(); ++i) {
            p[i + 2] = partitions[i].ptr();
        }
    } else {
        if (n_sections <= 0) {
            throw py::value_error("Number of sections must be larger than 0");
        }
        if (py::int_(n_sections) > n_total) {
            throw py::value_error(
                    "The size " + repr(n_total).cast<std::string>() + " at dim " +
                    std::to_string(axis) + " cannot be split into " +
                    std::to_string(n_sections) + " sections");
        }
        op = Split::make(axis, n_sections);
        p.resize(2);
    }
    py::object Op = py::cast(op);
    p[0] = Op.ptr();
    p[1] = inp_hdl.ptr();
    return py::reinterpret_steal<py::object>(py_apply(NULL, p.data(), p.size()));
}

std::vector<int32_t> list2vector(py::handle li) {
    std::vector<int32_t> axis;
    if (is_py_sequence(li)) {
        py::list tmp_list = py::reinterpret_steal<py::list>(PySequence_List(li.ptr()));
        for (size_t i = 0; i < tmp_list.size(); ++i) {
            axis.push_back(tmp_list[i].attr("__int__")().cast<int32_t>());
        }
    } else {
        axis.push_back(getattr(li, "__int__")().cast<int32_t>());
    }
    return axis;
}

py::object _expand_dims_cpp(py::handle inp_hdl, py::handle axis_hdl) {
    std::vector<int32_t> axis = list2vector(axis_hdl);
    bool unknown_ndim = true;
    size_t ndim = axis.size();
    if (auto p = TensorWrapper::try_cast(inp_hdl.ptr())) {
        auto&& shape = p->m_tensor->shape();
        if (shape) {
            unknown_ndim = false;
            ndim += shape->ndim;
        }
    } else {
        auto&& inp_ndim = get_ndim_safe(inp_hdl);
        ndim += inp_ndim.first;
        unknown_ndim &= !inp_ndim.second;
    }
    for (size_t i = 0; i < axis.size(); ++i) {
        if (axis[i] < 0) {
            if (unknown_ndim) {
                throw py::index_error(
                        "Does not support negative index when tensor's ndim is "
                        "unknown");
            }
            axis[i] += static_cast<int32_t>(ndim);
        }
    }
    if (!axis.size()) {
        throw py::index_error("axis could not be empty");
    }
    std::sort(axis.begin(), axis.end());
    std::shared_ptr<OpDef> op = AddAxis::make(axis = axis);
    py::object Op = py::cast(op);
    PyObject* p[2] = {Op.ptr(), inp_hdl.ptr()};
    py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
    return ret[0];
}

py::object _squeeze_cpp(py::handle inp_hdl, py::handle axis_hdl) {
    std::vector<int32_t> axis;
    size_t ndim;
    if (axis_hdl.ptr() != Py_None) {
        axis = list2vector(axis_hdl);
    }
    if (auto p = TensorWrapper::try_cast(inp_hdl.ptr())) {
        auto&& shape = p->m_tensor->shape();
        if (shape) {
            ndim = shape->ndim;
            if (axis_hdl.ptr() == Py_None) {
                for (size_t i = 0; i < shape->ndim; ++i) {
                    if (shape->shape[i] == 1) {
                        axis.push_back(i);
                    }
                }
            }
        }
    } else {
        py::tuple shape =
                py::reinterpret_borrow<py::tuple>(getattr(inp_hdl, "_tuple_shape"));
        ndim = shape.size();
        if (axis_hdl.ptr() == Py_None) {
            for (size_t i = 0; i < shape.size(); ++i) {
                if (shape[i].cast<size_t>() == 1) {
                    axis.push_back(i);
                }
            }
        }
    }
    for (size_t i = 0; i < axis.size(); ++i) {
        if (axis[i] < 0) {
            axis[i] += static_cast<int32_t>(ndim);
        }
    }
    std::sort(axis.begin(), axis.end());
    for (size_t i = 0; i < axis.size(); ++i) {
        axis[i] -= static_cast<int32_t>(i);
    }
    std::shared_ptr<OpDef> op = RemoveAxis::make(axis = axis);
    py::object Op = py::cast(op);
    PyObject* p[2] = {Op.ptr(), inp_hdl.ptr()};
    py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
    return ret[0];
}

py::object _transpose_cpp(py::handle inp_hdl, py::handle args) {
    py::object obj = _expand_args(args);
    py::list lis;
    if (!is_tensor(obj.ptr()) && PySequence_Check(obj.ptr())) {
        lis = py::reinterpret_steal<py::list>(PySequence_List(obj.ptr()));
    } else {
        py::object np = getattr(obj, "numpy")();
        PyArrayObject* arr = (PyArrayObject*)np.ptr();
        PyObject* maybe_list = PyArray_ToList(arr);
        if (PyList_Check(maybe_list)) {
            lis = py::reinterpret_steal<py::list>(maybe_list);
        }
    }
    if (get_ndim_safe(inp_hdl).first == 0) {
        if (lis.size() != 0) {
            throw py::index_error(
                    "transpose for scalar does not accept additional args");
        }
        return getattr(inp_hdl, "to")(getattr(inp_hdl, "device"));
    }
    std::vector<int32_t> pattern;
    if (!lis.size()) {
        size_t ndim = getattr(inp_hdl, "ndim").cast<size_t>();
        for (size_t i = 0; i < ndim; ++i) {
            pattern.push_back(ndim - i - 1);
        }
    } else {
        for (size_t i = 0; i < lis.size(); ++i) {
            if (PyLong_Check(lis[i].ptr())) {
                pattern.push_back(lis[i].cast<int32_t>());
            } else {
                if (lis[i].cast<std::string>() == "x") {
                    pattern.push_back(-1);
                }
            }
        }
    }
    std::shared_ptr<OpDef> op = Dimshuffle::make(pattern);
    py::object Op = py::cast(op);
    PyObject* p[2] = {Op.ptr(), inp_hdl.ptr()};
    py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
    return ret[0];
}

py::object _matmul_cpp(
        py::handle inp1, py::handle inp2, py::handle dim1, py::handle dim2,
        py::handle transpose_a, py::handle transpose_b, py::handle compute_mode,
        py::handle profile, py::handle deterministic) {
    ::megdnn::param::MatrixMul::ComputeMode mode =
            ::megdnn::param::MatrixMul::ComputeMode::DEFAULT;
    if (compute_mode.cast<std::string>().compare(std::string("float32")) == 0) {
        mode = ::megdnn::param::MatrixMul::ComputeMode::FLOAT32;
    }
    ::megdnn::param::ExecutionPolicy::Strategy cstrategy =
            static_cast<::megdnn::param::ExecutionPolicy::Strategy>(0);
    if (profile.cast<bool>()) {
        cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::PROFILE;
    } else {
        cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::HEURISTIC;
    }
    if (deterministic.cast<bool>()) {
        cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::REPRODUCIBLE;
    }
    std::shared_ptr<OpDef> op = MatrixMul::make(
            transpose_a.cast<bool>(), transpose_b.cast<bool>(), mode,
            ::megdnn::param::MatrixMul::Format::DEFAULT, cstrategy, UINT64_MAX,
            dim1.cast<uint32_t>(), dim2.cast<uint32_t>());
    py::object Op = py::cast(op);
    PyObject* p[3] = {Op.ptr(), inp1.ptr(), inp2.ptr()};
    py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 3));
    return ret[0];
}

py::object _batched_matmul_cpp(
        py::handle inp1, py::handle inp2, py::handle dim1, py::handle dim2,
        py::handle transpose_a, py::handle transpose_b, py::handle compute_mode,
        py::handle profile, py::handle deterministic) {
    ::megdnn::param::MatrixMul::ComputeMode mode =
            ::megdnn::param::MatrixMul::ComputeMode::DEFAULT;
    if (compute_mode.cast<std::string>().compare(std::string("float32")) == 0) {
        mode = ::megdnn::param::MatrixMul::ComputeMode::FLOAT32;
    }
    ::megdnn::param::ExecutionPolicy::Strategy cstrategy =
            static_cast<::megdnn::param::ExecutionPolicy::Strategy>(0);
    if (profile.cast<bool>()) {
        cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::PROFILE;
    } else {
        cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::HEURISTIC;
    }
    if (deterministic.cast<bool>()) {
        cstrategy |= ::megdnn::param::ExecutionPolicy::Strategy::REPRODUCIBLE;
    }
    std::shared_ptr<OpDef> op = BatchedMatrixMul::make(
            transpose_a.cast<bool>(), transpose_b.cast<bool>(), mode,
            ::megdnn::param::MatrixMul::Format::DEFAULT, cstrategy, UINT64_MAX,
            dim1.cast<uint32_t>(), dim2.cast<uint32_t>());
    py::object Op = py::cast(op);
    PyObject* p[3] = {Op.ptr(), inp1.ptr(), inp2.ptr()};
    py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 3));
    return ret[0];
}

py::object _pixel_shuffle_cpp(py::handle inp, py::handle val, py::handle func) {
    if (enable_fastpath(inp) && PyLong_Check(val.ptr())) {
        std::shared_ptr<OpDef> op = PixelShuffle::make(val.cast<int32_t>());
        py::object Op = py::cast(op);
        PyObject* p[2] = {Op.ptr(), inp.ptr()};
        py::tuple ret = py::reinterpret_steal<py::object>(py_apply(NULL, p, 2));
        return ret[0];
    } else {
        // fall back to the traceable subgraph implementation
        return func(inp, val);
    }
}

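/* ============== exported wrappers ============== */

// Thin wrappers that adapt the helpers above to the fastcall-style
// (self, args, nargs) signature and translate C++ exceptions into Python
// errors via PYEXT17_TRANSLATE_EXC_RET.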
PyObject* make_shape_tuple(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _make_shape_tuple(args[0]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* getitem_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _getitem_cpp(args[0], args[1]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* setitem_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _setitem_cpp(args[0], args[1], args[2]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* split_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _split_cpp(args[0], args[1], args[2]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* expand_dims_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _expand_dims_cpp(args[0], args[1]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* squeeze_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _squeeze_cpp(args[0], args[1]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* transpose_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _transpose_cpp(args[0], args[1]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* broadcast_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _broadcast_cpp(args[0], args[1]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* reshape_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _reshape_cpp(args[0], args[1]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* adaptive_pool2d_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _adaptive_pool2d_cpp(args[0], args[1], args[2]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* pixel_shuffle_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _pixel_shuffle_cpp(args[0], args[1], args[2]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* Const(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _Const(args[0], args[1], args[2]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* astype_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _astype_cpp(args[0], args[1]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* matmul_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _matmul_cpp(
                       args[0], args[1], args[2], args[3], args[4], args[5], args[6],
                       args[7], args[8])
                .release()
                .ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* batched_matmul_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _batched_matmul_cpp(
                       args[0], args[1], args[2], args[3], args[4], args[5], args[6],
                       args[7], args[8])
                .release()
                .ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* convert_single_value_cpp(
        PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _convert_single_value_cpp(args[0], args[1], args[2]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* convert_inputs_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        py::object dtype = py::reinterpret_steal<py::object>(
                dtype_promotion(self, args, nargs - 1));
        py::object device;
        if (args[nargs - 1] == Py_None) {
            device = py::reinterpret_steal<py::object>(
                    get_device(self, args, nargs - 1));
        } else {
            device = py::reinterpret_borrow<py::object>(args[nargs - 1]);
        }
        return _convert_inputs_cpp(args, nargs - 1, dtype, device).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

PyObject* astensor1d_cpp(PyObject* self, PyObject* const* args, size_t nargs) {
    try {
        return _astensor1d_cpp(args[0], args[1], args[2], args[3]).release().ptr();
    }
    PYEXT17_TRANSLATE_EXC_RET(nullptr)
}

}  // namespace mgb::imperative::python