diff --git a/imperative/python/megengine/core/tensor/megbrain_graph.py b/imperative/python/megengine/core/tensor/megbrain_graph.py
index 9b4707b7..5cddd773 100644
--- a/imperative/python/megengine/core/tensor/megbrain_graph.py
+++ b/imperative/python/megengine/core/tensor/megbrain_graph.py
@@ -274,7 +274,7 @@ def dump_graph(
     keep_var_name: int = 1,
     keep_param_name: bool = False,
     keep_opr_priority: bool = False,
-    strip_info_file=None,
+    strip_info_file=None
 ):
     """serialize the computing graph of `output_vars` and get byte result.
 
diff --git a/imperative/python/megengine/core/tensor/multipledispatch/conflict.py b/imperative/python/megengine/core/tensor/multipledispatch/conflict.py
index 6989755d..ec852aa7 100644
--- a/imperative/python/megengine/core/tensor/multipledispatch/conflict.py
+++ b/imperative/python/megengine/core/tensor/multipledispatch/conflict.py
@@ -40,6 +40,8 @@
 # All Megvii Modifications are Copyright (C) 2014-2020 Megvii Inc. All rights reserved.
 # --------------------------------------------------------------------------------------
 
+from collections import OrderedDict
+
 from .utils import _toposort, groupby
 from .variadic import isvariadic
 
@@ -159,5 +161,5 @@ def ordering(signatures):
     for s in signatures:
         if s not in edges:
             edges[s] = []
-    edges = dict((k, [b for a, b in v]) for k, v in edges.items())
+    edges = OrderedDict((k, [b for a, b in v]) for k, v in edges.items())
     return _toposort(edges)
diff --git a/imperative/python/megengine/distributed/helper.py b/imperative/python/megengine/distributed/helper.py
index 81cfc77b..72a85f2a 100644
--- a/imperative/python/megengine/distributed/helper.py
+++ b/imperative/python/megengine/distributed/helper.py
@@ -54,14 +54,14 @@ def synchronized(func: Callable):
     return wrapper
 
 
-def get_device_count_by_fork(device_type: str):
-    q = mp.Queue()
+def worker(queue, device_type):
+    num = get_device_count(device_type)
+    queue.put(num)
 
-    def worker(queue):
-        num = get_device_count(device_type)
-        queue.put(num)
 
-    p = mp.Process(target=worker, args=(q,))
+def get_device_count_by_fork(device_type: str):
+    q = mp.Queue()
+    p = mp.Process(target=worker, args=(q, device_type))
     p.start()
     p.join()
     return q.get()
diff --git a/imperative/python/src/dispatcher.cpp b/imperative/python/src/dispatcher.cpp
index 2d2cd844..616e1791 100644
--- a/imperative/python/src/dispatcher.cpp
+++ b/imperative/python/src/dispatcher.cpp
@@ -151,16 +151,19 @@ struct Dispatcher {
 public:
     static constexpr auto tp_name = "Dispatcher";
 
-    PyObject* tp_vectorcall(PyObject*const* args, Py_ssize_t nargs) {
-        if (!prepare_call(args, nargs)) return nullptr;
-        return do_call([=](PyObject* func){return _PyObject_FastCall(func, const_cast<PyObject**>(args), nargs);});
-    }
-
     PyObject* tp_call(PyObject* args, PyObject* kwargs) {
         if (!prepare_call(&PyTuple_GET_ITEM(args, 0), PyTuple_GET_SIZE(args))) return nullptr;
         return do_call([=](PyObject* func){return PyObject_Call(func, args, kwargs);});
     }
 
+#if PY_MINOR_VERSION >= 6
+    PyObject* tp_vectorcall(PyObject*const* args, Py_ssize_t nargs) {
+        if (!prepare_call(args, nargs)) return nullptr;
+        return do_call([=](PyObject* func){return _PyObject_FastCall(func, const_cast<PyObject**>(args), nargs);});
+    }
+#endif
+
+#if PY_MINOR_VERSION >= 6
     PyObject* super(PyObject*const* args, Py_ssize_t nargs) {
         if (stack.empty()) {
             PyErr_SetString(PyExc_RuntimeError, "super called at top level");
@@ -169,6 +172,16 @@ public:
         stack.emplace_back_safely(stack.back()).mro_offset++;
         return do_call([=](PyObject* func){return _PyObject_FastCall(func, const_cast<PyObject**>(args), nargs);});
     }
+#else
+    PyObject* super(PyObject* args, PyObject* kwargs) {
+        if (stack.empty()) {
+            PyErr_SetString(PyExc_RuntimeError, "super called at top level");
+            return nullptr;
+        }
+        stack.emplace_back_safely(stack.back()).mro_offset++;
+        return do_call([=](PyObject* func){return PyObject_Call(func, args, kwargs);});
+    }
+#endif
 
     void enable(PyObject* func) {
         auto obj = py::reinterpret_borrow<py::object>(func);
@@ -204,7 +217,11 @@ void init_dispatcher(py::module m) {
         .def<&Dispatcher::enable>("enable")
         .def<&Dispatcher::disable>("disable")
         .def<&Dispatcher::clear_cache>("clear_cache")
+#if PY_MINOR_VERSION >= 6
         .def<&Dispatcher::tp_vectorcall>("call")
+#else
+        .def<&Dispatcher::tp_call>("call")
+#endif
         .def<&Dispatcher::super>("super")
         .finalize();
     if (!dispatcher_type) throw py::error_already_set();
diff --git a/imperative/python/test/run.sh b/imperative/python/test/run.sh
index 4920e2e9..825c1017 100755
--- a/imperative/python/test/run.sh
+++ b/imperative/python/test/run.sh
@@ -1,6 +1,7 @@
 #!/bin/bash -e
 
 test_dirs="test megengine"
+
 TEST_PLAT=$1
 
 if [[ "$TEST_PLAT" == cpu ]]; then
@@ -13,9 +14,9 @@ else
 fi
 
 pushd $(dirname "${BASH_SOURCE[0]}")/.. >/dev/null
-    PYTHONPATH="." python3 -m pytest $test_dirs -m 'not isolated_distributed'
+    PYTHONPATH="." PY_IGNORE_IMPORTMISMATCH=1 python3 -m pytest $test_dirs -m 'not isolated_distributed'
     if [[ "$TEST_PLAT" == cuda ]]; then
        echo "test GPU pytest now"
-        PYTHONPATH="." python3 -m pytest $test_dirs -m 'isolated_distributed'
+        PYTHONPATH="." PY_IGNORE_IMPORTMISMATCH=1 python3 -m pytest $test_dirs -m 'isolated_distributed'
     fi
 popd >/dev/null