提交 36a4fb56 编写于 作者: M Megvii Engine Team

ci(image): update docker image to install python packages

GitOrigin-RevId: 171e95b3d975edabb3ce5deace7b60b6751bbcc4
上级 2070aaae
......@@ -274,7 +274,7 @@ def dump_graph(
keep_var_name: int = 1,
keep_param_name: bool = False,
keep_opr_priority: bool = False,
strip_info_file=None,
strip_info_file=None
):
"""serialize the computing graph of `output_vars` and get byte result.
......
......@@ -40,6 +40,8 @@
# All Megvii Modifications are Copyright (C) 2014-2020 Megvii Inc. All rights reserved.
# --------------------------------------------------------------------------------------
from collections import OrderedDict
from .utils import _toposort, groupby
from .variadic import isvariadic
......@@ -159,5 +161,5 @@ def ordering(signatures):
for s in signatures:
if s not in edges:
edges[s] = []
edges = dict((k, [b for a, b in v]) for k, v in edges.items())
edges = OrderedDict((k, [b for a, b in v]) for k, v in edges.items())
return _toposort(edges)
......@@ -54,14 +54,14 @@ def synchronized(func: Callable):
return wrapper
def get_device_count_by_fork(device_type: str):
    """Return the number of ``device_type`` devices, probed in a subprocess.

    Querying the device count directly can initialize the device driver in
    the current process, which would break later fork-based workers; running
    the query in a short-lived child process keeps the parent untouched.
    (NOTE(review): this rationale is inferred from the function name —
    confirm against ``get_device_count``'s implementation.)

    :param device_type: device kind string accepted by ``get_device_count``.
    :return: the count reported by the child process.
    """

    def worker(queue, device_type):
        # Runs in the child process: query the count and ship it back.
        num = get_device_count(device_type)
        queue.put(num)

    q = mp.Queue()
    # Pass device_type explicitly instead of relying on closure state,
    # which is not guaranteed to survive a "spawn"-style process start.
    p = mp.Process(target=worker, args=(q, device_type))
    p.start()
    p.join()
    return q.get()
......
......@@ -151,16 +151,19 @@ struct Dispatcher {
public:
static constexpr auto tp_name = "Dispatcher";
PyObject* tp_vectorcall(PyObject*const* args, Py_ssize_t nargs) {
if (!prepare_call(args, nargs)) return nullptr;
return do_call([=](PyObject* func){return _PyObject_FastCall(func, const_cast<PyObject**>(args), nargs);});
}
// Classic (tp_call) calling-convention entry point: exposes the positional
// tuple's element storage as a contiguous PyObject* array for prepare_call,
// then forwards the original args/kwargs to the resolved function.
PyObject* tp_call(PyObject* args, PyObject* kwargs) {
// &PyTuple_GET_ITEM(args, 0) points into the tuple's item array (no copy).
if (!prepare_call(&PyTuple_GET_ITEM(args, 0), PyTuple_GET_SIZE(args))) return nullptr;
return do_call([=](PyObject* func){return PyObject_Call(func, args, kwargs);});
}
#if PY_MINOR_VERSION >= 6
PyObject* tp_vectorcall(PyObject*const* args, Py_ssize_t nargs) {
if (!prepare_call(args, nargs)) return nullptr;
return do_call([=](PyObject* func){return _PyObject_FastCall(func, const_cast<PyObject**>(args), nargs);});
}
#endif
#if PY_MINOR_VERSION >= 6
PyObject* super(PyObject*const* args, Py_ssize_t nargs) {
if (stack.empty()) {
PyErr_SetString(PyExc_RuntimeError, "super called at top level");
......@@ -169,6 +172,16 @@ public:
stack.emplace_back_safely(stack.back()).mro_offset++;
return do_call([=](PyObject* func){return _PyObject_FastCall(func, const_cast<PyObject**>(args), nargs);});
}
#else
// Fallback (pre-3.6, tuple/dict convention) variant of super(): re-dispatch
// the call currently on top of the stack, starting one step further down
// the MRO. Errors out when there is no active dispatch to defer from.
PyObject* super(PyObject* args, PyObject* kwargs) {
if (stack.empty()) {
PyErr_SetString(PyExc_RuntimeError, "super called at top level");
return nullptr;
}
// Duplicate the current frame and bump its MRO offset so resolution
// skips the implementation that is currently executing.
stack.emplace_back_safely(stack.back()).mro_offset++;
return do_call([=](PyObject* func){return PyObject_Call(func, args, kwargs);});
}
#endif
void enable(PyObject* func) {
auto obj = py::reinterpret_borrow<py::object>(func);
......@@ -204,7 +217,11 @@ void init_dispatcher(py::module m) {
.def<&Dispatcher::enable>("enable")
.def<&Dispatcher::disable>("disable")
.def<&Dispatcher::clear_cache>("clear_cache")
#if PY_MINOR_VERSION >= 6
.def<&Dispatcher::tp_vectorcall>("call")
#else
.def<&Dispatcher::tp_call>("call")
#endif
.def<&Dispatcher::super>("super")
.finalize();
if (!dispatcher_type) throw py::error_already_set();
......
#!/bin/bash -e
test_dirs="test megengine"
TEST_PLAT=$1
if [[ "$TEST_PLAT" == cpu ]]; then
......@@ -13,9 +14,9 @@ else
fi
pushd $(dirname "${BASH_SOURCE[0]}")/.. >/dev/null
PYTHONPATH="." python3 -m pytest $test_dirs -m 'not isolated_distributed'
PYTHONPATH="." PY_IGNORE_IMPORTMISMATCH=1 python3 -m pytest $test_dirs -m 'not isolated_distributed'
if [[ "$TEST_PLAT" == cuda ]]; then
echo "test GPU pytest now"
PYTHONPATH="." python3 -m pytest $test_dirs -m 'isolated_distributed'
PYTHONPATH="." PY_IGNORE_IMPORTMISMATCH=1 python3 -m pytest $test_dirs -m 'isolated_distributed'
fi
popd >/dev/null
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册