Commit b764dad6
Authored Dec 01, 2022 by Megvii Engine Team

feat(imperative): speed up fill

GitOrigin-RevId: 6aeefccb48ab14865a1cb6ae7fc7ce553e645237
Parent: 28c6ebfe

Showing 11 changed files with 584 additions and 13 deletions (+584 −13)
dnn/src/cuda/fill/kern.cu                          +1    −0
dnn/src/cuda/fill/opr_impl.cpp                     +1    −0
dnn/src/naive/fill/opr_impl.cpp                    +1    −0
imperative/python/megengine/functional/tensor.py   +18   −8
imperative/src/impl/ops/fill.cpp                   +142  −0
imperative/tablegen/generated/hash.txt             +5    −5
imperative/tablegen/generated/opdef.cpp.inl        +83   −0
imperative/tablegen/generated/opdef.cpy.inl        +274  −0
imperative/tablegen/generated/opdef.h.inl          +29   −0
imperative/tablegen/generated/opdef.py.inl         +17   −0
src/core/include/megbrain/ir/ops.td                +13   −0
dnn/src/cuda/fill/kern.cu

@@ -27,6 +27,7 @@ void exec_internal(T* dst, T value, size_t size, cudaStream_t stream) {
 #define INST(T) template void exec_internal<T>(T*, T, size_t, cudaStream_t);
 #define cb(DType) INST(typename DTypeTrait<DType>::ctype)
 MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
+cb(::megdnn::dtype::Bool)
 }  // namespace fill
 }  // namespace cuda
dnn/src/cuda/fill/opr_impl.cpp

@@ -17,6 +17,7 @@ void FillImpl::exec(_megdnn_tensor_out dst, _megdnn_workspace workspace) {
             dst.ptr<ctype>(), static_cast<ctype>(param().value), size, stream); \
     }
     MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
+    cb(::megdnn::dtype::Bool)
 #undef cb
 }
dnn/src/naive/fill/opr_impl.cpp

@@ -17,6 +17,7 @@ void FillImpl::exec(_megdnn_tensor_out dst, _megdnn_workspace workspace) {
         MEGDNN_DISPATCH_CPU_KERN_OPR(exec_internal<ctype>(dst.ptr<ctype>(), size)); \
     }
     MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
+    cb(::megdnn::dtype::Bool)
 #undef cb
 }
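All three kernel-side changes are the same one-liner: the cb macro that instantiates the fill kernel for every computing dtype is now also expanded for Bool, so boolean tensors can be filled natively on both the CUDA and naive backends. A quick illustration from the Python side (a sketch, assuming a build that includes this commit):

import megengine.functional as F

# The Bool instantiations above are what let the fill kernels produce
# boolean outputs directly.
false_mask = F.zeros((2, 3), dtype="bool")
true_mask = F.ones((2, 3), dtype="bool")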
imperative/python/megengine/functional/tensor.py

@@ -338,7 +338,14 @@ def ones(
     Tensor([[1. 1.]
      [1. 1.]], device=xpux:0)
     """
-    return full(shape, 1.0, dtype=dtype, device=device)
+    if isinstance(shape, int):
+        shape = (shape,)
+    if device == None:
+        device = get_default_device()
+    op = builtin.Fill(1, dtype)
+    shape = astensor1d(shape, dtype="int32", device=device)
+    (x,) = apply(op, shape)
+    return x


 def zeros(

@@ -364,7 +371,14 @@ def zeros(
     Tensor([[0. 0. 0.]
      [0. 0. 0.]], device=xpux:0)
     """
-    return full(shape, 0.0, dtype=dtype, device=device)
+    if isinstance(shape, int):
+        shape = (shape,)
+    if device == None:
+        device = get_default_device()
+    op = builtin.Fill(0, dtype)
+    shape = astensor1d(shape, dtype="int32", device=device)
+    (x,) = apply(op, shape)
+    return x


 def zeros_like(inp: Tensor) -> Tensor:

@@ -419,12 +433,8 @@ def full_like(inp: Tensor, value: Union[int, float]) -> Tensor:
     Tensor([[2 2 2]
      [2 2 2]], dtype=int32, device=xpux:0)
     """
-    x = Const(value, inp.dtype, inp.device)
-    if inp.ndim == 0:
-        return x
-    # set x's format to use FormatTransformation rule for Broadcast.
-    rst = broadcast_to(x, inp.shape)
+    op = builtin.FillLike(value=value)
+    (rst,) = apply(op, inp)
     rst.format = inp.format
     return rst
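The user-facing signatures are unchanged; ones and zeros simply stop routing through full (a scalar Const plus broadcast_to) and instead apply a single builtin.Fill op to the shape tensor, and full_like becomes a single FillLike op. This is the speed-up named in the commit title. A sketch of the public API these changes serve (behavior per the docstrings above):

import megengine.functional as F

a = F.ones((2, 2))             # Tensor([[1. 1.] [1. 1.]], device=xpux:0)
b = F.zeros(3, dtype="int32")  # an int shape is normalized to (3,)
c = F.full_like(a, 2.0)        # one FillLike op instead of Const + broadcast_to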
imperative/src/impl/ops/fill.cpp  (new file, mode 100644)

#include "../dnn_op_helper.h"
#include "../op_trait.h"
#include "megbrain/graph/helper.h"
#include "megbrain/imperative/ops/autogen.h"
#include "megbrain/opr/io.h"
#include "megbrain/opr/tensor_gen.h"
#include "megbrain/opr/tensor_manip.h"

namespace mgb {
namespace imperative {
namespace fill {

auto apply_on_var_node(const OpDef& def, const VarNodeArray& inputs) {
    auto&& op = static_cast<const Fill&>(def);
    mgb_assert(inputs.size() == 1);
    auto comp_node = inputs[0]->comp_node();
    auto name = op.make_name();
    DTypeScalar scalar(op.dtype);
    scalar.set_retain_dtype(op.value);
    auto graph = inputs[0]->owner_graph();
    auto scalar_shape = opr::ImmutableTensor::make(*graph, scalar, {name, comp_node});
    return opr::Broadcast::make(scalar_shape, inputs[0], {name});
}

std::tuple<SmallVector<LogicalTensorDesc>, bool> infer_output_attrs_fallible(
        const OpDef& def, const SmallVector<LogicalTensorDesc>& inputs) {
    auto&& op = def.cast_final_safe<Fill>();
    auto&& tshp = inputs[0];
    auto comp_node = inputs[0].comp_node;
    if (tshp.layout.ndim == 0 || tshp.value.empty()) {
        return {{{TensorLayout(op.dtype), comp_node}}, false};
    }
    TensorShape out_shape;
    out_shape.ndim = tshp.layout.shape[0];
    auto* ptr = tshp.value.ptr<dt_int32>();
    for (size_t i = 0; i < out_shape.ndim; ++i) {
        out_shape[i] = ptr[i];
    }
    return {{{TensorLayout(out_shape, op.dtype), comp_node}}, true};
}

SmallVector<TensorPtr> apply_on_physical_tensor(
        const OpDef& def, const SmallVector<TensorPtr>& inputs,
        SmallVector<LogicalTensorDesc>& output_descs, const bool& validated) {
    auto&& op = def.cast_final_safe<Fill>();
    auto comp_node = inputs[0]->comp_node();
    TensorShape tshp;
    cg::copy_tensor_value_to_shape(
            tshp, inputs[0]->get_value().proxy_to_default_cpu());
    TensorLayout oup_layout = TensorLayout{tshp, op.dtype};
    auto output = Tensor::make(oup_layout, comp_node);
    if (oup_layout.total_nr_elems() != 0) {
        // empty tensor like Tensor([])
        DnnOprCaller<megdnn::Fill> caller(comp_node, megdnn::Fill::Param{op.value});
        caller.exec_with_ws(output);
    }
    return {output};
}

SmallVector<VarNode::LayoutConstraintCallback> get_input_layout_constraint(
        const OpDef& def, const SmallVector<TensorPtr>& inputs) {
    SmallVector<VarNode::LayoutConstraintCallback> layout_checker(inputs.size());
    layout_checker[0] = [](const TensorLayout& layout) {
        return layout.is_contiguous();
    };
    return layout_checker;
}

OP_TRAIT_REG(Fill, Fill)
        .apply_on_var_node(apply_on_var_node)
        .infer_output_attrs_fallible(infer_output_attrs_fallible)
        .apply_on_physical_tensor(apply_on_physical_tensor)
        .get_input_layout_constraint(get_input_layout_constraint)
        .fallback();
}  // namespace fill

namespace fill_like {

auto apply_on_var_node(const OpDef& def, const VarNodeArray& inputs) {
    auto&& op = static_cast<const FillLike&>(def);
    mgb_assert(inputs.size() == 1);
    auto comp_node = inputs[0]->comp_node();
    megdnn::DType oup_dtype = inputs[0]->dtype();
    auto name = op.make_name();
    DTypeScalar scalar(oup_dtype);
    scalar.set_retain_dtype(op.value);
    auto graph = inputs[0]->owner_graph();
    auto scalar_shape = opr::ImmutableTensor::make(*graph, scalar, {name, comp_node});
    return opr::Broadcast::make(
            scalar_shape, opr::GetVarShape::make(inputs[0]), {name});
}

std::tuple<SmallVector<LogicalTensorDesc>, bool> infer_output_attrs_fallible(
        const OpDef& def, const SmallVector<LogicalTensorDesc>& inputs) {
    mgb_assert(inputs.size() == 1);
    auto&& inp = inputs[0];
    if (inp.layout.ndim == 0) {
        return {{{TensorLayout{inp.layout.dtype}, inp.comp_node}}, false};
    }
    return {{{TensorLayout(inp.layout), inp.comp_node}}, true};
}

SmallVector<TensorPtr> apply_on_physical_tensor(
        const OpDef& def, const SmallVector<TensorPtr>& inputs,
        SmallVector<LogicalTensorDesc>& output_descs, const bool& validated) {
    mgb_assert(inputs.size() == 1);
    auto&& op = def.cast_final_safe<FillLike>();
    auto&& inp = inputs[0];
    TensorLayout oup_layout = inp->layout();
    CompNode oup_cn = inp->comp_node();
    auto output = Tensor::make(oup_layout, oup_cn);
    if (oup_layout.total_nr_elems() != 0) {
        // empty tensor like Tensor([])
        DnnOprCaller<megdnn::Fill> caller(oup_cn, megdnn::Fill::Param{op.value});
        caller.exec_with_ws(output);
    }
    return {output};
}

SmallVector<VarNode::LayoutConstraintCallback> get_input_layout_constraint(
        const OpDef& def, const SmallVector<TensorPtr>& inputs) {
    SmallVector<VarNode::LayoutConstraintCallback> layout_checker(inputs.size());
    layout_checker[0] = [](const TensorLayout& layout) {
        return layout.is_contiguous();
    };
    return layout_checker;
}

OP_TRAIT_REG(FillLike, FillLike)
        .apply_on_var_node(apply_on_var_node)
        .infer_output_attrs_fallible(infer_output_attrs_fallible)
        .apply_on_physical_tensor(apply_on_physical_tensor)
        .get_input_layout_constraint(get_input_layout_constraint)
        .fallback();
}  // namespace fill_like

}  // namespace imperative
}  // namespace mgb

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}
\ No newline at end of file
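For reference, the new op can be driven directly, the same way ones now does internally. This is a hedged sketch: the import paths mirror what megengine/functional/tensor.py uses (builtin ops and apply from the imperative runtime), but the exact module layout can vary between versions.

import megengine as mge
from megengine.core.ops import builtin
from megengine.core._imperative_rt.core2 import apply

# Fill takes a single input: a 1-D int32 tensor holding the target shape.
# infer_output_attrs_fallible above reads it element-wise to deduce the
# output layout; apply_on_physical_tensor then runs the megdnn Fill kernel.
op = builtin.Fill(7.0, "float32")
shape = mge.tensor([2, 3], dtype="int32")
(out,) = apply(op, shape)   # a (2, 3) float32 tensor, every element 7.0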
imperative/tablegen/generated/hash.txt

 8dd504f360fd3d3bfb560c970b568153 ../../dnn/scripts/opr_param_defs.py
-6811fde221f86d1ef8de425a3c83127b ../../src/core/include/megbrain/ir/ops.td
-55123da1605ef6edd79e3a2ede8aefeb generated/opdef.h.inl
-6f4beb6d12cdd9ec4c4e61b6d7d35144 generated/opdef.cpp.inl
-185ba3c3a0fce480ee498cef058670b2 generated/opdef.py.inl
-b7ed7a638b7586709bb23dd153fb58b1 generated/opdef.cpy.inl
+06e8a3af239b545470b38b3e82960935 ../../src/core/include/megbrain/ir/ops.td
+7f37497cffb24554073cbc42b89e2db8 generated/opdef.h.inl
+1e2041f6374e48d53762ddfe7a6ebca3 generated/opdef.cpp.inl
+9a813355a742330e9ba6e5c14ea67c7c generated/opdef.py.inl
+8d4ae7fef8234d8c79ac52017f4710e3 generated/opdef.cpy.inl
 71e1462bf4d882e2615c3c632cb671cc generated/enum_macro.h
imperative/tablegen/generated/opdef.cpp.inl
@@ -3564,6 +3564,89 @@ OP_TRAIT_REG(FastpathCopy, FastpathCopy)
.props(FastpathCopy_props_impl)
.make_name(FastpathCopy_make_name_impl);
MGB_DYN_TYPE_OBJ_FINAL_IMPL(Fill);
namespace {
size_t Fill_hash_impl(const OpDef& def_) {
auto&& op_ = def_.cast_final_safe<Fill>();
static_cast<void>(op_);
size_t val = mgb::hash(op_.dyn_typeinfo());
val = mgb::hash_pair_combine(val, mgb::hash(op_.value));
val = mgb::hash_pair_combine(val, mgb::hash(op_.dtype.handle()));
val = mgb::hash_pair_combine(val, mgb::hash(op_.comp_node));
return val;
}
bool Fill_is_same_st_impl(const OpDef& lhs_, const OpDef& rhs_) {
auto &&a_ = lhs_.cast_final_safe<Fill>(),
&&b_ = rhs_.cast_final_safe<Fill>();
static_cast<void>(a_);
static_cast<void>(b_);
if (a_.value != b_.value) return false;
if (a_.dtype != b_.dtype) return false;
if (a_.comp_node != b_.comp_node) return false;
return true;
}
std::vector<std::pair<const char*, std::string>> Fill_props_impl(const OpDef& def_) {
auto&& op_ = def_.cast_final_safe<Fill>();
static_cast<void>(op_);
std::vector<std::pair<const char*, std::string>> props_;
props_.emplace_back("value", std::to_string(op_.value));
props_.emplace_back("dtype", op_.dtype.name());
props_.emplace_back("comp_node", op_.comp_node.to_string());
return props_;
}
std::string Fill_make_name_impl(const OpDef& def_) {
auto&& op_ = def_.cast_final_safe<Fill>();
static_cast<void>(op_);
return "Fill";
}
} // anonymous namespace
OP_TRAIT_REG(Fill, Fill)
.hash(Fill_hash_impl)
.is_same_st(Fill_is_same_st_impl)
.props(Fill_props_impl)
.make_name(Fill_make_name_impl);
MGB_DYN_TYPE_OBJ_FINAL_IMPL(FillLike);
namespace {
size_t FillLike_hash_impl(const OpDef& def_) {
auto&& op_ = def_.cast_final_safe<FillLike>();
static_cast<void>(op_);
size_t val = mgb::hash(op_.dyn_typeinfo());
val = mgb::hash_pair_combine(val, mgb::hash(op_.value));
val = mgb::hash_pair_combine(val, mgb::hash(op_.comp_node));
return val;
}
bool FillLike_is_same_st_impl(const OpDef& lhs_, const OpDef& rhs_) {
auto &&a_ = lhs_.cast_final_safe<FillLike>(),
&&b_ = rhs_.cast_final_safe<FillLike>();
static_cast<void>(a_);
static_cast<void>(b_);
if (a_.value != b_.value) return false;
if (a_.comp_node != b_.comp_node) return false;
return true;
}
std::vector<std::pair<const char*, std::string>> FillLike_props_impl(const OpDef& def_) {
auto&& op_ = def_.cast_final_safe<FillLike>();
static_cast<void>(op_);
std::vector<std::pair<const char*, std::string>> props_;
props_.emplace_back("value", std::to_string(op_.value));
props_.emplace_back("comp_node", op_.comp_node.to_string());
return props_;
}
std::string FillLike_make_name_impl(const OpDef& def_) {
auto&& op_ = def_.cast_final_safe<FillLike>();
static_cast<void>(op_);
return "FillLike";
}
} // anonymous namespace
OP_TRAIT_REG(FillLike, FillLike)
.hash(FillLike_hash_impl)
.is_same_st(FillLike_is_same_st_impl)
.props(FillLike_props_impl)
.make_name(FillLike_make_name_impl);
MGB_DYN_TYPE_OBJ_FINAL_IMPL(GammaRNG);
namespace {
imperative/tablegen/generated/opdef.cpy.inl
@@ -10421,6 +10421,278 @@ void _init_py_FastpathCopy(py::module m) {
mgb_assert(PyOp(OpDef)::ctype2pytype.emplace(FastpathCopy::typeinfo(), &py_type).second);
}
PyOpDefBegin(Fill) // {
static PyGetSetDef py_getsetters[];
static PyMethodDef tp_methods[];
static PyObject* getstate(PyObject* self, PyObject*) {
auto& opdef = reinterpret_cast<PyOp(Fill)*>(self)->inst();
static_cast<void>(opdef);
std::unordered_map<std::string, py::object> state {
{"value", serialization<decltype(opdef.value)>::dump(opdef.value)},
{"dtype", serialization<decltype(opdef.dtype)>::dump(opdef.dtype)},
{"comp_node", serialization<decltype(opdef.comp_node)>::dump(opdef.comp_node)}
};
return py::cast(state).release().ptr();
}
static PyObject* setstate(PyObject* self, PyObject* args) {
PyObject* dict = PyTuple_GetItem(args, 0);
if (!dict) return NULL;
auto state = py::cast<std::unordered_map<std::string, py::object>>(dict);
auto& opdef = reinterpret_cast<PyOp(Fill)*>(self)->inst();
static_cast<void>(opdef);
{
auto&& iter = state.find("value");
if (iter != state.end()) {
opdef.value = serialization<decltype(opdef.value)>::load(iter->second);
}
}
{
auto&& iter = state.find("dtype");
if (iter != state.end()) {
opdef.dtype = serialization<decltype(opdef.dtype)>::load(iter->second);
}
}
{
auto&& iter = state.find("comp_node");
if (iter != state.end()) {
opdef.comp_node = serialization<decltype(opdef.comp_node)>::load(iter->second);
}
}
Py_RETURN_NONE;
}
static int py_init(PyObject *self, PyObject *args, PyObject *kwds);
static PyObject* py_init_proxy(PyObject *self, PyObject *args, PyObject *kwds);
static PyMethodDef py_init_methoddef;
// };
PyOpDefEnd(Fill)
int PyOp(Fill)::py_init(PyObject *self, PyObject *args, PyObject *kwds) {
static const char* kwlist[] = {"value", "dtype", "comp_node", "scope", NULL};
PyObject *value = NULL, *dtype = NULL, *comp_node = NULL, *scope = NULL;
if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOOO", const_cast<char**>(kwlist), &value, &dtype, &comp_node, &scope))
return -1;
if (value) {
try {
// TODO: remove this guard which is used for pybind11 implicit conversion
py::detail::loader_life_support guard{};
reinterpret_cast<PyOp(Fill)*>(self)->inst().value =
py::cast<decltype(Fill::value)>(py::handle(value));
} CATCH_ALL(-1)
}
if (dtype) {
try {
// TODO: remove this guard which is used for pybind11 implicit conversion
py::detail::loader_life_support guard{};
reinterpret_cast<PyOp(Fill)*>(self)->inst().dtype =
py::cast<decltype(Fill::dtype)>(py::handle(dtype));
} CATCH_ALL(-1)
}
if (comp_node) {
try {
// TODO: remove this guard which is used for pybind11 implicit conversion
py::detail::loader_life_support guard{};
reinterpret_cast<PyOp(Fill)*>(self)->inst().comp_node =
py::cast<decltype(Fill::comp_node)>(py::handle(comp_node));
} CATCH_ALL(-1)
}
if (scope) {
try {
reinterpret_cast<PyOp(OpDef)*>(self)->op
->set_scope(py::cast<std::string>(py::handle(scope)));
} CATCH_ALL(-1)
}
return 0;
}
PyGetSetDef PyOp(Fill)::py_getsetters[] = {
{const_cast<char*>("value"), py_get_generic(Fill, value), py_set_generic(Fill, value), const_cast<char*>("value"), NULL},
{const_cast<char*>("dtype"), py_get_generic(Fill, dtype), py_set_generic(Fill, dtype), const_cast<char*>("dtype"), NULL},
{const_cast<char*>("comp_node"), py_get_generic(Fill, comp_node), py_set_generic(Fill, comp_node), const_cast<char*>("comp_node"), NULL},
{NULL} /* Sentinel */
};
PyMethodDef PyOp(Fill)::tp_methods[] = {
{const_cast<char*>("__getstate__"), PyOp(Fill)::getstate, METH_NOARGS, "Fill getstate"},
{const_cast<char*>("__setstate__"), PyOp(Fill)::setstate, METH_VARARGS, "Fill setstate"},
{NULL} /* Sentinel */
};
PyObject *PyOp(Fill)::py_init_proxy(PyObject *self, PyObject *args, PyObject *kwds) {
if (PyOp(Fill)::py_init(self, args, kwds) < 0) {
return NULL;
}
Py_RETURN_NONE;
}
PyMethodDef PyOp(Fill)::py_init_methoddef = {
"__init__",
(PyCFunction)PyOp(Fill)::py_init_proxy,
METH_VARARGS | METH_KEYWORDS,
"__init__(self, value: float = ..., dtype: str = ..., comp_node: str = ...) -> None\n"
};
void _init_py_Fill(py::module m) {
using py_op = PyOp(Fill);
auto& py_type = PyOpType(Fill);
py_type = {PyVarObject_HEAD_INIT(NULL, 0)};
py_type.tp_name = "megengine.core._imperative_rt.ops.Fill";
py_type.tp_basicsize = sizeof(PyOp(Fill));
py_type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
py_type.tp_doc = "Fill";
py_type.tp_base = &PyOpType(OpDef);
py_type.tp_dealloc = py_dealloc_generic<py_op>;
py_type.tp_new = py_new_generic<py_op>;
py_type.tp_init = py_op::py_init;
py_type.tp_methods = py_op::tp_methods;
py_type.tp_getset = py_op::py_getsetters;
py_type.tp_dict = PyDict_New();
PyObject* descr = PyDescr_NewMethod(&PyOpType(Fill), &PyOp(Fill)::py_init_methoddef);
PyDict_SetItemString(py_type.tp_dict, "__init__", descr);
mgb_assert(PyType_Ready(&py_type) >= 0);
PyType_Modified(&py_type);
m.add_object("Fill", reinterpret_cast<PyObject*>(&py_type));
mgb_assert(PyOp(OpDef)::ctype2pytype.emplace(Fill::typeinfo(), &py_type).second);
}
PyOpDefBegin(FillLike) // {
static PyGetSetDef py_getsetters[];
static PyMethodDef tp_methods[];
static PyObject* getstate(PyObject* self, PyObject*) {
auto& opdef = reinterpret_cast<PyOp(FillLike)*>(self)->inst();
static_cast<void>(opdef);
std::unordered_map<std::string, py::object> state {
{"value", serialization<decltype(opdef.value)>::dump(opdef.value)},
{"comp_node", serialization<decltype(opdef.comp_node)>::dump(opdef.comp_node)}
};
return py::cast(state).release().ptr();
}
static PyObject* setstate(PyObject* self, PyObject* args) {
PyObject* dict = PyTuple_GetItem(args, 0);
if (!dict) return NULL;
auto state = py::cast<std::unordered_map<std::string, py::object>>(dict);
auto& opdef = reinterpret_cast<PyOp(FillLike)*>(self)->inst();
static_cast<void>(opdef);
{
auto&& iter = state.find("value");
if (iter != state.end()) {
opdef.value = serialization<decltype(opdef.value)>::load(iter->second);
}
}
{
auto&& iter = state.find("comp_node");
if (iter != state.end()) {
opdef.comp_node = serialization<decltype(opdef.comp_node)>::load(iter->second);
}
}
Py_RETURN_NONE;
}
static int py_init(PyObject *self, PyObject *args, PyObject *kwds);
static PyObject* py_init_proxy(PyObject *self, PyObject *args, PyObject *kwds);
static PyMethodDef py_init_methoddef;
// };
PyOpDefEnd(FillLike)
int PyOp(FillLike)::py_init(PyObject *self, PyObject *args, PyObject *kwds) {
static const char* kwlist[] = {"value", "comp_node", "scope", NULL};
PyObject *value = NULL, *comp_node = NULL, *scope = NULL;
if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO", const_cast<char**>(kwlist), &value, &comp_node, &scope))
return -1;
if (value) {
try {
// TODO: remove this guard which is used for pybind11 implicit conversion
py::detail::loader_life_support guard{};
reinterpret_cast<PyOp(FillLike)*>(self)->inst().value =
py::cast<decltype(FillLike::value)>(py::handle(value));
} CATCH_ALL(-1)
}
if (comp_node) {
try {
// TODO: remove this guard which is used for pybind11 implicit conversion
py::detail::loader_life_support guard{};
reinterpret_cast<PyOp(FillLike)*>(self)->inst().comp_node =
py::cast<decltype(FillLike::comp_node)>(py::handle(comp_node));
} CATCH_ALL(-1)
}
if (scope) {
try {
reinterpret_cast<PyOp(OpDef)*>(self)->op
->set_scope(py::cast<std::string>(py::handle(scope)));
} CATCH_ALL(-1)
}
return 0;
}
PyGetSetDef PyOp(FillLike)::py_getsetters[] = {
{const_cast<char*>("value"), py_get_generic(FillLike, value), py_set_generic(FillLike, value), const_cast<char*>("value"), NULL},
{const_cast<char*>("comp_node"), py_get_generic(FillLike, comp_node), py_set_generic(FillLike, comp_node), const_cast<char*>("comp_node"), NULL},
{NULL} /* Sentinel */
};
PyMethodDef PyOp(FillLike)::tp_methods[] = {
{const_cast<char*>("__getstate__"), PyOp(FillLike)::getstate, METH_NOARGS, "FillLike getstate"},
{const_cast<char*>("__setstate__"), PyOp(FillLike)::setstate, METH_VARARGS, "FillLike setstate"},
{NULL} /* Sentinel */
};
PyObject *PyOp(FillLike)::py_init_proxy(PyObject *self, PyObject *args, PyObject *kwds) {
if (PyOp(FillLike)::py_init(self, args, kwds) < 0) {
return NULL;
}
Py_RETURN_NONE;
}
PyMethodDef PyOp(FillLike)::py_init_methoddef = {
"__init__",
(PyCFunction)PyOp(FillLike)::py_init_proxy,
METH_VARARGS | METH_KEYWORDS,
"__init__(self, value: float = ..., comp_node: str = ...) -> None\n"
};
void _init_py_FillLike(py::module m) {
using py_op = PyOp(FillLike);
auto& py_type = PyOpType(FillLike);
py_type = {PyVarObject_HEAD_INIT(NULL, 0)};
py_type.tp_name = "megengine.core._imperative_rt.ops.FillLike";
py_type.tp_basicsize = sizeof(PyOp(FillLike));
py_type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
py_type.tp_doc = "FillLike";
py_type.tp_base = &PyOpType(OpDef);
py_type.tp_dealloc = py_dealloc_generic<py_op>;
py_type.tp_new = py_new_generic<py_op>;
py_type.tp_init = py_op::py_init;
py_type.tp_methods = py_op::tp_methods;
py_type.tp_getset = py_op::py_getsetters;
py_type.tp_dict = PyDict_New();
PyObject* descr = PyDescr_NewMethod(&PyOpType(FillLike), &PyOp(FillLike)::py_init_methoddef);
PyDict_SetItemString(py_type.tp_dict, "__init__", descr);
mgb_assert(PyType_Ready(&py_type) >= 0);
PyType_Modified(&py_type);
m.add_object("FillLike", reinterpret_cast<PyObject*>(&py_type));
mgb_assert(PyOp(OpDef)::ctype2pytype.emplace(FillLike::typeinfo(), &py_type).second);
}
PyOpDefBegin(GammaRNG) // {
static PyGetSetDef py_getsetters[];
static PyMethodDef tp_methods[];
@@ -21733,6 +22005,8 @@ void _init_py_WarpPerspectiveBackwardMat(py::module m) {
     _init_py_Eye(m); \
     _init_py_FakeQuant(m); \
     _init_py_FastpathCopy(m); \
+    _init_py_Fill(m); \
+    _init_py_FillLike(m); \
     _init_py_GammaRNG(m); \
     _init_py_GaussianRNG(m); \
     _init_py_GetVarShape(m); \
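The generated getstate/setstate pair above backs pickling support for the new op types. A hedged round-trip sketch (the "xpux" comp_node string and the module path are assumptions for illustration, not part of the diff):

from megengine.core.ops import builtin

op = builtin.Fill(1.0, "float32", "xpux")
state = op.__getstate__()   # dict with 'value', 'dtype', 'comp_node'

op2 = builtin.Fill()
op2.__setstate__(state)     # setstate pulls the dict out of args[0]
assert op2.value == op.value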
imperative/tablegen/generated/opdef.h.inl
@@ -944,6 +944,35 @@ public:
FastpathCopy() = default;
};
class Fill : public OpDefImplBase<Fill> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
float value = 0;
::megdnn::DType dtype;
::mgb::CompNode comp_node;
Fill() = default;
Fill(float value_, ::megdnn::DType dtype_, ::mgb::CompNode comp_node_, std::string scope_ = {}): value(value_), dtype(dtype_), comp_node(comp_node_) { set_scope(scope_); }
Fill(::megdnn::param::Fill packed_param_0, ::megdnn::DType dtype_, ::mgb::CompNode comp_node_): value(packed_param_0.value), dtype(dtype_), comp_node(comp_node_) {}
::megdnn::param::Fill param() const {
return {value};
}
};
class FillLike : public OpDefImplBase<FillLike> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
public:
float value = 0;
::mgb::CompNode comp_node;
FillLike() = default;
FillLike(float value_, ::mgb::CompNode comp_node_, std::string scope_ = {}): value(value_), comp_node(comp_node_) { set_scope(scope_); }
FillLike(::megdnn::param::Fill packed_param_0, ::mgb::CompNode comp_node_): value(packed_param_0.value), comp_node(comp_node_) {}
::megdnn::param::Fill param() const {
return {value};
}
};
class GammaRNG : public OpDefImplBase<GammaRNG> {
MGB_DYN_TYPE_OBJ_FINAL_DECL;
imperative/tablegen/generated/opdef.py.inl
@@ -1189,6 +1189,23 @@ py::class_<FastpathCopy, std::shared_ptr<FastpathCopy>, OpDef> FastpathCopyInst(
FastpathCopyInst
.def(py::init<>());
py::class_<Fill, std::shared_ptr<Fill>, OpDef> FillInst(m, "Fill");
FillInst
.def(py::init<float, ::megdnn::DType, ::mgb::CompNode, std::string>(), py::arg("value") = 0, py::arg("dtype"), py::arg("comp_node"), py::arg("scope") = {})
.def(py::init<>())
.def_readwrite("value", &Fill::value)
.def_readwrite("dtype", &Fill::dtype)
.def_readwrite("comp_node", &Fill::comp_node);
py::class_<FillLike, std::shared_ptr<FillLike>, OpDef> FillLikeInst(m, "FillLike");
FillLikeInst
.def(py::init<float, ::mgb::CompNode, std::string>(), py::arg("value") = 0, py::arg("comp_node"), py::arg("scope") = {})
.def(py::init<>())
.def_readwrite("value", &FillLike::value)
.def_readwrite("comp_node", &FillLike::comp_node);
py::class_<GammaRNG, std::shared_ptr<GammaRNG>, OpDef> GammaRNGInst(m, "GammaRNG");
GammaRNGInst
src/core/include/megbrain/ir/ops.td
@@ -248,6 +248,19 @@ def PermutationRNG: MgbHashableOp<"PermutationRNG", [PermutationRNGParam]> {
   let cmpFunction = [{return $0.handle == $1.handle && $0.dtype == $1.dtype;}];
 }
 
+def Fill: MgbHashableOp<"Fill", [FillParam]> {
+  let extraArguments = (ins
+    MgbDTypeAttr:$dtype,
+    MgbCompNodeAttr:$comp_node
+  );
+}
+
+def FillLike: MgbHashableOp<"FillLike", [FillParam]> {
+  let extraArguments = (ins
+    MgbCompNodeAttr:$comp_node
+  );
+}
+
 def ShuffleRNG: MgbHashableOp<"ShuffleRNG", [ShuffleRNGParam]> {
   let extraArguments = (ins
     MgbSizeTAddr:$handle
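FillLike only needs comp_node as an extra argument because, as apply_on_var_node in fill.cpp showed, the output dtype and shape come from its single input tensor. A hedged usage sketch matching the new full_like body (same import caveat as above):

import megengine as mge
from megengine.core.ops import builtin
from megengine.core._imperative_rt.core2 import apply

inp = mge.tensor([[1, 2, 3], [4, 5, 6]], dtype="int32")
(out,) = apply(builtin.FillLike(value=2.0), inp)
print(out)   # [[2 2 2] [2 2 2]], dtype=int32, same shape and device as inp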