提交 9b908c02 编写于 作者: M Megvii Engine Team

refactor(opr/dnn): remove MegDNNOprInputsLayoutModifier

It was used mainly to ease the migration of quantized operators, and is now unused.

GitOrigin-RevId: c4a306d0a4cd27fe14782e1f0cf3078a20cad0c8
上级 3adad485
......@@ -65,8 +65,6 @@ size_t MatrixMul::get_workspace_size_bytes(
TensorLayout i0(input_shapes[0], input(0)->dtype()),
i1(input_shapes[1], input(1)->dtype()),
out(output_shapes[0], output(0)->dtype());
intl::MegDNNOprInputsLayoutModifier<megdnn::MatrixMul>::apply(
tparam, {&i0, &i1, &out});
auto transpose = [](TensorLayout& dst, bool& param) {
std::swap(dst.shape[0], dst.shape[1]);
......@@ -102,8 +100,6 @@ void MatrixMul::scn_do_execute() {
MGB_TRY {
transpose(inp0.layout, tparam.transposeA);
transpose(inp1.layout, tparam.transposeB);
intl::MegDNNOprInputsLayoutModifier<megdnn::MatrixMul>::apply(
tparam, {&inp0.layout, &inp1.layout, &out.layout});
megdnn_opr()->exec(inp0, inp1, out,
intl::get_megdnn_workspace_from_var(output(1)));
}
......@@ -186,8 +182,6 @@ size_t BatchedMatrixMul::get_workspace_size_bytes(
TensorLayout i0(input_shapes[0], input(0)->dtype()),
i1(input_shapes[1], input(1)->dtype()),
out(output_shapes[0], output(0)->dtype());
intl::MegDNNOprInputsLayoutModifier<megdnn::BatchedMatrixMul>::apply(
tparam, {&i0, &i1, &out});
auto transpose = [](TensorLayout& dst, bool& param) {
std::swap(dst.shape[1], dst.shape[2]);
......@@ -224,8 +218,6 @@ void BatchedMatrixMul::scn_do_execute() {
MGB_TRY {
transpose(inp0.layout, tparam.transposeA);
transpose(inp1.layout, tparam.transposeB);
intl::MegDNNOprInputsLayoutModifier<megdnn::BatchedMatrixMul>::apply(
tparam, {&inp0.layout, &inp1.layout, &out.layout});
megdnn_opr()->exec(inp0, inp1, out,
intl::get_megdnn_workspace_from_var(output(1)));
}
......
......@@ -133,12 +133,6 @@ struct OprArityTrait;
const DeviceTensorND* out_val, \
megdnn::Workspace& workspace) { \
opr->exec(TENSORS(cb_dnn), workspace); \
} \
\
static void modify_input_layouts(_Opr* opr, \
const TensorLayoutArray& layouts) { \
intl::MegDNNOprInputsLayoutModifier<_Opr>::apply( \
opr->param(), {LAYOUTS(cb_ref)}); \
} \
}
......@@ -443,7 +437,6 @@ class AlgoChooser {
std::tuple_size<ConvTensorLayouts>::value == 8,
"Convolution AlgoChooser assumes arity = 3 , 5 or 8 (for "
"deformable conv)");
OprArityTrait<Opr>::modify_input_layouts(megdnn_opr, m_layouts);
}
Opr* megdnn_opr() const { return m_megdnn_opr; }
......
......@@ -46,21 +46,6 @@ namespace intl {
}
};
/*!
* \brief Template that can be specialized and modify input tensors' layout
* before passing to MegDNN. The implementation has to ensure that
* modified layout is compatible with the original one.
* Will be invoked in get_workspace_in_bytes, deduce_layout and exec.
* Note that the output layout maybe invalid during deduce_layout.
*
* \tparam Opr An MegDNN opr class
*/
template <class MegDNNOpr>
struct MegDNNOprInputsLayoutModifier {
static inline void apply(const typename MegDNNOpr::Param&,
std::initializer_list<const TensorLayout*>) {}
};
//! get megdnn Workspace object from a workspace var
megdnn::Workspace get_megdnn_workspace_from_var(VarNode *var);
......
......@@ -15,13 +15,8 @@
template<>
struct _MegDNNOprMethInvoker<_NR_INPUTS, _NR_OUTPUTS> {
#define _cb_ref_in(_x) inout[_x]
#define _cb_ref_out(_x) inout[_NR_INPUTS + _x]
#define _cb_in(_x) \
{ ishp[_x], mgb_opr->input(_x)->dtype(), mgb_opr->input(_x)->format() }
#define _cb_unused(_x) {}
#define _cb_ptr_in(_x) &(_cb_ref_in(_x))
#define _cb_ptr_out(_x) &(_cb_ref_out(_x))
template<class Opr>
static inline size_t get_workspace_in_bytes(
Opr *opr, const cg::OperatorNodeBase *mgb_opr,
......@@ -29,15 +24,7 @@ struct _MegDNNOprMethInvoker<_NR_INPUTS, _NR_OUTPUTS> {
const TensorShapeArray &oshp) {
#define _cb_out(_x) \
{ oshp[_x], mgb_opr->output(_x)->dtype(), mgb_opr->output(_x)->format() }
TensorLayout inout[_NR_INPUTS + _NR_OUTPUTS] = {
_FOREACH_IO(_cb_in, _cb_out)
};
MegDNNOprInputsLayoutModifier<Opr>::apply(opr->param(), {
_FOREACH_IO(_cb_ptr_in, _cb_ptr_out)
});
return opr->get_workspace_in_bytes(
_FOREACH_IO(_cb_ref_in, _cb_ref_out)
);
return opr->get_workspace_in_bytes(_FOREACH_IO(_cb_in, _cb_out));
#undef _cb_out
}
......@@ -46,51 +33,26 @@ struct _MegDNNOprMethInvoker<_NR_INPUTS, _NR_OUTPUTS> {
Opr *opr, const cg::OperatorNodeBase *mgb_opr,
const TensorShapeArray &ishp,
TensorShapeArray &oshp) {
#define _cb_out(_x) \
{ mgb_opr->output(_x)->dtype(), mgb_opr->output(_x)->format() }
TensorLayout inout[_NR_INPUTS + _NR_OUTPUTS] = {
_FOREACH_IO(_cb_in, _cb_out)
};
MegDNNOprInputsLayoutModifier<Opr>::apply(opr->param(), {
_FOREACH_IO(_cb_ptr_in, _cb_ptr_out)
});
opr->deduce_layout(
_FOREACH_IO(_cb_ref_in, _cb_ref_out)
);
#define _cb_out(_x) ov[_x]
TensorLayout ov[_NR_OUTPUTS];
for (int i = 0; i < _NR_OUTPUTS; ++ i)
ov[i] = {mgb_opr->output(i)->dtype(), mgb_opr->output(i)->format()};
opr->deduce_layout(_FOREACH_IO(_cb_in, _cb_out));
for (int i = 0; i < _NR_OUTPUTS; ++ i)
oshp[i] = _cb_ref_out(i);
oshp[i] = ov[i];
}
#undef _cb_out
#undef _cb_ptr_out
#undef _cb_ptr_in
#undef _cb_unused
#undef _cb_in
#undef _cb_ref_out
#undef _cb_ref_in
template<class Opr>
static inline void exec(Opr *opr, const cg::OperatorNodeBase *mgb_opr) {
#define _cb_ref_in(_x) inout[_x]
#define _cb_ref_out(_x) inout[_NR_INPUTS + _x]
#define _cb_in(_x) mgb_opr->input(_x)->dev_tensor().as_megdnn()
#define _cb_out(_x) mgb_opr->output(_x)->dev_tensor().as_megdnn()
#define _cb_ptr_in(_x) &(_cb_ref_in(_x).layout)
#define _cb_ptr_out(_x) &(_cb_ref_out(_x).layout)
megdnn::TensorND inout[_NR_INPUTS + _NR_OUTPUTS] = {
_FOREACH_IO(_cb_in, _cb_out)
};
MegDNNOprInputsLayoutModifier<Opr>::apply(opr->param(), {
_FOREACH_IO(_cb_ptr_in, _cb_ptr_out)
});
opr->exec(
_FOREACH_IO(_cb_ref_in, _cb_ref_out),
_FOREACH_IO(_cb_in, _cb_out),
get_megdnn_workspace_from_var(mgb_opr->output().back()));
#undef _cb_ptr_out
#undef _cb_ptr_in
#undef _cb_out
#undef _cb_in
#undef _cb_ref_out
#undef _cb_ref_in
}
};
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册