#include <numeric>

#include "../blob_manager_impl.h"
#include "../dnn_op_helper.h"
#include "../op_trait.h"

#include "megbrain/graph/symbol_var.h"
#include "megbrain/imperative/ops/autogen.h"
#include "megbrain/opr/basic_arith.h"
#include "megbrain/opr/blas.h"
#include "megbrain/opr/io.h"
#include "megbrain/opr/tensor_manip.h"

#include "../algo_chooser.h"

namespace mgb {
namespace imperative {

namespace {
namespace matrix_mul {
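// Lower MatrixMul onto the computing graph. Operands with more than 2 dims
// are flattened to 2D, multiplied, then reshaped back. Illustrative example:
// a (2, 3, 4) lhs with a (4, 5) rhs is computed as (6, 4) x (4, 5) -> (6, 5)
// and reshaped to (2, 3, 5).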
auto apply_on_var_node(const OpDef& def, const VarNodeArray& inputs) {
    auto&& matmul = def.cast_final_safe<MatrixMul>();
    mgb_assert(inputs.size() == 2);
    auto inp1 = SymbolVar{inputs[0]}, inp2 = SymbolVar{inputs[1]};
    auto dim1 = matmul.dimA, dim2 = matmul.dimB;

    auto cn = inputs[0]->comp_node();
    using Desc = opr::AxisAddRemove::AxisDesc;
    using IndexDesc = opr::Subtensor::IndexDesc;
    OperatorNodeConfig config{matmul.make_name(), cn};

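    // The scalar -1 below is a symbolic split point: Subtensor with
    // end/begin = -1 yields the leading axes (head) and the last axis (tail)
    // of a shape, respectively.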
    DTypeScalar vi{-1};
    auto graph = inputs[0]->owner_graph();

    // Flatten every operand with more than 2 dims into a 2D tensor, keeping
    // the head (leading axes) of its shape to restore the result shape later.
    SymbolVar shp1_head, shp2_head;
    if (dim1 > 2) {
        auto idx = opr::ImmutableTensor::make(*graph, vi, config);
        auto shp1 = inp1.symshape();
        IndexDesc head_desc(1);
        head_desc[0].end = idx;
        shp1_head = opr::Subtensor::make(shp1, head_desc);
        auto batch = opr::Reduce::make(shp1_head, {Reduce::Mode::PRODUCT, 0});
        IndexDesc tail_desc(1);
        tail_desc[0].begin = idx;
        auto shp1_tail = opr::Subtensor::make(shp1, tail_desc);
        auto tshp = opr::Concat::make({batch, shp1_tail}, 0, cn);
        inp1 = inp1.reshape(tshp);
    }
    if (dim2 > 2) {
        auto idx = opr::ImmutableTensor::make(*graph, vi, config);
        auto shp2 = inp2.symshape();
        IndexDesc head_desc(1);
        head_desc[0].end = idx;
        shp2_head = opr::Subtensor::make(shp2, head_desc);
        auto batch = opr::Reduce::make(shp2_head, {Reduce::Mode::PRODUCT, 0});
        IndexDesc tail_desc(1);
        tail_desc[0].begin = idx;
        auto shp2_tail = opr::Subtensor::make(shp2, tail_desc);
        auto tshp = opr::Concat::make({batch, shp2_tail}, 0, cn);
        inp2 = inp2.reshape(tshp);
    }
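    // Multiply the (possibly flattened) 2D operands, then splice the saved
    // shape heads back onto the result.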
    auto result =
            opr::MatrixMul::make(inp1, inp2, matmul.param(), matmul.policy(), config);
    if (dim1 > 2) {
        auto idx = opr::ImmutableTensor::make(*graph, vi, config);
        auto result_shape = result.symshape();
        IndexDesc tail_desc(1);
        tail_desc[0].begin = idx;
        auto shp_tail = opr::Subtensor::make(result_shape, tail_desc);
        auto tshp = opr::Concat::make({shp1_head, shp_tail}, 0, cn);
        result = result.reshape(tshp);
    }
    if (dim2 > 2) {
        auto idx = opr::ImmutableTensor::make(*graph, vi, config);
        auto result_shape = result.symshape();
        IndexDesc tail_desc(1);
        tail_desc[0].begin = idx;
        auto shp_tail = opr::Subtensor::make(result_shape, tail_desc);
        auto tshp = opr::Concat::make({shp2_head, shp_tail}, 0, cn);
        result = result.reshape(tshp);
    }

    return result;
}

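// Deduce the output layout from the input layouts without running the kernel.
// The bool in the returned tuple is false when the shape cannot be fully
// determined yet, so the caller must fall back to runtime inference.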
std::tuple<SmallVector<LogicalTensorDesc>, bool> infer_output_attrs_fallible(
        const OpDef& def, const SmallVector<LogicalTensorDesc>& inputs) {
    auto&& matmul = def.cast_final_safe<MatrixMul>();
    auto layout1 = inputs[0].layout;
    auto layout2 = inputs[1].layout;
    size_t dim1 = layout1.ndim, dim2 = layout2.ndim;

    DType dst_dtype;
    // Equal ranks >= 3 only happen in backward: collapse the matching leading
    // axes of both operands into the first axis so each layout becomes 2D.
    if (dim1 == dim2 && dim2 >= 3) {
        for (size_t i = 1; i + 1 < layout1.ndim; ++i) {
            layout1[0] *= layout1[i];
            layout2[0] *= layout2[i];
        }
        layout1[1] = layout1[layout1.ndim - 1];
        layout1.ndim = 2;
        layout1.init_contiguous_stride();
        layout2[1] = layout2[layout2.ndim - 1];
        layout2.ndim = 2;
        layout2.init_contiguous_stride();
        dim1 = dim2 = 2;
    }

    DnnOprCaller<megdnn::MatrixMul> dnn_opr(inputs[0].comp_node);
    dnn_opr.op->param() = matmul.param();
    dnn_opr.op->deduce_dtype(layout1.dtype, layout2.dtype, dst_dtype);

    // Unknown ndim on either side: only the dtype can be deduced; report the
    // inference as incomplete.
    if (dim1 == 0 || dim2 == 0) {
        return {{{TensorLayout(dst_dtype), inputs[0].comp_node}}, false};
    }

    if (matmul.transposeA)
        std::swap(layout1[0], layout1[1]);
    if (matmul.transposeB)
        std::swap(layout2[0], layout2[1]);

    mgb_assert(layout1[dim1 - 1] == layout2[0]);

    TensorLayout dst_layout(dst_dtype);
    size_t ci = 0;
    for (size_t i = 0; i < dim1 - 1; i++)
        dst_layout[ci++] = layout1[i];
    if (dim2 == 2)
        dst_layout[ci++] = layout2[1];
    dst_layout.ndim = ci;
    dst_layout.init_contiguous_stride();

    SmallVector<LogicalTensorDesc> out_descs(1u);
    out_descs[0] = {dst_layout, inputs[0].comp_node};
    return {out_descs, true};
}

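// Eager-mode execution: mirror apply_on_var_node, but perform the
// flatten / multiply / reshape sequence directly on device tensors via the
// megdnn MatrixMul kernel.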
SmallVector<TensorPtr> apply_on_physical_tensor(
        const OpDef& def, const SmallVector<TensorPtr>& inputs,
        SmallVector<LogicalTensorDesc>& output_descs, const bool& validated) {
    auto&& matmul = def.cast_final_safe<MatrixMul>();
    auto&& cn = inputs[0]->comp_node();

    using TensorND = megdnn::TensorND;
    SmallVector<TensorND> inp_tensornds(inputs.size());
    TensorLayout layout1 = inputs[0]->layout(), layout2 = inputs[1]->layout();

    DnnOprCaller<megdnn::MatrixMul> dnn_opr(cn);
    dnn_opr.op->param() = matmul.param();

    // Equal ranks >= 3 only happen in backward: collapse the matching leading
    // axes so each layout becomes 2D (same as in shape inference above).
    if (matmul.dimA == matmul.dimB && matmul.dimB >= 3) {
        for (size_t i = 1; i + 1 < layout1.ndim; ++i) {
            layout1[0] *= layout1[i];
            layout2[0] *= layout2[i];
        }
        layout1[1] = layout1[layout1.ndim - 1];
        layout1.ndim = 2;
        layout1.init_contiguous_stride();
        layout2[1] = layout2[layout2.ndim - 1];
        layout2.ndim = 2;
        layout2.init_contiguous_stride();
    }

    DType dst_dtype;
    dnn_opr.op->deduce_dtype(layout1.dtype, layout2.dtype, dst_dtype);

    // only matters when layout1 has dim 2
    if (matmul.transposeA)
        std::swap(layout1.shape[0], layout1.shape[1]);
    // only matters when layout2 has dim 2
    if (matmul.transposeB)
        std::swap(layout2.shape[0], layout2.shape[1]);

    size_t dim1 = layout1.ndim, dim2 = layout2.ndim;
    TensorLayout real_dst_layout(dst_dtype);
    if (validated) {
        real_dst_layout = output_descs[0].layout;
    } else {
        size_t ri = 0;
        for (size_t i = 0; i < dim1 - 2; i++)
            real_dst_layout[ri++] = layout1[i];
        real_dst_layout[ri++] = layout1[dim1 - 2];
        if (dim2 == 2)
            real_dst_layout[ri++] = layout2[dim2 - 1];
        real_dst_layout.ndim = ri;
        real_dst_layout.init_contiguous_stride();
    }

    // Empty input: the kernel cannot run; return a zero-filled (or empty)
    // output of the deduced shape instead.
    if (dim1 == 0 || dim2 == 0 || layout1[layout1.ndim - 1] == 0) {
        auto out = Tensor::make(real_dst_layout, cn);
        if (!out->empty()) {
            dev_tensor_memset(out->dev_tensor(), 0);
        }
        return {out};
    }

    TensorLayout layout_a = layout1, layout_b = layout2;
    if (dim1 > 2) {
        size_t batch = std::accumulate(
                layout1.shape, layout1.shape + dim1 - 1, (size_t)1,
                std::multiplies<size_t>());

        TensorShape na = TensorShape{batch, layout1[dim1 - 1]};
        auto inp1 = inputs[0];
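        // If the flattened 2D view cannot be expressed over the existing
        // strides, materialize a contiguous copy first.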
        if (!layout1.try_reshape(layout_a, na)) {
            inp1 = Tensor::make(inp1->blob(), inp1->offset(), layout1);
            inp1->to_contiguous_inplace();
            layout1 = inp1->layout();
            layout_a = TensorLayout{{batch, layout1[dim1 - 1]}, layout1.dtype};
        }

        layout_a.init_contiguous_stride();
        inp_tensornds[0] = inp1->dnn_tensor();
        inp_tensornds[0].layout = layout_a;
    } else {
        inp_tensornds[0] = inputs[0]->dnn_tensor();
    }

    inp_tensornds[1] = inputs[1]->dnn_tensor();

    TensorLayout dst_layout = TensorLayout({layout_a[0], layout_b[1]}, dst_dtype);
    dst_layout.init_contiguous_stride();

    // Hand megdnn the operands in their stored (pre-transpose) shapes; the
    // transposition itself is carried by the param flags.
    if (matmul.transposeA)
        std::swap(layout_a.shape[0], layout_a.shape[1]);
    if (matmul.transposeB)
        std::swap(layout_b.shape[0], layout_b.shape[1]);

    // In the equal-rank (backward) case the operands were collapsed to 2D
    // above; patch the collapsed layouts onto the dnn tensors.
    if (matmul.dimA == matmul.dimB && matmul.dimB >= 3) {  // only happens in backward
        inp_tensornds[0].layout = layout_a;
        inp_tensornds[1].layout = layout_b;
    }
    // Select a megdnn algorithm for these layouts and query the workspace
    // size it requires.
    size_t sz = setup_algo<megdnn::MatrixMul>(
            {layout_a, layout_b, dst_layout}, dnn_opr.op.get(), 0, false, false, cn,
            matmul.policy(), false, &inp_tensornds);

    auto out = Tensor::make(dst_layout, cn);
    auto dnn_wk = dnn_opr.create_workspace(sz);

    dnn_opr.op->exec(inp_tensornds[0], inp_tensornds[1], out->dnn_tensor(), dnn_wk);
    // The kernel wrote the flattened 2D result; return a view with the
    // original leading axes restored.
    return {out->sub(0, real_dst_layout)};
}

SmallVector<VarNode::LayoutConstraintCallback> get_input_layout_constraint(
        const OpDef& def, const SmallVector<TensorPtr>& inputs) {
    SmallVector<VarNode::LayoutConstraintCallback> layout_checker(inputs.size());
    layout_checker[0] = layout_checker[1] = [](const TensorLayout& layout) {
        return layout.is_contiguous();
    };
    return layout_checker;
}

OP_TRAIT_REG(MatrixMul, MatrixMul)
        .apply_on_var_node(apply_on_var_node)
        .infer_output_attrs_fallible(infer_output_attrs_fallible)
        .apply_on_physical_tensor(apply_on_physical_tensor)
        .get_input_layout_constraint(get_input_layout_constraint)
        .fallback();
}  // namespace matrix_mul
}  // namespace

namespace {
namespace batched_matrix_mul {
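// Lower BatchedMatrixMul onto the computing graph. The batch axes of the
// lower-rank operand are broadcast against the higher-rank one, and with
// more than one batch axis both operands are flattened to 3D for the
// underlying op. Illustrative example: a (2, 3, 4, 5) lhs with a (5, 6) rhs
// broadcasts the rhs to (2, 3, 5, 6), multiplies (6, 4, 5) x (6, 5, 6)
// -> (6, 4, 6), and reshapes back to (2, 3, 4, 6).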
auto apply_on_var_node(const OpDef& def, const VarNodeArray& inputs) {
    auto&& matmul = def.cast_final_safe<BatchedMatrixMul>();
    mgb_assert(inputs.size() == 2);
    auto inp1 = SymbolVar{inputs[0]}, inp2 = SymbolVar{inputs[1]};
    auto dim1 = matmul.dimA, dim2 = matmul.dimB;

    auto cn = inputs[0]->comp_node();
    using Desc = opr::AxisAddRemove::AxisDesc;
    using IndexDesc = opr::Subtensor::IndexDesc;
    OperatorNodeConfig config{matmul.make_name(), cn};

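    // The scalar -2 is the symbolic split point between batch axes and the
    // trailing two matrix axes.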
    DTypeScalar vi{-2};
    auto graph = inputs[0]->owner_graph();
    auto idx = opr::ImmutableTensor::make(*graph, vi, config);

    auto shp1 = inp1.symshape();
    auto shp2 = inp2.symshape();
    SymbolVar batch_shape;
    if (dim1 > dim2) {
        HostTensorND hv = HostTensorND(cn, {1}, dtype::Int32());
        auto* ptr = hv.ptr<dt_int32>();
        ptr[0] = -static_cast<dt_int32>(dim2);
        IndexDesc head_desc(1);
        head_desc[0].end = opr::ImmutableTensor::make(*graph, hv, config);
        auto shp1_head = opr::Subtensor::make(shp1, head_desc);
        shp2 = opr::Concat::make({shp1_head, shp2}, 0, cn);
        inp2 = inp2.broadcast(shp2);
        head_desc[0].end = idx;
        batch_shape = opr::Subtensor::make(shp1, head_desc);
    }
    if (dim2 > dim1) {
        HostTensorND hv = HostTensorND(cn, {1}, dtype::Int32());
        auto* ptr = hv.ptr<dt_int32>();
        ptr[0] = -static_cast<dt_int32>(dim1);
        IndexDesc head_desc(1);
        head_desc[0].end = opr::ImmutableTensor::make(*graph, hv, config);
        auto shp2_head = opr::Subtensor::make(shp2, head_desc);
        shp1 = opr::Concat::make({shp2_head, shp1}, 0, cn);
        inp1 = inp1.broadcast(shp1);
        head_desc[0].end = idx;
        batch_shape = opr::Subtensor::make(shp2, head_desc);
    }
    if (dim1 == dim2) {
        IndexDesc head_desc(1);
        head_desc[0].end = idx;
        batch_shape = opr::Subtensor::make(shp1, head_desc);
    }

    auto maxdim = dim1 > dim2 ? dim1 : dim2;
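    // With more than one batch axis, flatten all batch axes into a single
    // one so the underlying BatchedMatrixMul sees 3D operands.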
    if (maxdim > 3) {
        IndexDesc tail_desc(1);
        tail_desc[0].begin = idx;
        auto shp1_tail = opr::Subtensor::make(shp1, tail_desc);
        auto batch = opr::Reduce::make(batch_shape, {Reduce::Mode::PRODUCT, 0});
        shp1 = opr::Concat::make({batch, shp1_tail}, 0, cn);
        inp1 = inp1.reshape(shp1);
        auto shp2_tail = opr::Subtensor::make(shp2, tail_desc);
        shp2 = opr::Concat::make({batch, shp2_tail}, 0, cn);
        inp2 = inp2.reshape(shp2);
    }

    auto result = opr::BatchedMatrixMul::make(
            inp1, inp2, matmul.param(), matmul.policy(), config);

    if (maxdim > 3) {
        auto result_shp = result.symshape();
        IndexDesc tail_desc(1);
        tail_desc[0].begin = idx;
        auto shp_tail = opr::Subtensor::make(result_shp, tail_desc);
        result_shp = opr::Concat::make({batch_shape, shp_tail}, 0, cn);
        result = result.reshape(result_shp);
    }
    return result;
}

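// Deduce the output layout: batch axes follow the higher-rank operand, the
// trailing two axes follow ordinary matrix-multiply rules.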
std::tuple<SmallVector<LogicalTensorDesc>, bool> infer_output_attrs_fallible(
        const OpDef& def, const SmallVector<LogicalTensorDesc>& inputs) {
    auto&& matmul = def.cast_final_safe<BatchedMatrixMul>();
    TensorLayout layout1 = inputs[0].layout, layout2 = inputs[1].layout;
    size_t dim1 = layout1.ndim, dim2 = layout2.ndim;

    DType dst_dtype;

    DnnOprCaller<megdnn::MatrixMul> dnn_opr(inputs[0].comp_node);
    dnn_opr.op->param() = matmul.param();
    dnn_opr.op->deduce_dtype(layout1.dtype, layout2.dtype, dst_dtype);

    // Unknown ndim on either side: only the dtype can be deduced; report the
    // inference as incomplete.
    if (dim1 == 0 || dim2 == 0) {
        return {{{TensorLayout(dst_dtype), inputs[0].comp_node}}, false};
    }

    if (matmul.transposeA)
        std::swap(layout1[dim1 - 1], layout1[dim1 - 2]);
    if (matmul.transposeB)
        std::swap(layout2[dim2 - 1], layout2[dim2 - 2]);

    TensorLayout dst_layout(dst_dtype);
    size_t di = 0;
    if (dim1 > dim2) {
        for (size_t i = 0; i < dim1 - 2; i++)
            dst_layout[di++] = layout1[i];
    } else {
        for (size_t i = 0; i < dim2 - 2; i++)
            dst_layout[di++] = layout2[i];
    }
    if (dim1 > 1)
        dst_layout[di++] = layout1[dim1 - 2];
    if (dim2 > 1)
        dst_layout[di++] = layout2[dim2 - 1];
    dst_layout.ndim = di;
    dst_layout.init_contiguous_stride();

    SmallVector<LogicalTensorDesc> out_descs(1u);
    out_descs[0] = {dst_layout, inputs[0].comp_node};
    return {out_descs, true};
}

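// Eager-mode execution of BatchedMatrixMul: broadcast batch axes, collapse
// them to a single batch dimension when needed, run the megdnn kernel, then
// reshape the result back.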
SmallVector<TensorPtr> apply_on_physical_tensor(
        const OpDef& def, const SmallVector<TensorPtr>& inputs,
        SmallVector<LogicalTensorDesc>& output_descs, const bool& validated) {
    auto&& matmul = def.cast_final_safe<BatchedMatrixMul>();
    auto&& cn = inputs[0]->comp_node();

    TensorLayout layout1 = inputs[0]->layout(), layout2 = inputs[1]->layout();
    size_t dim1 = layout1.ndim, dim2 = layout2.ndim;

    DnnOprCaller<megdnn::BatchedMatrixMul> dnn_opr(cn);
    dnn_opr.op->param() = matmul.param();
    DType dst_dtype;
    dnn_opr.op->deduce_dtype(layout1.dtype, layout2.dtype, dst_dtype);

    TensorShape tshp, batch_shp;
    size_t j = 0;
    auto inp1 = inputs[0], inp2 = inputs[1];
    // Broadcast the lower-rank operand's batch axes against the higher-rank
    // one (mirroring apply_on_var_node); batch_shp records the batch shape
    // for restoring the output later.
    if (dim1 > dim2) {
        for (size_t i = 0; i < dim1 - 2; i++)
            tshp[j++] = layout1.shape[i];
        batch_shp = tshp;
        batch_shp.ndim = dim1 - 2;
        tshp[j++] = layout2[layout2.ndim - 2];
        tshp[j++] = layout2[layout2.ndim - 1];
        tshp.ndim = j;
        layout2 = layout2.broadcast(tshp);
    }
    if (dim2 > dim1) {
        for (size_t i = 0; i < dim2 - 2; i++)
            tshp[j++] = layout2.shape[i];
        batch_shp = tshp;
        batch_shp.ndim = dim2 - 2;
        tshp[j++] = layout1[layout1.ndim - 2];
        tshp[j++] = layout1[layout1.ndim - 1];
        tshp.ndim = j;
        layout1 = layout1.broadcast(tshp);
    }
    if (dim1 == dim2) {
        for (size_t i = 0; i < dim1 - 2; i++)
            tshp[j++] = layout1.shape[i];
        batch_shp = tshp;
        batch_shp.ndim = dim1 - 2;
    }

    // shp1 will become the final output shape: the batch axes plus two matrix
    // axes filled in after the multiply.
    TensorShape shp1 = batch_shp;
    shp1.ndim += 2;
    size_t maxdim = dim1 > dim2 ? dim1 : dim2;
    size_t nbatch = batch_shp[0];
    if (maxdim > 3) {
        nbatch = std::accumulate(
                batch_shp.shape, batch_shp.shape + batch_shp.ndim, (size_t)1,
                std::multiplies<size_t>());

        // batched_matmul does not support memory forwarding, so ensure
        // contiguity manually before collapsing the batch axes.
        TensorShape nl1 = TensorShape(
                {nbatch, layout1[layout1.ndim - 2], layout1[layout1.ndim - 1]});
        inp1 = Tensor::make(inputs[0]->blob(), inputs[0]->offset(), layout1);
        inp1->to_contiguous_inplace();
        layout1 = inp1->layout().reshape(nl1);

        TensorShape nl2 = TensorShape(
                {nbatch, layout2[layout2.ndim - 2], layout2[layout2.ndim - 1]});
        inp2 = Tensor::make(inputs[1]->blob(), inputs[1]->offset(), layout2);
        inp2->to_contiguous_inplace();
        layout2 = inp2->layout().reshape(nl2);
    }

    TensorLayout dst_layout(
            {nbatch, matmul.transposeA ? layout1[2] : layout1[1],
             matmul.transposeB ? layout2[1] : layout2[2]},
            dst_dtype);
    dst_layout.init_contiguous_stride();

    // Empty input: skip the kernel and return a zero-filled (or empty) output.
    if (dim1 == 0 || dim2 == 0 || layout1[layout1.ndim - 1] == 0) {
        auto out = Tensor::make(dst_layout, cn);
        if (!out->empty()) {
            dev_tensor_memset(out->dev_tensor(), 0);
        }
        return {out};
    }

    // Hand the (possibly collapsed) operands to megdnn, then pick an
    // algorithm and its workspace size.
    SmallVector<megdnn::TensorND> inp_tensornds(2u);
    inp_tensornds[0] = inp1->dnn_tensor();
    inp_tensornds[0].layout = layout1;
    inp_tensornds[1] = inp2->dnn_tensor();
    inp_tensornds[1].layout = layout2;

    size_t sz = setup_algo<megdnn::BatchedMatrixMul>(
            {layout1, layout2, dst_layout}, dnn_opr.op.get(), 0, false, false, cn,
            matmul.policy(), false, &inp_tensornds);
    auto out = Tensor::make(dst_layout, cn);
    auto dnn_wk = dnn_opr.create_workspace(sz);
    dnn_opr.op->exec(inp_tensornds[0], inp_tensornds[1], out->dnn_tensor(), dnn_wk);
    if (maxdim > 3) {
        // Restore the original batch axes on the output layout: the batch
        // shape followed by the two matrix axes of the flattened result.
        shp1[shp1.ndim - 2] = dst_layout[dst_layout.ndim - 2];
        shp1[shp1.ndim - 1] = dst_layout[dst_layout.ndim - 1];
        dst_layout = dst_layout.reshape(shp1);
    }
    return {out->sub(0, dst_layout)};
}

SmallVector<VarNode::LayoutConstraintCallback> get_input_layout_constraint(
        const OpDef& def, const SmallVector<TensorPtr>& inputs) {
    SmallVector<VarNode::LayoutConstraintCallback> layout_checker(inputs.size());
    layout_checker[0] = layout_checker[1] = [](const TensorLayout& layout) {
        return layout.is_contiguous();
    };
    return layout_checker;
}

OP_TRAIT_REG(BatchedMatrixMul, BatchedMatrixMul)
        .apply_on_var_node(apply_on_var_node)
        .infer_output_attrs_fallible(infer_output_attrs_fallible)
        .get_input_layout_constraint(get_input_layout_constraint)
        .apply_on_physical_tensor(apply_on_physical_tensor)
        .fallback();
}  // namespace batched_matrix_mul
}  // namespace

namespace {
namespace dot {

auto apply_on_var_node(const OpDef& def, const VarNodeArray& inputs) {
    auto&& op = def.cast_final_safe<Dot>();
    mgb_assert(inputs.size() == 2);
    OperatorNodeConfig config{op.make_name()};
    return opr::Dot::make(inputs[0], inputs[1], config);
}

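// Eager-mode execution of Dot: deduce the output layout via megdnn, zero-fill
// on empty inputs, otherwise run the kernel.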
SmallVector<TensorPtr> apply_on_physical_tensor(
        const OpDef& def, const SmallVector<TensorPtr>& inputs,
        SmallVector<LogicalTensorDesc>& output_descs, const bool& validated) {
    auto comp_node = inputs[0]->comp_node();
    using TensorND = megdnn::TensorND;
    SmallVector<TensorND> inp_tensornds;
    inp_tensornds.reserve(inputs.size());
    DnnOprCaller<megdnn::Dot> dnn_opr(comp_node);
    for (auto&& inp : inputs) {
        inp_tensornds.push_back(inp->dnn_tensor());
    }
    TensorLayout oup_layout{inputs[0]->dtype()};
    dnn_opr.op->deduce_layout(
            inp_tensornds[0].layout, inp_tensornds[1].layout, oup_layout);

    // Dot with an empty operand: skip the kernel and return a zero-filled
    // (or empty) result.
    if (inputs[0]->layout().is_empty() || inputs[1]->layout().is_empty()) {
        auto out = Tensor::make(oup_layout, comp_node);
        if (!out->empty()) {
            dev_tensor_memset(out->dev_tensor(), 0);
        }
        return {out};
    }

    // Query workspace against the layout deduced above (output_descs may not
    // be validated at this point).
    auto sz = dnn_opr.op->get_workspace_in_bytes(
            inp_tensornds[0].layout, inp_tensornds[1].layout, oup_layout);

    auto out = Tensor::make(oup_layout, comp_node);
    auto dnn_wk = dnn_opr.create_workspace(sz);
    dnn_opr.op->exec(inp_tensornds[0], inp_tensornds[1], out->dnn_tensor(), dnn_wk);
    return {out};
}

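// Dot always yields a single-element tensor; the deduction is valid once
// both input shapes are known.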
std::tuple<SmallVector<LogicalTensorDesc>, bool> infer_output_attrs_fallible(
        const OpDef& def, const SmallVector<LogicalTensorDesc>& inputs) {
    mgb_assert(
            inputs.size() == 2, "Dot expects 2 inputs; got %zu", inputs.size());
    SmallVector<LogicalTensorDesc> dests(1);
    dests[0].layout = TensorLayout(TensorShape{1}, inputs[0].layout.dtype);
    dests[0].comp_node = inputs[0].comp_node;
    bool validated = inputs[0].layout.ndim != 0 && inputs[1].layout.ndim != 0;
    return {dests, validated};
}

OP_TRAIT_REG(Dot, Dot, mgb::opr::Dot)
        .apply_on_var_node(apply_on_var_node)
        .infer_output_attrs_fallible(infer_output_attrs_fallible)
        .apply_on_physical_tensor(apply_on_physical_tensor)
        .fallback();

}  // namespace dot
}  // anonymous namespace

}  // namespace imperative
}  // namespace mgb