/**
 * \file dnn/src/cuda/elemwise_helper.cuh
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 */

#pragma once

#include "src/common/elemwise_helper.cuh"
#include "src/cuda/utils.cuh"
#include "src/cuda/int_fastdiv.cuh"
#include "src/cuda/query_blocksize.cuh"

/*
 * please note that all arithmetic on GPU is 32-bit for best performance; this
 * limits the maximum possible size
 */

namespace megdnn {
namespace cuda {

//! internals for element-wise
namespace elemwise_intl {
#define devfunc __device__ __forceinline__

/*!
 * \brief get cuda launch specs for element-wise kernel
 * \param kern kernel function address
 * \param size total size of elements
 */
void get_launch_spec(const void* kern, size_t size, int* grid_size,
                     int* block_size);

MEGDNN_NORETURN void on_bad_ndim(int ndim);

/*!
 * \brief broadcast type
 * BCAST_x[0]x[1]...: digit x[i] is 1 iff stride[i] == 0, i.e. dim i of the
 *      param is broadcast
 */
enum BcastType {
    BCAST_OTHER,
    BCAST_1010,
    BCAST_101,
    BCAST_10,
    BCAST_01,
    BCAST_FULL
};
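
/*
 * Example: broadcasting a (1, C, 1) bias over a (N, C, H) value gives the
 * bias strides (0, s, 0), which is BCAST_101; a param broadcast along every
 * dim is BCAST_FULL.
 */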
/*!
 * \brief read and write type trait for byte width integer type
 */
template <typename ctype>
class VectTypeTrait;

struct __attribute__((aligned(8))) half4 {
    dt_float16 x, y, z, w;
};

__device__ __forceinline__ half4 make_half4(dt_float16 x, dt_float16 y,
                                            dt_float16 z, dt_float16 w) {
    half4 t;
    t.x = x, t.y = y, t.z = z, t.w = w;
    return t;
}
struct __attribute__((aligned(8))) bhalf4 {
    dt_bfloat16 x, y, z, w;
};
__device__ __forceinline__ bhalf4 make_bhalf4(dt_bfloat16 x, dt_bfloat16 y,
                                              dt_bfloat16 z, dt_bfloat16 w) {
    bhalf4 t;
    t.x = x, t.y = y, t.z = z, t.w = w;
    return t;
}
#define INST(_ctype, _vect_type)                                               \
    template <>                                                                \
    class VectTypeTrait<_ctype> {                                              \
    public:                                                                    \
        using vect_type = _vect_type;                                          \
        static const size_t packed_size = sizeof(_vect_type) / sizeof(_ctype); \
        static __device__ __forceinline__ vect_type make_vector(_ctype x,      \
                                                                _ctype y,      \
                                                                _ctype z,      \
                                                                _ctype w) {    \
            return make_##_vect_type(as_raw(x), as_raw(y), as_raw(z),          \
                                     as_raw(w));                               \
        }                                                                      \
    }
#define as_raw(x) x
INST(dt_int8, char4);
INST(dt_uint8, uchar4);
INST(dt_float32, float4);
INST(dt_float16, half4);
INST(dt_bfloat16, bhalf4);
INST(dt_int32, int4);
INST(dt_int16, short4);
#undef as_raw
#define as_raw(x) x.as_int8()
INST(dt_qint8, char4);
#undef as_raw
#define as_raw(x) x.as_uint8()
INST(dt_quint8, uchar4);
#undef as_raw
#define as_raw(x) x.as_int32()
INST(dt_qint32, int4);
#undef as_raw
#undef INST
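
/*
 * For example, VectTypeTrait<dt_int8>::vect_type is char4 with
 * packed_size == 4, so four 8-bit elements are moved by one 32-bit
 * load/store.
 */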

/*!
 * \brief visitor to access an element in a tensor at given logical index
 * \tparam ctype plain element ctype (i.e. ctype in DTypeTrait)
 * \tparam brdcast_mask bit mask for broadcast of params; stride[i] is 0
 *      iff (brdcast_mask & (1 << (ndim - 1 - i))) is nonzero
 *
 * host interface:
 *      void host_init(
 *              const TensorND &tensor, int grid_size, int block_size)
 *
 * device interface:
 *      void thread_init(uint32_t idx)
 *          called on thread entrance, with logical indexing; the index may
 *          go beyond buffer range
 *
 *      ctype* ptr()
 *          return buffer pointer; can be used by specialized OpCaller
 *
 *      void next()
 *          called before moving to next chunk on each thread
 *
 *      int offset(uint32_t idx)
 *          get physical offset from logical index
 *
 *      ctype& at(uint32_t idx)
 *          ptr()[offset(idx)]
 *
 */
template <int ndim, typename ctype, BcastType brd_type>
class ParamElemVisitor;
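
/*
 * A minimal sketch of how a visitor is driven (host side configures it, the
 * kernel then resolves logical indices to physical offsets):
 *
 *     ParamElemVisitor<2, dt_float32, BCAST_OTHER> vis;
 *     vis.host_init(tensor, grid_size, block_size);  // on host
 *     // inside the kernel:
 *     vis.thread_init(idx);
 *     dt_float32& val = vis.at(idx);                 // ptr()[offset(idx)]
 */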

/*!
 * \brief visitor to access a packed vector element in a tensor at given
 *      logical index
 * \tparam ctype same as ParamElemVisitor; vect_type is the packed vector
 *      type of element ctype (i.e. vect_type in VectTypeTrait)
 * \tparam brdcast_mask same as ParamElemVisitor
 *
 * device interface:
 *      vect_type& at(uint32_t idx)
 *          ptr()[offset(idx)]
 */
template <int ndim, typename ctype, BcastType brd_type>
class ParamVectVisitor;

/* f{{{ ParamElemVisitor specializations */

#define PARAM_ELEM_VISITOR_COMMON_DEV      \
    devfunc ctype* ptr() { return m_ptr; } \
    devfunc ctype& at(uint32_t idx) { return m_ptr[offset(idx)]; }
#define PARAM_ELEM_VISITOR_COMMON_HOST static const int packed_size = 1;

//! specialization for BCAST_OTHER
template <int ndim, typename ctype>
class ParamElemVisitor<ndim, ctype, BCAST_OTHER> {
protected:
    ctype* __restrict m_ptr;
private:
    int m_stride[ndim];

    //! m_shape_highdim[i] = original_shape[i + 1]
#ifdef _MSC_VER
    Uint32Fastdiv m_shape_highdim[ndim > 1 ? ndim - 1 : 1];
#else
    Uint32Fastdiv m_shape_highdim[ndim];
#endif

public:
    static const int NDIM = ndim;
    PARAM_ELEM_VISITOR_COMMON_HOST
    void host_init(const TensorND& rv, int grid_size, int block_size);

#if MEGDNN_CC_CUDA
    devfunc void thread_init(uint32_t) {}
    devfunc void next() {}
    devfunc int offset(uint32_t idx) {
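        // peel per-dim coordinates off the logical index with precomputed
        // fast division, accumulating coordinate * stride for each dim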
        int offset = 0;
#pragma unroll
        for (int i = ndim - 1; i >= 1; --i) {
            Uint32Fastdiv& shp = m_shape_highdim[i - 1];
            uint32_t idx_div = idx / shp;
            offset += (idx - idx_div * shp.divisor()) * m_stride[i];
            idx = idx_div;
        }
        offset += idx * m_stride[0];
        return offset;
    }

    PARAM_ELEM_VISITOR_COMMON_DEV
#endif
};

/*!
 * \brief specialization for ndim == 3 and BCAST_101
 * (for dimshuffle 'x', 0, 'x')
 *
 * visit: idx / m_shape2 % m_shape1
 */
template <typename ctype>
class ParamElemVisitor<3, ctype, BCAST_101> {
    StridedDivSeq2 m_shape12;
    int m_stride1;

protected:
    ctype* __restrict m_ptr;

public:
    static const int NDIM = 3;
    PARAM_ELEM_VISITOR_COMMON_HOST

    void host_init(const TensorND& rv, int grid_size, int block_size);

#if MEGDNN_CC_CUDA
    devfunc void thread_init(uint32_t idx) { m_shape12.device_init(idx); }
    devfunc void next() { m_shape12.next(); }
    devfunc int offset(uint32_t idx) { return m_shape12.get() * m_stride1; }
    PARAM_ELEM_VISITOR_COMMON_DEV
#endif
};
/*!
 * \brief specialization for ndim == 2 and BCAST_10
 *
 * visit: idx % m_shape1
 */
template <typename ctype>
class ParamElemVisitor<2, ctype, BCAST_10> {
    StridedDivSeq<false> m_shape1;
    int m_stride1;
protected:
    ctype* __restrict m_ptr;
public:
    static const int NDIM = 2;
    PARAM_ELEM_VISITOR_COMMON_HOST
    void host_init(const TensorND& rv, int grid_size, int block_size);

#if MEGDNN_CC_CUDA
    devfunc void thread_init(uint32_t idx) { m_shape1.device_init(idx); }
    devfunc void next() { m_shape1.next(); }
    devfunc int offset(uint32_t idx) { return m_shape1.r() * m_stride1; }
    PARAM_ELEM_VISITOR_COMMON_DEV
#endif
};
/*!
 * \brief specialization for ndim == 2 and BCAST_01
 *
 * visit: idx / shape1
 */
template <typename ctype>
class ParamElemVisitor<2, ctype, BCAST_01> {
    StridedDivSeq<true> m_shape1;
    int m_stride0;
protected:
    ctype* __restrict m_ptr;
public:
    static const int NDIM = 2;
    PARAM_ELEM_VISITOR_COMMON_HOST
    void host_init(const TensorND& rv, int grid_size, int block_size);

#if MEGDNN_CC_CUDA
    devfunc void thread_init(uint32_t idx) { m_shape1.device_init(idx); }
    devfunc void next() { m_shape1.next(); }
    devfunc int offset(uint32_t idx) { return m_shape1.q() * m_stride0; }
    PARAM_ELEM_VISITOR_COMMON_DEV
#endif
};
//! specialization for ndim == 1 and BCAST_FULL
template <typename ctype>
class ParamElemVisitor<1, ctype, BCAST_FULL> {
protected:
    ctype* __restrict m_ptr;
public:
    static const int NDIM = 1;
    PARAM_ELEM_VISITOR_COMMON_HOST
    void host_init(const TensorND& rv, int grid_size, int block_size);

#if MEGDNN_CC_CUDA
    devfunc void thread_init(uint32_t) {}
    devfunc void next() {}
    devfunc int offset(uint32_t idx) {
        MEGDNN_MARK_USED_VAR(idx);
        return 0;
    }
    PARAM_ELEM_VISITOR_COMMON_DEV
#endif
};

#undef PARAM_ELEM_VISITOR_COMMON_DEV
#undef PARAM_ELEM_VISITOR_COMMON_HOST

/* f}}} */

/* f{{{ ParamVectVisitor specializations */

#if MEGDNN_CC_CUDA
#define DEVICE_WRAPPER(x) x
#else
#define DEVICE_WRAPPER(x)
#endif
#define INST_PARAM_VECT_VISITOR                                        \
    template <int ndim, typename ctype>                                \
    class ParamVectVisitor<ndim, ctype, _brdcast_mask>                 \
            : public ParamElemVisitor<ndim, ctype, _brdcast_mask> {    \
    public:                                                            \
        using Super = ParamElemVisitor<ndim, ctype, _brdcast_mask>;    \
        using rwtype = typename VectTypeTrait<ctype>::vect_type;       \
        static const int packed_size = sizeof(rwtype) / sizeof(ctype); \
        DEVICE_WRAPPER(devfunc rwtype& at(uint32_t idx) {              \
            return *(rwtype*)(&Super::m_ptr[Super::offset(idx)]);      \
        })                                                             \
    };
#define _brdcast_mask BCAST_OTHER
INST_PARAM_VECT_VISITOR;
#undef _brdcast_mask
#define _brdcast_mask BCAST_01
INST_PARAM_VECT_VISITOR;
#undef _brdcast_mask
#define _brdcast_mask BCAST_10
INST_PARAM_VECT_VISITOR;
#undef _brdcast_mask
#define _brdcast_mask BCAST_101
INST_PARAM_VECT_VISITOR;
#undef _brdcast_mask
#define INST_DT_IBYTE(ctype)                                                \
    template <int ndim>                                                     \
    class ParamVectVisitor<ndim, ctype, BCAST_FULL>                         \
            : public ParamElemVisitor<ndim, ctype, BCAST_FULL> {            \
    public:                                                                 \
        using Super = ParamElemVisitor<ndim, ctype, BCAST_FULL>;            \
        using rwtype = typename VectTypeTrait<ctype>::vect_type;            \
        static const int packed_size = sizeof(rwtype) / sizeof(ctype);      \
        DEVICE_WRAPPER(rwtype vect_scalar;                                  \
                       devfunc rwtype & at(uint32_t /* idx */) {            \
                           ctype v = Super::m_ptr[0];                       \
                           vect_scalar = VectTypeTrait<ctype>::make_vector( \
                                   v, v, v, v);                             \
                           return vect_scalar;                              \
                       })                                                   \
    }
INST_DT_IBYTE(dt_int8);
INST_DT_IBYTE(dt_uint8);
INST_DT_IBYTE(dt_qint8);
INST_DT_IBYTE(dt_quint8);
#undef INST_DT_IBYTE
#undef DEVICE_WRAPPER
#undef INST_PARAM_VECT_VISITOR

/*!
 * \brief specialization for ndim == 4 and BCAST_1010
 *
 * visit: (idx % m_shape3) * m_stride3 + (idx / m_shape23 % m_shape1) *
 * m_stride1
 */
template <typename ctype>
class ParamVectVisitor<4, ctype, BCAST_1010> {
    StridedDivSeq2 m_shape123;
    StridedDivSeq<false> m_shape3;
    int m_stride3, m_stride1;
    ctype* __restrict m_ptr;

public:
    static const int NDIM = 4;
    using rwtype = typename VectTypeTrait<ctype>::vect_type;
    static const int packed_size = sizeof(rwtype) / sizeof(ctype);

    void host_init(const TensorND& rv, int grid_size, int block_size);

#if MEGDNN_CC_CUDA
    devfunc void thread_init(uint32_t idx) {
        m_shape123.device_init(idx);
        m_shape3.device_init(idx);
    }
    devfunc void next() {
        m_shape123.next();
        m_shape3.next();
    }
    devfunc int offset(uint32_t idx) {
        return m_shape3.r() * m_stride3 + m_shape123.get() * m_stride1;
    }
    devfunc ctype* ptr() { return m_ptr; }
    devfunc rwtype& at(uint32_t idx) { return *(rwtype*)(&m_ptr[offset(idx)]); }
#endif
};

/* f}}} */

#if MEGDNN_CC_CUDA

/* f{{{ user operator callers */

/*
 * OpCaller is used to invoke user operator with loaded element arguments.
 *
 * device interface:
 *      void thread_init(uint32_t idx);
 *
 *      void on(uint32_t idx);
 *
 *      void next();
 */
/*!
 * \brief call user op directly without visiting any params (i.e. arity ==
 *      0)
 */
template <class Op>
struct OpCallerNull {
    Op op;
    devfunc void thread_init(uint32_t) {}
    devfunc void on(uint32_t idx) { op(idx); }
    devfunc void next() {}
};
/*!
 * \brief call an operator whose params have all been promoted to the same
 *      ndim and brdcast_mask
 * \tparam PVis ParamElemVisitor class
 */
template <class Op, int arity, class PVis>
struct OpCallerUniform;

//! specialization for arity == 1
template <class Op, class PVis>
struct OpCallerUniform<Op, 1, PVis> {
    Op op;
    PVis par[1];
    static const uint32_t packed_size = PVis::packed_size;

    devfunc void thread_init(uint32_t idx) {
        idx = idx * packed_size;
        par[0].thread_init(idx);
    }
    devfunc void on(uint32_t idx) {
        idx = idx * packed_size;
        op(idx, par[0].at(idx));
    }
    devfunc void on(uint32_t idx, uint32_t remain) {
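        // tail handling: when fewer than packed_size elements remain, fall
        // back to scalar element access instead of a full vector load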
        idx = idx * packed_size;
        if (remain >= packed_size) {
            op(idx, par[0].at(idx));
        } else {
            auto ptr0 = par[0].ptr();
            for (int i = 0; i < remain; i++) {
                op(idx + i, ptr0[par[0].offset(idx + i)]);
            }
        }
    }
    devfunc void next() { par[0].next(); }
};
//! specialization for arity == 2
template <class Op, class PVis>
struct OpCallerUniform<Op, 2, PVis> {
    Op op;
    PVis par[2];
    static const uint32_t packed_size = PVis::packed_size;

    devfunc void thread_init(uint32_t idx) {
        idx = idx * packed_size;
        par[0].thread_init(idx);
        par[1].thread_init(idx);
    }
    devfunc void on(uint32_t idx) {
        idx = idx * packed_size;
        op(idx, par[0].at(idx), par[1].at(idx));
    }
    devfunc void on(uint32_t idx, uint32_t remain) {
        idx = idx * packed_size;
        if (remain >= packed_size) {
            op(idx, par[0].at(idx), par[1].at(idx));
        } else {
            auto ptr0 = par[0].ptr();
            auto ptr1 = par[1].ptr();
            for (int i = 0; i < remain; i++) {
                op(idx + i, ptr0[par[0].offset(idx + i)],
                   ptr1[par[1].offset(idx + i)]);
            }
        }
    }
    devfunc void next() {
        par[0].next();
        par[1].next();
    }
};
//! specialization for arity == 3
template <class Op, class PVis>
struct OpCallerUniform<Op, 3, PVis> {
    Op op;
    PVis par[3];
    static const uint32_t packed_size = PVis::packed_size;

    devfunc void thread_init(uint32_t idx) {
        idx = idx * packed_size;
        par[0].thread_init(idx);
        par[1].thread_init(idx);
        par[2].thread_init(idx);
    }
    devfunc void on(uint32_t idx) {
        idx = idx * packed_size;
        op(idx, par[0].at(idx), par[1].at(idx), par[2].at(idx));
    }
    devfunc void on(uint32_t idx, uint32_t remain) {
        idx = idx * packed_size;
        if (remain >= packed_size) {
            op(idx, par[0].at(idx), par[1].at(idx), par[2].at(idx));
        } else {
            auto ptr0 = par[0].ptr();
            auto ptr1 = par[1].ptr();
            auto ptr2 = par[2].ptr();
            for (int i = 0; i < remain; i++) {
                op(idx + i, ptr0[par[0].offset(idx + i)],
                   ptr1[par[1].offset(idx + i)], ptr2[par[2].offset(idx + i)]);
            }
        }
    }
    devfunc void next() {
        par[0].next();
        par[1].next();
        par[2].next();
    }
};

/*!
 * \brief call binary (i.e. arity == 2) operator with different param
 *      visitors
 */
template <class Op, class PVis0, class PVis1>
struct OpCallerBinary {
    Op op;
    PVis0 par0;
    PVis1 par1;
    MEGDNN_STATIC_ASSERT(PVis0::packed_size == PVis1::packed_size,
                         "vector size mismatch")

    static const uint32_t packed_size = PVis0::packed_size;

    devfunc void thread_init(uint32_t idx) {
        idx = idx * packed_size;
        par0.thread_init(idx);
        par1.thread_init(idx);
    }
    devfunc void on(uint32_t idx) {
        idx = idx * packed_size;
        op(idx, par0.at(idx), par1.at(idx));
    }
    devfunc void next() {
        par0.next();
        par1.next();
    }
};

/* f}}} */

template <class OpCaller>
__global__ void cuda_kern(OpCaller op_caller, uint32_t size) {
    uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x,
             delta = blockDim.x * gridDim.x;
    // each thread works on at most 3 elements; see get_launch_spec
    op_caller.thread_init(idx);
    if (idx < size) {
        op_caller.on(idx);
        idx += delta;
        if (idx < size) {
            op_caller.next();
            op_caller.on(idx);
            idx += delta;
            if (idx < size) {
                op_caller.next();
                op_caller.on(idx);
            }
        }
    }
}

template <class Op, int arity, class PVis>
__global__ void cuda_kern(OpCallerUniform<Op, arity, PVis> op_caller,
                          uint32_t size) {
    constexpr uint32_t packed_size = PVis::packed_size;
    const uint32_t size_packed = DIVUP(size, packed_size);
    uint32_t idx = blockIdx.x * blockDim.x + threadIdx.x,
             delta = blockDim.x * gridDim.x;
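    // mirrors the scalar kernel: at most 3 (packed) elements per thread; the
    // second argument lets OpCallerUniform::on handle a partial tail pack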
    if (idx < size_packed) {
        op_caller.on(idx, size - packed_size * idx);
        idx += delta;
        if (idx < size_packed) {
            op_caller.on(idx, size - packed_size * idx);
            idx += delta;
            if (idx < size_packed) {
                op_caller.on(idx, size - packed_size * idx);
            }
        }
    }
}

//! invoke a user Op passed to run_elemwise
template <class Op, typename ctype, int arity>
class UserOpInvoker;
/* f{{{ UserOpInvoker specializations */
//! run op by promoting all params to same ndim
template <class Op, typename ctype, int arity>
class UserOpInvokerToSameNdim {
    const ElemwiseOpParamN<arity>& m_param;
    cudaStream_t m_stream;
    const Op& m_op;
    void dispatch0() {
        switch (m_param.max_ndim) {
#define cb(ndim) \
    case ndim:   \
        return dispatch1<ndim>();
            MEGDNN_FOREACH_TENSOR_NDIM(cb)
#undef cb
        }
        on_bad_ndim(m_param.max_ndim);
    }
    template <int ndim>
    void dispatch1() {
        typedef OpCallerUniform<Op, arity,
                                ParamElemVisitor<ndim, ctype, BCAST_OTHER>>
                Caller;
        size_t size = m_param.size;
        int grid_size, block_size;
        void (*fptr)(Caller, uint32_t) = cuda_kern<Caller>;
        get_launch_spec(reinterpret_cast<const void*>(fptr), size, &grid_size,
                        &block_size);

        Caller caller;
        caller.op = m_op;
        for (int i = 0; i < arity; ++i)
            caller.par[i].host_init(m_param[i], grid_size, block_size);
        (*fptr)<<<grid_size, block_size, 0, m_stream>>>(caller, size);
        after_kernel_launch();
    }
public:
    UserOpInvokerToSameNdim(const ElemwiseOpParamN<arity>& param,
                            cudaStream_t stream, const Op& op)
            : m_param(param), m_stream(stream), m_op(op) {
        dispatch0();
    }
};
template <class Op, typename ctype, int arity>
class UserOpInvokerToSameNdimIByteHelper {
public:
    UserOpInvokerToSameNdimIByteHelper(const ElemwiseOpParamN<arity>& param,
                                       cudaStream_t stream, const Op& op)
            : m_rw_size(param.size),
              m_param(param),
              m_stream(stream),
              m_op(op) {
        if (!try_vect_load_store_contiguous() && !try_vect_load_store()) {
            dispatch0();
        }
    }

private:
    const ElemwiseOpParamN<arity>& m_param;
    size_t m_rw_size;
    cudaStream_t m_stream;
    const Op& m_op;
    using vect_type = typename VectTypeTrait<ctype>::vect_type;
    static const size_t packed_size = VectTypeTrait<ctype>::packed_size;
    void dispatch0() {
        switch (m_param.max_ndim) {
#define cb(ndim) \
    case ndim:   \
        return dispatch1<ndim>();
            MEGDNN_FOREACH_TENSOR_NDIM(cb)
#undef cb
        }
        on_bad_ndim(m_param.max_ndim);
    }
    void dispatch0_vect() {
        switch (m_param.max_ndim) {
#define cb(ndim) \
    case ndim:   \
        return dispatch1_vect<ndim>();
            MEGDNN_FOREACH_TENSOR_NDIM(cb)
#undef cb
        }
        on_bad_ndim(m_param.max_ndim);
    }
    void dispatch_contiguous() {
        typedef ParamVectVisitor<1, ctype, BCAST_OTHER> PVis;
        typedef OpCallerUniform<Op, arity, PVis> Caller;
        size_t size = m_rw_size;
        int grid_size, block_size;
        void (*fptr)(Caller, uint32_t) = cuda_kern<Op, arity, PVis>;
        get_launch_spec(reinterpret_cast<const void*>(fptr), size, &grid_size,
                        &block_size);

        Caller caller;
        caller.op = m_op;
        for (int i = 0; i < arity; ++i)
            caller.par[i].host_init(m_param[i], grid_size, block_size);
        (*fptr)<<<grid_size, block_size, 0, m_stream>>>(caller, m_param.size);
        after_kernel_launch();
    }
    template <int ndim>
    void dispatch1() {
        typedef ParamElemVisitor<ndim, ctype, BCAST_OTHER> PVis;
        typedef OpCallerUniform<Op, arity, PVis> Caller;
        size_t size = m_rw_size;
        int grid_size, block_size;
        void (*fptr)(Caller, uint32_t) = cuda_kern<Caller>;
        get_launch_spec(reinterpret_cast<const void*>(fptr), size, &grid_size,
                        &block_size);
        Caller caller;
        caller.op = m_op;
        for (int i = 0; i < arity; ++i)
            caller.par[i].host_init(m_param[i], grid_size, block_size);
        (*fptr)<<<grid_size, block_size, 0, m_stream>>>(caller, size);
        after_kernel_launch();
    }
    template <int ndim>
    void dispatch1_vect() {
        typedef ParamVectVisitor<ndim, ctype, BCAST_OTHER> PVis;
        typedef OpCallerUniform<Op, arity, PVis> Caller;
        size_t size = m_rw_size;
        int grid_size, block_size;
        void (*fptr)(Caller, uint32_t) = cuda_kern<Caller>;
        get_launch_spec(reinterpret_cast<const void*>(fptr), size, &grid_size,
                        &block_size);
        Caller caller;
        caller.op = m_op;
        for (int i = 0; i < arity; ++i)
            caller.par[i].host_init(m_param[i], grid_size, block_size);
        (*fptr)<<<grid_size, block_size, 0, m_stream>>>(caller, size);
        after_kernel_launch();
    }
    bool try_vect_load_store() {
        auto try_last_contig = [](const TensorLayout& layout) {
            return layout.stride[layout.ndim - 1] == 1 &&
                   layout[layout.ndim - 1] % packed_size == 0;
        };
        /*
         * \NOTE: remove try_scalar() to adapt multi-type ternary op
         */
        for (int i = 0; i < arity; ++i) {
            if (!try_last_contig(m_param[i].layout))
                return false;
        }
        m_rw_size /= packed_size;
        dispatch0_vect();
        return true;
    }
    bool try_vect_load_store_contiguous() {
        auto try_contig = [](const TensorLayout& layout) {
            return (layout.is_contiguous());
        };
        for (int i = 0; i < arity; ++i) {
            if (!try_contig(m_param[i].layout))
                return false;
        }
        m_rw_size = DIVUP(m_rw_size, packed_size);
        dispatch_contiguous();
        return true;
    }
};

#define INST_DT_IBYTE(ctype)                                                \
    template <class Op, int arity>                                          \
    class UserOpInvokerToSameNdim<Op, ctype, arity>                         \
            : public UserOpInvokerToSameNdimIByteHelper<Op, ctype, arity> { \
        using Super = UserOpInvokerToSameNdimIByteHelper<Op, ctype, arity>; \
                                                                            \
    public:                                                                 \
        UserOpInvokerToSameNdim(const ElemwiseOpParamN<arity>& param,       \
                                cudaStream_t stream, const Op& op)          \
                : Super{param, stream, op} {}                               \
    }
INST_DT_IBYTE(dt_int8);
INST_DT_IBYTE(dt_uint8);
INST_DT_IBYTE(dt_qint8);
INST_DT_IBYTE(dt_quint8);
#undef INST_DT_IBYTE

//! implement general case by UserOpInvokerToSameNdim
template <class Op, typename ctype, int arity>
class UserOpInvoker : public UserOpInvokerToSameNdim<Op, ctype, arity> {
public:
    UserOpInvoker(const ElemwiseOpParamN<arity>& param, cudaStream_t stream,
                  const Op& op)
            : UserOpInvokerToSameNdim<Op, ctype, arity>(param, stream, op) {}
};

//! specialization for arity == 0
template <class Op, typename ctype>
class UserOpInvoker<Op, ctype, 0> {
public:
    UserOpInvoker(const ElemwiseOpParamN<0>& param, cudaStream_t stream,
                  const Op& op) {
        size_t size = param.size;
        typedef OpCallerNull<Op> Caller;
        Caller caller;
        caller.op = op;
        int grid_size, block_size;
        void (*fptr)(Caller, uint32_t) = cuda_kern<Caller>;
        get_launch_spec(reinterpret_cast<const void*>(fptr), size, &grid_size,
                        &block_size);
        (*fptr)<<<grid_size, block_size, 0, stream>>>(caller, size);
        after_kernel_launch();
    }
};

#define DEFINE_BRDCAST_DISPATCH_RECEIVERS(_cb_header, _cb_dispatch, _stride) \
    _cb_header(1) {                                                          \
        const ptrdiff_t* stride = _stride;                                   \
        if (!stride[0]) {                                                    \
            return _cb_dispatch(1, BCAST_FULL);                              \
        }                                                                    \
        _cb_dispatch(1, BCAST_OTHER);                                        \
    }                                                                        \
    _cb_header(2) {                                                          \
        const ptrdiff_t* stride = _stride;                                   \
        if (!stride[0] && stride[1]) {                                       \
            return _cb_dispatch(2, BCAST_10);                                \
        }                                                                    \
        if (stride[0] && !stride[1]) {                                       \
            return _cb_dispatch(2, BCAST_01);                                \
        }                                                                    \
        _cb_dispatch(2, BCAST_OTHER);                                        \
    }                                                                        \
    _cb_header(3) {                                                          \
        const ptrdiff_t* stride = _stride;                                   \
        if (!stride[0] && stride[1] && !stride[2]) {                         \
            return _cb_dispatch(3, BCAST_101);                               \
        }                                                                    \
        _cb_dispatch(3, BCAST_OTHER);                                        \
    }
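
// e.g. a 2-dim param with strides (0, s) dispatches to BCAST_10 and (s, 0)
// to BCAST_01; any stride pattern without a dedicated receiver falls back to
// BCAST_OTHER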

//! specialization for binary opr
template <class Op, typename ctype>
class UserOpInvoker<Op, ctype, 2> {
    bool m_invoked;
    const ElemwiseOpParamN<2>& m_param;
    cudaStream_t m_stream;
    const Op& m_op;

    void fallback() {
        megdnn_assert(!m_invoked);
        UserOpInvokerToSameNdim<Op, ctype, 2>(m_param, m_stream, m_op);
        m_invoked = true;
    }
    void dispatch0() {
        switch (m_param[0].layout.ndim) {
#define cb(ndim) \
    case ndim:   \
        return dispatch1_##ndim();
            MEGDNN_FOREACH_TENSOR_NDIM_SMALL(cb)
#undef cb
        }
        fallback();
    }

#define cb_header(ndim) void dispatch1_##ndim()
#define cb_dispatch(ndim, brdcast_mask) \
    dispatch2<ParamElemVisitor<ndim, ctype, brdcast_mask>>()
    DEFINE_BRDCAST_DISPATCH_RECEIVERS(cb_header, cb_dispatch,
                                      m_param[0].layout.stride)
#undef cb_header
#undef cb_dispatch

    template <class PVis0>
    void dispatch2() {
        switch (m_param[1].layout.ndim) {
#define cb(ndim) \
    case ndim:   \
        return dispatch3_##ndim<PVis0>();
            MEGDNN_FOREACH_TENSOR_NDIM_SMALL(cb)
#undef cb
        }
        fallback();
    }
#define cb_header(ndim)    \
    template <class PVis0> \
    void dispatch3_##ndim()
#define cb_dispatch(ndim, brdcast_mask) \
    do_run<PVis0, ParamElemVisitor<ndim, ctype, brdcast_mask>>()
    DEFINE_BRDCAST_DISPATCH_RECEIVERS(cb_header, cb_dispatch,
                                      m_param[1].layout.stride)
#undef cb_header
#undef cb_dispatch

    template <class PVis0, class PVis1>
    void do_run() {
        megdnn_assert(!m_invoked);
        m_invoked = true;
        typedef OpCallerBinary<Op, PVis0, PVis1> Caller;
        int grid_size, block_size;
        void (*fptr)(Caller, uint32_t) = cuda_kern<Caller>;
        size_t size = m_param.size;
        get_launch_spec(reinterpret_cast<const void*>(fptr), size, &grid_size,
                        &block_size);
        Caller caller;
        caller.op = m_op;
        caller.par0.host_init(m_param[0], grid_size, block_size);
        caller.par1.host_init(m_param[1], grid_size, block_size);
        (*fptr)<<<grid_size, block_size, 0, m_stream>>>(caller, size);
        after_kernel_launch();
    }
public:
    UserOpInvoker(const ElemwiseOpParamN<2>& param, cudaStream_t stream,
                  const Op& op)
            : m_param(param), m_stream(stream), m_op(op) {
        m_invoked = false;
        dispatch0();
        megdnn_assert(m_invoked);
    }
};

#define DEFINE_VECT_BRDCAST_DISPATCH_RECEIVERS(_cb_header, _cb_dispatch, \
                                               _stride)                  \
    DEFINE_BRDCAST_DISPATCH_RECEIVERS(_cb_header, _cb_dispatch, _stride) \
    _cb_header(4) {                                                      \
        const ptrdiff_t* stride = _stride;                               \
        if (!stride[0] && stride[1] && !stride[2] && stride[3]) {        \
            return _cb_dispatch(4, BCAST_1010);                          \
        }                                                                \
        _cb_dispatch(4, BCAST_OTHER);                                    \
    }
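
// the extra 4-dim receiver is used only on the vectorized path: strides
// (0, s1, 0, s3) map to BCAST_1010, handled by ParamVectVisitor<4, ...>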

template <class Op, typename ctype>
class UserOpInvokerBinaryIByteHelper {
private:
    bool m_invoked;
    size_t m_rw_size;
    const ElemwiseOpParamN<2>& m_param;
    cudaStream_t m_stream;
    const Op& m_op;
    using vect_type = typename VectTypeTrait<ctype>::vect_type;
    static const size_t packed_size = VectTypeTrait<ctype>::packed_size;
    bool try_vect_load_store() {
        auto try_last_contig_or_scalar = [](const TensorLayout& layout) {
            return (layout.stride[layout.ndim - 1] == 1 &&
                    layout[layout.ndim - 1] % packed_size == 0) ||
                   (layout.ndim == 1 && layout.stride[0] == 0);
        };
        for (int i = 0; i < 2; ++i) {
            if (!try_last_contig_or_scalar(m_param[i].layout))
                return false;
        }
        m_rw_size /= packed_size;
        dispatch0_vect();
        return true;
    }
    bool try_vect_load_store_contiguous() {
        auto try_contig = [](const TensorLayout& layout) {
            return (layout.is_contiguous());
        };
        for (int i = 0; i < 2; ++i) {
            if (!try_contig(m_param[i].layout))
                return false;
        }
        m_rw_size = DIVUP(m_rw_size, packed_size);
        dispatch_contiguous();
        return true;
    }
    void dispatch0() {
        switch (m_param[0].layout.ndim) {
#define cb(ndim) \
    case ndim:   \
        return dispatch1_##ndim();
            MEGDNN_FOREACH_TENSOR_NDIM_SMALL(cb)
#undef cb
        }
        fallback();
    }
    void dispatch0_vect() {
        switch (m_param[0].layout.ndim) {
#define cb(ndim) \
    case ndim:   \
        return dispatch1_vect_##ndim();
            MEGDNN_FOREACH_TENSOR_NDIM_SMALL(cb)
#undef cb
            case 4:
                return dispatch1_vect_4();
        }
        fallback();
    }
    void dispatch_contiguous() {
        m_invoked = true;
        typedef ParamVectVisitor<1, ctype, BCAST_OTHER> PVis;
        typedef OpCallerUniform<Op, 2, PVis> Caller;
        size_t size = m_rw_size;
        int grid_size, block_size;
        void (*fptr)(Caller, uint32_t) = cuda_kern<Op, 2, PVis>;
        get_launch_spec(reinterpret_cast<const void*>(fptr), size, &grid_size,
                        &block_size);

        Caller caller;
        caller.op = m_op;
        for (int i = 0; i < 2; ++i)
            caller.par[i].host_init(m_param[i], grid_size, block_size);
        (*fptr)<<<grid_size, block_size, 0, m_stream>>>(caller, m_param.size);
        after_kernel_launch();
    }
    void fallback() {
        megdnn_assert(!m_invoked);
        UserOpInvokerToSameNdim<Op, ctype, 2>(m_param, m_stream, m_op);
        m_invoked = true;
    }

#define cb_header(ndim) void dispatch1_##ndim()
#define cb_dispatch(ndim, brdcast_mask) \
    dispatch2<ParamElemVisitor<ndim, ctype, brdcast_mask>>()
    DEFINE_BRDCAST_DISPATCH_RECEIVERS(cb_header, cb_dispatch,
                                      m_param[0].layout.stride)
#undef cb_header
#undef cb_dispatch

#define cb_header(ndim) void dispatch1_vect_##ndim()
#define cb_dispatch(ndim, brdcast_mask) \
    dispatch2_vect<ParamVectVisitor<ndim, ctype, brdcast_mask>>()
    DEFINE_VECT_BRDCAST_DISPATCH_RECEIVERS(cb_header, cb_dispatch,
                                           m_param[0].layout.stride)
#undef cb_header
#undef cb_dispatch

    template <class PVis0>
    void dispatch2() {
        switch (m_param[1].layout.ndim) {
#define cb(ndim) \
    case ndim:   \
        return dispatch3_##ndim<PVis0>();
            MEGDNN_FOREACH_TENSOR_NDIM_SMALL(cb)
#undef cb
        }
        fallback();
    }
    template <class PVis0>
    void dispatch2_vect() {
        switch (m_param[1].layout.ndim) {
#define cb(ndim) \
    case ndim:   \
        return dispatch3_vect_##ndim<PVis0>();
            MEGDNN_FOREACH_TENSOR_NDIM_SMALL(cb)
#undef cb
            case 4:
                return dispatch3_vect_4<PVis0>();
        }
        fallback();
    }

#define cb_header(ndim)    \
    template <class PVis0> \
    void dispatch3_##ndim()
#define cb_dispatch(ndim, brdcast_mask) \
    do_run<PVis0, ParamElemVisitor<ndim, ctype, brdcast_mask>>()
    DEFINE_BRDCAST_DISPATCH_RECEIVERS(cb_header, cb_dispatch,
                                      m_param[1].layout.stride)
#undef cb_header
#undef cb_dispatch

#define cb_header(ndim)    \
    template <class PVis0> \
    void dispatch3_vect_##ndim()
#define cb_dispatch(ndim, brdcast_mask) \
    do_run<PVis0, ParamVectVisitor<ndim, ctype, brdcast_mask>>()
    DEFINE_VECT_BRDCAST_DISPATCH_RECEIVERS(cb_header, cb_dispatch,
                                           m_param[1].layout.stride)
#undef cb_header
#undef cb_dispatch

    template <class PVis0, class PVis1>
    void do_run() {
        megdnn_assert(!m_invoked);
        m_invoked = true;
        typedef OpCallerBinary<Op, PVis0, PVis1> Caller;
        int grid_size, block_size;
        void (*fptr)(Caller, uint32_t) = cuda_kern<Caller>;
        size_t size = m_rw_size;
        get_launch_spec(reinterpret_cast<const void*>(fptr), size, &grid_size,
                        &block_size);
        Caller caller;
        caller.op = m_op;
        caller.par0.host_init(m_param[0], grid_size, block_size);
        caller.par1.host_init(m_param[1], grid_size, block_size);
        (*fptr)<<<grid_size, block_size, 0, m_stream>>>(caller, size);
        after_kernel_launch();
    }
public:
    UserOpInvokerBinaryIByteHelper(const ElemwiseOpParamN<2>& param,
                                   cudaStream_t stream, const Op& op)
            : m_rw_size(param.size),
              m_param(param),
              m_stream(stream),
              m_op(op) {
        m_invoked = false;
        if (!try_vect_load_store_contiguous() && !try_vect_load_store()) {
            dispatch0();
        }
        megdnn_assert(m_invoked);
    }
};

#define INST_DT_IBYTE(ctype)                                                 \
    template <class Op>                                                      \
    class UserOpInvoker<Op, ctype, 2>                                        \
            : public UserOpInvokerBinaryIByteHelper<Op, ctype> {             \
        using Super = UserOpInvokerBinaryIByteHelper<Op, ctype>;             \
                                                                             \
    public:                                                                  \
        UserOpInvoker(const ElemwiseOpParamN<2>& param, cudaStream_t stream, \
                      const Op& op)                                          \
                : Super{param, stream, op} {}                                \
    }
INST_DT_IBYTE(dt_int8);
INST_DT_IBYTE(dt_uint8);
INST_DT_IBYTE(dt_qint8);
INST_DT_IBYTE(dt_quint8);
#undef INST_DT_IBYTE
#endif

#undef DEFINE_BRDCAST_DISPATCH_RECEIVERS
#undef DEFINE_VECT_BRDCAST_DISPATCH_RECEIVERS

/* f}}} */

#undef devfunc
}  // namespace elemwise_intl

/*!
 * \brief general element-wise kernel launcher
 *
 * \tparam arity number of params for the operator
 * \param param param values for the operator; must have been initialized (i.e.
 *      by calling ElemwiseOpParamN::init_from_given_tensor). The params
 *      can have arbitrary layouts, as long as they share the same total number
 *      of elements.
 * \param op callable with a signature compatible with
 *      `void op(uint32_t idx, ctype& param0, ..., ctype& param[arity - 1])`;
 *      if arity == 0, there is only an `idx` input; if ctype is one of
 *      dt_int8, dt_uint8, dt_qint8 and dt_quint8, an overload compatible with
 *      `void op(uint32_t idx, vect_type& param0, ..., vect_type& param[arity - 1])`
 *      should also be implemented for the vectorized path
 */
template <class Op, typename ctype, int arity>
void run_elemwise(const ElemwiseOpParamN<arity>& param, cudaStream_t stream,
                  const Op& op = Op());
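
/*
 * A minimal usage sketch, assuming a hypothetical ternary op (the enclosing
 * .cu file must be compiled as CUDA so the definition below is available):
 *
 *     struct FusedMulAdd3Op {
 *         __device__ void operator()(uint32_t idx, dt_float32& dst,
 *                                    dt_float32& a, dt_float32& b) {
 *             dst = a * b + dst;
 *         }
 *     };
 *
 *     ElemwiseOpParamN<3> param;  // init_from_given_tensor() already called
 *     run_elemwise<FusedMulAdd3Op, dt_float32, 3>(param, stream);
 */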

#if MEGDNN_CC_CUDA
template <class Op, typename ctype, int arity>
void run_elemwise(const ElemwiseOpParamN<arity>& param, cudaStream_t stream,
                  const Op& op) {
    param.assert_initialized();
    elemwise_intl::UserOpInvoker<Op, ctype, arity>(param, stream, op);
}

/*!
 * \brief explicit instantiation of run_elemwise for given template params;
 *      used in .cu files, so corresponding run_elemwise can be called from .cpp
 */
#define INST_RUN_ELEMWISE(Op, ctype, arity)       \
    template void run_elemwise<Op, ctype, arity>( \
            const ElemwiseOpParamN<arity>&, cudaStream_t, const Op&)
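
// e.g. for the hypothetical op sketched above, a .cu file would contain:
//     INST_RUN_ELEMWISE(FusedMulAdd3Op, dt_float32, 3);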

#endif

}  // namespace cuda
}  // namespace megdnn

// vim: ft=cpp syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}