/**
 * \file src/opr/test/basic_arith/elemwise.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */

#include "./erfinv.h"
#include "megbrain/opr/basic_arith.h"
#include "megbrain/opr/io.h"
#include "megbrain/opr/tensor_manip.h"
#include "megbrain/test/autocheck.h"
#include "megbrain/test/helper.h"

#include <algorithm>
#include <cmath>

using namespace mgb;

namespace {
using Mode = opr::Elemwise::Mode;

using InputGenerator = Maybe<thin_function<void(HostTensorND&)>>;
// MSVC would check that None is callable, so we use this constant to replace None
const InputGenerator NONE_INPUT_GEN;

std::unordered_set<Mode, enumhash> tested_mode;

/* ======================= opr special impls ======================= */
float do_mod(float a, float b) {
    return std::fmod(a, b);
}

int do_mod(int a, int b) {
    return a % b;
}

float do_floor_div(float a, float b) {
    return std::floor(a / b);
}

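// floor division for ints: C++ '/' truncates toward zero, so when the operands
// have opposite signs and a nonzero remainder, the truncated quotient must be
// decremented by one to give the floor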
int do_floor_div(int a, int b) {
    if ((a ^ b) < 0) {
        const auto quot = a / b;
        const auto rem = a % b;
        return rem ? quot - 1 : quot;
    }
    return a / b;
}

float do_erfinv(float x) {
    return erfinvf(x);
}

float do_erfcinv(float x) {
    return erfcinvf(x);
}

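// hard-swish: x * relu6(x + 3) / 6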
float do_h_swish(float x) {
    return x * fmaxf(fminf(x + 3.f, 6.f), 0.f) / 6.f;
}

float do_h_swish_grad(float x, float y) {
    return x < -3.f ? 0.f : (x > 3.f ? y : (2.f * x + 3.f) / 6.f * y);
}

template <typename T>
T do_log_sum_exp(T a, T b) {
    return std::log(std::exp(a) + std::exp(b));
}

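// rational approximation of tanh: x * (27 + x^2) / (27 + 9 * x^2)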
float do_fast_tanh(float x) {
    return x * (27.f + x * x) / (27.f + 9.f * x * x);
}

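// analytic gradient of do_fast_tanh, (x^2 - 9)^2 / (9 * (3 + x^2)^2), multiplied
// by the incoming gradient y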
float do_fast_tanh_grad(float x, float y) {
    float x_pow2 = x * x;
    float deno = 3.f + x_pow2;
    return ((-48.f * x_pow2) / deno + 27.f + x_pow2) / (deno * 9.f) * y;
}

float do_fuse_add_h_swish(float x, float y) {
    float z = x + y;
    return z * fmaxf(fminf(z + 3.f, 6.f), 0.f) / 6.f;
}

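// shifts are only defined for ints; the generic templates are declared but never
// defined, so instantiating them for other types fails at link time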
template <typename T>
T do_shl(T, T);  // undefined
template <typename T>
T do_shr(T, T);  // undefined
int do_shl(int x, int y) {
    return x << y;
}
int do_shr(int x, int y) {
    return x >> y;
}

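// MulType maps an integer type to one wide enough to hold a full product, so
// a * b below can be computed exactly before the rounding shift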
template <typename T>
struct MulType {};
template <>
struct MulType<int8_t> {
    typedef int16_t type;
};
template <>
struct MulType<int16_t> {
    typedef int32_t type;
};
template <>
struct MulType<int32_t> {
    typedef int64_t type;
};
template <>
struct MulType<uint8_t> {
    typedef uint16_t type;
};

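// arithmetic right shift by k bits that rounds halfway cases upward: the
// discarded low bits are compared with half of 2^k to decide whether to add one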
template <typename T>
T rounding_shift_right_upward(T x, int k) {
    T mask = (T(1) << k) - 1;
    T threshold = mask >> 1;
    return (x >> k) + ((x & mask) > threshold);
}

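// rounding multiply-high with saturation: round(a * b / 2^digits); the only
// overflow case is min * min, which saturates to max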
template <typename T>
T do_round_mulh_saturate(T a, T b) {
    MEGDNN_STATIC_ASSERT(
            std::numeric_limits<T>::digits <= 32,
            "Portable RMULH is not supported for integer "
            "types larger than 32 bits.")
    MEGDNN_STATIC_ASSERT(
            std::numeric_limits<T>::is_integer,
            "Input types should be integer for RMULH")
    bool overflow = a == b && a == DTypeTrait<T>::min();
    // TODO: This really should be
    // rounding_shift_right_away_from_zero, but we haven't yet found a fast
    // way to implement it on ARM NEON. For now, we just try to align with
    // NEON's VQRDMULH and hope that it does not harm our NN badly.
    return overflow
                 ? DTypeTrait<T>::max()
                 : static_cast<T>(rounding_shift_right_upward(
                           typename MulType<T>::type(a) * typename MulType<T>::type(b),
                           std::numeric_limits<T>::digits));
}

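// d/dx [x * normcdf(x)] = normcdf(x) + x * pdf(x), scaled by the incoming gradient y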
float do_gelu_grad(float x, float y) {
    float phi = 1.f / sqrtf(2.0 * M_PI) * expf(-0.5f * x * x);
    float normcdf_v = 0.5f * (1.f + erff(x / sqrtf(2.f)));
    return y * (normcdf_v + x * phi);
}

/* ======================= basic framework ======================= */

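//! fill dest with values bounded away from zero (at least 0.1 in magnitude for
//! floats), for inputs used as divisors or at non-differentiable points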
template <typename ctype, bool stable_sign = false>
void gen_nozero(HostTensorND& dest) {
    static RNGxorshf rng{next_rand_seed()};
    auto ptr = dest.template ptr<ctype>();

    if (DTypeTrait<ctype>::category == DTypeCategory::FLOAT) {
        for (size_t i = 0, it = dest.shape().total_nr_elems(); i < it; ++i) {
            auto v = rng() / (rng.max() + 1.0) * 3 - 1.5;
            bool vsign = v > 0;
            if (stable_sign) {
                vsign = i % 2;
            }
            v = std::abs(v) + 0.1;
            ptr[i] = vsign ? v : -v;
        }
    } else {
        for (size_t i = 0, it = dest.shape().total_nr_elems(); i < it; ++i) {
            ctype v = rng() / (rng.max() + 1.0) * 65536 - 32767, vsat = i % 2 * 2 - 1;
            ptr[i] = v == 0 ? vsat : v;
        }
    }
}

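//! per-mode test configuration: this primary template provides the defaults and
//! is specialized below for modes that need special inputs or tolerances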
template <class Trait>
struct CheckerConfig {
    static constexpr bool enable_binary_inp_swap() { return true; }

    static constexpr bool allow_inp_grad(size_t idx) {
        MGB_MARK_USED_VAR(idx);
        return true;
    }

    template <typename ctype>
    static InputGenerator get_inp_gen(size_t idx) {
        MGB_MARK_USED_VAR(idx);
        return NONE_INPUT_GEN;
    }

    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 1e-2;
    }

    template <class Checker>
    static void update_checker(Checker& checker) {
        MGB_MARK_USED_VAR(checker);
    }
};

template <typename ctype>
InputGenerator get_inp_gen_f32_range(float low, float high) {
    mgb_assert(std::is_same<ctype MGB_COMMA dt_float32>::value && high - low >= 0.1);
    auto gen = [low, high](HostTensorND& dest) {
        HostTensorGenerator<dtype::Float32, RandomDistribution::UNIFORM> gen{low, high};
        dest = *gen(dest.shape());
    };
    return gen;
}

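// expand each (mode, expr) pair in the .inl files below into a trait struct
// carrying the arity, dtype support flags and a scalar reference implementation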
#define DEF_TRAIT(_mode, _expr)                                                      \
    struct _mode {                                                                   \
        static constexpr size_t ARITY = _CUR_ARITY;                                  \
        static constexpr Mode MODE = Mode::_mode;                                    \
        static constexpr bool ALLOW_INT = _ALLOW_INT;                                \
        static constexpr bool ALLOW_FLOAT = _ALLOW_FLOAT;                            \
        static constexpr bool ALLOW_BOOL = _ALLOW_BOOL;                              \
        static constexpr const char* NAME = #_mode;                                  \
        template <typename ctype>                                                    \
        static inline ctype apply(std::array<const ctype*, ARITY> inp, size_t idx) { \
            _EXPAND_PARAMS;                                                          \
            return _expr;                                                            \
        }                                                                            \
    };

#include "./elemwise_binary_trait_def.inl"
#include "./elemwise_ternary_trait_def.inl"
#include "./elemwise_unary_trait_def.inl"

#undef DEF_TRAIT

//! ensure nonzero value on some specific input
template <size_t nozero_idx, bool large_eps = true>
struct NoZeroCheckerConfig : public CheckerConfig<void> {
    static constexpr bool enable_binary_inp_swap() { return false; }

    template <typename ctype>
    static InputGenerator get_inp_gen(size_t idx) {
        if (idx != nozero_idx)
            return NONE_INPUT_GEN;
        return gen_nozero<ctype>;
    }

    template <class Opt>
    static void update_opt(Opt& opt) {
        if (large_eps)
            opt.numdiff_eps_single_inp[nozero_idx] = 0.05;
    }
};
struct NoGradCheckerConfig : public CheckerConfig<void> {
    static constexpr bool allow_inp_grad(size_t) { return false; }
};

/* ======================= unary config ======================= */
template <>
struct CheckerConfig<RELU> : public NoZeroCheckerConfig<0> {};
template <>
struct CheckerConfig<ABS> : public NoZeroCheckerConfig<0> {};
template <>
struct CheckerConfig<CEIL> : public NoGradCheckerConfig {};
template <>
struct CheckerConfig<FLOOR> : public NoGradCheckerConfig {};
template <>
struct CheckerConfig<ROUND> : public NoGradCheckerConfig {};
template <>
struct CheckerConfig<LOG> : public CheckerConfig<void> {
    template <typename ctype>
    static InputGenerator get_inp_gen(size_t) {
        return get_inp_gen_f32_range<ctype>(0.1, 4);
    }
    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 1e-2;
        opt.numdiff_max_err = 0.1;
    }
};
template <>
struct CheckerConfig<LOG1P> : public CheckerConfig<void> {
    template <typename ctype>
    static InputGenerator get_inp_gen(size_t) {
        return get_inp_gen_f32_range<ctype>(-0.2, 0.2);
    }
};
template <>
struct CheckerConfig<ACOS> : public CheckerConfig<void> {
    template <typename ctype>
    static InputGenerator get_inp_gen(size_t) {
        return get_inp_gen_f32_range<ctype>(-0.95, 0.95);
    }
    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 2e-3;
        opt.numdiff_max_err = 4e-3;
    }
};
template <>
struct CheckerConfig<ASIN> : public CheckerConfig<ACOS> {};
template <>
struct CheckerConfig<TANH> : public CheckerConfig<void> {
    template <typename ctype>
    static InputGenerator get_inp_gen(size_t) {
        return get_inp_gen_f32_range<ctype>(-5, 5);
    }
    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 2e-2;
    }
};
template <>
struct CheckerConfig<SIGMOID_GRAD> : public CheckerConfig<void> {
    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 2e-2;
    }
};
template <>
struct CheckerConfig<ERF> : public CheckerConfig<void> {
    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 2e-2;
    }
};
template <>
struct CheckerConfig<ERFINV> : public NoGradCheckerConfig {
    template <typename ctype>
    static InputGenerator get_inp_gen(size_t) {
        return get_inp_gen_f32_range<ctype>(-1, 1);
    }
    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 2e-2;
    }
};
template <>
struct CheckerConfig<ERFC> : public CheckerConfig<void> {
    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 2e-2;
    }
};
template <>
struct CheckerConfig<ERFCINV> : public NoGradCheckerConfig {
    template <typename ctype>
    static InputGenerator get_inp_gen(size_t) {
        return get_inp_gen_f32_range<ctype>(0, 2);
    }
    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 2e-2;
    }
};

template <>
struct CheckerConfig<H_SWISH> : public CheckerConfig<void> {};
template <>
struct CheckerConfig<H_SWISH_GRAD> : public NoGradCheckerConfig {};

/* ======================= binary config ======================= */
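//! keep the two inputs (or the modulus, for MOD-like modes) away from the kink
//! points of MIN/MAX/MOD so that numeric differentiation stays well-defined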
template <bool for_mod>
struct BinaryInputMinGap : public CheckerConfig<void> {
    template <typename ctype, class Checker>
    static void do_update_checker(Checker& checker) {
        auto icoord = [](const typename Checker::NumInpArray& inp) {
            static const ctype GAP{for_mod ? 0.01f : 0.1f};
            if (DTypeTrait<ctype>::category != DTypeCategory::FLOAT)
                return;
            auto p0 = inp[0]->template ptr<ctype>(), p1 = inp[1]->template ptr<ctype>();
            for (size_t i = 0, it = inp[0]->shape().total_nr_elems(); i < it; ++i) {
                if (for_mod) {
                    auto p1v = std::abs(p1[i]), mod = std::fmod(p0[i], p1v);
                    mod += mod < 0 ? p1v : 0;
                    if (mod < GAP || mod > p1v - GAP) {
                        mgb_assert(p1v > GAP * 4);
                        ctype m0, m1;
                        do {
                            p0[i] += GAP;
                            m0 = std::fmod(p0[i] - GAP, p1[i]);
                            m1 = std::fmod(p0[i] + GAP, p1[i]);
                        } while (std::abs(m1 - m0) > GAP * 2 + 1e-3);
                    }
                } else {
                    if (std::abs(p0[i] - p1[i]) < GAP) {
                        p1[i] += p0[i] < p1[i] ? GAP : -GAP;
                    }
                }
            }
        };
        checker.set_input_coordinator(icoord);
    }

    template <class Checker>
    static void update_checker(Checker& checker) {
        using ctype = typename Checker::ctype;
        if (std::is_integral<ctype>::value)
            return;
        if (std::is_same<ctype, dt_float16>::value)
            return do_update_checker<dt_float16>(checker);
        if (std::is_same<ctype, dt_float32>::value)
            return do_update_checker<dt_float32>(checker);
        mgb_assert(0);
    }
};

struct BinaryEQInput : public CheckerConfig<void> {
    static constexpr bool allow_inp_grad(size_t idx) { return idx >= 2; }

    template <class Checker>
    static void update_checker(Checker& checker) {
        using ctype = typename Checker::ctype;
        auto icoord = [](const typename Checker::NumInpArray& inp) {
            if (DTypeTrait<ctype>::category != DTypeCategory::FLOAT)
                return;
            auto p0 = inp[0]->template ptr<ctype>(), p1 = inp[1]->template ptr<ctype>();
            RNGxorshf rng{next_rand_seed()};
            for (size_t i = 0, it = inp[0]->shape().total_nr_elems(); i < it; ++i) {
                p0[i] = rng() % 3 == 0 ? p1[i] : p0[i];
            }
        };
        checker.set_input_coordinator(icoord);
    }
};

struct BinaryPlaneNoPiInput : public CheckerConfig<void> {
    template <class Checker>
    static void update_checker(Checker& checker) {
        using ctype = typename Checker::ctype;
        auto icoord = [](const typename Checker::NumInpArray& inp) {
            if (DTypeTrait<ctype>::category != DTypeCategory::FLOAT)
                return;
            auto p0 = inp[0]->template ptr<ctype>(), p1 = inp[1]->template ptr<ctype>();
            RNGxorshf rng{next_rand_seed()};
            auto maxv = rng.max() + 1.0;
            for (size_t i = 0, it = inp[0]->shape().total_nr_elems(); i < it; ++i) {
                //! To be numerically stable, r cannot be too small
                auto r = rng() / maxv * 2 + 0.5;  //! radius
                //! Avoid pi value due to periodicity
                //! Numerical diff will be wrong there
                //! Range [-pi+eps, pi-eps]
                auto t = rng() / maxv * 3.1 * 2 - 3.1;  //! angle
                //! First input is y in space
                p0[i] = r * std::sin(t);
                //! Second input is x in space
                p1[i] = r * std::cos(t);
            }
        };
        checker.set_input_coordinator(icoord);
    }
    static constexpr bool enable_binary_inp_swap() { return false; }
};
template <>
struct CheckerConfig<ATAN2> : public BinaryPlaneNoPiInput {
    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 1e-3;
        opt.numdiff_max_err = 0.02;
    }
};

template <>
struct CheckerConfig<ABS_GRAD> : public NoZeroCheckerConfig<0> {};
template <>
struct CheckerConfig<FLOOR_DIV> : public NoZeroCheckerConfig<1, false> {
    static constexpr bool allow_inp_grad(size_t) { return false; }
};
template <>
struct CheckerConfig<TRUE_DIV> : public NoZeroCheckerConfig<1, false> {
    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 1e-2;
        opt.numdiff_max_err = 0.1;
    }
};
template <>
struct CheckerConfig<EQ> : public BinaryEQInput {};
template <>
struct CheckerConfig<LEQ> : public NoGradCheckerConfig {};
template <>
struct CheckerConfig<LT> : public NoGradCheckerConfig {};
template <>
struct CheckerConfig<FUSE_ADD_H_SWISH> : public CheckerConfig<void> {};
template <>
struct CheckerConfig<SWITCH_GT0> : public NoZeroCheckerConfig<0> {};
template <>
struct CheckerConfig<POW> : public CheckerConfig<void> {
    static constexpr bool enable_binary_inp_swap() { return false; }
    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 1e-2;
        opt.numdiff_max_err = 0.06;
    }
    template <typename ctype>
    static InputGenerator get_inp_gen(size_t idx) {
        auto func = [](HostTensorND& dest) {
            dest = *HostTensorGenerator<typename DTypeTrait<ctype>::dtype>{}(
                    dest.shape());
            auto ptr = dest.ptr<ctype>();
            for (size_t i = 0, t = dest.shape().total_nr_elems(); i < t; ++i) {
                ptr[i] = std::abs(ptr[i]) + 0.1;
            }
        };
        if (idx == 0)
            return func;
        return NONE_INPUT_GEN;
    }
};
template <>
struct CheckerConfig<MAX> : public BinaryInputMinGap<false> {};
template <>
struct CheckerConfig<MIN> : public BinaryInputMinGap<false> {};
template <>
struct CheckerConfig<MOD> : public NoZeroCheckerConfig<1, false>,
                            public BinaryInputMinGap<true> {
    using NoZeroCheckerConfig<1, false>::get_inp_gen;
    using NoZeroCheckerConfig<1, false>::enable_binary_inp_swap;
    using BinaryInputMinGap<true>::update_checker;

    template <class Opt>
    static void update_opt(Opt& opt) {
        opt.numdiff_eps = 0.003;
    }

    static constexpr bool allow_inp_grad(size_t idx) { return idx == 0; }
};

template <>
struct CheckerConfig<SHL> : public CheckerConfig<void> {
    static constexpr bool enable_binary_inp_swap() { return false; }

    static constexpr bool allow_inp_grad(size_t idx) { return false; }

    template <typename ctype>
    static InputGenerator get_inp_gen(size_t);
};
template <>
struct CheckerConfig<SHR> : public CheckerConfig<SHL> {};

template <>
InputGenerator CheckerConfig<SHL>::get_inp_gen<int>(size_t idx) {
    if (!idx)
        return NONE_INPUT_GEN;
    auto gen = [](HostTensorND& dest) {
        HostTensorGenerator<dtype::Int32, RandomDistribution::UNIFORM> gen{0, 32};
        dest = *gen(dest.shape());
    };
    return gen;
}

template <>
struct CheckerConfig<FUSE_ADD_RELU> : public CheckerConfig<void> {
    template <typename ctype>
    static InputGenerator get_inp_gen(size_t) {
        return gen_nozero<ctype, true>;
    }
};

template <>
struct CheckerConfig<FAST_TANH> : public CheckerConfig<void> {
    template <typename ctype>
    static InputGenerator get_inp_gen(size_t) {
        return get_inp_gen_f32_range<ctype>(0.1, 5);
    }
};

template <>
struct CheckerConfig<FAST_TANH_GRAD> : public CheckerConfig<FAST_TANH> {
    static constexpr bool allow_inp_grad(size_t idx) {
        MGB_MARK_USED_VAR(idx);
        return false;
    }
};

template <>
struct CheckerConfig<SILU_GRAD> : public NoGradCheckerConfig {};
template <>
struct CheckerConfig<GELU_GRAD> : public NoGradCheckerConfig {};

/* ======================= ternary config ======================= */
template <>
struct CheckerConfig<COND_LEQ_MOV> : public BinaryInputMinGap<false> {};

/* ======================= test runner ======================= */
namespace detail {
template <typename dtype, class Trait>
struct enable_for_dtype_impl;

template <class Trait>
struct enable_for_dtype_impl<dtype::Float32, Trait> {
    static constexpr bool value = Trait::ALLOW_FLOAT;
};
template <>
struct enable_for_dtype_impl<dtype::Float32, void> {
    static constexpr bool value = false;
};
template <class Trait>
struct enable_for_dtype_impl<dtype::Int32, Trait> {
    static constexpr bool value = Trait::ALLOW_INT;
};
template <>
struct enable_for_dtype_impl<dtype::Int32, void> {
    static constexpr bool value = false;
};
template <class Trait>
struct enable_for_dtype_impl<dtype::Bool, Trait> {
    static constexpr bool value = Trait::ALLOW_BOOL;
};
}  // namespace detail

//! whether to enable test for specific dtype and Trait
template <typename dtype, class Trait>
constexpr bool enable_for_dtype = detail::enable_for_dtype_impl<dtype, Trait>::value;

template <typename Trait, typename dtype, bool enable = enable_for_dtype<dtype, Trait>>
struct TestRunner;

template <typename Trait, typename dtype>
struct TestRunner<Trait, dtype, true> {
    static void run();
};
template <typename Trait, typename dtype>
struct TestRunner<Trait, dtype, false> {
    static void run() {}
};
template <typename dtype>
struct TestRunner<void, dtype, false> {
    static void run() {}
};

template <typename Trait>
class TestOprBasicArithUnaryElemwise : public ::testing::Test {};
template <typename Trait>
class TestOprBasicArithBinaryElemwise : public ::testing::Test {};
template <typename Trait>
class TestOprBasicArithTernaryElemwise : public ::testing::Test {};

typedef ::testing::Types<
#define DEF_TRAIT(_mode, _expr) _mode,
#include "./elemwise_unary_trait_def.inl"
#undef DEF_TRAIT
        void  // extra void to consume last comma
        >
        UnaryTraitTypes;
TYPED_TEST_CASE(TestOprBasicArithUnaryElemwise, UnaryTraitTypes);

typedef ::testing::Types<
#define DEF_TRAIT(_mode, _expr) _mode,
#include "./elemwise_binary_trait_def.inl"
#undef DEF_TRAIT
        void  // extra void to consume last comma
        >
        BinaryTraitTypes;
TYPED_TEST_CASE(TestOprBasicArithBinaryElemwise, BinaryTraitTypes);

typedef ::testing::Types<
#define DEF_TRAIT(_mode, _expr) _mode,
#include "./elemwise_ternary_trait_def.inl"
#undef DEF_TRAIT
        void  // extra void to consume last comma
        >
        TernaryTraitTypes;
TYPED_TEST_CASE(TestOprBasicArithTernaryElemwise, TernaryTraitTypes);

}  // anonymous namespace

template <typename Trait, typename dtype>
void TestRunner<Trait, dtype, true>::run() {
    {
        Mode mode = Trait::MODE;
        // copy to temporary var to avoid undefined reference when linking
        tested_mode.insert(mode);
    }

    using ctype = typename DTypeTrait<dtype>::ctype;

    HostTensorGenerator<> gen;
    using Config = CheckerConfig<Trait>;

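    // for binary modes whose inputs are interchangeable, also build a second
    // output with swapped inputs so both operand orders are checked in one run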
    static constexpr bool TEST_REV_INP =
            Trait::ARITY == 2 &&
            Config::allow_inp_grad(0) == Config::allow_inp_grad(1) &&
            Config::enable_binary_inp_swap();
    using Checker = AutoOprChecker<Trait::ARITY, TEST_REV_INP + 1, dtype>;
    auto make_graph = [&](const typename Checker::SymInpArray& inputs) {
        typename Checker::SymOutArray out;
        SymbolVarArray vinp(inputs.begin(), inputs.end());
        out[0] = opr::Elemwise::make(vinp, Trait::MODE);
        if (TEST_REV_INP) {
            std::swap(vinp[0], vinp[1]);
            out[1] = opr::Elemwise::make(vinp, Trait::MODE);
        }
        return out;
    };

    auto fwd = [&](typename Checker::NumOutArray& dest,
                   typename Checker::NumInpArray inp) {
        dest[0].resize(inp[0]->shape());
        if (TEST_REV_INP)
            dest[1].resize(inp[0]->shape());

        std::array<const ctype*, Trait::ARITY> iptr;
        for (size_t i = 0; i < Trait::ARITY; ++i)
            iptr[i] = inp[i]->template ptr<ctype>();

        size_t sz = dest[0].shape().total_nr_elems();

        ctype* optr = dest[0].template ptr<ctype>();
        for (size_t i = 0; i < sz; ++i)
            optr[i] = Trait::apply(iptr, i);

        if (TEST_REV_INP) {
            std::swap(iptr[0], iptr[1]);
            ctype* optr = dest[1].template ptr<ctype>();
            for (size_t i = 0; i < sz; ++i)
                optr[i] = Trait::apply(iptr, i);
        }
    };

    Checker checker{make_graph, fwd};
    checker.set_extra_err_msg(ssprintf("mode=%s", Trait::NAME));
    for (size_t i = 0; i < Trait::ARITY; ++i) {
        auto func = Config::template get_inp_gen<ctype>(i);
        if (func.valid())
            checker.set_input_generator(i, func.val());

        checker.set_input_allow_grad(i, Config::allow_inp_grad(i));
    }

    TensorShape shapes[] = {{1}, {23, 3}, {666}};
    typename Checker::RunOptions opt;
    Config::update_opt(opt);
    Config::update_checker(checker);
    for (auto&& ishp : shapes) {
        typename Checker::ShapeInpArray inp;
        std::fill(inp.begin(), inp.end(), ishp);
        checker.run(inp, opt);
    }
}

TYPED_TEST(TestOprBasicArithUnaryElemwise, Int32) {
    TestRunner<TypeParam, dtype::Int32>::run();
}
TYPED_TEST(TestOprBasicArithBinaryElemwise, Int32) {
    TestRunner<TypeParam, dtype::Int32>::run();
}
TYPED_TEST(TestOprBasicArithTernaryElemwise, Int32) {
    TestRunner<TypeParam, dtype::Int32>::run();
}

TYPED_TEST(TestOprBasicArithUnaryElemwise, Float32) {
    set_rand_seed(19931102);
    TestRunner<TypeParam, dtype::Float32>::run();
}
TYPED_TEST(TestOprBasicArithBinaryElemwise, Float32) {
    set_rand_seed(19931150);
    TestRunner<TypeParam, dtype::Float32>::run();
}
TYPED_TEST(TestOprBasicArithTernaryElemwise, Float32) {
    set_rand_seed(19931102);
    TestRunner<TypeParam, dtype::Float32>::run();
}

TEST(TestOprBasicArithElemwise, CheckAllModeTested) {
    size_t nr_member = opr::Elemwise::Param::MODE_NR_MEMBER;
    ASSERT_EQ(nr_member, tested_mode.size() + 4);
    // Not using TestRunner: NOT, AND, OR, XOR
}
#define TEST_OPR_BASIC_ARITH_UNARY_BOOL(_mode, _op)                  \
    TEST(TestOprBasicArithElemwise, _mode) {                         \
        HostTensorGenerator<dtype::Bool> gen;                        \
        auto host_x = gen({2, 1});                                   \
        auto ptr = host_x->ptr<dt_bool>();                           \
        for (size_t i = 0; i < 2; ++i) {                             \
            ptr[i] = (i & 1);                                        \
        }                                                            \
        auto graph = ComputingGraph::make();                         \
        using Mode = opr::Elemwise::Mode;                            \
        auto x = opr::Host2DeviceCopy::make(*graph, host_x),         \
             y = opr::Elemwise::make({x}, Mode::_mode);              \
        HostTensorND host_y;                                         \
        auto func = graph->compile({make_callback_copy(y, host_y)}); \
        func->execute();                                             \
        ASSERT_EQ(TensorShape({2, 1}), host_y.shape());              \
        auto ptry = host_y.ptr<dt_bool>();                           \
        for (int i = 0; i < 2; i++) {                                \
            ASSERT_EQ(_op ptr[i], ptry[i]);                          \
        }                                                            \
    }

TEST_OPR_BASIC_ARITH_UNARY_BOOL(NOT, !)

#define TEST_OPR_BASIC_ARITH_BINARY_BOOL(_mode, _op)                         \
    TEST(TestOprBasicArithElemwise, _mode) {                                 \
        HostTensorGenerator<dtype::Bool> gen;                                \
        auto host_x1 = gen({2, 2}), host_x2 = gen({2, 2});                   \
        auto ptr1 = host_x1->ptr<dt_bool>(), ptr2 = host_x2->ptr<dt_bool>(); \
        for (size_t i = 0; i < 4; ++i) {                                     \
            ptr1[i] = (i < 2);                                               \
            ptr2[i] = (i & 1);                                               \
        }                                                                    \
        auto graph = ComputingGraph::make();                                 \
        using Mode = opr::Elemwise::Mode;                                    \
        auto x1 = opr::Host2DeviceCopy::make(*graph, host_x1),               \
             x2 = opr::Host2DeviceCopy::make(*graph, host_x2),               \
             y = opr::Elemwise::make({x1, x2}, Mode::_mode);                 \
        HostTensorND host_y;                                                 \
        auto func = graph->compile({make_callback_copy(y, host_y)});         \
        func->execute();                                                     \
        ASSERT_EQ(TensorShape({2, 2}), host_y.shape());                      \
        auto ptry = host_y.ptr<dt_bool>();                                   \
        for (int i = 0; i < 4; i++) {                                        \
            ASSERT_EQ(ptr1[i] _op ptr2[i], ptry[i]);                         \
        }                                                                    \
    }

TEST_OPR_BASIC_ARITH_BINARY_BOOL(AND, &&)
TEST_OPR_BASIC_ARITH_BINARY_BOOL(OR, ||)
TEST_OPR_BASIC_ARITH_BINARY_BOOL(XOR, ^)
TEST_OPR_BASIC_ARITH_BINARY_BOOL(LT, <)
TEST_OPR_BASIC_ARITH_BINARY_BOOL(LEQ, <=)
TEST_OPR_BASIC_ARITH_BINARY_BOOL(EQ, ==)

TEST(TestOprBasicArithElemwise, FuseMulAdd3Shapes) {
    using Checker = AutoOprChecker<3, 1>;

    opr::Elemwise* opr;
    auto make_graph =
            [&](const typename Checker::SymInpArray& i) -> Checker::SymOutArray {
        i[0].node()->owner_graph()->options().graph_opt_level = 0;
        auto ret = opr::Elemwise::make(i, Mode::FUSE_MUL_ADD3);
        opr = &ret.node()->owner_opr()->cast_final_safe<opr::Elemwise>();
        return {ret};
    };

    auto fwd = [&](typename Checker::NumOutArray& dest,
                   typename Checker::NumInpArray inp) {
        auto graph = ComputingGraph::make();
        graph->options().graph_opt_level = false;
        auto i = [&](size_t idx) {
            return opr::Host2DeviceCopy::make(*graph, inp[idx]);
        };
        auto ans = i(0) * i(1) + i(2);
        graph->compile({make_callback_copy(ans, dest[0])})->execute();
    };

    Checker checker{make_graph, fwd};
    checker.run({TensorShape{1, 2}, {2, 1}, {1, 2}})
            .run({TensorShape{1, 2}, {2, 1}, {1}});
    ASSERT_FALSE(opr->fuse_badlayout_warn_printed());
    checker.run({TensorShape{1, 1, 4}, {1, 3, 1}, {2, 1, 1}});
    ASSERT_TRUE(opr->fuse_badlayout_warn_printed());
}

TEST(TestOprBasicArithElemwise, FuseMulAdd4Shapes) {
    using Checker = AutoOprChecker<4, 1>;

    opr::Elemwise* opr;
    auto make_graph =
            [&](const typename Checker::SymInpArray& i) -> Checker::SymOutArray {
        i[0].node()->owner_graph()->options().graph_opt_level = 0;
        auto ret = opr::Elemwise::make(i, Mode::FUSE_MUL_ADD4);
        opr = &ret.node()->owner_opr()->cast_final_safe<opr::Elemwise>();
        return {ret};
    };

    auto fwd = [&](typename Checker::NumOutArray& dest,
                   typename Checker::NumInpArray inp) {
        auto graph = ComputingGraph::make();
        graph->options().graph_opt_level = false;
        auto i = [&](size_t idx) {
            return opr::Host2DeviceCopy::make(*graph, inp[idx]);
        };
        auto ans = i(0) * i(1) + i(2) * i(3);
        graph->compile({make_callback_copy(ans, dest[0])})->execute();
    };

    Checker checker{make_graph, fwd};
    checker.run({TensorShape{1, 2}, {2, 1}, {1, 2}, {2, 1}})
            .run({TensorShape{1, 2, 1, 2, 1, 2},
                  {2, 1, 2, 1, 2, 1},
                  {2, 1, 2, 1, 2, 1},
                  {1, 2, 1, 2, 1, 2}});
    ASSERT_FALSE(opr->fuse_badlayout_warn_printed());
    checker.run({TensorShape{1, 2}, {2, 1}, {2, 2}, {2, 2}});
    ASSERT_TRUE(opr->fuse_badlayout_warn_printed());
}

TEST(TestOprBasicArithElemwise, WritableFwdForSameStorage) {
    HostTensorGenerator<> gen;

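    // sub0 = x[:idx] and sub1 = x[-idx:] overlap iff idx_val > 50; the output may
    // forward (and overwrite) an input's storage only in the disjoint case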
    auto run = [&](int idx_val, bool should_overwrite) {
        auto host_x = gen({100});
        auto make_y = [&](ComputingGraph& graph) {
            using S = opr::Subtensor;
            auto x = opr::Host2DeviceCopy::make_no_fwd(graph, host_x),
                 idx = x.make_scalar(idx_val),
                 sub0 = S::make(x, {S::AxisIndexer::make_interval(0, None, idx, None)}),
                 sub1 = S::make(
                         x, {S::AxisIndexer::make_interval(0, -idx, None, None)}),
                 y = sub0 + sub1;
            auto chk_overwrite = [sub0, sub1, y]() {
                auto py = y.node()->prev_dev_ptr();
                return sub0.node()->prev_dev_ptr() == py ||
                       sub1.node()->prev_dev_ptr() == py;
            };
            return std::make_pair(y, chk_overwrite);
        };
        auto g0 = ComputingGraph::make(), g1 = ComputingGraph::make();
        g1->options().seq_opt.enable_mem_plan_opt = false;
        auto y0 = make_y(*g0), y1 = make_y(*g1);
        HostTensorND host_y0, host_y1;
        auto f0 = g0->compile({make_callback_copy(y0.first, host_y0)}),
             f1 = g1->compile({make_callback_copy(y1.first, host_y1)});

        f0->execute();
        f1->execute();
        ASSERT_EQ(host_y1.shape(), TensorShape{static_cast<size_t>(idx_val)});
        MGB_ASSERT_TENSOR_EQ(host_y1, host_y0);
        ASSERT_EQ(should_overwrite, y0.second());
        ASSERT_FALSE(y1.second());
    };

    run(10, true);
    run(90, false);
}

TEST(TestOprBasicArithElemwise, NonContigInput) {
    HostTensorGenerator<> gen;

    auto graph = ComputingGraph::make();
    constexpr size_t SIZE = 100;
    auto host_x = gen({SIZE});
    using S = opr::Subtensor;
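    // slice with step 2 so xsub is a non-contiguous view of x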
    auto x = opr::Host2DeviceCopy::make(*graph, host_x),
         xsub = S::make(
                 x, {S::AxisIndexer::make_interval(0, None, None, x.make_scalar(2))}),
         y = xsub + x.make_scalar(1.f);
    HostTensorND host_y;
    auto func = graph->compile({make_callback_copy(y, host_y)});
    func->execute();
    ASSERT_FALSE(xsub.node()->dev_tensor().layout().is_contiguous());

    ASSERT_EQ(SIZE / 2, host_y.layout().total_nr_elems());
    auto px = host_x->ptr<float>(), py = host_y.ptr<float>();
    for (size_t i = 0; i < SIZE / 2; ++i) {
        MGB_ASSERT_FLOAT_EQ(px[i * 2] + 1, py[i]);
    }
}

TEST(TestOprBasicArithElemwise, CommutableDedup) {
    auto cn = CompNode::load("xpux");
    auto graph = ComputingGraph::make();
    auto host_x = std::make_shared<HostTensorND>(cn, TensorShape{100}),
         host_y = std::make_shared<HostTensorND>(cn, TensorShape{100});
    auto x = opr::Host2DeviceCopy::make(*graph, host_x),
         y = opr::Host2DeviceCopy::make(*graph, host_y);
    auto mk = [](Mode mode, SymbolVar x, SymbolVar y) {
        return opr::Elemwise::make({x, y}, mode);
    };
#define CHK(_a, _b) ASSERT_EQ((_a).node(), (_b).node())
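    // commutative modes must be deduplicated to the same node regardless of
    // operand order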
    CHK(x + y, y + x);
    CHK(x * y, y * x);
    CHK(mk(Mode::EQ, x, y), mk(Mode::EQ, y, x));
    CHK(mk(Mode::MIN, x, y), mk(Mode::MIN, y, x));
    CHK(mk(Mode::MAX, x, y), mk(Mode::MAX, y, x));
    CHK(mk(Mode::LOG_SUM_EXP, x, y), mk(Mode::LOG_SUM_EXP, y, x));
    CHK(x < y, y > x);
#undef CHK
    ASSERT_NE((x - y).node(), (y - x).node());
}

TEST(TestLayoutUtil, CollectiveCollapse) {
    using namespace opr;
    auto shp2layout = [](const TensorShapeArray& tshps) {
        TensorLayoutArray tlayouts(tshps.size());
        for (size_t i = 0; i < tshps.size(); i++) {
            tlayouts[i] = TensorLayout(tshps[i], dtype::Float32());
        }
        return tlayouts;
    };
    auto check = [](const TensorLayoutArray& res, const TensorLayoutArray& std) {
        for (size_t i = 0; i < res.size(); i++) {
            ASSERT_EQ(std[i], res[i]);
        }
    };
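    // collective_collapse merges adjacent axes that can be merged consistently
    // across all the given layouts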
    TensorShapeArray tshps1 = {{3, 3}, {3, 3}, {3, 3}};
    auto cc_res1 = Elemwise::collective_collapse(shp2layout(tshps1));
    TensorShapeArray std_res1 = {{9}, {9}, {9}};
    check(cc_res1, shp2layout(std_res1));

    TensorShapeArray tshps2 = {{3, 3, 3}, {1, 3, 3}};
    auto cc_res2 = Elemwise::collective_collapse(shp2layout(tshps2));
    TensorShapeArray std_res2{{3, 9}, {1, 9}};
    check(cc_res2, shp2layout(std_res2));

    TensorShapeArray tshp3 = {{3, 3, 3}, {3, 3, 1}};
    auto cc_res3 = Elemwise::collective_collapse(shp2layout(tshp3));
    TensorShapeArray std_res3{{9, 3}, {9, 1}};
    check(cc_res3, shp2layout(std_res3));

    TensorShapeArray tshp4 = {{3, 3, 3, 3}, {1, 3, 3, 1}};
    auto cc_res4 = Elemwise::collective_collapse(shp2layout(tshp4));
    TensorShapeArray std_res4{{3, 9, 3}, {1, 9, 1}};
    check(cc_res4, shp2layout(std_res4));

    TensorLayoutArray inp5 = {
            TensorLayout(TensorShape{3, 3}, {1, 3}, dtype::Float32()),
            TensorLayout(TensorShape{3, 3}, {1, 3}, dtype::Float32())};
    auto cc_res5 = Elemwise::collective_collapse(inp5);
    auto std_res5 = inp5;
    check(cc_res5, std_res5);
}

TEST(TestOprBasicArithElemwise, EmptyInputOutputUnary) {
    HostTensorGenerator<> gen;
    auto graph = ComputingGraph::make();
    auto host_x = gen({3, 0, 1, 3});
    auto x = opr::Host2DeviceCopy::make(*graph, host_x),
         y = opr::Elemwise::make(
                 {x}, opr::Elemwise::Param(opr::Elemwise::Param::Mode::RELU));
    HostTensorND host_y;
    auto func = graph->compile({make_callback_copy(y, host_y)});

    ASSERT_NO_THROW(func->execute().wait());
    ASSERT_TRUE(host_y.empty());
    ASSERT_TRUE(host_y.shape().is_empty());
    MGB_ASSERT_SHAPE_EQ(host_y.shape(), TensorShape({3, 0, 1, 3}));
}

TEST(TestOprBasicArithElemwise, EmptyInputOutputBinary) {
    HostTensorGenerator<> gen;
    auto graph = ComputingGraph::make();
    auto host_x = gen({0, 8, 1, 7}), host_y = gen({0, 8, 1, 7});

    auto x = opr::Host2DeviceCopy::make(*graph, host_x),
         y = opr::Host2DeviceCopy::make(*graph, host_y), z = x + y;
    HostTensorND host_z;
    auto func = graph->compile({make_callback_copy(z, host_z)});

    // Invalid broadcast
    host_y->resize({0, 9, 1, 7});
    ASSERT_ANY_THROW(func->execute().wait());

    // Broadcast to 0
    host_y->resize({1, 8, 0, 7});
    ASSERT_NO_THROW(func->execute().wait());
    ASSERT_TRUE(host_z.empty());
    ASSERT_TRUE(host_z.shape().is_empty());
    MGB_ASSERT_SHAPE_EQ(host_z.shape(), TensorShape({0, 8, 0, 7}));

    // Broadcast to 0 (2)
    host_y->resize({2, 8, 1, 7});
    ASSERT_NO_THROW(func->execute().wait());
    ASSERT_TRUE(host_z.empty());
    ASSERT_TRUE(host_z.shape().is_empty());
    MGB_ASSERT_SHAPE_EQ(host_z.shape(), TensorShape({0, 8, 1, 7}));

    // Scalar broadcast
    z = x + x.make_scalar(1.f);
    func = graph->compile({make_callback_copy(z, host_z)});
    ASSERT_NO_THROW(func->execute().wait());
    ASSERT_TRUE(host_z.empty());
    ASSERT_TRUE(host_z.shape().is_empty());
    MGB_ASSERT_SHAPE_EQ(host_z.shape(), TensorShape({0, 8, 1, 7}));
}

TEST(TestOprBasicArithElemwise, PerformEmptyIO) {
    auto cn = CompNode::load("xpu0");
    HostTensorGenerator<> gen;
    auto host_x1 = gen({2, 0, 3, 4}), host_x2 = gen({1});
    auto dev_x1 = std::make_shared<DeviceTensorND>(cn),
         dev_x2 = std::make_shared<DeviceTensorND>(cn);
    dev_x1->copy_from(*host_x1);
    dev_x2->copy_from(*host_x2);

    auto dev_y = std::make_shared<DeviceTensorND>(cn, dev_x1->dtype());
    dev_y->resize(dev_x1->shape());
    auto&& dnn_opr = opr::intl::create_megdnn_opr<megdnn::Elemwise>(cn);

    // test unary mode
    for (auto mode : {Mode::NEGATE, Mode::EXP, Mode::LOG}) {
        SmallVector<DeviceTensorND> inputs = {*dev_x1};
        ASSERT_NO_THROW(opr::Elemwise::perform(mode, *dev_y, inputs, dnn_opr));
        ASSERT_TRUE(dev_y->empty());
        ASSERT_TRUE(dev_y->shape().is_empty());
        MGB_ASSERT_SHAPE_EQ(dev_y->shape(), dev_x1->shape());
    }

    // test binary mode
    for (auto mode : {Mode::ADD, Mode::MUL, Mode::LT}) {
        SmallVector<DeviceTensorND> inputs = {*dev_x1, *dev_x2};
        ASSERT_NO_THROW(opr::Elemwise::perform(mode, *dev_y, inputs, dnn_opr));
        ASSERT_TRUE(dev_y->empty());
        ASSERT_TRUE(dev_y->shape().is_empty());
        MGB_ASSERT_SHAPE_EQ(dev_y->shape(), dev_x1->shape());
    }
}

// vim: syntax=cpp.doxygen foldmethod=marker foldmarker=f{{{,f}}}