/**
 * \file dnn/src/naive/argmxx/opr_impl.cpp
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include "src/naive/argmxx/opr_impl.h"

#include "src/common/reduce_helper.h"
#include "src/common/utils.h"
#include "src/naive/handle.h"

#include <cmath>
#include <limits>
#include <numeric>

namespace {

using namespace megdnn;

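// traits<is_max> supplies the comparison policy for the reduction: the
// initial "best" value and the predicate that decides whether a candidate
// beats the current best. A NaN candidate always wins, so the index of the
// first NaN along the reduced axis is what gets reported.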
template <bool is_max>
struct traits;

template <>
struct traits<true> {
    static const float init;
    static bool better_than(float lhs, float rhs) {
        return std::isnan(lhs) ? true : lhs > rhs;
    }
};
const float traits<true>::init = std::numeric_limits<float>::lowest();

template <>
struct traits<false> {
    static const float init;
    static bool better_than(float lhs, float rhs) {
        return std::isnan(lhs) ? true : lhs < rhs;
    }
};
const float traits<false>::init = std::numeric_limits<float>::max();

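// get_ABC() views src as an (A, B, C) shape with B being the reduced axis
// (param.axis); for every (a, c) pair the loop over b tracks the best value
// seen so far and records its position.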
template <typename T, bool is_max>
void exec_forward(
        _megdnn_tensor_in src, _megdnn_tensor_out dst, const ArgmxxBase::Param& param) {
    size_t A, B, C;
    reduce::get_ABC(src.layout, A, B, C, param.axis);
    for (size_t a = 0; a < A; ++a)
        for (size_t c = 0; c < C; ++c) {
            float best_val = traits<is_max>::init;
            size_t best_arg = 0;
            for (size_t b = 0; b < B; ++b) {
                float curr_val = float(src.ptr<T>()[(a * B + b) * C + c]);
                if (traits<is_max>::better_than(curr_val, best_val)) {
                    best_val = curr_val;
                    best_arg = b;
                }
            }
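            // Write the winning position along the reduced axis; the output
            // dtype of argmax/argmin is always int32.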
            dst.ptr<dt_int32>()[a * C + c] = best_arg;
        }
}

}  // anonymous namespace

namespace megdnn {
namespace naive {

void ArgmaxForwardImpl::exec(
        _megdnn_tensor_in src, _megdnn_tensor_out dst, _megdnn_workspace workspace) {
    check_exec(src.layout, dst.layout, workspace.size);
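// Dispatch on the runtime dtype of src: cb() instantiates
// exec_forward<ctype, true> (argmax) for each computing dtype, and
// MEGDNN_COMMA keeps the template-argument comma from splitting the macro
// argument.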
#define cb(DType)                                                          \
    if (src.layout.dtype.enumv() == DTypeTrait<DType>::enumv) {            \
        using ctype = typename DTypeTrait<DType>::ctype;                   \
        MEGDNN_DISPATCH_CPU_KERN(                                          \
                static_cast<HandleImpl*>(handle()),                        \
                exec_forward<ctype MEGDNN_COMMA true>(src, dst, param())); \
    }
    MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
}

void ArgminForwardImpl::exec(
        _megdnn_tensor_in src, _megdnn_tensor_out dst, _megdnn_workspace workspace) {
    check_exec(src.layout, dst.layout, workspace.size);
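// Same dtype dispatch as argmax above, but instantiating
// exec_forward<ctype, false> so the min comparator from traits<false> is used.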
#define cb(DType)                                                           \
    if (src.layout.dtype.enumv() == DTypeTrait<DType>::enumv) {             \
        using ctype = typename DTypeTrait<DType>::ctype;                    \
        MEGDNN_DISPATCH_CPU_KERN(                                           \
                static_cast<HandleImpl*>(handle()),                         \
                exec_forward<ctype MEGDNN_COMMA false>(src, dst, param())); \
    }
    MEGDNN_FOREACH_COMPUTING_DTYPE(cb)
#undef cb
}

}  // namespace naive
}  // namespace megdnn

// vim: syntax=cpp.doxygen