#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import op_test
import numpy as np
import unittest
import paddle
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.distributed.models.moe import utils


def count(x, upper_range):
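    """Reference implementation: count occurrences of each index in [0, upper_range).

    Entries outside that range (the tests generate values down to -1) are ignored.
    """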
    res = np.zeros((upper_range, )).astype(int)
    for i in x.reshape(-1):
        if 0 <= i < len(res):
            res[i] += 1
    return res


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestExpertCountOpInt64(op_test.OpTest):
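    """Checks the number_count op's GPU forward output against the NumPy reference."""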
    def setUp(self):
        expert_num = 16
        self.op_type = "number_count"
        x = np.random.randint(-1, expert_num, size=(1000, 2)).astype('int64')
        self.inputs = {'gate_idx': x}
        self.outputs = {'Out': count(x, expert_num)}
        self.attrs = {"upper_range": expert_num}

    def test_forward(self):
        self.check_output_with_place(paddle.CUDAPlace(0))


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestExpertCountAPI(unittest.TestCase):
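    """Checks the _number_count Python API in static-graph and dygraph modes."""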
    def setUp(self):
        self.upper_range = 320
        self.x = np.random.randint(
            -1, self.upper_range, size=(6000, 200)).astype('int64')
        self.out = count(self.x, self.upper_range)
        self.place = paddle.CUDAPlace(0)

    def test_api_static(self):
        paddle.enable_static()
        with paddle.static.program_guard(paddle.static.Program()):
            x = paddle.fluid.data('x', self.x.shape, dtype="int64")
            out = utils._number_count(x, self.upper_range)
            exe = paddle.static.Executor(self.place)
            res = exe.run(feed={'x': self.x}, fetch_list=[out])
            assert np.allclose(res, self.out)

    def test_api_dygraph(self):
        paddle.disable_static()
        x = paddle.to_tensor(self.x)
        out = utils._number_count(x, self.upper_range)
        assert np.allclose(out.numpy(), self.out)


if __name__ == '__main__':
    paddle.enable_static()
    unittest.main()