#!/usr/bin/env python3

# Copyright (c) 2021 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest
import numpy as np
from op_test import OpTest, OpTestTool
from op_test_helper import TestCaseHelper
import paddle
import paddle.nn.functional as F
import cinn
from cinn.frontend import *
from cinn.common import *


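# Compares Paddle's F.gelu against the forward output of CINN's NetBuilder
# gelu op for every case generated by the TestCaseHelper suites below.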
@OpTestTool.skip_if(not is_compiled_with_cuda(),
                    "x86 test will be skipped due to timeout.")
class TestGeluOp(OpTest):
    def setUp(self):
        print(f"\nRunning {self.__class__.__name__}: {self.case}")
        self.prepare_inputs()

    def prepare_inputs(self):
        self.inputs = {
            "x":
            self.random(
                shape=self.case["x_shape"], dtype=self.case["x_dtype"]),
            # dout is the upstream gradient passed to paddle.grad, so it must
            # have the same shape as the forward output (i.e. the same as x).
            "dout":
            self.random(
                shape=self.case["x_shape"], dtype=self.case["x_dtype"])
        }

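    # Reference result: run F.gelu in Paddle eager mode and record both the
    # output and its gradient with respect to x.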
    def build_paddle_program(self, target):
        x = paddle.to_tensor(self.inputs["x"], stop_gradient=False)
        out = F.gelu(x)

        self.paddle_outputs = [out]
        self.paddle_grads = self.get_paddle_grads([out], [x],
                                                  [self.inputs["dout"]])

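    # CINN result: build an equivalent single-op program with NetBuilder and
    # execute it; only the forward output is collected for comparison.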
    def build_cinn_program(self, target):
        builder = NetBuilder("gelu")
        x = builder.create_input(
            self.nptype2cinntype(self.case["x_dtype"]), self.case["x_shape"],
            "x")
        out = builder.gelu(x)
        prog = builder.build()
        forward_res = self.get_cinn_output(prog, target, [x],
                                           [self.inputs["x"]], [out])

        self.cinn_outputs = forward_res

    def test_check_results(self):
        self.check_outputs_and_grads()


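# Sweeps gelu over a range of ranks and sizes with a fixed float32 dtype.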
class TestGeluShape(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestGeluOp"
        self.cls = TestGeluOp
        self.inputs = [{
            "x_shape": [1024],
        }, {
            "x_shape": [512, 256],
        }, {
            "x_shape": [128, 64, 32],
        }, {
            "x_shape": [16, 8, 4, 2],
        }, {
            "x_shape": [16, 8, 4, 2, 1],
        }, {
            "x_shape": [1],
        }, {
            "x_shape": [1, 1, 1, 1, 1],
        }]
        self.dtypes = [{
            "x_dtype": "float32",
        }]
        self.attrs = []


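# Sweeps gelu over the supported floating-point dtypes with a fixed shape.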
class TestGeluDtype(TestCaseHelper):
    def init_attrs(self):
        self.class_name = "TestGeluOp"
        self.cls = TestGeluOp
        self.inputs = [{
            "x_shape": [32, 64],
        }]
        self.dtypes = [
            {
                "x_dtype": "float64",
            },
            {
                "x_dtype": "float32",
            },
            {
                "x_dtype": "float16",
            },
        ]
        self.attrs = []


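# Run both generated suites when the file is executed directly.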
if __name__ == "__main__":
    TestGeluShape().run()
    TestGeluDtype().run()