#!/usr/bin/env python3

# Copyright (c) 2021 CINN Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid as fluid
from cinn.frontend import *
from cinn import Target
from cinn.framework import *
import unittest
import cinn
from cinn import runtime
from cinn import ir
from cinn import lang
from cinn.common import *
import numpy as np
import sys
import time
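
# Command-line handling: the GPU switch and the model directory are read from
# the tail of sys.argv via pop(), so the expected invocation (an assumption
# based on the pop() order below) is:
#   python test_resnet18.py <model_dir> <ON|OFF>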

enable_gpu = sys.argv.pop()
model_dir = sys.argv.pop()
print("enable_gpu is:", enable_gpu)
print("model_dir is:", model_dir)


class TestLoadResnetModel(unittest.TestCase):
    def setUp(self):
        if enable_gpu == "ON":
            self.target = DefaultNVGPUTarget()
        else:
            self.target = DefaultHostTarget()
        self.model_dir = model_dir
        self.x_shape = [1, 3, 224, 224]
        self.target_tensor = 'save_infer_model/scale_0'
        self.input_tensor = 'image'
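        # 'image' and 'save_infer_model/scale_0' above are the feed/fetch
        # tensor names baked into the saved inference model.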

    def get_paddle_inference_result(self, model_dir, data):
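        # Build a CPU AnalysisConfig pointing at the combined-format model: a
        # `__model__` topology file plus a single `params` file holding all
        # the weights.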
        config = fluid.core.AnalysisConfig(
            model_dir + '/__model__', model_dir + '/params'
        )
        # Run the reference on CPU with IR optimization disabled so the
        # baseline executes the graph exactly as saved.
        config.disable_gpu()
        config.switch_ir_optim(False)
        self.paddle_predictor = fluid.core.create_paddle_predictor(config)
        data = fluid.core.PaddleTensor(data)
        results = self.paddle_predictor.run([data])
        get_tensor = self.paddle_predictor.get_output_tensor(
            self.target_tensor
        ).copy_to_cpu()
        return get_tensor

    def apply_test(self):
        start = time.time()
        x_data = np.random.random(self.x_shape).astype("float32")
        self.executor = Interpreter([self.input_tensor], [self.x_shape])
        print("self.mode_dir is:", self.model_dir)
        # True means load combined model
        self.executor.load_paddle_model(
            self.model_dir, self.target, True, "resnet18"
        )
        end1 = time.time()
        print("load_paddle_model time is: %.3f sec" % (end1 - start))
        a_t = self.executor.get_tensor(self.input_tensor)
        a_t.from_numpy(x_data, self.target)
        out = self.executor.get_tensor(self.target_tensor)
        out.from_numpy(np.zeros(out.shape(), dtype='float32'), self.target)
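        # Warm-up runs so that first-run compilation/initialization cost does
        # not pollute the timing loop below.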
        for i in range(10):
            self.executor.run()

        # Timed runs: report the average latency over `repeat` executions.
        repeat = 10
        t_begin = time.perf_counter()
        for i in range(repeat):
            self.executor.run()
        t_end = time.perf_counter()
        print(
            "Repeat %d times, average Executor.run() time is: %.3f ms"
            % (repeat, (t_end - t_begin) * 1000 / repeat)
        )

        # Re-run once with fresh input and a zeroed output buffer, then fetch
        # the result for the accuracy comparison below.
        a_t.from_numpy(x_data, self.target)
        out.from_numpy(np.zeros(out.shape(), dtype='float32'), self.target)
        self.executor.run()

        out = out.numpy(self.target)
        target_result = self.get_paddle_inference_result(self.model_dir, x_data)

        print("result in test_model: \n")
        out = out.reshape(-1)
        target_result = target_result.reshape(-1)
        for i in range(0, min(out.shape[0], 200)):
            if np.abs(out[i] - target_result[i]) > 1.0:
103 104 105 106 107 108 109 110 111 112
                print(
                    "Error! ",
                    i,
                    "-th data has diff with target data:\n",
                    out[i],
                    " vs: ",
                    target_result[i],
                    ". Diff is: ",
                    out[i] - target_result[i],
                )
        # TODO(thisjiang): revert atol to 1e-3 after fix inference mul problem
        self.assertTrue(np.allclose(out, target_result, atol=1.0))

    def test_model(self):
        self.apply_test()


if __name__ == "__main__":
    unittest.main()