# coding=utf-8
from __future__ import absolute_import
from __future__ import division

import ast
import argparse
import os

import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddle.fluid.core import PaddleTensor, AnalysisConfig, create_paddle_predictor
from paddlehub.module.module import moduleinfo, runnable, serving
from paddlehub.common.paddle_helper import add_vars_prefix

from resnet50_vd_wildanimals.processor import postprocess, base64_to_cv2
from resnet50_vd_wildanimals.data_feed import reader
from resnet50_vd_wildanimals.resnet_vd import ResNet50_vd


@moduleinfo(
    name="resnet50_vd_wildanimals",
    type="CV/image_classification",
    author="baidu-vis",
    author_email="",
    summary=
    "ResNet50vd is a image classfication model, this module is trained with IFAW's self-built wild animals dataset.",
    version="1.0.0")
class ResNet50vdWildAnimals(hub.Module):
    def _initialize(self):
        self.default_pretrained_model_path = os.path.join(self.directory, "model")
        label_file = os.path.join(self.directory, "label_list.txt")
        with open(label_file, 'r', encoding='utf-8') as file:
            self.label_list = file.read().split("\n")[:-1]
        self._set_config()

    def get_expected_image_width(self):
        return 224

    def get_expected_image_height(self):
        return 224

    def get_pretrained_images_mean(self):
        im_mean = np.array([0.485, 0.456, 0.406]).reshape(1, 3)
        return im_mean

    def get_pretrained_images_std(self):
        im_std = np.array([0.229, 0.224, 0.225]).reshape(1, 3)
        return im_std

    def _set_config(self):
        """
        predictor config setting.
        """
        cpu_config = AnalysisConfig(self.default_pretrained_model_path)
        cpu_config.disable_glog_info()
        cpu_config.disable_gpu()
        self.cpu_predictor = create_paddle_predictor(cpu_config)

        try:
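            # Probe CUDA_VISIBLE_DEVICES: a GPU predictor is only created when the
            # variable is set to a valid device id; otherwise inference falls back
            # to the CPU predictor built above.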
            _places = os.environ["CUDA_VISIBLE_DEVICES"]
            int(_places[0])
            use_gpu = True
        except:
            use_gpu = False
        if use_gpu:
            gpu_config = AnalysisConfig(self.default_pretrained_model_path)
            gpu_config.disable_glog_info()
            gpu_config.enable_use_gpu(memory_pool_init_size_mb=1000, device_id=0)
            self.gpu_predictor = create_paddle_predictor(gpu_config)

    def context(self, trainable=True, pretrained=True):
        """context for transfer learning.

        Args:
            trainable (bool): Set parameters in program to be trainable.
            pretrained (bool) : Whether to load pretrained model.

        Returns:
            inputs (dict): key is 'image', corresponding value is the image tensor.
            outputs (dict): key is :
                'classification', corresponding value is the result of classification.
                'feature_map', corresponding value is the result of the layer before the fully connected layer.
            context_prog (fluid.Program): program for transfer learning.
        """
        context_prog = fluid.Program()
        startup_prog = fluid.Program()
        with fluid.program_guard(context_prog, startup_prog):
            with fluid.unique_name.guard():
                image = fluid.layers.data(name="image", shape=[3, 224, 224], dtype="float32")
                resnet_vd = ResNet50_vd()
                output, feature_map = resnet_vd.net(input=image, class_dim=len(self.label_list))

                name_prefix = '@HUB_{}@'.format(self.name)
                inputs = {'image': name_prefix + image.name}
                outputs = {'classification': name_prefix + output.name, 'feature_map': name_prefix + feature_map.name}
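                # Prefix every variable with @HUB_<module_name>@ so the module's
                # graph does not collide with variables in the caller's program
                # during transfer learning.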
                add_vars_prefix(context_prog, name_prefix)
                add_vars_prefix(startup_prog, name_prefix)
                global_vars = context_prog.global_block().vars
                inputs = {key: global_vars[value] for key, value in inputs.items()}
                outputs = {key: global_vars[value] for key, value in outputs.items()}

                place = fluid.CPUPlace()
                exe = fluid.Executor(place)
                # pretrained
                if pretrained:

                    def _if_exist(var):
                        b = os.path.exists(os.path.join(self.default_pretrained_model_path, var.name))
                        return b

                    fluid.io.load_vars(exe, self.default_pretrained_model_path, context_prog, predicate=_if_exist)
                else:
                    exe.run(startup_prog)
                # trainable
                for param in context_prog.global_block().iter_parameters():
                    param.trainable = trainable
        return inputs, outputs, context_prog

    def classification(self, images=None, paths=None, batch_size=1, use_gpu=False, top_k=1):
        """
        API for image classification.

        Args:
            images (list[numpy.ndarray]): data of images, each with shape [H, W, C], color space BGR.
            paths (list[str]): The paths of images.
            batch_size (int): batch size.
            use_gpu (bool): Whether to use gpu.
            top_k (int): Return top k results.

        Returns:
            res (list[dict]): The classification results.
        """
        if use_gpu:
            try:
                _places = os.environ["CUDA_VISIBLE_DEVICES"]
                int(_places[0])
            except:
                raise RuntimeError(
                    "Environment Variable CUDA_VISIBLE_DEVICES is not set correctly. If you wanna use gpu, please set CUDA_VISIBLE_DEVICES as cuda_device_id."
                )

        all_data = list()
        for yield_data in reader(images, paths):
            all_data.append(yield_data)

        total_num = len(all_data)
        loop_num = int(np.ceil(total_num / batch_size))

        res = list()
        for iter_id in range(loop_num):
            batch_data = list()
            handle_id = iter_id * batch_size
            for image_id in range(batch_size):
                try:
                    batch_data.append(all_data[handle_id + image_id])
                except:
                    pass
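            # The final batch may contain fewer than batch_size images; indices past
            # the end of all_data are simply skipped above.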
            # feed batch image
            batch_image = np.array([data['image'] for data in batch_data])
            batch_image = PaddleTensor(batch_image.copy())
            predictor_output = self.gpu_predictor.run([batch_image]) if use_gpu else self.cpu_predictor.run(
                [batch_image])
            out = postprocess(data_out=predictor_output[0].as_ndarray(), label_list=self.label_list, top_k=top_k)
            res += out
        return res

    def save_inference_model(self, dirname, model_filename=None, params_filename=None, combined=True):
        if combined:
            model_filename = "__model__" if not model_filename else model_filename
            params_filename = "__params__" if not params_filename else params_filename
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)

        program, feeded_var_names, target_vars = fluid.io.load_inference_model(
            dirname=self.default_pretrained_model_path, executor=exe)

        fluid.io.save_inference_model(
            dirname=dirname,
            main_program=program,
            executor=exe,
            feeded_var_names=feeded_var_names,
            target_vars=target_vars,
            model_filename=model_filename,
            params_filename=params_filename)

    @serving
    def serving_method(self, images, **kwargs):
        """
        Run as a service.
        """
        images_decode = [base64_to_cv2(image) for image in images]
        results = self.classification(images=images_decode, **kwargs)
        return results

    @runnable
    def run_cmd(self, argvs):
        """
        Run as a command.
        """
        self.parser = argparse.ArgumentParser(
            description="Run the {} module.".format(self.name),
            prog='hub run {}'.format(self.name),
            usage='%(prog)s',
            add_help=True)
        self.arg_input_group = self.parser.add_argument_group(title="Input options", description="Input data. Required")
        self.arg_config_group = self.parser.add_argument_group(
            title="Config options", description="Run configuration for controlling module behavior, not required.")
        self.add_module_config_arg()
        self.add_module_input_arg()
        args = self.parser.parse_args(argvs)
        results = self.classification(paths=[args.input_path], batch_size=args.batch_size, use_gpu=args.use_gpu)
        return results

    def add_module_config_arg(self):
        """
        Add the command config options.
        """
        self.arg_config_group.add_argument(
            '--use_gpu', type=ast.literal_eval, default=False, help="whether to use GPU or not.")
        self.arg_config_group.add_argument('--batch_size', type=ast.literal_eval, default=1, help="batch size.")
        self.arg_config_group.add_argument('--top_k', type=ast.literal_eval, default=1, help="Return top k results.")

    def add_module_input_arg(self):
        """
        Add the command input options.
        """
        self.arg_input_group.add_argument('--input_path', type=str, help="path to image.")
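

if __name__ == "__main__":
    # Minimal usage sketch (not part of the module's public API): instantiate the
    # module directly and classify a local image on CPU. "test.jpg" is a
    # placeholder path; replace it with a real image file, and make sure the
    # resnet50_vd_wildanimals package is importable (e.g. after `hub install`).
    module = ResNet50vdWildAnimals()
    results = module.classification(paths=["test.jpg"], use_gpu=False, top_k=3)
    print(results)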