#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import os
import numpy as np
from .. import core
from ..framework import Program
from ..executor import global_scope


class InferenceTranspiler(object):
    '''
    Convert the fluid program to an optimized inference program.

    There are several optimizations:

      - fuse convolution and batch normalization
      - fuse batch normalization and relu (MKLDNN only)
      - fuse convolution and bias (elementwise_add) (MKLDNN only)

    Examples:

    .. code-block:: python

        # Since InferenceTranspiler will modify the original program,
        # clone it before use.
        inference_transpiler_program = program.clone()
        t = fluid.InferenceTranspiler()
        t.transpile(inference_transpiler_program, place)
    '''

    def transpile(self, program, place, scope=None):
        '''
        Run the transpiler.

        Args:
            program (Program): program to transpile
            place (Place): inference place
            scope (Scope|None): inference Scope
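
        Examples:

        .. code-block:: python

            # A minimal usage sketch; ``program`` and ``place`` are assumed
            # to come from the surrounding inference setup, and the
            # FLAGS_use_mkldnn environment variable selects the MKLDNN
            # passes instead of the batch_norm folding pass.
            inference_program = program.clone()
            t = fluid.InferenceTranspiler()
            t.transpile(inference_program, place, scope=fluid.global_scope())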
        '''
        if not isinstance(program, Program):
            raise TypeError("program should be a Program")
        if not isinstance(place, core.CPUPlace) and not isinstance(
                place, core.CUDAPlace):
            raise TypeError("place should be a CPUPlace or CUDAPlace")
        if scope is None:
            scope = global_scope()
        if not isinstance(scope, core.Scope):
            raise TypeError("scope should be a Scope or None")
        # Note: bool() on any non-empty string is True, so parse the flag
        # value explicitly ("0" and "false" disable it).
        use_mkldnn = os.getenv("FLAGS_use_mkldnn", "0").lower() in ("1", "true")
        if use_mkldnn:
            self._fuse_relu_mkldnn(program)
            self._fuse_conv_bias_mkldnn(program)
        else:
            self._fuse_batch_norm(program, place, scope)

    def _fuse_relu_mkldnn(self, program):
        '''
        Transpile the program by fusing the relu activation into batch_norm
        for MKLDNN inference.

        A relu activation that follows a batch_norm OP can be fused in by
        setting the ``fuse_with_relu`` attribute on the batch_norm OP.

        The result of the fusion is:

        - before:

          - batch_norm->relu->any_other_op

        - after:

          - batch_norm->any_other_op

        :param program: program to transpile
        :type program: Program
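
        A numeric sketch of what the fused kernel then computes (numpy,
        illustrative only; eps and the running statistics are omitted):

        .. code-block:: python

            import numpy as np

            x = np.random.randn(8).astype('float32')
            a, b = 1.5, 0.2                        # learned scale and shift
            bn = (x - x.mean()) / x.std() * a + b  # batch_norm
            out = np.maximum(bn, 0.0)              # relu folded into the kernel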
        '''
        self.block = program.block(0)

        i = 0
        while i < len(self.block.ops) - 1:
            current_op = self.block.ops[i]
            if current_op.type in ['batch_norm']:
                next_op = self.block.ops[i + 1]
                if next_op.type == 'relu':
                    # modify bnorm OP to include relu
                    current_op.set_attr("fuse_with_relu", True)
                    # remove relu OP
                    self.block._remove_op(i + 1)
            i = i + 1

        self._remove_unused_var()
        # TODO(luotao): use clone() method to flush the program.desc in force,
        # since some large program.desc will not be flushed immediately.
        # And a better solution will be considered later.
        program = program.clone()
    def _fuse_conv_bias_mkldnn(self, program):
        '''
        Transpile the program by fusing convolution and elementwise_add.

        Replace conv2d and elementwise_add ops with a new conv2d op
        based on an old conv2d op and the ``Bias`` taken from
        elementwise_add.

        For input :math:`X`:

        - Conv process:            :math:`X = input * W`
        - Elementwise_add process: :math:`X = X + bias`

        After fusing into one operation:

        .. math::

            X = input * W + bias

        The operator transformation is:

        - before:

          - conv->elementwise_add->any_other_op

        - after:

          - conv->any_other_op

        The transpile stages are:

        1. Extract bias and output variables from elementwise_add.
        2. Extract Input, Weight and attributes from conv op.
        3. Create a new convolution op based on extracted params.
        4. Remove old conv op.
        5. Remove elementwise_add.
        6. Remove unused variables.

        Args:
            program (Program): program to transpile
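
        The fused conv2d must reproduce the two-op result exactly; a numpy
        sketch of that identity, with a 1x1 convolution modelled as a matmul:

        .. code-block:: python

            import numpy as np

            inp = np.random.rand(4, 3).astype('float32')  # N x C_in
            w = np.random.rand(3, 2).astype('float32')    # C_in x C_out
            bias = np.random.rand(2).astype('float32')    # from elementwise_add
            two_ops = np.add(inp.dot(w), bias)  # conv2d, then elementwise_add
            one_op = inp.dot(w) + bias          # single conv2d with Bias input
            assert np.allclose(two_ops, one_op)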

        '''
        self.block = program.block(0)

        i = 0
        while i < len(self.block.ops) - 2:
            current_op = self.block.ops[i]
            next_op = self.block.ops[i + 1]
            # conv2d with bias
            if current_op.type in ['conv2d'] and \
               next_op.type in ['elementwise_add']:
                self._fuse_conv_bias(i, current_op, next_op)
                self.block._remove_op(i + 1)  # Remove old conv
                self.block._remove_op(i + 1)  # Remove elementwise_add
                i = i + 1
            i = i + 1

        self._remove_unused_var()
        # TODO(luotao): use clone() method to flush the program.desc in force,
        # since some large program.desc will not be flushed immediately.
        # And a better solution will be considered later.
        program = program.clone()

    def _fuse_batch_norm(self, program, place, scope):
        '''
        Transpile the program by fusing batch normalization into the
        preceding layer.

        A batch normalization op that follows a convolution or fully
        connected layer can be folded into that layer. Doing so speeds up
        the forward pass, especially in mobile or embedded environments.

        For input :math:`X`:

        - Conv process:        :math:`X = input * W + bias`
        - Batch norm process:  :math:`X' = (X - mean) / std`
        - Scale Process:       :math:`Y = a * X' + b`

        After fusing into one operation:

        .. math::

            Y &= (input * W + bias - mean) / std * a + b \\\\
              &= input * a * W / std + ((bias - mean) / std * a + b)
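
        A quick numeric check of this identity (numpy, scalars for brevity):

        .. code-block:: python

            import numpy as np

            x, w, bias = 2.0, 3.0, 0.5            # input, weight, conv bias
            mean, std, a, b = 1.0, 2.0, 4.0, 0.1  # batch_norm statistics/params
            y_ref = ((x * w + bias) - mean) / std * a + b
            y_fused = x * (a * w / std) + ((bias - mean) / std * a + b)
            assert np.isclose(y_ref, y_fused)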

        The operator transformation is:

        - before:

          - conv->batch_norm->any_other_op (bias == 0)
          - conv->elementwise_add->batch_norm->any_other_op (bias != 0)

        - after:

          - conv->elementwise_add->any_other_op

        The transpile stages are:

        1. Insert an elementwise_add op when bias == 0.
        2. Fuse the batch_norm's parameters into the conv and elementwise_add
           operators.
        3. Remove the batch_norm ops whose outputs are no longer used.
        4. Adjust the input of any_other_op to be the output of the
           elementwise_add operator.
        5. Remove unused variables.

        Args:
            program (Program): program to transpile
            place (Place): inference place
            scope (Scope): inference Scope

        '''
        self.scope = scope
        self.place = place
        self.block = program.block(0)
        self.input_map = {}  # store the input names that should be adjusted

        i = 0
        while i < len(self.block.ops) - 2:
            current_op = self.block.ops[i]
            # TODO(luotao1): consider only conv2d now. fc would be dealt
            # with later.
            if current_op.type in ['conv2d']:
                # TODO(luotao1): consider only single-chain networks now.
                # For a branch network, we couldn't use block.ops[i + 1]
                # as the judgment condition.
                next_op = self.block.ops[i + 1]
                # conv2d without bias
                if (next_op.type == 'batch_norm'):
                    # insert bias op
                    bias_op = self._insert_bias_op(i + 1, current_op, next_op)
                    # fuse batch_norm
                    self._fuse_param(current_op, next_op, bias_op, 0)
                    # remove batch_norm_op
                    self.block._remove_op(i + 2)
                    i = i + 1
                # conv2d with bias, the next_op.type is elementwise_add
                elif (next_op.type == 'elementwise_add'):
                    next_next_op = self.block.ops[i + 2]
                    if (next_next_op.type == 'batch_norm'):
                        # fuse batch_norm
                        self._fuse_param(current_op, next_next_op, next_op, 1)
                        # remove batch_norm_op
                        self.block._remove_op(i + 2)
                        i = i + 1
            i = i + 1
        self._adjust_input()
        self._remove_unused_var()
        # TODO(luotao): use clone() method to flush the program.desc in force,
        # since some large program.desc will not be flushed immediately.
        # And a better solution will be considered later.
        program = program.clone()

    # ====================== private transpiler functions =====================
    def _insert_bias_op(self, index, current_op, bn_op):
        '''
        Construct an elementwise_add operator for adding bias
        and insert it into the program.

        :param index: insert location of bias_op
        :type index: Int
        :param current_op: current operator (conv or fc)
        :type current_op: Operator
        :param bn_op: batch norm operator
        :type bn_op: Operator
        :return: bias_op
        :rtype: Operator
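
        What the inserted op computes, per sample (a numpy sketch; ``axis=1``
        broadcasts the bias over the channel dimension of an NCHW tensor):

        .. code-block:: python

            import numpy as np

            x = np.zeros((2, 3, 4, 4), dtype='float32')  # NCHW conv output
            bias = np.ones(3, dtype='float32')           # batch_norm Bias
            out = x + bias.reshape(1, 3, 1, 1)           # elementwise_add, axis=1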
        '''
        # The input of bias_op is current_op's output and Bias of bn_op
        # The output of bias_op is bn_op's output
        x_var = self.block.var(current_op.output("Output")[0])
        y_var = self.block.var(bn_op.input("Bias")[0])
        out_var = self.block.var(bn_op.output("Y")[0])

        bias_op = self.block._insert_op(
            index,
            type="elementwise_add",
            inputs={"X": x_var,
                    "Y": y_var},
            outputs={"Out": out_var},
            attrs={"axis": 1})  # dim_start=1
        return bias_op

    def _fuse_param(self, current_op, bn_op, bias_op, with_bias):
        '''
        fuse the batch_norm op's parameters into current_op (conv or fc)

        :param current_op: current operator (conv or fc)
        :type current_op: Operator
        :param bn_op: batch norm operator
        :type bn_op: Operator
        :param bias_op: elementwise_add operator for adding bias
        :type bias_op: Operator
298
        :param with_bias: If current operator has bias, with_bias = 1; otherwise 0.
299
        :type with_bias: Int
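
        A numpy sketch of the per-channel folding performed below
        (illustrative; eps = 1e-5 as in the code):

        .. code-block:: python

            import numpy as np

            scale_bn = np.array([2.0], dtype='float32')
            var_bn = np.array([4.0], dtype='float32')
            tmp = scale_bn / np.sqrt(var_bn + 1e-5)  # a / std, per channel
            # new_filter = old_filter * tmp   (broadcast over output channels)
            # new_bias = (old_bias - mean_bn) * tmp + bias_bn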
        '''

        def _update_param(op, old_param_name, new_param):
            # To keep the original variables unchanged, create new variables
            # in the scope to store the fused parameters.
            old_param_name = old_param_name[0]
            old_var = self.block.vars[old_param_name]
            new_param_name = old_param_name + '_fuse_bn'
            new_var = self.block.create_parameter(
                name=new_param_name.encode('ascii'),
                type=old_var.type,
                dtype=old_var.dtype,
                shape=old_var.shape)
            op.rename_input(old_param_name, new_param_name)
            self.scope.var(new_param_name)

            tensor = self.scope.find_var(new_param_name).get_tensor()
            tensor.set(np.array(new_param), self.place)

        def _load_param(param_name):
            return np.array(self.scope.find_var(param_name[0]).get_tensor())

        bias_bn = _load_param(bn_op.input("Bias"))  # Bias
        scale_bn = _load_param(bn_op.input("Scale"))  # Scale
        mean_bn = _load_param(bn_op.input("Mean"))  # Mean
        var_bn = _load_param(bn_op.input("Variance"))  # Variance

        # TODO(luotao1): consider only conv2d now. fc would be dealt with later.
        current_param = _load_param(current_op.input("Filter"))
        std_bn = np.float32(np.sqrt(np.add(var_bn, 1e-5)))
        tmp = np.float32(np.divide(scale_bn, std_bn))

        # add bias of batch_norm_op to conv2d
        if with_bias:
            bias = _load_param(bias_op.input("Y"))
        else:
            bias = np.zeros(bias_bn.shape)
        bias = np.float32(
            np.add(np.multiply(np.subtract(bias, mean_bn), tmp), bias_bn))

        # re-compute weight of conv2d
        tmp = tmp.reshape(tmp.shape[0], -1)
        dst_param = current_param.reshape((tmp.shape[0], -1))
        dst_param = np.float32(np.multiply(dst_param, tmp))
        dst_param = dst_param.reshape(current_param.shape)

        # update parameters
        _update_param(current_op, current_op.input("Filter"), dst_param)
        _update_param(bias_op, bias_op.input("Y"), bias)

        # collect the renamed input
        self.input_map[bn_op.output("Y")[0]] = bias_op.output("Out")[0]

    def _fuse_conv_bias(self, index, conv_op, elementwise_add_op):
        '''
        fuse the conv op with elementwise_add

        :param index: index of the conv_op in ops list
        :type index: Int
        :param conv_op: convolution operator
        :type conv_op: Operator
        :param elementwise_add_op: convolution's bias operator
        :type elementwise_add_op: Operator
        '''

        bias_var = self.block.var(elementwise_add_op.input("Y")[0])
        out_var = self.block.var(elementwise_add_op.output("Out")[0])
        filter_var = self.block.var(conv_op.input("Filter")[0])
        in_var = self.block.var(conv_op.input("Input")[0])
        attrs = {name: conv_op.attr(name) for name in conv_op.attr_names}

        self.block._insert_op(
            index,
            type="conv2d",
            inputs={"Input": in_var,
                    "Filter": filter_var,
                    "Bias": bias_var},
            outputs={"Output": out_var},
            attrs=attrs)

    def _adjust_input(self):
        for i in range(len(self.block.ops)):
            current_op = self.block.ops[i]
            for input_arg in current_op.input_arg_names:
                if input_arg in self.input_map:
                    current_op.rename_input(input_arg,
                                            self.input_map[input_arg])

    def _remove_unused_var(self):
        '''
        remove unused variables from the program
        '''
        args = []
        for i in range(len(self.block.ops)):
            current_op = self.block.ops[i]
            args += current_op.input_arg_names
            args += current_op.output_arg_names
        args = list(set(args))  # deduplicate the input and output arguments

        for var in list(self.block.vars.keys()):
            if var not in args:
                self.block._remove_var(var)