#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import os
import numpy as np
from .. import core
from ..framework import Program
from ..executor import global_scope


class InferenceTranspiler(object):
    '''
    Convert the fluid program to optimized inference program.

    There are several optimizations:

      - fuse convolution and batch normalization
      - fuse convolution and bias (MKLDNN only)
      - fuse convolution and relu (MKLDNN only)
      - fuse batch normalization and relu (MKLDNN only)

    Examples:

    .. code-block:: python

        # As InferenceTranspiler will modify the original program,
        # please clone it before use.
        inference_transpiler_program = program.clone()
        t = fluid.InferenceTranspiler()
        t.transpile(inference_transpiler_program, place)
    '''

    def transpile(self, program, place, scope=None):
        '''
        Run the transpiler.

        Args:
            program (Program): program to transpile
            place (Place): inference place
            scope (Scope|None): inference Scope
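
        A minimal usage sketch (assumes an already-built ``program`` and a
        ``place``; ``fluid`` refers to ``paddle.fluid``):

        .. code-block:: python

            inference_program = program.clone()
            t = fluid.InferenceTranspiler()
            t.transpile(inference_program, place, scope=fluid.global_scope())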
        '''
        if not isinstance(program, Program):
            raise TypeError("program should be as Program type")
        if not isinstance(place, core.CPUPlace) and not isinstance(
                place, core.CUDAPlace):
            raise TypeError("place should be as CPUPlace/CUDAPlace type")
        if scope is None:
            scope = global_scope()
        if not isinstance(scope, core.Scope):
            raise TypeError("scope should be as Scope type or None")
        # os.getenv returns a string, so bool() alone would treat "0" or
        # "False" as true; compare against common truthy spellings instead
        # (e.g. run with FLAGS_use_mkldnn=1 to enable the MKLDNN fusions).
        use_mkldnn = os.getenv("FLAGS_use_mkldnn", "0").lower() in (
            "1", "true", "t", "on")

        self._fuse_batch_norm(program, place, scope)
        if use_mkldnn:
            self._fuse_conv_bias_mkldnn(program)
            self._fuse_conv_relu_mkldnn(program)
            self._fuse_bn_relu_mkldnn(program)

    def _fuse_conv_relu_mkldnn(self, program):
        '''
        Transpile the program by fusing the relu activation into the
        preceding convolution for MKLDNN.

        A relu activation that follows a convolution OP can be fused by
        adding the 'fuse_relu' attribute to the convolution OP.

        The result of the fuse is:

        - before:

          - conv->relu->any_other_op

        - after:

          - conv->any_other_op

        :param program: program to transpile
        :type program: Program
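
        A hedged inspection sketch (the op index is illustrative): after
        transpiling, the conv2d OP should carry the attribute and the relu
        OP should be gone.

        .. code-block:: python

            conv_op = program.block(0).ops[0]  # assumes conv2d comes first
            assert conv_op.attr("fuse_relu")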
        '''
        self.block = program.block(0)

        i = 0
        # stop one short of the end since we look ahead at ops[i + 1]
        while i < len(self.block.ops) - 1:
            current_op = self.block.ops[i]
            if current_op.type in ['conv2d']:
                next_op = self.block.ops[i + 1]
                if next_op.type == 'relu':
                    # modify conv OP to include relu
                    current_op.set_attr("fuse_relu", True)
                    # remove relu OP
                    self.block._remove_op(i + 1)
            i = i + 1

        # TODO(luotao): use clone() method to flush the program.desc in force,
        # since some large program.desc will not be flushed immediately.
        # And a better solution will be considered later.
        program = program.clone()

    def _fuse_bn_relu_mkldnn(self, program):
        '''
        Transpile the program by fusing the relu activation into the
        preceding batch norm for MKLDNN.

        A relu activation that follows a batch norm OP can be fused by
        adding the 'fuse_with_relu' attribute to the batch norm OP.

        The result of the fuse is:

        - before:

          - batch_norm->relu->any_other_op

        - after:

          - batch_norm->any_other_op

        :param program: program to transpile
        :type program: Program
        '''
        self.block = program.block(0)

        i = 0
        while i < len(self.block.ops) - 1:
            current_op = self.block.ops[i]
            if current_op.type in ['batch_norm']:
                next_op = self.block.ops[i + 1]
                if next_op.type == 'relu':
                    # modify bnorm OP to include relu
                    current_op.set_attr("fuse_with_relu", True)
                    # remove relu OP
                    self.block._remove_op(i + 1)
            i = i + 1

        self._remove_unused_var()
        # TODO(luotao): use clone() method to flush the program.desc in force,
        # since some large program.desc will not be flushed immediately.
        # And a better solution will be considered later.
        program = program.clone()

    def _fuse_conv_bias_mkldnn(self, program):
        '''
        Transpile the program by fusing convolution and elementwise_add OPs.

        Replace conv2d and elementwise_add ops with a new conv2d op
        based on an old conv2d op and the :math:`Bias` taken from
        elementwise_add.

        For input :math:`X`:

        - Conv process:            :math:`X = input * W`
        - Elementwise_add process: :math:`X = X + bias`

        After fusing into one operation:

        .. math::

            X = input * W + bias

        The operator transformation is:

        - before:

          - conv->elementwise_add->any_other_op

        - after:

          - conv->any_other_op

        The transpile stages are:

        1. Extract bias and output variables from elementwise_add.
        2. Extract Input, Weight and attributes from the conv op.
        3. Create a new convolution op based on the extracted params.
        4. Remove the old conv op.
        5. Remove the elementwise_add op.
        6. Remove unused variables.
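
        A sketch of the expected effect on the block's op list (the op
        names shown are illustrative):

        .. code-block:: python

            # before: ['conv2d', 'elementwise_add', 'relu']
            # after:  ['conv2d', 'relu']   # bias folded into the new conv2d
            print([op.type for op in program.block(0).ops])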

        Args:
            program (Program): program to transpile

        '''
        self.block = program.block(0)

        i = 0
        while i < len(self.block.ops) - 2:
            current_op = self.block.ops[i]
            next_op = self.block.ops[i + 1]
            # conv2d with bias
            if current_op.type in ['conv2d'] and \
               next_op.type in ['elementwise_add']:
                self._fuse_conv_bias(i, current_op, next_op)
                self.block._remove_op(i + 1)  # Remove old conv
                self.block._remove_op(i + 1)  # Remove elementwise_add
            i = i + 1

        self._remove_unused_var()
        # TODO(luotao): use clone() method to flush the program.desc in force,
        # since some large program.desc will not be flushed immediately.
        # And a better solution will be considered later.
        program = program.clone()

    def _fuse_batch_norm(self, program, place, scope):
        '''
        Transpile the program by fusing batch normalization.

        A batch normalization that follows a convolution or fully connected
        layer can be integrated into that layer. Doing so gives a forward
        acceleration, especially in environments like mobile or embedded.

        For input :math:`X`:

        - Conv process:        :math:`X = input * W + bias`
        - Batch norm process:  :math:`X' = (X - mean) / std`
        - Scale Process:       :math:`Y = a * X' + b`

        After fusing into one operation:

        .. math::

            Y &= (input * W + bias - mean) / std * a + b \\\\
              &= input * a * W / std + ((bias - mean) / std * a + b)

        The operator transformation is:

        - before:

          - conv->batch_norm->any_other_op (bias == 0)
          - conv->elementwise_add->batch_norm->any_other_op (bias != 0)

        - after:

          - conv->elementwise_add->any_other_op

        The transpile stages are:

        1. Insert an elementwise_add op when bias == 0.
        2. Fuse the batch_norm's parameters into the conv and elementwise_add
           operators.
        3. Remove the batch_norm ops which are not used by any other op.
        4. Adjust the input of any_other_op to be the output of the
           elementwise_add operator.
        5. Remove unused variables.

        Args:
            program (Program): program to transpile
            place (Place): inference place
            scope (Scope): inference Scope

        '''
        self.scope = scope
        self.place = place
        self.block = program.block(0)
        self.input_map = {}  # store the input names that should be adjusted

        i = 0
        while i < len(self.block.ops) - 2:
            current_op = self.block.ops[i]
            # TODO(luotao1): consider only conv2d now. fc will be dealt with later.
            if current_op.type in ['conv2d']:
                # TODO(luotao1): consider single-chain networks only for now.
                # For branch networks, we couldn't use block.ops[i + 1] as
                # the judgment condition.
                next_op = self.block.ops[i + 1]
                # conv2d without bias
                if (next_op.type == 'batch_norm'):
                    # insert bias op
                    bias_op = self._insert_bias_op(i + 1, current_op, next_op)
                    # fuse batch_norm
                    self._fuse_param(current_op, next_op, bias_op, 0)
                    # remove batch_norm_op
                    self.block._remove_op(i + 2)
                    i = i + 1
                # conv2d with bias, the next_op.type is elementwise_add
                elif (next_op.type == 'elementwise_add'):
                    next_next_op = self.block.ops[i + 2]
                    if (next_next_op.type == 'batch_norm'):
                        # fuse batch_norm
                        self._fuse_param(current_op, next_next_op, next_op, 1)
                        # remove batch_norm_op
                        self.block._remove_op(i + 2)
                        i = i + 1
            i = i + 1
        self._adjust_input()
        self._remove_unused_var()
        # TODO(luotao): use clone() method to flush the program.desc in force,
        # since some large program.desc will not be flushed immediately.
        # And a better solution will be considered later.
        program = program.clone()

    # ====================== private transpiler functions =====================
    def _insert_bias_op(self, index, current_op, bn_op):
        '''
        Construct an elementwise_add operator for adding bias
        and insert it into the program.

        :param index: insert location of bias_op
        :type index: Int
        :param current_op: current operator (conv or fc)
        :type current_op: Operator
        :param bn_op: batch norm operator
        :type bn_op: Operator
        :return: bias_op
        :rtype: Operator
        '''
        # The input of bias_op is current_op's output and Bias of bn_op
        # The output of bias_op is bn_op's output
        x_var = self.block.var(current_op.output("Output")[0])
        y_var = self.block.var(bn_op.input("Bias")[0])
        out_var = self.block.var(bn_op.output("Y")[0])

        bias_op = self.block._insert_op(
            index,
            type="elementwise_add",
            inputs={"X": x_var,
                    "Y": y_var},
            outputs={"Out": out_var},
            attrs={"axis": 1})  # dim_start=1
        return bias_op

    def _fuse_param(self, current_op, bn_op, bias_op, with_bias):
        '''
        fuse the batch_norm_op's parameters into current_op (conv or fc)

        :param current_op: current operator (conv or fc)
        :type current_op: Operator
        :param bn_op: batch norm operator
        :type bn_op: Operator
        :param bias_op: elementwise_add operator for adding bias
        :type bias_op: Operator
        :param with_bias: If current operator has bias, with_bias = 1; otherwise 0.
        :type with_bias: Int
        '''

        def _update_param(op, old_param_name, new_param):
            # To keep the original variables unchanged, create new
            # variables in the scope to store the new parameters.
            old_param_name = old_param_name[0]
            old_var = self.block.vars[old_param_name]
            new_param_name = old_param_name + '_fuse_bn'
            new_var = self.block.create_parameter(
                name=new_param_name.encode('ascii'),
                type=old_var.type,
                dtype=old_var.dtype,
                shape=old_var.shape)
            op.rename_input(old_param_name, new_param_name)
            self.scope.var(new_param_name)

            tensor = self.scope.find_var(new_param_name).get_tensor()
            tensor.set(np.array(new_param), self.place)

        def _load_param(param_name):
            return np.array(self.scope.find_var(param_name[0]).get_tensor())

        bias_bn = _load_param(bn_op.input("Bias"))  # Bias
        scale_bn = _load_param(bn_op.input("Scale"))  # Scale
        mean_bn = _load_param(bn_op.input("Mean"))  # Mean
        var_bn = _load_param(bn_op.input("Variance"))  # Variance

        # TODO(luotao1): consider only conv2d now. fc will be dealt with later.
        current_param = _load_param(current_op.input("Filter"))
        std_bn = np.float32(np.sqrt(np.add(var_bn, 1e-5)))
        tmp = np.float32(np.divide(scale_bn, std_bn))

        # add bias of batch_norm_op to conv2d
        if with_bias:
            bias = _load_param(bias_op.input("Y"))
        else:
            bias = np.zeros(bias_bn.shape)
        bias = np.float32(
            np.add(np.multiply(np.subtract(bias, mean_bn), tmp), bias_bn))

        # re-compute weight of conv2d
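        # tmp holds one scale (a / std) per output channel; reshape it so it
        # broadcasts across the flattened filter weights of each channel.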
        tmp = tmp.reshape(tmp.shape[0], -1)
        dst_param = current_param.reshape((tmp.shape[0], -1))
        dst_param = np.float32(np.multiply(dst_param, tmp))
        dst_param = dst_param.reshape(current_param.shape)

        # update parameters
        _update_param(current_op, current_op.input("Filter"), dst_param)
        _update_param(bias_op, bias_op.input("Y"), bias)

        # collect the renamed input
        self.input_map[bn_op.output("Y")[0]] = bias_op.output("Out")[0]

    def _fuse_conv_bias(self, index, conv_op, elementwise_add_op):
        '''
        fuse the conv op with elementwise_add

        :param index: index of the conv_op in ops list
        :type index: Int
        :param conv_op: convolution operator
        :type conv_op: Operator
        :param elementwise_add_op: convolution's bias operator
        :type elementwise_add_op: Operator
        '''

        bias_var = self.block.var(elementwise_add_op.input("Y")[0])
        out_var = self.block.var(elementwise_add_op.output("Out")[0])
        filter_var = self.block.var(conv_op.input("Filter")[0])
        in_var = self.block.var(conv_op.input("Input")[0])
        attrs = {name: conv_op.attr(name) for name in conv_op.attr_names}

        self.block._insert_op(
            index,
            type="conv2d",
            inputs={"Input": in_var,
                    "Filter": filter_var,
                    "Bias": bias_var},
            outputs={"Output": out_var},
            attrs=attrs)

    def _adjust_input(self):
        for i in range(len(self.block.ops)):
            current_op = self.block.ops[i]
            for input_arg in current_op.input_arg_names:
                if input_arg in self.input_map:
                    current_op.rename_input(input_arg,
                                            self.input_map[input_arg])

    def _remove_unused_var(self):
        '''
        remove unused variables in the program
        '''
        args = []
        for i in range(len(self.block.ops)):
            current_op = self.block.ops[i]
            args += current_op.input_arg_names
            args += current_op.output_arg_names
        args = list(set(args))  # unique the input and output arguments

        for var in list(self.block.vars.keys()):
            if var not in args:
                self.block._remove_var(var)