#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import numpy as np
import os
import shutil
from . import core


class InferenceTranspiler:
    def transpile(self, program, scope, place):
        '''
        Transpile the program into an inference program by fusing batch normalization.

        A batch normalization layer that follows a convolution or fully connected
        layer can be folded into that layer. Doing so speeds up the forward pass,
        especially on mobile and embedded devices.

        For input X:
        - Conv process:        X = input * W + bias 
        - Batch norm process:  X' = (X - mean) / std 
        - Scale Process:       Y = a * X' + b

        After fusing them into one operation:

        Y = (input * W + bias - mean) / std * a + b
          = input * a * W / std + ((bias - mean) / std * a + b)

        The operator transformation is: 
        - before:
          - conv->batch_norm->any_other_op (bias == 0)
          - conv->elementwise_add->batch_norm->any_other_op (bias != 0)
        - after: 
          - conv->elementwise_add->any_other_op
        
        The transpile stages are:
        1. insert an elementwise_add op when bias == 0, and adjust its input and output.
        2. fuse the batch_norm parameters into the conv and elementwise_add operators.
        3. remove the batch_norm ops and their variables that are not used by any other op.
        4. remove unused variables.
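
        A minimal usage sketch (``inference_program``, ``scope`` and ``place`` are
        illustrative names for objects supplied by the caller):

        .. code-block:: python

            t = InferenceTranspiler()
            t.transpile(inference_program, scope, place)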

        :param program: program to transpile 
        :type program: Program
        :param scope: inference scope 
        :type scope: Scope
        :param place: inference place 
        :type place: Place
        :return: program with fused batch normalization
        :rtype: Program
        '''
        self.scope = scope
        self.place = place
        self.block = program.block(0)
        i = 0
        while i < len(self.block.ops):
            current_op = self.block.ops[i]
            # TODO(luotao1): only conv2d is considered for now; fc will be dealt with later.
            if current_op.type in ['conv2d']:
                next_op = self.block.ops[i + 1]
                # TODO(luotao1): consider only conv2d without bias now.
                # If conv2d with bias, the next_op.type is elementwise_add.
                if next_op.type == 'batch_norm':
                    # insert bias op
                    bias_op = self._insert_bias_op(i + 1, current_op, next_op)
                    # fuse batch_norm
                    self._fuse_param(current_op, next_op, bias_op)
                    # remove batch_norm_op
                    self.block.remove_op(i + 2)
                    i = i + 1
            i = i + 1

        self._remove_unused_var()
        return program

    # ====================== private transpiler functions =====================
    def _insert_bias_op(self, index, current_op, bn_op):
        '''
        Construct elementwise_add operator for adding bias 
        and insert it into program.
        
        :param index: insert location of bias_op
        :type index: Int
        :param current_op: current operator (conv or fc)
        :type current_op: Operator
        :param bn_op: batch norm operator
        :type bn_op: Operator
        :return: bias_op
        :rtype: Operator
        '''
        # The input of bias_op is current_op's output and Bias of bn_op
        # The output of bias_op is bn_op's output
        x_var = self.block.var(current_op.output("Output")[0])
        y_var = self.block.var(bn_op.input("Bias")[0])
        out_var = self.block.var(bn_op.output("Y")[0])

        bias_op = self.block.insert_op(
            index,
            type="elementwise_add",
            inputs={"X": x_var,
                    "Y": y_var},
            outputs={"Out": out_var},
            attrs={"axis": 1})  # dim_start=1
        return bias_op

    def _fuse_param(self, current_op, bn_op, bias_op):
        '''
        fuse the batch_norm op's parameters into current_op (conv or fc)
        
        :param current_op: current operator (conv or fc)
        :type current_op: Operator
        :param bn_op: batch norm operator
        :type bn_op: Operator
        :param bias_op: elementwise_add operator for adding bias
        :type bias_op: Operator
        '''

        def _load_tensor(param_name):
            return self.scope.find_var(param_name[0]).get_tensor()

        def _load_param(param_name):
            return np.array(_load_tensor(param_name))

        bias_bn = _load_param(bn_op.input("Bias"))  # b in the scale process above
        scale_bn = _load_param(bn_op.input("Scale"))  # a in the scale process above
        mean_bn = _load_param(bn_op.input("Mean"))  # running mean
        var_bn = _load_param(bn_op.input("Variance"))  # running variance

        # TODO(luotao1): only conv2d is considered for now; fc will be dealt with later.
        current_param = _load_param(current_op.input("Filter"))
        current_tensor = _load_tensor(current_op.input("Filter"))

        std_bn = np.float32(np.sqrt(np.add(var_bn, 1e-5)))
        tmp = np.float32(np.divide(scale_bn, std_bn))
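        # 'std_bn' is std and 'tmp' is a / std in the formula documented in
        # transpile(); scaling the filter and the shifted bias by 'tmp' folds
        # batch_norm into conv2d.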

        # compute the fused bias (bias - mean) / std * a + b; the conv2d here has
        # no bias, so start from zeros
        bias = np.zeros(bias_bn.shape)
        bias = np.float32(
            np.add(np.multiply(np.subtract(bias, mean_bn), tmp), bias_bn))
        bias_tensor = _load_tensor(bias_op.input("Y"))
        bias_tensor.set(bias, self.place)

        # re-compute weight of conv2d
        tmp = tmp.reshape(tmp.shape[0], -1)
        dst_param = current_param.reshape((tmp.shape[0], -1))
        dst_param = np.float32(np.multiply(dst_param, tmp))
        dst_param = dst_param.reshape(current_param.shape)

        # set the updated parameters
        current_tensor.set(np.array(dst_param), self.place)

    def _remove_unused_var(self):
        '''
        remove unused variables in program
        '''
        args = []
        for i in range(len(self.block.ops)):
            current_op = self.block.ops[i]
            args += current_op.input_arg_names
            args += current_op.output_arg_names
        args = list(set(args))  # unique the input and output arguments

        # iterate over a copy of the keys so removing variables is safe
        for var in list(self.block.vars.keys()):
            if var not in args:
                self.block.remove_var(var)