auto_parallel_quantization.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

import numpy as np

import paddle
from paddle.framework import IrGraph, core
from paddle.static.quantization import (
    AddQuantDequantForInferencePass,
    AddQuantDequantPassV2,
    OutScaleForTrainingPass,
    QuantizationTransformPassV2,
    quant_config,
)

from ..auto_parallel.converter import Converter
from ..auto_parallel.dist_attribute import OperatorDistAttr, TensorDistAttr
from .pass_base import PassBase, register_pass

TRANSFORM_PASS_OP_TYPES = list(
    quant_config.SUPPORT_WEIGHT_QUANTIZATION_OP_DICT.keys()
)
QUANT_DEQUANT_PASS_OP_TYPES = list(
    quant_config.SUPPORT_ACT_QUANTIZATION_OP_DICT.keys()
)


def _node_id(node):
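    # node ids are only unique within a single sub-graph, so pair the node
    # id with its graph id to form a globally unique key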
    return (node.node.graph_id(), node.node.id())


@register_pass("auto_parallel_quantization")
class QuantizationPass(PassBase):
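    """
    Insert fake quant/dequant ops into an auto-parallel program for
    quantization-aware training, then restore the distributed attributes
    that the Program <-> Graph round trip discards.
    """
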
    def __init__(self):
        super().__init__()
        self.set_attr("dist_context", None)
        self.set_attr("params_grads", None)
        self.set_attr("mode", "train")
        self.set_attr("loss", None)

    def _check_self(self):
        if self.get_attr("dist_context") is None:
            return False
        if self.get_attr("params_grads") is None:
            return False
        return True

    def _check_conflict(self, other_pass):
        return True

    def _apply_single_impl(self, main_program, startup_program, context):

        dist_context = self.get_attr("dist_context")
        params_grads = self.get_attr("params_grads")
        mode = self.get_attr("mode")
        loss = self.get_attr("loss")

        # TODO: scope and place will be removed, because params should be
        # initialized by the engine module.
        scope = paddle.static.global_scope()
        place = paddle.framework.CUDAPlace(
            paddle.distributed.ParallelEnv().dev_id
        )

        # 0. record the relation among blocks
        parent_idx_dict = dict()
        for block in main_program.blocks:
            parent_idx_dict[block.idx] = block.parent_idx
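        # (the Program -> Graph -> Program round trip below drops parent
        # block indices, so they are restored in step 8.3)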

        is_test = mode != "train"

        # 1. Convert the Program to a Graph; the quantization passes below
        # operate on (sub-)graphs
        main_graph = IrGraph(core.Graph(main_program.desc), for_test=is_test)

        # 2. Prepare inputs
        transform_pass_ops = []
        quant_dequant_ops = []
        quantize_op_types = [
            'conv2d',
            'depthwise_conv2d',
            'mul',
            'matmul',
            'matmul_v2',
        ]
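        # ops that support weight quantization go through the transform pass;
        # ops that only support activation quantization get quant/dequant ops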
        for op_type in quantize_op_types:
            if op_type in TRANSFORM_PASS_OP_TYPES:
                transform_pass_ops.append(op_type)
            elif op_type in QUANT_DEQUANT_PASS_OP_TYPES:
                quant_dequant_ops.append(op_type)

        weight_quantize_type = (
            "channel_wise_abs_max"
            if self.get_attr('channel_wise_abs_max')
            else "abs_max"
        )

        # 3. Add quant ops for ops which have parameters
        if len(transform_pass_ops) > 0:
            transform_pass = QuantizationTransformPassV2(
                scope=scope,
                place=place,
                weight_bits=self.get_attr('weight_bits'),
                activation_bits=self.get_attr('activation_bits'),
                skip_pattern=self.get_attr('not_quant_pattern'),
                activation_quantize_type="moving_average_abs_max",
                quantizable_op_type=transform_pass_ops,
                weight_quantize_type=weight_quantize_type,
                weight_quantize_func=None,
                act_quantize_func=None,
                weight_preprocess_func=None,
                act_preprocess_func=None,
                optimizer_func=None,
                executor=None,
                is_test=is_test,
            )
            for sub_graph in main_graph.all_sub_graphs():
                transform_pass.apply(sub_graph)

        # 4. Add quant ops for ops which don't have parameters
        if len(quant_dequant_ops) > 0:
            quant_dequant_pass = AddQuantDequantPassV2(
                scope=scope,
                place=place,
                quant_bits=self.get_attr('activation_bits'),
                skip_pattern=self.get_attr('not_quant_pattern'),
                quantizable_op_type=quant_dequant_ops,
                is_test=is_test,
            )
            for sub_graph in main_graph.all_sub_graphs():
                quant_dequant_pass.apply(sub_graph)

        # 5. Gather quantization scales for op outputs
        out_scale_training_pass = OutScaleForTrainingPass(
            scope=scope, place=place, is_test=is_test
        )
        for sub_graph in main_graph.all_sub_graphs():
            out_scale_training_pass.apply(sub_graph)

        # 6. When exporting the quant model, find the output of each op and
        # insert quant/dequant ops after it.
        if mode != "train" and self.get_attr('onnx_format'):
            try:
                out_scale_infer_pass = AddQuantDequantForInferencePass(
                    scope=scope,
                    place=place,
                    quant_bits=self.get_attr('activation_bits'),
                )
                # for sub_graph in main_graph.all_sub_graphs():
                #     out_scale_infer_pass.apply(sub_graph)
            except Exception:
                logging.warning(
                    "Unable to convert quant model with onnx_format=True, "
                    "please update PaddlePaddle to 2.4.0 or later"
                )

        # 7. Convert the Graph back to a Program
        quant_program = main_graph.to_program()
        quant_program = self.move_persist_var_to_global_block(quant_program)

        # 8.1 get new params_grads from quant_program
        new_params_grads = []
        for param, grad in params_grads:
            if param.name not in quant_program.global_block().vars:
                continue

            new_param = quant_program.global_block().vars[param.name]
            new_grad = quant_program.global_block().vars[grad.name]
            new_params_grads.append((new_param, new_grad))

        # 8.2 get new loss var
        new_loss = None
        if loss:
            new_loss = quant_program.global_block().vars[loss.name]

        # 8.3 recover the relation among blocks
        for block in quant_program.blocks:
            block.desc._set_forward_block_idx(parent_idx_dict[block.idx])

        # 9. complete distributed attributes
        self.set_dist_attr_for_qat_program(
            quant_program, main_program, dist_context
        )

        # 10. reset scale var value with dist_attr
        self.reset_scope_var(quant_program, dist_context, scope, place)

        context.set_attr("main_program", quant_program)
        context.set_attr("startup_program", startup_program)
        context.set_attr("params_grads", new_params_grads)
        context.set_attr("loss", new_loss)

    def move_persist_var_to_global_block(self, program):
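        """
        Quantization may create persistable vars inside a while op's
        sub-block. Move them to the global block and register them as extra
        inputs of the while op so they remain reachable from the sub-block.
        """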
        global_block = program.global_block()
        for _op in global_block.ops:
            if _op.type == "while":
                _block_id = _op.attr("sub_block").id
                _block = program.block(_block_id)
                persistables = []
                for _name, _var in _block.vars.items():
                    if _var.persistable:
                        global_block._clone_variable(_var)
                        persistables.append(_name)
                for _name in persistables:
                    _block._remove_var(_name)
                persistables.extend(_op.input('X'))
                _op.desc.set_input("X", persistables)
        return program

    def reset_scope_var(self, quant_program, dist_context, scope, place):
        # The var values created by the quantization passes must have the
        # same shape as the values after parallelization.
        for var in quant_program.list_vars():
            scope_var = scope.find_var(var.name)
            if not (scope_var and scope_var.get_tensor()._is_initialized()):
                continue
            tensor = scope_var.get_tensor()
            if var.shape == tensor.shape:
                continue

            var_dist_attr = dist_context.get_tensor_dist_attr_for_program(var)
            dist_attr = {
                "dims_mapping": var_dist_attr.dims_mapping,
                "process_shape": var_dist_attr.process_mesh.shape,
                "process_group": var_dist_attr.process_mesh.process_ids,
            }

            # slice tensor_value with dist_attr
            sliced_tensor = Converter.slice_with_dist_attr(
                np.array(tensor), dist_attr
            )
            tensor._clear()
            tensor.set(sliced_tensor, place)

    def set_dist_attr_for_qat_program(
        self, quant_program, main_program, dist_context
    ):
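        """
        Transfer the distributed attributes recorded for the original
        program onto the converted quant program: each quant op inherits
        the dist_attr of the op (or tensor) it consumes, while the
        remaining ops recover the dist_attr of their origin ops.
        """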
        # NOTE: hacky implementation; to be upgraded soon
        for ib, block in enumerate(quant_program.blocks):
            # recover origin ops' dist_attr and set quant ops' dist_attr
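            # `qat_offset` counts the quant ops inserted so far in this
            # block, so `ip - qat_offset` maps an op index in the quant
            # program back to the corresponding index in the origin program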
            qat_offset = 0
            for ip, quant_op in enumerate(block.ops):
                quant_op_dist_attr = OperatorDistAttr()

                if (
                    "quantize" in quant_op.type
                    or quant_op.type == "moving_average_abs_max_scale"
                ):
                    # set each quantization op's dist_attr from the op whose
                    # output it quantizes
                    input_name = quant_op.desc.input('X')[0]
                    if "quantize" in input_name:
                        input_name = input_name[
                            : input_name.index(".quantized")
                        ]

                    if (
                        quant_op.type == "moving_average_abs_max_scale"
                        or ip - qat_offset >= len(main_program.blocks[ib].ops)
                    ):
                        consume_op = (
                            main_program.blocks[ib]
                            ._var_recursive(input_name)
                            .op
                        )
                    else:
                        consume_op = main_program.blocks[ib].ops[
                            ip - qat_offset
                        ]
                    consume_op_dist_attr = dist_context.get_dist_op_for_program(
                        consume_op
                    ).dist_attr
                    ref_process_mesh = consume_op_dist_attr.process_mesh

                    if input_name in consume_op_dist_attr.outputs_dist_attrs:
                        consume_input_dist_attr = (
                            consume_op_dist_attr.outputs_dist_attrs[input_name]
                        )
                    else:
                        consume_input_dist_attr = (
                            consume_op_dist_attr.inputs_dist_attrs[input_name]
                        )

                    quant_op_dist_attr.impl_idx = 0
                    quant_op_dist_attr.impl_type = "default"
                    quant_op_dist_attr.process_mesh = ref_process_mesh
                    quant_op_dist_attr.set_input_dist_attr(
                        quant_op.desc.input('X')[0], consume_input_dist_attr
                    )

                    for slot_name in quant_op.desc.input_names():
                        in_name = quant_op.desc.input(slot_name)[0]
                        input_var = block._var_recursive(in_name)
                        ref_dims_mapping = [-1]
                        if slot_name == "X":
                            continue
                        elif slot_name in ['Scale', 'ZeroPoint']:
                            if (
                                quant_op.has_attr('quant_axis')
                                and quant_op.attr('quant_axis') != -1
                            ):
                                x_name = quant_op.desc.input('X')[0]
                                x_var = block._var_recursive(x_name)
                                x_dist_attr = (
                                    quant_op_dist_attr.get_input_dist_attr(
                                        x_name
                                    )
                                )
                                quant_axis = quant_op.attr('quant_axis')
                                ref_dims_mapping = [
                                    x_dist_attr.dims_mapping[quant_axis]
                                ]

                        tensor_dist_attr = TensorDistAttr()
                        tensor_dist_attr.process_mesh = ref_process_mesh
                        tensor_dist_attr.dims_mapping = ref_dims_mapping
                        dist_context.set_tensor_dist_attr_for_program(
                            input_var, tensor_dist_attr
                        )
                        quant_op_dist_attr.set_input_dist_attr(
                            in_name, tensor_dist_attr
                        )

                    for slot_name in quant_op.desc.output_names():
                        output_name = quant_op.desc.output(slot_name)[0]
                        output_var = block._var_recursive(output_name)
                        ref_dims_mapping = [-1]
                        if slot_name == "Y":
                            dist_context.set_tensor_dist_attr_for_program(
                                output_var, consume_input_dist_attr
                            )
                            quant_op_dist_attr.set_output_dist_attr(
                                output_name, consume_input_dist_attr
                            )
                            continue
                        elif slot_name == "OutScale":
                            if (
                                quant_op.has_attr('quant_axis')
                                and quant_op.attr('quant_axis') != -1
                            ):
                                x_name = quant_op.desc.input('X')[0]
                                x_var = block._var_recursive(x_name)
                                x_dist_attr = (
                                    quant_op_dist_attr.get_input_dist_attr(
                                        x_name
                                    )
                                )
                                quant_axis = quant_op.attr('quant_axis')
                                ref_dims_mapping = [
                                    x_dist_attr.dims_mapping[quant_axis]
                                ]

                        tensor_dist_attr = TensorDistAttr()
                        tensor_dist_attr.process_mesh = ref_process_mesh
                        tensor_dist_attr.dims_mapping = ref_dims_mapping
                        dist_context.set_tensor_dist_attr_for_program(
                            output_var, tensor_dist_attr
                        )
                        quant_op_dist_attr.set_output_dist_attr(
                            output_name, tensor_dist_attr
                        )

                    quant_op._set_attr("op_device", "")
                    qat_offset += 1

                else:
                    # recover origin ops' dist_attr
                    origin_op = main_program.blocks[ib].ops[ip - qat_offset]
                    quant_op.desc.set_original_id(origin_op.desc.original_id())
                    dist_origin_op = dist_context.get_dist_op_for_program(
                        origin_op
                    )
                    assert (
                        dist_origin_op is not None
                    ), "origin op must have dist attr."

                    origin_op_dist_attr = dist_origin_op.dist_attr
                    quant_op_dist_attr.impl_idx = origin_op_dist_attr.impl_idx
                    quant_op_dist_attr.impl_type = origin_op_dist_attr.impl_type
                    quant_op_dist_attr.process_mesh = (
                        origin_op_dist_attr.process_mesh
                    )

                    scale_offset = 0
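                    # quantization adds extra @scale / @zero_point inputs to
                    # while ops; skip them so the remaining inputs line up
                    # with the origin op's inputs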
                    for idx, input_name in enumerate(quant_op.input_arg_names):
                        if (
                            origin_op.type == "while"
                            and input_name not in origin_op.input_arg_names
                        ):
                            assert (
                                "@scale" in input_name
                                or "@zero_point" in input_name
                            )
                            scale_offset += 1
                            continue

                        idx -= scale_offset
                        origin_input_name = origin_op.input_arg_names[idx]
                        origin_input_dist_attr = (
                            origin_op_dist_attr.inputs_dist_attrs[
                                origin_input_name
                            ]
                        )
                        quant_op_dist_attr.set_input_dist_attr(
                            input_name, origin_input_dist_attr
                        )

                    for idx, output_name in enumerate(
                        quant_op.output_arg_names
                    ):
                        origin_output_name = origin_op.output_arg_names[idx]
                        origin_output_dist_attr = (
                            origin_op_dist_attr.outputs_dist_attrs[
                                origin_output_name
                            ]
                        )
                        quant_op_dist_attr.set_output_dist_attr(
                            output_name, origin_output_dist_attr
                        )

                        if not main_program.blocks[ib]._find_var_recursive(
                            output_name
                        ):
                            origin_output_var = main_program.blocks[
                                ib
                            ]._var_recursive(origin_output_name)
                            origin_out_tensor_dist_attr = (
                                dist_context.get_dist_tensor_for_program(
                                    origin_output_var
                                ).dist_attr
                            )
                            quant_output_var = block._var_recursive(output_name)
                            dist_context.set_tensor_dist_attr_for_program(
                                quant_output_var, origin_out_tensor_dist_attr
                            )

                dist_context.set_op_dist_attr_for_program(
                    quant_op, quant_op_dist_attr
                )

            # recover vars' dist_attr
            for name, dst_var in block.vars.items():
                if name in main_program.blocks[ib].vars:
                    src_var = main_program.blocks[ib].vars[name]
                    dist_tensor = dist_context.get_dist_tensor_for_program(
                        src_var
                    )
                    if not dist_tensor:
                        continue
                    dist_context.set_tensor_dist_attr_for_program(
                        dst_var, dist_tensor.dist_attr
                    )
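

# A minimal usage sketch (for illustration only): this pass is normally
# created through the pass registry by the auto-parallel engine, so
# `dist_context`, `params_grads`, `loss`, and the programs are assumed to
# come from that engine; the numeric settings below are illustrative.
#
#   from paddle.distributed.passes import PassContext, new_pass
#
#   config = {
#       "dist_context": dist_context,
#       "params_grads": params_grads,
#       "mode": "train",
#       "loss": loss,
#       "weight_bits": 8,
#       "activation_bits": 8,
#       "not_quant_pattern": ["skip_quant"],
#       "channel_wise_abs_max": True,
#       "onnx_format": False,
#   }
#   quant_pass = new_pass("auto_parallel_quantization", config)
#   quant_pass.apply([main_program], [startup_program], PassContext())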