auto_parallel_quantization.py
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

import numpy as np

import paddle
from paddle.framework import IrGraph, core
from paddle.static.quantization import (
    AddQuantDequantForInferencePass,
    AddQuantDequantPassV2,
    OutScaleForTrainingPass,
    QuantizationTransformPassV2,
    quant_config,
)

from ..auto_parallel.static.converter import Converter
from ..auto_parallel.static.dist_attribute import (
    OperatorDistAttr,
    TensorDistAttr,
)
from .pass_base import PassBase, register_pass

TRANSFORM_PASS_OP_TYPES = list(
    quant_config.SUPPORT_WEIGHT_QUANTIZATION_OP_DICT.keys()
)
QUANT_DEQUANT_PASS_OP_TYPES = list(
    quant_config.SUPPORT_ACT_QUANTIZATION_OP_DICT.keys()
)
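# Ops in TRANSFORM_PASS_OP_TYPES carry weights and are handled by
# QuantizationTransformPassV2; ops in QUANT_DEQUANT_PASS_OP_TYPES are
# activation-only and are handled by AddQuantDequantPassV2 (see steps 3 and 4).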


# Uniquely identify a graph node by (graph_id, node_id).
def _node_id(node):
    return (node.node.graph_id(), node.node.id())


@register_pass("auto_parallel_quantization")
class QuantizationPass(PassBase):
    def __init__(self):
        super().__init__()
        self.set_attr("dist_context", None)
        self.set_attr("params_grads", None)
        self.set_attr("mode", "train")
        self.set_attr("loss", None)

    def _check_self(self):
        if self.get_attr("dist_context") is None:
            return False
        if self.get_attr("params_grads") is None:
            return False
        return True

    def _check_conflict(self, other_pass):
        return True

    def _apply_single_impl(self, main_program, startup_program, context):

        dist_context = self.get_attr("dist_context")
        params_grads = self.get_attr("params_grads")
        mode = self.get_attr("mode")
        loss = self.get_attr("loss")

        # TODO: scope and place will be removed, because params should be
        # initialized by the engine module.
        scope = paddle.static.global_scope()
        place = paddle.framework.CUDAPlace(
            paddle.distributed.ParallelEnv().dev_id
        )

        # 0. record the relation among blocks
        parent_idx_dict = {}
        for block in main_program.blocks:
            parent_idx_dict[block.idx] = block.parent_idx
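        # The Program -> Graph -> Program round trip below rebuilds the blocks,
        # so the forward-block links are saved here and restored in step 8.3.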

        is_test = mode != "train"
        # 1. Convert the Program into an IrGraph (for_test is True for non-train modes)
        main_graph = IrGraph(
            core.Graph(main_program.desc), for_test=is_test
        )

        # 2. Prepare inputs
        transform_pass_ops = []
        quant_dequant_ops = []
        quantize_op_types = [
            'conv2d',
            'depthwise_conv2d',
            'mul',
            'matmul',
            'matmul_v2',
        ]
        for op_type in quantize_op_types:
            if op_type in TRANSFORM_PASS_OP_TYPES:
                transform_pass_ops.append(op_type)
            elif op_type in QUANT_DEQUANT_PASS_OP_TYPES:
                quant_dequant_ops.append(op_type)

        weight_quantize_type = (
            "channel_wise_abs_max"
            if self.get_attr('channel_wise_abs_max')
            else "abs_max"
        )
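        # "channel_wise_abs_max" keeps one scale per output channel of the
        # weight tensor; "abs_max" keeps a single scale for the whole tensor.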

        # 3. Add quant op for ops which have parameters
        if len(transform_pass_ops) > 0:
            transform_pass = QuantizationTransformPassV2(
                scope=scope,
                place=place,
                weight_bits=self.get_attr('weight_bits'),
                activation_bits=self.get_attr('activation_bits'),
                skip_pattern=self.get_attr('not_quant_pattern'),
                activation_quantize_type="moving_average_abs_max",
                quantizable_op_type=transform_pass_ops,
                weight_quantize_type=weight_quantize_type,
                weight_quantize_func=None,
                act_quantize_func=None,
                weight_preprocess_func=None,
                act_preprocess_func=None,
                optimizer_func=None,
                executor=None,
                is_test=is_test,
            )
            for sub_graph in main_graph.all_sub_graphs():
                transform_pass.apply(sub_graph)

        # 4. Add quant op for ops which don't have parameters
        if len(quant_dequant_ops) > 0:
            quant_dequant_pass = AddQuantDequantPassV2(
                scope=scope,
                place=place,
                quant_bits=self.get_attr('activation_bits'),
                skip_pattern=self.get_attr('not_quant_pattern'),
                quantizable_op_type=quant_dequant_ops,
                is_test=is_test,
            )
            for sub_graph in main_graph.all_sub_graphs():
                quant_dequant_pass.apply(sub_graph)

        # 5. Gather quantization scale information for op outputs
        out_scale_training_pass = OutScaleForTrainingPass(
            scope=scope, place=place, is_test=is_test
        )
        for sub_graph in main_graph.all_sub_graphs():
            out_scale_training_pass.apply(sub_graph)

        # 6. When exporting the quant model, traverse each op's outputs and
        # insert a quant/dequant op after them.
        if mode != "train" and self.get_attr('onnx_format'):
            try:
                out_scale_infer_pass = AddQuantDequantForInferencePass(
                    scope=scope,
                    place=place,
                    quant_bits=self.get_attr('activation_bits'),
                )
                # for sub_graph in main_graph.all_sub_graphs():
                #     out_scale_infer_pass.apply(sub_graph)
            except Exception:
                logging.warning(
                    "Unable to convert the quant model with onnx_format=True; "
                    "please update PaddlePaddle to >= 2.4.0"
                )

        # 7. Convert Graph back to Program
        quant_program = main_graph.to_program()
        quant_program = self.move_persist_var_to_global_block(quant_program)

        # 8.1 get new params_grads from quant_program
        new_params_grads = []
        for param, grad in params_grads:
            if param.name not in quant_program.global_block().vars:
                continue

            new_param = quant_program.global_block().vars[param.name]
            new_grad = quant_program.global_block().vars[grad.name]
            new_params_grads.append((new_param, new_grad))

        # 8.2 get new loss var
        new_loss = None
        if loss:
            new_loss = quant_program.global_block().vars[loss.name]

        # 8.3 recover the relation among blocks
        for block in quant_program.blocks:
            block.desc._set_forward_block_idx(parent_idx_dict[block.idx])

        # 9. complete distributed attributes
        self.set_dist_attr_for_qat_program(
            quant_program, main_program, dist_context
        )

        # 10. reset scale var value with dist_attr
        self.reset_scope_var(quant_program, dist_context, scope, place)

        context.set_attr("main_program", quant_program)
        context.set_attr("startup_program", startup_program)
        context.set_attr("params_grads", new_params_grads)
        context.set_attr("loss", new_loss)

    def move_persist_var_to_global_block(self, program):
        # Hoist persistable vars out of `while` sub-blocks into the global
        # block, and register them as explicit inputs of the while op.
        global_block = program.global_block()
        for _op in global_block.ops:
            if _op.type == "while":
                _block_id = _op.attr("sub_block").id
                _block = program.block(_block_id)
                persistables = []
                for _name, _var in _block.vars.items():
                    if _var.persistable:
                        global_block._clone_variable(_var)
                        persistables.append(_name)
                for _name in persistables:
                    _block._remove_var(_name)
                persistables.extend(_op.input('X'))
                _op.desc.set_input("X", persistables)
        return program

    def reset_scope_var(self, quant_program, dist_context, scope, place):
        # The var values created by the quantization passes should have the
        # same shape as the values after parallelization.
        for var in quant_program.list_vars():
            scope_var = scope.find_var(var.name)
            if not (scope_var and scope_var.get_tensor()._is_initialized()):
                continue
            tensor = scope_var.get_tensor()
            if var.shape == tensor.shape:
                continue

            var_dist_attr = dist_context.get_tensor_dist_attr_for_program(var)
            dist_attr = {
                "dims_mapping": var_dist_attr.dims_mapping,
                "process_shape": var_dist_attr.process_mesh.shape,
                "process_group": var_dist_attr.process_mesh.process_ids,
            }

            # slice tensor_value with dist_attr
            sliced_tensor = Converter.slice_with_dist_attr(
                np.array(tensor), dist_attr
            )
            tensor._clear()
            tensor.set(sliced_tensor, place)

    def set_dist_attr_for_qat_program(
        self, quant_program, main_program, dist_context
    ):
        # NOTE: hacky implementation, to be upgraded soon
        for ib, block in enumerate(quant_program.blocks):
            # recover origin ops' dist_attr and set quant ops' dist_attr
            qat_offset = 0
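            # qat_offset counts the quant ops inserted so far in this block, so
            # that (ip - qat_offset) indexes the matching op in main_program.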
            for ip, quant_op in enumerate(block.ops):
                quant_op_dist_attr = OperatorDistAttr()

                if (
                    "quantize" in quant_op.type
                    or quant_op.type == "moving_average_abs_max_scale"
                ):
                    # set each quantization op's dist_attr from the op it quantizes
                    input_name = quant_op.desc.input('X')[0]
                    if "quantize" in input_name:
                        input_name = input_name[
                            : input_name.index(".quantized")
                        ]

                    if (
                        quant_op.type == "moving_average_abs_max_scale"
                        or ip - qat_offset >= len(main_program.blocks[ib].ops)
                    ):
                        consume_op = (
                            main_program.blocks[ib]
                            ._var_recursive(input_name)
                            .op
                        )
                    else:
                        consume_op = main_program.blocks[ib].ops[
                            ip - qat_offset
                        ]
                    consume_op_dist_attr = dist_context.get_dist_op_for_program(
                        consume_op
                    ).dist_attr
                    ref_process_mesh = consume_op_dist_attr.process_mesh

                    if input_name in consume_op_dist_attr.outputs_dist_attrs:
                        consume_input_dist_attr = (
                            consume_op_dist_attr.outputs_dist_attrs[input_name]
                        )
                    else:
                        consume_input_dist_attr = (
                            consume_op_dist_attr.inputs_dist_attrs[input_name]
                        )

                    quant_op_dist_attr.impl_idx = 0
                    quant_op_dist_attr.impl_type = "default"
                    quant_op_dist_attr.process_mesh = ref_process_mesh
                    quant_op_dist_attr.set_input_dist_attr(
                        quant_op.desc.input('X')[0], consume_input_dist_attr
                    )

                    for slot_name in quant_op.desc.input_names():
                        in_name = quant_op.desc.input(slot_name)[0]
                        input_var = block._var_recursive(in_name)
                        ref_dims_mapping = [-1 for _ in input_var.shape]
                        if slot_name == "X":
                            continue
                        elif slot_name in ['Scale', 'ZeroPoint']:
                            if (
                                quant_op.has_attr('quant_axis')
                                and quant_op.attr('quant_axis') != -1
                            ):
                                x_name = quant_op.desc.input('X')[0]
                                x_var = block._var_recursive(x_name)
                                x_dist_attr = (
                                    quant_op_dist_attr.get_input_dist_attr(
                                        x_name
                                    )
                                )
                                quant_axis = quant_op.attr('quant_axis')
                                ref_dims_mapping = [
                                    x_dist_attr.dims_mapping[quant_axis]
                                ]

                        tensor_dist_attr = TensorDistAttr()
                        tensor_dist_attr.process_mesh = ref_process_mesh
                        tensor_dist_attr.dims_mapping = ref_dims_mapping
                        dist_context.set_tensor_dist_attr_for_program(
                            input_var, tensor_dist_attr
                        )
                        quant_op_dist_attr.set_input_dist_attr(
                            in_name, tensor_dist_attr
                        )

                    for slot_name in quant_op.desc.output_names():
                        output_name = quant_op.desc.output(slot_name)[0]
                        output_var = block._var_recursive(output_name)
                        ref_dims_mapping = [-1 for _ in output_var.shape]
                        if slot_name == "Y":
                            dist_context.set_tensor_dist_attr_for_program(
                                output_var, consume_input_dist_attr
                            )
                            quant_op_dist_attr.set_output_dist_attr(
                                output_name, consume_input_dist_attr
                            )
                            continue
                        elif slot_name == "OutScale":
                            if (
                                quant_op.has_attr('quant_axis')
                                and quant_op.attr('quant_axis') != -1
                            ):
                                x_name = quant_op.desc.input('X')[0]
                                x_var = block._var_recursive(x_name)
                                x_dist_attr = (
                                    quant_op_dist_attr.get_input_dist_attr(
                                        x_name
                                    )
                                )
                                quant_axis = quant_op.attr('quant_axis')
                                ref_dims_mapping = [
                                    x_dist_attr.dims_mapping[quant_axis]
                                ]

                        tensor_dist_attr = TensorDistAttr()
                        tensor_dist_attr.process_mesh = ref_process_mesh
                        tensor_dist_attr.dims_mapping = ref_dims_mapping
                        dist_context.set_tensor_dist_attr_for_program(
                            output_var, tensor_dist_attr
                        )
                        quant_op_dist_attr.set_output_dist_attr(
                            output_name, tensor_dist_attr
                        )

                    quant_op._set_attr("op_device", "")
                    qat_offset += 1

                else:
                    # recover origin ops' dist_attr
                    origin_op = main_program.blocks[ib].ops[ip - qat_offset]
                    quant_op.desc.set_original_id(origin_op.desc.original_id())
                    dist_origin_op = dist_context.get_dist_op_for_program(
                        origin_op
                    )
                    assert (
                        dist_origin_op is not None
                    ), "origin op must have dist attr."

                    origin_op_dist_attr = dist_origin_op.dist_attr
                    quant_op_dist_attr.impl_idx = origin_op_dist_attr.impl_idx
                    quant_op_dist_attr.impl_type = origin_op_dist_attr.impl_type
                    quant_op_dist_attr.process_mesh = (
                        origin_op_dist_attr.process_mesh
                    )

                    scale_offset = 0
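                    # The quant passes add @scale/@zero_point inputs that the
                    # original while op does not have; skip them so idx aligns
                    # with origin_op.input_arg_names.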
                    for idx, input_name in enumerate(quant_op.input_arg_names):
398 399 400 401 402 403 404 405 406 407 408 409
                        if (
                            origin_op.type == "while"
                            and input_name not in origin_op.input_arg_names
                        ):
                            assert (
                                "@scale" in input_name
                                or "@zero_point" in input_name
                            )
                            scale_offset += 1
                            continue

                        idx -= scale_offset
                        origin_input_name = origin_op.input_arg_names[idx]
                        origin_input_dist_attr = (
                            origin_op_dist_attr.inputs_dist_attrs[
                                origin_input_name
                            ]
                        )
                        quant_op_dist_attr.set_input_dist_attr(
                            input_name, origin_input_dist_attr
                        )

                    for idx, output_name in enumerate(
                        quant_op.output_arg_names
                    ):
                        origin_output_name = origin_op.output_arg_names[idx]
                        origin_output_dist_attr = (
                            origin_op_dist_attr.outputs_dist_attrs[
                                origin_output_name
                            ]
                        )
                        quant_op_dist_attr.set_output_dist_attr(
                            output_name, origin_output_dist_attr
                        )

                        if not main_program.blocks[ib]._find_var_recursive(
                            output_name
                        ):
                            origin_output_var = main_program.blocks[
                                ib
                            ]._var_recursive(origin_output_name)
                            origin_out_tensor_dist_attr = (
                                dist_context.get_dist_tensor_for_program(
                                    origin_output_var
                                ).dist_attr
                            )
                            quant_output_var = block._var_recursive(output_name)
                            dist_context.set_tensor_dist_attr_for_program(
                                quant_output_var, origin_out_tensor_dist_attr
                            )

                dist_context.set_op_dist_attr_for_program(
                    quant_op, quant_op_dist_attr
                )

            # recover vars' dist_attr
            for name, dst_var in block.vars.items():
                if name in main_program.blocks[ib].vars:
                    src_var = main_program.blocks[ib].vars[name]
                    dist_tensor = dist_context.get_dist_tensor_for_program(
                        src_var
                    )
                    if not dist_tensor:
                        continue
                    dist_context.set_tensor_dist_attr_for_program(
                        dst_var, dist_tensor.dist_attr
                    )
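

# Example usage (a minimal sketch; the attribute names follow the set_attr /
# get_attr calls above, everything else here is an assumed setup):
#
#     from paddle.distributed.passes import new_pass
#
#     config = {
#         "dist_context": dist_context,
#         "params_grads": params_grads,
#         "mode": "train",
#         "loss": loss,
#         # quantization options read via get_attr:
#         "weight_bits": 8,
#         "activation_bits": 8,
#         "not_quant_pattern": ["skip_quant"],
#         "channel_wise_abs_max": True,
#         "onnx_format": False,
#     }
#     qat_pass = new_pass("auto_parallel_quantization", config)
#     qat_pass.apply([main_program], [startup_program], pass_context)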