# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import enum
from typing import Any, Callable, Dict, List, Optional

import numpy as np

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.initializer import NumpyArrayInitializer
from paddle.fluid.framework import convert_np_dtype_to_dtype_

from paddle.fluid.contrib.slim.quantization import QuantizationTransformPass
from paddle.fluid.contrib.slim.quantization import QuantizationFreezePass
from paddle.fluid.framework import IrGraph, IrNode, Operator
from paddle.fluid.executor import global_scope


class TensorConfig:
    '''
    A config builder for an input or a weight.
    '''

    def __init__(
        self,
        lod: Optional[List[List[int]]] = None,
        data_gen: Optional[Callable[..., np.ndarray]] = None,
        shape: Optional[List[int]] = None,
    ):
        '''
        lod: The LoD (level-of-detail) info of the tensor; None means no LoD.
        data_gen: A callable returning an np.ndarray; when given, it supplies
            the tensor's data, dtype, and shape.
        shape: The shape of the tensor; required when data_gen is None, in
            which case random normal float32 data is generated.
        '''
        self.lod = lod
        if data_gen is not None:
            self.data_gen = data_gen
            # Generate once and derive dtype/shape from the result instead of
            # invoking the (potentially random) generator three times.
            self.data = data_gen()
            self.dtype = self.data.dtype
            self.shape = self.data.shape
        else:
            assert (
                shape is not None
            ), "When data_gen is not defined, shape must not be None"
            self.data = np.random.normal(0.0, 1.0, shape).astype(np.float32)
            self.shape = shape
            self.dtype = self.data.dtype

    def __repr__(self):
        return str({'shape': self.shape, 'lod': self.lod, 'dtype': self.dtype})
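

# Example (illustrative sketch, not part of the original module): build an
# input TensorConfig from a data generator and a weight TensorConfig from a
# bare shape (random normal float32 data is generated for the latter).
#
#     def gen_input():
#         return np.random.random([1, 3, 32, 32]).astype(np.float32)
#
#     input_config = TensorConfig(data_gen=gen_input)
#     weight_config = TensorConfig(shape=[6, 3, 3, 3])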


class VarType(enum.Enum):
    LOD_TENSOR = 1
    LOD_TENSOR_ARRAY = 2
    STEP_SCOPES = 3


class OpConfig:
    '''A config builder for generating an Op.'''

    def __init__(
        self,
        type: str,
        inputs: Dict[str, List[str]],
        outputs: Dict[str, List[str]],
        attrs: Optional[Dict[str, Any]] = None,
        outputs_var_type: Optional[Dict[str, VarType]] = None,
        outputs_dtype: Optional[Dict[str, np.dtype]] = None,
        **kwargs,
    ):
        self.type = type
        self.inputs = inputs
        self.outputs = outputs
        self.outputs_dtype = outputs_dtype
        self.outputs_var_type = outputs_var_type
        self.attrs = attrs
        if self.attrs is None:
            self.attrs = dict()
        self.attrs.update(kwargs)

    def __repr__(self):
        log_str = self.type
        log_str += str(self.attrs)
        return log_str
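

# Example (illustrative sketch, not part of the original module): an OpConfig
# describing a conv2d op; extra keyword arguments are folded into attrs.
#
#     conv_op = OpConfig(
#         type="conv2d",
#         inputs={"Input": ["input_data"], "Filter": ["conv_weight"]},
#         outputs={"Output": ["conv_out"]},
#         attrs={
#             "strides": [1, 1],
#             "paddings": [0, 0],
#             "dilations": [1, 1],
#             "groups": 1,
#             "padding_algorithm": "EXPLICIT",
#             "data_format": "NCHW",
#         },
#     )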


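# Ops that have no compute kernel (control-flow, RPC, and collective
# communication helpers); infer_var_type/infer_shape are skipped for them
# when op descs are built below.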
_OP_WITHOUT_KERNEL_SET = {
    'feed',
    'fetch',
    'recurrent',
    'go',
    'rnn_memory_helper_grad',
    'conditional_block',
    'while',
    'send',
    'recv',
    'listen_and_serv',
    'fl_listen_and_serv',
    'ncclInit',
    'select',
    'checkpoint_notify',
    'gen_bkcl_id',
    'c_gen_bkcl_id',
    'gen_nccl_id',
    'c_gen_nccl_id',
    'c_comm_init',
    'c_sync_calc_stream',
    'c_sync_comm_stream',
    'queue_generator',
    'dequeue',
    'enqueue',
    'heter_listen_and_serv',
    'c_wait_comm',
    'c_wait_compute',
    'c_gen_hccl_id',
    'c_comm_init_hccl',
    'copy_cross_scope',
}


class BlockConfig:
    '''A config builder for generating a Block.'''

    def __init__(
        self,
        ops: List[OpConfig],
        vars: List[str],
        vars_dtype: Optional[Dict[str, np.dtype]] = None,
        vars_var_type: Optional[Dict[str, VarType]] = None,
        vars_lod_level: Optional[Dict[str, int]] = None,
    ):
        self.ops = ops
        self.vars = vars
        self.vars_dtype = vars_dtype
        self.vars_var_type = vars_var_type
        self.vars_lod_level = vars_lod_level

    def fill_block_desc(self, block_desc):
        # Declare every configured var in the block, then append the ops.
        for name in self.vars:
            var_desc = block_desc.var(name.encode())
            var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
            if (
                self.vars_lod_level is not None
                and name in self.vars_lod_level.keys()
            ):
                var_desc.set_lod_level(self.vars_lod_level[name])
            if (
                self.vars_var_type is not None
                and name in self.vars_var_type.keys()
            ):
                if self.vars_var_type[name] == VarType.LOD_TENSOR_ARRAY:
                    var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR_ARRAY)
                elif self.vars_var_type[name] == VarType.STEP_SCOPES:
                    var_desc.set_type(core.VarDesc.VarType.STEP_SCOPES)
                    continue
            var_desc.set_dtype(convert_np_dtype_to_dtype_(np.float32))
            if self.vars_dtype is not None and name in self.vars_dtype.keys():
                var_desc.set_dtype(
                    convert_np_dtype_to_dtype_(self.vars_dtype[name])
                )

        for op_config in self.ops:
            op_desc = block_desc.append_op()
            op_desc.set_type(op_config.type)
            for name, values in op_config.inputs.items():
                op_desc.set_input(name, values)
            for name, values in op_config.attrs.items():
                op_desc._set_attr(name, values)
            for name, values in op_config.outputs.items():
                op_desc.set_output(name, values)
                for v in values:
                    if block_desc.has_var_recursive(v.encode()):
                        continue
                    var_desc = block_desc.var(v.encode())
                    var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
                    if (
                        op_config.outputs_var_type is not None
                        and v in op_config.outputs_var_type.keys()
                    ):
                        if (
                            op_config.outputs_var_type[v]
                            == VarType.LOD_TENSOR_ARRAY
                        ):
                            var_desc.set_type(
                                core.VarDesc.VarType.LOD_TENSOR_ARRAY
                            )
                        elif (
                            op_config.outputs_var_type[v] == VarType.STEP_SCOPES
                        ):
                            var_desc.set_type(core.VarDesc.VarType.STEP_SCOPES)
                            continue
                    var_desc.set_dtype(convert_np_dtype_to_dtype_(np.float32))
                    if (
                        op_config.outputs_dtype is not None
                        and v in op_config.outputs_dtype.keys()
                    ):
                        var_desc.set_dtype(
                            convert_np_dtype_to_dtype_(
                                op_config.outputs_dtype[v]
                            )
                        )
            if op_config.type not in _OP_WITHOUT_KERNEL_SET:
                op_desc.infer_var_type(block_desc)
                op_desc.infer_shape(block_desc)
            op_desc.check_attrs()
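

# Example (illustrative sketch, not part of the original module): a
# BlockConfig that could serve as the "sub_block" attribute of a control-flow
# op; the op and variable names here are hypothetical.
#
#     sub_block = BlockConfig(
#         ops=[
#             OpConfig(
#                 type="scale",
#                 inputs={"X": ["loop_var"]},
#                 outputs={"Out": ["loop_var"]},
#                 attrs={"scale": 2.0, "bias": 0.0, "bias_after_scale": True},
#             )
#         ],
#         vars=["loop_var"],
#     )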


class ProgramConfig:
    '''A config builder for generating a Program.'''

    def __init__(
        self,
        ops: List[OpConfig],
        weights: Dict[str, TensorConfig],
        inputs: Dict[str, TensorConfig],
        outputs: List[str],
    ):
        self.ops = ops
        # If there are no weights to save, create a placeholder to help
        # serialize the params.
        if not weights:

            def generate_weight():
                return np.array([1]).astype(np.float32)

            self.weights = {
                "place_holder_weight": TensorConfig(data_gen=generate_weight)
            }
        else:
            self.weights = weights
        self.inputs = inputs
        self.outputs = outputs

    def __repr__(self):
        log_str = ' + '.join(repr(op) for op in self.ops)
        log_str += ' -- '
        for t, v in self.inputs.items():
            log_str += '[' + t + ': ' + str(v) + ']'
        for t, v in self.weights.items():
            log_str += '[' + t + ': ' + str(v) + ']'

        return log_str
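

# Example (illustrative sketch, not part of the original module): wire the
# hypothetical conv2d pieces from the OpConfig example into a ProgramConfig.
#
#     program_config = ProgramConfig(
#         ops=[conv_op],
#         weights={"conv_weight": TensorConfig(shape=[6, 3, 3, 3])},
#         inputs={"input_data": TensorConfig(data_gen=gen_input)},
#         outputs=["conv_out"],
#     )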


def create_fake_model(program_config):
    '''Create a Paddle model (in memory) according to the given config.'''
    paddle.enable_static()
    main_program_desc = core.ProgramDesc()
    util_program = fluid.Program()
    main_block_desc = main_program_desc.block(0)

    var_desc = main_block_desc.var(b"feed")
    var_desc.set_type(core.VarDesc.VarType.FEED_MINIBATCH)
    var_desc.set_persistable(True)

    # Declare each input var and prepend a feed op for it.
    index = 0
    for name, tensor_config in program_config.inputs.items():
        var_desc = main_block_desc.var(name.encode())
        var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
        var_desc.set_dtype(convert_np_dtype_to_dtype_(tensor_config.dtype))
        var_desc.set_shape(tensor_config.shape)
        print(f"name: {name}; shape: {tensor_config.shape}")
        var_desc.set_need_check_feed(True)
        if tensor_config.lod is not None:
            var_desc.set_lod_level(len(tensor_config.lod))
        op_desc = main_block_desc._prepend_op()
        op_desc.set_type("feed")
        op_desc.set_input('X', ["feed"])
        op_desc.set_output('Out', [name])
        op_desc._set_attr("col", index)
        index = index + 1

    # Declare persistable weight vars; their data is materialized through a
    # save_combine op in util_program below.
    save_var_map = {}
    for name, tensor_config in program_config.weights.items():
        var_desc = main_block_desc.var(name.encode())
        var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
        var_desc.set_dtype(convert_np_dtype_to_dtype_(tensor_config.dtype))
        var_desc.set_shape(tensor_config.shape)
        var_desc.set_persistable(True)

        save_var_map[name] = util_program.global_block().create_parameter(
            dtype=tensor_config.dtype,
            shape=tensor_config.shape,
            type=core.VarDesc.VarType.LOD_TENSOR,
            name=name,
            initializer=NumpyArrayInitializer(tensor_config.data),
        )
    in_vars = []
    for name in sorted(save_var_map.keys()):
        in_vars.append(save_var_map[name])

    out_var = util_program.global_block().create_var(
        type=core.VarDesc.VarType.RAW, name="out_var_0"
    )
    out_var.desc.set_persistable(True)
    util_program.global_block().append_op(
        type='save_combine',
        inputs={'X': in_vars},
        outputs={'Y': out_var},
        attrs={'file_path': '', 'save_to_memory': True},
    )
    # Append the configured ops; a "sub_block" attr gets its own block desc.
    for op_config in program_config.ops:
        op_desc = main_block_desc.append_op()
        op_desc.set_type(op_config.type)
        for name, values in op_config.inputs.items():
            op_desc.set_input(name, values)
        for name, values in op_config.attrs.items():
            if name == 'sub_block':
                sub_block_desc = main_program_desc.append_block(main_block_desc)
                values.fill_block_desc(sub_block_desc)
                op_desc._set_attr(name, sub_block_desc)
            else:
                op_desc._set_attr(name, values)
        for name, values in op_config.outputs.items():
            op_desc.set_output(name, values)
            for v in values:
                if main_block_desc.has_var_recursive(v.encode()):
                    continue
                var_desc = main_block_desc.var(v.encode())
                var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
                if (
                    op_config.outputs_var_type is not None
                    and v in op_config.outputs_var_type.keys()
                ):
                    if (
                        op_config.outputs_var_type[v]
                        == VarType.LOD_TENSOR_ARRAY
                    ):
                        var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR_ARRAY)
                    elif op_config.outputs_var_type[v] == VarType.STEP_SCOPES:
                        var_desc.set_type(core.VarDesc.VarType.STEP_SCOPES)
                        continue
                var_desc.set_dtype(convert_np_dtype_to_dtype_(np.float32))
                if (
                    op_config.outputs_dtype is not None
                    and v in op_config.outputs_dtype.keys()
                ):
                    var_desc.set_dtype(
                        convert_np_dtype_to_dtype_(op_config.outputs_dtype[v])
                    )
        if op_config.type not in _OP_WITHOUT_KERNEL_SET:
            op_desc.infer_var_type(main_block_desc)
            op_desc.infer_shape(main_block_desc)
        op_desc.check_attrs()

    # Append a fetch op for each program output.
    for index, name in enumerate(program_config.outputs):
        var_desc = main_block_desc.var(b"fetch")
        var_desc.set_type(core.VarDesc.VarType.FETCH_LIST)
        var_desc.set_need_check_feed(True)
        op_desc = main_block_desc.append_op()
        op_desc.set_type("fetch")
        op_desc.set_input('X', [name])
        op_desc.set_output('Out', ["fetch"])
        op_desc._set_attr("col", index)

    main_program_desc._set_version()
    paddle.fluid.core.save_op_version_info(main_program_desc)

    model = main_program_desc.serialize_to_string()

    util_program._sync_with_cpp()
    place = fluid.CPUPlace()
    executor = fluid.Executor(place)
    scope = fluid.Scope()
    with fluid.scope_guard(scope):
        executor.run(util_program)
        params = scope.find_var("out_var_0").get_bytes()

    return model, params
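

# Example (illustrative sketch): serialize the hypothetical program_config
# from the ProgramConfig example into an in-memory model; both return values
# are bytes suitable for a Paddle inference config.
#
#     model, params = create_fake_model(program_config)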


def create_quant_model(
    model,
    params,
    activation_quantize_type='moving_average_abs_max',
    weight_quantize_type='channel_wise_abs_max',
    save=False,
):
    place = paddle.CUDAPlace(0)
    scope = global_scope()
    exe = paddle.static.Executor(place)
    [
        inference_program,
        feed_target_names,
        fetch_targets,
    ] = paddle.static.load_inference_model(
        path_prefix=None,
        executor=exe,
        model_filename=model,
        params_filename=params,
    )
    graph = IrGraph(core.Graph(inference_program.desc), for_test=True)

    # Ops whose outputs get an "out_threshold" attribute below so the
    # quantization passes can record an output scale for them.
    out_scale_op_list = [
        "conv2d",
        "depthwise_conv2d",
        "mul",
        "matmul",
        "relu",
        "leaky_relu",
        "relu6",
        "sigmoid",
        "tanh",
        "prelu",
        "swish",
        "softmax",
        "batch_norm",
        "layer_norm",
        "elementwise_add",
        "pool2d",
        "reshape2",
        "transpose2",
        "concat",
        "elementwise_mul",
        "scale",
        "slice",
        "hard_swish",
        "hard_sigmoid",
        "conv2d_transpose",
        "gru",
        "bilinear_interp",
        "nearest_interp",
        "trilinear_interp",
        "flatten",
        "flatten2",
        "transpose",
        "pad2d",
        "reshape",
        "layer_norm",
    ]
    # Map from op type to its real input and output argument names.
    op_real_in_out_name = {
        "conv2d": [["Input", "Filter"], ["Output"]],
        "depthwise_conv2d": [["Input", "Filter"], ["Output"]],
        "conv2d_transpose": [["Input", "Filter"], ["Output"]],
        "mul": [["X", "Y"], ["Out"]],
        "matmul": [["X", "Y"], ["Out"]],
        "pool2d": [["X"], ["Out"]],
        "elementwise_add": [["X", "Y"], ["Out"]],
        "concat": [["X"], ["Out"]],
        "softmax": [["X"], ["Out"]],
        "argmax": [["X"], ["Out"]],
        "transpose": [["X"], ["Out"]],
        "equal": [["X", "Y"], ["Out"]],
        "gather": [["X"], ["Out"]],
        "greater_equal": [["X", "Y"], ["Out"]],
        "greater_than": [["X", "Y"], ["Out"]],
        "less_equal": [["X", "Y"], ["Out"]],
        "less_than": [["X", "Y"], ["Out"]],
        "mean": [["X"], ["Out"]],
        "not_equal": [["X", "Y"], ["Out"]],
        "reshape": [["X"], ["Out"]],
        "reshape2": [["X"], ["Out"]],
        "transpose2": [["X"], ["Out"]],
        "bilinear_interp": [["X"], ["Out"]],
        "nearest_interp": [["X"], ["Out"]],
        "trilinear_interp": [["X"], ["Out"]],
        "slice": [["Input"], ["Out"]],
        "squeeze": [["X"], ["Out"]],
        "elementwise_sub": [["X", "Y"], ["Out"]],
        "relu": [["X"], ["Out"]],
        "relu6": [["X"], ["Out"]],
        "leaky_relu": [["X"], ["Out"]],
        "prelu": [["X"], ["Out"]],
        "tanh": [["X"], ["Out"]],
        "swish": [["X"], ["Out"]],
        "dropout": [["X"], ["Out"]],
        "batch_norm": [["X"], ["Y"]],
        "layer_norm": [["X"], ["Y"]],
        "sigmoid": [["X"], ["Out"]],
        "elementwise_mul": [["X", "Y"], ["Out"]],
        "scale": [["X"], ["Out"]],
        "hard_swish": [["X"], ["Out"]],
        "hard_sigmoid": [["X"], ["Out"]],
        "gru": [["Input", "Weight"], ["Hidden"]],
        "lstm": [["Input", "Weight"], ["Hidden"]],
        "pad2d": [["X"], ["Out"]],
        "flatten": [["X"], ["Out"]],
        "flatten2": [["X"], ["Out"]],
    }

    def _get_op_output_var_names(op):
        """Return the real output variable names of the given op."""
        assert isinstance(
            op, (IrNode, Operator)
        ), "The input op should be IrNode or Operator."
        var_names = []
        op_name = op.name() if isinstance(op, IrNode) else op.type
        if op_name not in op_real_in_out_name:
            return []

        name_list = op_real_in_out_name[op_name][1]
        for name in name_list:
            var_name = op.output(name)
            if isinstance(var_name, list):
                var_names.extend(var_name)
            else:
                var_names.append(var_name)
        return var_names

    transform_pass = QuantizationTransformPass(
        scope=scope,
        place=place,
        activation_quantize_type=activation_quantize_type,
        weight_quantize_type=weight_quantize_type,
    )
    transform_pass.apply(graph)

    # Mark the outputs of every op in out_scale_op_list with a fixed fake
    # "out_threshold" (output scale) attribute.
    op_nodes = graph.all_op_nodes()
    for op_node in op_nodes:
        if op_node.name() in out_scale_op_list:
            var_names = _get_op_output_var_names(op_node)
            for var_name in var_names:
                out_node = graph._find_node_by_name(op_node.outputs, var_name)
                if out_node.dtype() not in [
                    core.VarDesc.VarType.FP64,
                    core.VarDesc.VarType.FP32,
                ]:
                    continue

                op_node.op()._set_attr("out_threshold", 3.0)

    # Freeze graph for inference, but the weight of fc/conv is still float type.
    freeze_pass = QuantizationFreezePass(
        scope=scope, place=place, weight_quantize_type=weight_quantize_type
    )
    freeze_pass.apply(graph)

    main_program = graph.to_program()

    # modify fake_quantize_moving_average_abs_max(InScale) and fake_channel_wise_dequantize_max_abs(Scales)
    op_nodes = graph.all_op_nodes()
    for op_node in op_nodes:
        if op_node.name() == 'fake_quantize_moving_average_abs_max':
            var_name = op_node.input("InScale")[0]
            tensor = scope.var(var_name).get_tensor()
            tensor.set(np.array([1], dtype=np.float32), place)
        elif op_node.name() == 'fake_channel_wise_dequantize_max_abs':
            var_name = op_node.input("Scales")[0]
            tensor = scope.var(var_name).get_tensor()
            tensor.set(np.ones(tensor.shape(), dtype=np.float32), place)

    if save:
        fluid.io.save_inference_model(
            'test_inference_model',
            feed_target_names,
            fetch_targets,
            exe,
            main_program=main_program,
        )

    feed_vars = [
        main_program.global_block().var(name) for name in feed_target_names
    ]
    serialized_program = paddle.static.serialize_program(
        feed_vars, fetch_targets, program=main_program
    )
    serialized_params = paddle.static.serialize_persistables(
        feed_vars, fetch_targets, executor=exe, program=main_program
    )
    return serialized_program, serialized_params
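

# Example (illustrative sketch): derive a fake-quantized model from the bytes
# produced by create_fake_model; note create_quant_model runs on
# paddle.CUDAPlace(0), so a CUDA device is required.
#
#     quant_model, quant_params = create_quant_model(model, params)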