# for dygraph quantization, layers of type in quantizable_layer_type will be quantized
'quantizable_layer_type': ['Conv2D', 'Linear'],
# whether fuse conv and bn before QAT
'fuse_conv_bn': False,
}
...
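# Illustrative usage (a minimal sketch, not part of the excerpt above): assuming the
# PaddleSlim dygraph QAT API (paddleslim.dygraph.quant.QAT), a config carrying the two
# options shown above is applied to a dygraph model. mobilenet_v1 is only a stand-in
# here; any paddle.nn.Layer network would be handled the same way.
import paddle
from paddle.vision.models import mobilenet_v1
from paddleslim.dygraph.quant import QAT

model = mobilenet_v1(pretrained=False)

quant_config = {
    # only layers of these types are wrapped with fake quant/dequant logic
    'quantizable_layer_type': ['Conv2D', 'Linear'],
    # whether to fold BatchNorm into the preceding Conv2D before inserting fake quant ops
    'fuse_conv_bn': False,
}

quanter = QAT(config=quant_config)
quanter.quantize(model)  # replaces quantizable sublayers of `model` with fake-quant wrappers
# ... train `model` as usual, then export the quantized inference model
quanter.save_quantized_model(
    model, 'qat_mobilenet',
    input_spec=[paddle.static.InputSpec(shape=[None, 3, 224, 224], dtype='float32')])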
@@ -141,7 +143,8 @@ class PACT(paddle.nn.Layer):
...
class QAT(object):
"""
"""
    Quant Aware Training(QAT): Add the fake quant logic for given quantizable layers,
    namely add the quant_dequant computational logic both for activation inputs and weight inputs.