Commit 993cbbcd authored by Dang Qingqing

Move quantization transpiler to fluid/contrib/quantize path.

Parent 8a850e21
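
A minimal usage sketch (not part of the diff) of the import-path change this commit makes, assuming a PaddlePaddle build that includes it; the class name and its no-argument constructor are taken from the updated unit test below:

    # Old usage, removed by this commit:
    #   import paddle.fluid as fluid
    #   t = fluid.QuantizeTranspiler()
    # New usage after the move to fluid/contrib/quantize:
    from paddle.fluid.contrib.quantize.quantize_transpiler import QuantizeTranspiler

    t = QuantizeTranspiler()
    # t.training_transpile(program) rewrites a fluid Program for
    # quantization-aware training, as exercised in the updated
    # test_quantize_transpiler.py below.
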
......@@ -87,6 +87,7 @@ if (WITH_TESTING)
endif()
endif()
add_subdirectory(paddle/fluid/tests)
add_subdirectory(paddle/fluid/contrib/tests)
endif()
install(DIRECTORY ${PADDLE_PYTHON_PACKAGE_DIR}
DESTINATION opt/paddle/share/wheels
......
......@@ -47,7 +47,7 @@ from .param_attr import ParamAttr, WeightNormParamAttr
from .data_feeder import DataFeeder
from .core import LoDTensor, LoDTensorArray, CPUPlace, CUDAPlace, CUDAPinnedPlace, Scope
from .transpiler import DistributeTranspiler, InferenceTranspiler, \
memory_optimize, release_memory, DistributeTranspilerConfig, QuantizeTranspiler
memory_optimize, release_memory, DistributeTranspilerConfig
from .lod_tensor import create_lod_tensor, create_random_int_lodtensor
from . import clip
from . import profiler
......
......@@ -18,5 +18,7 @@ from . import decoder
from .decoder import *
from . import memory_usage_calc
from .memory_usage_calc import *
from . import quantize
__all__ = decoder.__all__ + memory_usage_calc.__all__
__all__ += quantize.__all__
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from . import quantize_transpiler
__all__ = quantize_transpiler.__all__
......@@ -18,14 +18,16 @@ import numpy as np
from paddle.fluid.framework import default_main_program, default_startup_program, program_guard
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid import unique_name
from paddle.fluid import core
from paddle.fluid.initializer import Constant
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.nn import autoincreased_step_counter
from .. import core
from ..framework import Variable
from ..executor import global_scope
from inference_transpiler import InferenceTranspiler
from paddle.fluid.framework import Variable
from paddle.fluid.executor import global_scope
from paddle.fluid.transpiler.inference_transpiler import InferenceTranspiler
__all__ = ['QuantizeTranspiler']
_QUANTIZABLE_OP_TYPES = ['conv2d', 'depthwise_conv2d', 'mul']
......
......@@ -13,17 +13,20 @@
# limitations under the license.
import numpy as np
import six
import unittest
import paddle
import paddle.fluid as fluid
from paddle.fluid.transpiler.quantize_transpiler import _original_var_name
from paddle.fluid.contrib.quantize.quantize_transpiler import _original_var_name
from paddle.fluid.contrib.quantize.quantize_transpiler import QuantizeTranspiler
def linear_fc(num):
data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in xrange(num):
for _ in six.moves.xrange(num):
hidden = fluid.layers.fc(hidden, size=128, act='relu')
loss = fluid.layers.cross_entropy(input=hidden, label=label)
loss = fluid.layers.mean(loss)
......@@ -51,7 +54,7 @@ def residual_block(num):
data = fluid.layers.data(name='image', shape=[1, 32, 32], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
hidden = data
for _ in xrange(num):
for _ in six.moves.xrange(num):
conv = conv_bn_layer(hidden, 16, 3, 1, 1, act=None, bias_attr=True)
short = conv_bn_layer(hidden, 16, 1, 1, 0, act=None)
hidden = fluid.layers.elementwise_add(x=conv, y=short, act='relu')
......@@ -142,7 +145,7 @@ class TestQuantizeTranspiler(unittest.TestCase):
loss = linear_fc(3)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
t = fluid.QuantizeTranspiler(activation_quantize_type=quant_type)
t = QuantizeTranspiler(activation_quantize_type=quant_type)
t.training_transpile(main)
self.check_program(main)
......@@ -161,7 +164,7 @@ class TestQuantizeTranspiler(unittest.TestCase):
loss = residual_block(2)
opt = fluid.optimizer.Adam(learning_rate=0.001)
opt.minimize(loss)
t = fluid.QuantizeTranspiler(activation_quantize_type=quant_type)
t = QuantizeTranspiler(activation_quantize_type=quant_type)
t.training_transpile(main)
self.check_program(main)
......@@ -176,7 +179,7 @@ class TestQuantizeTranspiler(unittest.TestCase):
def freeze_program(self, use_cuda):
main = fluid.Program()
startup = fluid.Program()
quant_transpiler = fluid.QuantizeTranspiler()
quant_transpiler = QuantizeTranspiler()
with fluid.program_guard(main, startup):
img = fluid.layers.data(
name='image', shape=[1, 28, 28], dtype='float32')
......@@ -247,7 +250,11 @@ class TestQuantizeTranspiler(unittest.TestCase):
self.assertEqual(np.sum(w_8bit), np.sum(w_freeze))
def test_freeze_program_cuda(self):
self.freeze_program(True)
if fluid.core.is_compiled_with_cuda():
self.freeze_program(True)
def test_freeze_program_cpu(self):
self.freeze_program(False)
if __name__ == '__main__':
......
......@@ -8,4 +8,3 @@ endforeach()
add_subdirectory(unittests)
add_subdirectory(book)
add_subdirectory(book_memory_optimization)
add_subdirectory(transpiler)
......@@ -16,12 +16,15 @@ from __future__ import print_function
from .distribute_transpiler import DistributeTranspiler, DistributeTranspilerConfig
from .inference_transpiler import InferenceTranspiler
from .quantize_transpiler import QuantizeTranspiler
from .memory_optimization_transpiler import memory_optimize, release_memory
from .ps_dispatcher import HashName, RoundRobin
__all__ = [
"DistributeTranspiler", "InferenceTranspiler", "memory_optimize",
"release_memory", "HashName", "RoundRobin", "DistributeTranspilerConfig",
"QuantizeTranspiler"
"DistributeTranspiler",
"InferenceTranspiler",
"memory_optimize",
"release_memory",
"HashName",
"RoundRobin",
"DistributeTranspilerConfig",
]