Commit 28dfad5e authored by W WangZhen

fix some bugs about python3. test=develop

Parent 21752926
@@ -14,6 +14,7 @@
 import collections
 import numpy as np
+import six
 from ..... import compat as cpt
 from .... import core
 from ....framework import IrGraph
@@ -165,7 +166,7 @@ class QuantizationTransformPass(object):
         assert self._program_exe is not None, \
             'The program_exe cannot be set None when activation_quantize_type equals to range_abs_max.'
         init_program = Program()
-        for var_desc, initializer in self._need_initialized.iteritems():
+        for var_desc, initializer in six.iteritems(self._need_initialized):
             var = init_program.global_block().create_var(
                 name=var_desc.name(),
                 shape=var_desc.shape(),
......
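Background for the `six.iteritems` changes in this commit: Python 3 removed `dict.iteritems()`, so calling it raises `AttributeError`, while `six.iteritems(d)` dispatches to `d.iteritems()` on Python 2 and `d.items()` on Python 3. A minimal sketch (illustrative dictionary only, not taken from the patch):

import six

params = {'weight': 0.5, 'bias': 0.1}

# params.iteritems() works on Python 2 but raises AttributeError on Python 3;
# six.iteritems() yields (key, value) pairs on both interpreters.
for name, value in six.iteritems(params):
    print(name, value)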
@@ -151,11 +151,11 @@ class TestQuantizationTransformPass(unittest.TestCase):
             val_marked_nodes.add(op)
         val_graph.draw('.', 'val_fc_' + quant_type, val_marked_nodes)
 
-    def no_test_linear_fc_quant_abs_max(self):
+    def test_linear_fc_quant_abs_max(self):
         self.act_quant_op_type = 'fake_quantize_abs_max'
         self.linear_fc_quant('abs_max')
 
-    def no_test_linear_fc_quant_range_abs_max(self):
+    def test_linear_fc_quant_range_abs_max(self):
         self.act_quant_op_type = 'fake_quantize_range_abs_max'
         self.linear_fc_quant('range_abs_max')
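Note on the renames above (and the mirror-image `not_test_` renames later in this commit): `unittest.TestLoader` only collects methods whose names start with the `test` prefix, so adding a `no_` prefix is a low-tech way to disable a case, and dropping it re-enables the case. A minimal sketch of that loader behavior (hypothetical test class):

import unittest

class Demo(unittest.TestCase):
    def test_collected(self):       # runs: name matches the default 'test' prefix
        self.assertTrue(True)

    def no_test_disabled(self):     # never collected by the default loader
        raise AssertionError('not invoked by unittest.main()')

if __name__ == '__main__':
    unittest.main()                 # executes only test_collected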
@@ -187,11 +187,11 @@ class TestQuantizationTransformPass(unittest.TestCase):
             val_marked_nodes.add(op)
         val_graph.draw('.', 'val_residual_' + quant_type, val_marked_nodes)
 
-    def no_test_residual_block_abs_max(self):
+    def test_residual_block_abs_max(self):
         self.act_quant_op_type = 'fake_quantize_abs_max'
         self.residual_block_quant('abs_max')
 
-    def no_test_residual_block_range_abs_max(self):
+    def test_residual_block_range_abs_max(self):
         self.act_quant_op_type = 'fake_quantize_range_abs_max'
         self.residual_block_quant('range_abs_max')
@@ -249,13 +249,13 @@ class TestQuantizationFreezePass(unittest.TestCase):
         quantized_main_program = main_graph.to_program()
         quantized_test_program = test_graph.to_program()
         iters = 5
-        batch_size = 16
+        batch_size = 8
 
-        train_exe = fluid.ParallelExecutor(
-            main_program=quantized_main_program,
-            use_cuda=bool(use_cuda),
-            loss_name=loss.name,
-            scope=scope)
+        #train_exe = fluid.ParallelExecutor(
+        #    main_program=quantized_main_program,
+        #    use_cuda=bool(use_cuda),
+        #    loss_name=loss.name,
+        #    scope=scope)
         train_reader = paddle.batch(
             paddle.reader.shuffle(
                 paddle.dataset.mnist.train(), buf_size=500),
@@ -266,11 +266,11 @@
         with fluid.scope_guard(scope):
             for _ in range(iters):
                 data = next(train_reader())
-                #loss_v = exe.run(program=quantized_main_program,
-                #                 feed=feeder.feed(data),
-                #                 fetch_list=[loss])
-                loss_v = train_exe.run(feed=feeder.feed(data),
-                                       fetch_list=[loss.name])
+                loss_v = exe.run(program=quantized_main_program,
+                                 feed=feeder.feed(data),
+                                 fetch_list=[loss])
+                #loss_v = train_exe.run(feed=feeder.feed(data),
+                #                       fetch_list=[loss.name])
                 #print('{}: {}'.format('loss' + dev_name + quant_type, loss_v))
 
             test_data = next(test_reader())
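The two hunks above swap `fluid.ParallelExecutor` for the plain `fluid.Executor` held in `exe`: `ParallelExecutor` binds the program at construction and fetches by variable name (`loss.name`), whereas `Executor.run` takes the program per call and fetches `Variable` objects. A minimal single-device sketch of that pattern under the fluid 1.x API (trivial stand-in network, not the test's model):

import numpy as np
import paddle.fluid as fluid

main = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(main, startup):
    x = fluid.layers.data(name='x', shape=[1], dtype='float32')
    loss = fluid.layers.mean(fluid.layers.fc(input=x, size=1))

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup)                                      # initialize parameters once
loss_v = exe.run(program=main,                        # program passed per call
                 feed={'x': np.ones((4, 1), 'float32')},
                 fetch_list=[loss])                   # fetch by Variable, not name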
@@ -349,21 +349,21 @@ class TestQuantizationFreezePass(unittest.TestCase):
                 ['image', 'label'], [loss], exe,
                 mobile_program)
 
-    def test_freeze_program_cuda_dynamic(self):
+    def test_freeze_graph_cuda_dynamic(self):
         if fluid.core.is_compiled_with_cuda():
             with fluid.unique_name.guard():
                 self.freeze_graph(True, seed=1, quant_type='abs_max')
 
-    def test_freeze_program_cpu_dynamic(self):
+    def test_freeze_graph_cpu_dynamic(self):
         with fluid.unique_name.guard():
             self.freeze_graph(False, seed=2, quant_type='abs_max')
 
-    def test_freeze_program_cuda_static(self):
+    def test_freeze_graph_cuda_static(self):
         if fluid.core.is_compiled_with_cuda():
             with fluid.unique_name.guard():
                 self.freeze_graph(True, seed=1, quant_type='range_abs_max')
 
-    def test_freeze_program_cpu_static(self):
+    def test_freeze_graph_cpu_static(self):
         with fluid.unique_name.guard():
             self.freeze_graph(False, seed=2, quant_type='range_abs_max')
......
@@ -204,7 +204,7 @@ class TestQuantizeTranspiler(unittest.TestCase):
         build_program(test_program, startup, True)
         test_program = test_program.clone(for_test=True)
 
-        quant_type = 'range_abs_max'
+        quant_type = 'range_abs_max'  # 'range_abs_max' or 'abs_max'
         quant_transpiler = QuantizeTranspiler(
             activation_quantize_type=quant_type)
         quant_transpiler.training_transpile(main, startup)
@@ -225,14 +225,12 @@ class TestQuantizeTranspiler(unittest.TestCase):
             paddle.dataset.mnist.test(), batch_size=batch_size)
         feeder = fluid.DataFeeder(feed_list=feeds, place=place)
         dev_name = '_gpu_' if use_cuda else '_cpu_'
 
-        with fluid.program_guard(main):
-            for _ in range(iters):
-                data = next(train_reader())
-                loss_v = exe.run(program=main,
-                                 feed=feeder.feed(data),
-                                 fetch_list=[loss])
-                print('{}: {}'.format('loss' + dev_name + quant_type, loss_v))
+        for _ in range(iters):
+            data = next(train_reader())
+            loss_v = exe.run(program=main,
+                             feed=feeder.feed(data),
+                             fetch_list=[loss])
 
         with fluid.program_guard(test_program):
             test_data = next(test_reader())
@@ -249,19 +247,11 @@ class TestQuantizeTranspiler(unittest.TestCase):
                                   feed=feeder.feed(test_data),
                                   fetch_list=[loss])
             self.assertAlmostEqual(test_loss1, test_loss2, delta=5e-3)
-            print('{}: {}'.format('test_loss1' + dev_name + quant_type,
-                                  test_loss1))
-            print('{}: {}'.format('test_loss2' + dev_name + quant_type,
-                                  test_loss2))
             w_freeze = np.array(fluid.global_scope().find_var('conv2d_1.w_0')
                                 .get_tensor())
             # fail: -432.0 != -433.0, this is due to the calculation precision
             #self.assertAlmostEqual(np.sum(w_freeze), np.sum(w_quant))
-            print('{}: {}'.format('w_freeze' + dev_name + quant_type,
-                                  np.sum(w_freeze)))
-            print('{}: {}'.format('w_quant' + dev_name + quant_type,
-                                  np.sum(w_quant)))
 
             # Convert parameter to 8-bit.
             quant_transpiler.convert_to_int8(test_program, place)
             # Save the 8-bit parameter and model file.
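Side note on the `# fail: -432.0 != -433.0` comment above: `assertAlmostEqual` compares to 7 decimal places by default, which the float32 weight sums cannot satisfy. A hedged alternative, not part of this commit, is the explicit `delta` keyword already used for the loss comparison; a self-contained sketch with illustrative stand-in values:

import unittest

class DeltaDemo(unittest.TestCase):
    def test_sum_close(self):
        s_freeze, s_quant = -432.0, -433.0   # stand-ins for the weight sums
        # The default 7-decimal-place check fails on these values;
        # an absolute tolerance makes the accepted error explicit.
        self.assertAlmostEqual(s_freeze, s_quant, delta=1.5)

if __name__ == '__main__':
    unittest.main()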
@@ -276,17 +266,13 @@ class TestQuantizeTranspiler(unittest.TestCase):
             self.assertEqual(w_8bit.dtype, np.int8)
             self.assertEqual(np.sum(w_8bit), np.sum(w_freeze))
-            print('{}: {}'.format('w_8bit' + dev_name + quant_type,
-                                  np.sum(w_8bit)))
-            print('{}: {}'.format('w_freeze' + dev_name + quant_type,
-                                  np.sum(w_freeze)))
 
-    def test_freeze_program_cuda(self):
+    def not_test_freeze_program_cuda(self):
         if fluid.core.is_compiled_with_cuda():
             with fluid.unique_name.guard():
                 self.freeze_program(True, seed=1)
 
-    def test_freeze_program_cpu(self):
+    def not_test_freeze_program_cpu(self):
         with fluid.unique_name.guard():
             self.freeze_program(False, seed=2)
......
@@ -1681,14 +1681,14 @@ class IrGraph(object):
         """
         op_desc = core.OpDesc()
         op_desc.set_type(op_type)
-        for attr, value in attrs.iteritems():
+        for attr, value in six.iteritems(attrs):
             self._update_desc_attr(op_desc, attr, value)
-        for input_name, var_nodes in inputs.iteritems():
+        for input_name, var_nodes in six.iteritems(inputs):
             if not isinstance(var_nodes, list):
                 var_nodes = [var_nodes]
             op_desc.set_input(input_name,
                               [var_node.name() for var_node in var_nodes])
-        for output_name, var_nodes in outputs.iteritems():
+        for output_name, var_nodes in six.iteritems(outputs):
             if not isinstance(var_nodes, list):
                 var_nodes = [var_nodes]
             op_desc.set_output(output_name,
......