Commit 96138ca8 authored by Dang Qingqing

Fix unit test.

Parent 9e580631
@@ -208,17 +208,21 @@ class TestQuantizeTranspiler(unittest.TestCase):
             paddle.dataset.mnist.test(), batch_size=batch_size)
         feeder = fluid.DataFeeder(feed_list=[img, label], place=place)
-        for _ in range(iter):
-            data = train_reader().next()
-            loss_v = exe.run(program=main,
-                             feed=feeder.feed(data),
-                             fetch_list=[loss])
-        test_data = test_reader().next()
-        f_var = fluid.framework.get_var('conv2d_1.tmp_0', test_program)
-        w_var = fluid.framework.get_var('conv2d_1.w_0.quantized', test_program)
-        # Testing during training
-        test_loss1, f_v1, w_quant = exe.run(program=test_program,
-                                            feed=feeder.feed(test_data),
-                                            fetch_list=[loss, f_var, w_var])
+        with fluid.program_guard(main):
+            for _ in range(iter):
+                data = train_reader().next()
+                loss_v = exe.run(program=main,
+                                 feed=feeder.feed(data),
+                                 fetch_list=[loss])
+        with fluid.program_guard(test_program):
+            test_data = test_reader().next()
+            f_var = fluid.framework.get_var('conv2d_1.tmp_0', test_program)
+            w_var = fluid.framework.get_var('conv2d_1.w_0.quantized',
+                                            test_program)
+            # Testing during training
+            test_loss1, f_v1, w_quant = exe.run(
+                program=test_program,
+                feed=feeder.feed(test_data),
+                fetch_list=[loss, f_var, w_var])
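Why the `program_guard` wrappers: `fluid.program_guard` temporarily makes the given `Program` the default, so every layer, op, and variable built inside the block is recorded into that program rather than the global default. Without it, the training and testing halves of this test can leak state into each other's programs. A minimal sketch of the mechanism, assuming the Fluid 1.x API (the toy layers are illustrative, not taken from the test):

```python
import paddle.fluid as fluid

main = fluid.Program()
startup = fluid.Program()

# Ops and variables created inside the guard land in `main`/`startup`,
# not in fluid.default_main_program().
with fluid.program_guard(main, startup):
    img = fluid.layers.data(name='image', shape=[1, 28, 28], dtype='float32')
    hidden = fluid.layers.fc(input=img, size=10)

print(len(main.global_block().ops))  # > 0: the fc ops went into `main`
print(len(fluid.default_main_program().global_block().ops))  # still 0
```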
@@ -229,8 +233,8 @@ class TestQuantizeTranspiler(unittest.TestCase):
         test_loss2, f_v2 = exe.run(program=test_program,
                                    feed=feeder.feed(test_data),
                                    fetch_list=[loss, fv2])
-        self.assertAlmostEqual(test_loss1, test_loss2, delta=1e-5)
-        self.assertAlmostEqual(f_v1.all(), f_v2.all(), delta=1e-5)
+        self.assertAlmostEqual(test_loss1, test_loss2, delta=1e-3)
+        self.assertTrue(np.allclose(f_v1, f_v2, rtol=1e-05, atol=1e-05))
         w_freeze = np.array(fluid.global_scope().find_var('conv2d_1.w_0')
                             .get_tensor())
         self.assertEqual(np.sum(w_freeze), np.sum(w_quant))
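The second assertion was the real bug here: `ndarray.all()` reduces an array to a single boolean, so the old check compared `True` against `True` whenever both outputs happened to be all-nonzero, and `delta` never inspected a single element. Switching to `np.allclose` makes the comparison element-wise; the loss tolerance is also relaxed from `1e-5` to `1e-3`. A self-contained illustration in plain NumPy (values invented):

```python
import numpy as np

a = np.array([1.0, 2.0, 3.0])
b = np.array([1.0, 2.0, 4.0])   # differs in the last element

# Old style: .all() collapses each array to a single bool,
# so this "passes" even though the arrays clearly differ.
print(a.all() == b.all())                          # True  (false pass)

# New style: element-wise comparison within tolerances.
print(np.allclose(a, b, rtol=1e-05, atol=1e-05))   # False (correct)
```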
@@ -238,10 +242,11 @@ class TestQuantizeTranspiler(unittest.TestCase):
         # Convert parameter to 8-bit.
         quant_transpiler.convert_to_int8(test_program, place)
         # Save the 8-bit parameter and model file.
-        fluid.io.save_inference_model('model_8bit', ['image', 'label'], [loss],
-                                      exe, test_program)
+        fluid.io.save_inference_model('model_8bit', ['image', 'label'],
+                                      [loss], exe, test_program)
         # Test whether the 8-bit parameter and model file can be loaded successfully.
-        [infer, feed, fetch] = fluid.io.load_inference_model('model_8bit', exe)
+        [infer, feed, fetch] = fluid.io.load_inference_model('model_8bit',
+                                                             exe)
         # Check the loaded 8-bit weight.
         w_8bit = np.array(fluid.global_scope().find_var('conv2d_1.w_0.int8')
                           .get_tensor())
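For context, the reflowed calls keep the same positional signatures: `save_inference_model(dirname, feeded_var_names, target_vars, executor, main_program)` prunes the program down to the named feed/fetch interface and serializes it with its persistable parameters, while `load_inference_model(dirname, executor)` returns the deserialized program, feed names, and fetch targets. A minimal round trip under the Fluid 1.x API (the tiny network is invented for illustration):

```python
import paddle.fluid as fluid

main, startup = fluid.Program(), fluid.Program()
with fluid.program_guard(main, startup):
    img = fluid.layers.data(name='image', shape=[784], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    pred = fluid.layers.fc(input=img, size=10, act='softmax')
    loss = fluid.layers.mean(
        fluid.layers.cross_entropy(input=pred, label=label))

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup)

# Prune `main` to the image/label -> loss interface and save it.
fluid.io.save_inference_model('model_8bit', ['image', 'label'], [loss], exe,
                              main)
# Returns (inference_program, feed_target_names, fetch_targets).
[infer, feed, fetch] = fluid.io.load_inference_model('model_8bit', exe)
```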
@@ -251,9 +256,11 @@ class TestQuantizeTranspiler(unittest.TestCase):
     def test_freeze_program_cuda(self):
         if fluid.core.is_compiled_with_cuda():
-            self.freeze_program(True)
+            with fluid.unique_name.guard():
+                self.freeze_program(True)
 
     def test_freeze_program_cpu(self):
-        self.freeze_program(False)
+        with fluid.unique_name.guard():
+            self.freeze_program(False)
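The `unique_name.guard()` wrappers address cross-test interference: layer names such as `conv2d_1.w_0` come from a process-wide counter, so building the network a second time (the CPU test after the CUDA test) would otherwise produce `conv2d_2`, `conv2d_3`, ... and the `find_var('conv2d_1...')` lookups above would miss. A quick sketch of the counter behavior, using `fluid.unique_name.generate`, the helper behind layer naming:

```python
import paddle.fluid as fluid

with fluid.unique_name.guard():
    print(fluid.unique_name.generate('conv2d'))  # conv2d_0
    print(fluid.unique_name.generate('conv2d'))  # conv2d_1

# A fresh guard installs a fresh generator, so numbering restarts
# instead of continuing with conv2d_2, conv2d_3, ...
with fluid.unique_name.guard():
    print(fluid.unique_name.generate('conv2d'))  # conv2d_0
```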
...