Unverified commit 2f8c7e02 authored by Tao Luo, committed by GitHub

remove unused inference_transpiler unit-tests (#19130)

* remove unused inference_transpiler unit-tests

test=develop

* remove InferenceTranspiler usage in quantize_transpiler.py

test=develop
Parent 708bd979
@@ -472,7 +472,7 @@ paddle.fluid.contrib.op_freq_statistic (ArgSpec(args=['program'], varargs=None,
paddle.fluid.contrib.QuantizeTranspiler ('paddle.fluid.contrib.quantize.quantize_transpiler.QuantizeTranspiler', ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
paddle.fluid.contrib.QuantizeTranspiler.__init__ (ArgSpec(args=['self', 'weight_bits', 'activation_bits', 'activation_quantize_type', 'weight_quantize_type', 'window_size', 'moving_rate'], varargs=None, keywords=None, defaults=(8, 8, 'abs_max', 'abs_max', 10000, 0.9)), ('document', '14b39f1fcd5667ff556b1aad94357d1d'))
paddle.fluid.contrib.QuantizeTranspiler.convert_to_int8 (ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,)), ('document', '6adf97f83acf6453d4a6a4b1070f3754'))
-paddle.fluid.contrib.QuantizeTranspiler.freeze_program (ArgSpec(args=['self', 'program', 'place', 'fuse_bn', 'scope'], varargs=None, keywords=None, defaults=(False, None)), ('document', '909675a1ab055c69b436a7893fcae4fd'))
+paddle.fluid.contrib.QuantizeTranspiler.freeze_program (ArgSpec(args=['self', 'program', 'place', 'scope'], varargs=None, keywords=None, defaults=(None,)), ('document', '909675a1ab055c69b436a7893fcae4fd'))
paddle.fluid.contrib.QuantizeTranspiler.training_transpile (ArgSpec(args=['self', 'program', 'startup_program'], varargs=None, keywords=None, defaults=(None, None)), ('document', '6dd9909f10b283ba2892a99058a72884'))
paddle.fluid.contrib.distributed_batch_reader (ArgSpec(args=['batch_reader'], varargs=None, keywords=None, defaults=None), ('document', 'b60796eb0a481484dd34e345f0eaa4d5'))
paddle.fluid.contrib.Compressor ('paddle.fluid.contrib.slim.core.compressor.Compressor', ('document', 'a5417774a94aa9ae5560a42b96527e7d'))
@@ -25,7 +25,6 @@ from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.nn import autoincreased_step_counter
from paddle.fluid.framework import Variable
from paddle.fluid.executor import global_scope
-from paddle.fluid.transpiler.inference_transpiler import InferenceTranspiler
__all__ = ['QuantizeTranspiler']
@@ -221,7 +220,7 @@ class QuantizeTranspiler(object):
                self.activation_quantize_type == 'range_abs_max':
            self.global_step = autoincreased_step_counter()

-    def freeze_program(self, program, place, fuse_bn=False, scope=None):
+    def freeze_program(self, program, place, scope=None):
        """Freeze input training program for inference.

        Args:
@@ -232,10 +231,6 @@ class QuantizeTranspiler(object):
        scope = global_scope() if scope is None else scope
        program = default_main_program() if program is None else program

-        if fuse_bn:
-            bn_fuse_transpiler = BNFuseTranspiler()
-            bn_fuse_transpiler.transpile(program, place)

        persistable_vars = [
            v.name
            for v in filter(lambda var: var.persistable, program.list_vars())
@@ -564,58 +559,3 @@ class QuantizeTranspiler(object):
                    'Scale': scale},
            outputs={"Out": dequant_var})
        return dequant_var
-class BNFuseTranspiler(InferenceTranspiler):
-    def _fuse_param(self, current_op, bn_op, bias_op, with_bias):
-        def _update_param(op, param_name, new_param):
-            var = self.block.vars[param_name]
-            tensor = self.scope.find_var(param_name).get_tensor()
-            tensor.set(np.array(new_param), self.place)
-
-        def _load_param(param_name):
-            return np.array(self.scope.find_var(param_name).get_tensor())
-
-        bias_bn = _load_param(bn_op.input("Bias")[0])  #Bias
-        scale_bn = _load_param(bn_op.input("Scale")[0])  #Scale
-        mean_bn = _load_param(bn_op.input("Mean")[0])  #Mean
-        var_bn = _load_param(bn_op.input("Variance")[0])  #Variance
-
-        if current_op.type in ['conv2d', 'depthwise_conv2d']:
-            current_param = _load_param(
-                _original_var_name(current_op.input("Filter")[0]))
-        elif current_op.type == 'mul':
-            current_param = _load_param(
-                _original_var_name(current_op.input("Y")[0]))
-
-        std_bn = np.float32(np.sqrt(np.add(var_bn, 1e-5)))
-        tmp = np.float32(np.divide(scale_bn, std_bn))
-
-        # add bias of batch_norm_op to conv2d
-        if with_bias:
-            bias = _load_param(bias_op.input("Y"))
-        else:
-            bias = np.zeros(bias_bn.shape)
-        bias = np.float32(
-            np.add(np.multiply(np.subtract(bias, mean_bn), tmp), bias_bn))
-
-        # re-compute weight of conv2d/fc
-        tmp = tmp.reshape(tmp.shape[0], -1)
-        dst_param = current_param.reshape((tmp.shape[0], -1))
-        dst_param = np.float32(np.multiply(dst_param, tmp))
-        dst_param = dst_param.reshape(current_param.shape)
-
-        # update parameters
-        if current_op.type in ['conv2d', 'depthwise_conv2d']:
-            _update_param(current_op,
-                          _original_var_name(current_op.input("Filter")[0]),
-                          dst_param)
-        elif current_op.type == 'mul':
-            _update_param(current_op,
-                          _original_var_name(current_op.input("Y")[0]),
-                          dst_param)
-
-        _update_param(bias_op, bias_op.input("Y")[0], bias)
-
-        # collect the renamed input
-        self.input_map[bn_op.output("Y")[0]] = bias_op.output("Out")[0]
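
For reference, the arithmetic that the deleted _fuse_param performed is standard conv+BN folding: the batch_norm parameters are absorbed into the preceding conv2d/mul weights so the batch_norm op can be dropped at inference time. A minimal standalone numpy sketch of that math (the function name fold_batch_norm and its argument names are illustrative, not Paddle API):

import numpy as np

def fold_batch_norm(weight, conv_bias, scale, bn_bias, mean, var, eps=1e-5):
    # Fold batch_norm(conv(x, weight) + conv_bias) into one conv, so that
    # conv(x, new_weight) + new_bias reproduces the same output using the
    # stored running statistics (mean, var).
    std = np.sqrt(var + eps)                   # per-output-channel running std
    factor = (scale / std).astype(np.float32)  # per-channel rescaling factor
    # Rescale every output channel of the weight tensor.
    flat = weight.reshape(factor.shape[0], -1) * factor[:, None]
    new_weight = flat.reshape(weight.shape).astype(np.float32)
    # Shift the bias by the normalization offset.
    new_bias = ((conv_bias - mean) * factor + bn_bias).astype(np.float32)
    return new_weight, new_bias
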
@@ -242,31 +242,16 @@ def infer(use_cuda, save_dirname=None):
        batch_size = 1
        tensor_img = numpy.random.rand(batch_size, 3, 32, 32).astype("float32")

-        # Use inference_transpiler to speedup
-        inference_transpiler_program = inference_program.clone()
-        t = fluid.transpiler.InferenceTranspiler()
-        t.transpile(inference_transpiler_program, place)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: tensor_img},
                          fetch_list=fetch_targets)

-        transpiler_results = exe.run(inference_transpiler_program,
-                                     feed={feed_target_names[0]: tensor_img},
-                                     fetch_list=fetch_targets)
-
-        assert len(results[0]) == len(transpiler_results[0])
-        for i in range(len(results[0])):
-            np.testing.assert_almost_equal(
-                results[0][i], transpiler_results[0][i], decimal=4)

        print("infer results: ", results[0])

        fluid.io.save_inference_model(save_dirname, feed_target_names,
-                                      fetch_targets, exe,
-                                      inference_transpiler_program)
+                                      fetch_targets, exe, inference_program)
def main(net_type, use_cuda, is_local=True):
@@ -221,31 +221,16 @@ def infer(use_cuda, save_dirname=None):
        batch_size = 1
        tensor_img = numpy.random.rand(batch_size, 3, 32, 32).astype("float32")

-        # Use inference_transpiler to speedup
-        inference_transpiler_program = inference_program.clone()
-        t = fluid.transpiler.InferenceTranspiler()
-        t.transpile(inference_transpiler_program, place)

        # Construct feed as a dictionary of {feed_target_name: feed_target_data}
        # and results will contain a list of data corresponding to fetch_targets.
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: tensor_img},
                          fetch_list=fetch_targets)

-        transpiler_results = exe.run(inference_transpiler_program,
-                                     feed={feed_target_names[0]: tensor_img},
-                                     fetch_list=fetch_targets)
-
-        assert len(results[0]) == len(transpiler_results[0])
-        for i in range(len(results[0])):
-            np.testing.assert_almost_equal(
-                results[0][i], transpiler_results[0][i], decimal=4)

        print("infer results: ", results[0])

        fluid.io.save_inference_model(save_dirname, feed_target_names,
-                                      fetch_targets, exe,
-                                      inference_transpiler_program)
+                                      fetch_targets, exe, inference_program)
def main(net_type, use_cuda, is_local=True):
@@ -15,7 +15,6 @@
from __future__ import print_function
from .distribute_transpiler import DistributeTranspiler, DistributeTranspilerConfig
-from .inference_transpiler import InferenceTranspiler
from .memory_optimization_transpiler import memory_optimize, release_memory
from .ps_dispatcher import HashName, RoundRobin