提交 3b8afdb9 编写于 作者: S SunAhong1993

Add TensorFlow optimizer passes: new PReLU fusion pass (PReLUOpt); BatchNormOpt now reads epsilon from a fill_constant layer instead of a parameter; map TF Slice to fluid.layers.slice instead of fluid.layers.crop_tensor

上级 c236c3c2
......@@ -4,6 +4,7 @@
```
python tools/check_for_lite.py paddle_model/inference_model/__model__
```
> 附:check_for_lite工具并不能完全判断模型是否被支持,PaddleLite详细支持的算子请参考[PaddleLite支持算子集](https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/docs/introduction/support_operation_list.md)
### 二、模型参数合并
......
......@@ -117,6 +117,7 @@ def tf2paddle(model_path,
from x2paddle.optimizer.tensorflow.bias import BiasOpt
from x2paddle.optimizer.tensorflow.transpose import TransposeOpt
from x2paddle.optimizer.tensorflow.batch_norm import BatchNormOpt
from x2paddle.optimizer.tensorflow.prelu import PReLUOpt
print("Now translating model from tensorflow to paddle.")
model = TFDecoder(model_path, define_input_shape=define_input_shape)
......@@ -125,8 +126,10 @@ def tf2paddle(model_path,
bias_opt = BiasOpt()
transpose_opt = TransposeOpt()
batch_norm_opt = BatchNormOpt()
prelu_opt = PReLUOpt()
bias_opt.run(program)
batch_norm_opt.run(program)
prelu_opt.run(program)
transpose_opt.run(program)
program.gen_model(save_dir)
......
......@@ -882,7 +882,7 @@ class TFOpMapper(OpMapper):
begin = self.graph.get_node(node.layer.input[1])
size = self.graph.get_node(node.layer.input[2])
inputs = {"x": input.name}
inputs = {"input": input.name}
attrs = {}
if begin.layer_type == "Const":
begin = begin.value.tolist()
......@@ -901,20 +901,30 @@ class TFOpMapper(OpMapper):
if size.layer_type == "Const":
size = size.value.tolist()
attrs['shape'] = size
shape = size
else:
shape = size.out_shapes[0]
reshape_name = gen_name("slice", "reshape")
program.add_layer(
kernel="fluid.layers.reshape",
inputs={"x": size.name},
outputs=[reshape_name],
shape=shape)
inputs['shape'] = reshape_name
# reshape_name = gen_name("slice", "reshape")
# program.add_layer(
# kernel="fluid.layers.reshape",
# inputs={"x": size.name},
# outputs=[reshape_name],
# shape=shape)
# inputs['shape'] = reshape_name
# inputs.pop('shape')
program.add_layer(
kernel="fluid.layers.crop_tensor",
kernel="fluid.layers.slice",
inputs=inputs,
outputs=[node.name],
**attrs)
axes=list(range(len(attrs['offsets']))),
starts=attrs['offsets'],
ends=[attrs['offsets'][i] + shape[i] for i in range(len(shape))])
# program.add_layer(
# kernel="fluid.layers.crop_tensor",
# inputs=inputs,
# outputs=[node.name],
# **attrs)
def ResizeNearestNeighbor(self, node):
input = self.graph.get_node(node.layer.input[0])
......
......@@ -20,10 +20,12 @@ class BatchNormOpt:
input_ids0 = graph.edges_in[layer_id]
mul_layer0 = graph.layers[input_ids0[0]]
sub_layer0 = graph.layers[input_ids0[1]]
if mul_layer0.kernel != "fluid.layers.elementwise_mul":
continue
if sub_layer0.kernel != "fluid.layers.elementwise_sub":
continue
axis = mul_layer0.attrs.get('axis', -1)
if axis != -1 and axis != 3:
continue
......@@ -116,7 +118,7 @@ class BatchNormOpt:
other = graph.layers[input_ids6[1]]
if variance.kernel != "fluid.layers.create_parameter":
continue
if other.kernel != "fluid.layers.create_parameter":
if other.kernel != "fluid.layers.fill_constant":
continue
if len(graph.edges_out.get(input_ids6[0], [])) != 1:
continue
......@@ -127,10 +129,6 @@ class BatchNormOpt:
variance_shape = graph.parameters[variance.outputs[0]].shape
if variance_shape != beta_shape:
continue
if other.outputs[0] not in graph.parameters:
continue
if graph.parameters[other.outputs[0]].size != 1:
continue
ids = set([
layer_id, mul_layer0.id, sub_layer0.id, mul_layer1.id, beta.id,
......@@ -163,7 +161,7 @@ class BatchNormOpt:
kernel="fluid.layers.batch_norm",
inputs={"input": "transpose_for_bn"},
outputs=layer.outputs,
epsilon=graph.parameters[other.outputs[0]],
epsilon=other.attrs["value"],
param_attr="'{}'".format(gamma.outputs[0]),
bias_attr="'{}'".format(beta.outputs[0]),
moving_mean_name="'{}'".format(mean.outputs[0]),
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册