Unverified Commit 619b1833 authored by SunAhong1993, committed by GitHub

Merge pull request #2 from PaddlePaddle/develop

- repo: local
- repo: https://github.com/PaddlePaddle/mirrors-yapf.git
sha: 0d79c0c469bab64f7229c9aca2b1186ef47f0e37
hooks:
- id: yapf
name: yapf
entry: yapf
language: system
args: [-i, --style .style.yapf]
files: \.py$
- repo: https://github.com/pre-commit/pre-commit-hooks
sha: a11d9314b22d8f8c7556443875b731ef05965464
hooks:
......@@ -18,6 +14,7 @@
- id: check-symlinks
- id: check-added-large-files
- repo: local
hooks:
- id: copyright_checker
name: copyright_checker
......
language: python
python:
- '2.7'
- '3.5'
- '3.6'
script:
- if [[ $TRAVIS_PYTHON_VERSION != 2.7 ]]; then /bin/bash ./tools/check_code_style.sh; fi
......
......@@ -44,10 +44,15 @@ x2paddle --framework=caffe --prototxt=deploy.prototxt --weight=deploy.caffemodel
```
x2paddle --framework=onnx --model=onnx_model.onnx --save_dir=pd_model
```
### Paddle2ONNX
```
# Note: paddle_infer_model_dir must contain both the __model__ and __params__ files
x2paddle --framework=paddle2onnx --model=paddle_infer_model_dir --save_dir=onnx_model
```
### Parameter Options
| Parameter | Description |
|----------|--------------|
|--framework | Source model framework (tensorflow / caffe / onnx) |
|--framework | Source model framework (tensorflow / caffe / onnx / paddle2onnx) |
|--prototxt | When framework is caffe, the path of the Caffe model's proto file |
|--weight | When framework is caffe, the path of the Caffe model's weights file |
|--save_dir | The directory where the converted model is saved |
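For example, a Caffe conversion combines `--prototxt`, `--weight`, and `--save_dir` (file and directory names below are placeholders):
```
x2paddle --framework=caffe --prototxt=deploy.prototxt --weight=deploy.caffemodel --save_dir=pd_model
```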
......
......@@ -11,8 +11,7 @@ setuptools.setup(
version=x2paddle.__version__,
author="dltp-sz",
author_email="dltp-sz@baidu.com",
description=
"a toolkit for converting trained models to PaddlePaddle from other deep learning frameworks.",
description="a toolkit for converting trained models to PaddlePaddle from other deep learning frameworks.",
long_description=long_description,
long_description_content_type="text/plain",
url="https://github.com/PaddlePaddle/x2paddle",
......@@ -23,6 +22,4 @@ setuptools.setup(
"Operating System :: OS Independent",
],
license='Apache 2.0',
entry_points={'console_scripts': [
'x2paddle=x2paddle.convert:main',
]})
entry_points={'console_scripts': ['x2paddle=x2paddle.convert:main', ]})
......@@ -5,10 +5,12 @@ model_dir = sys.argv[1]
new_model_dir = sys.argv[2]
exe = fluid.Executor(fluid.CPUPlace())
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(dirname=model_dir, executor=exe)
fetch_targets] = fluid.io.load_inference_model(
dirname=model_dir, executor=exe)
print(feed_target_names)
fluid.io.save_inference_model(dirname=new_model_dir,
fluid.io.save_inference_model(
dirname=new_model_dir,
feeded_var_names=feed_target_names,
target_vars=fetch_targets,
executor=exe,
......
__version__ = "0.7.1"
__version__ = "0.7.4"
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
......@@ -19,22 +19,26 @@ import sys
def arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--model",
parser.add_argument(
"--model",
"-m",
type=_text_type,
default=None,
help="define model file path for tensorflow or onnx")
parser.add_argument("--prototxt",
parser.add_argument(
"--prototxt",
"-p",
type=_text_type,
default=None,
help="prototxt file of caffe model")
parser.add_argument("--weight",
parser.add_argument(
"--weight",
"-w",
type=_text_type,
default=None,
help="weight file of caffe model")
parser.add_argument("--save_dir",
parser.add_argument(
"--save_dir",
"-s",
type=_text_type,
default=None,
......@@ -44,7 +48,8 @@ def arg_parser():
"-f",
type=_text_type,
default=None,
help="define which deeplearning framework(tensorflow/caffe/onnx)")
help="define which deeplearning framework(tensorflow/caffe/onnx/paddle2onnx)"
)
parser.add_argument(
"--caffe_proto",
"-c",
......@@ -52,7 +57,8 @@ def arg_parser():
default=None,
help="optional: the .py file compiled by caffe proto file of caffe model"
)
parser.add_argument("--version",
parser.add_argument(
"--version",
"-v",
action="store_true",
default=False,
......@@ -63,12 +69,14 @@ def arg_parser():
action="store_true",
default=False,
help="tf model conversion without data format optimization")
parser.add_argument("--define_input_shape",
parser.add_argument(
"--define_input_shape",
"-d",
action="store_true",
default=False,
help="define input shape for tf model")
parser.add_argument("--params_merge",
parser.add_argument(
"--params_merge",
"-pm",
action="store_true",
default=False,
......@@ -117,7 +125,6 @@ def tf2paddle(model_path,
optimizer.merge_bias()
optimizer.optimize_sub_graph()
# optimizer.merge_batch_norm()
# optimizer.merge_prelu()
else:
......@@ -177,6 +184,14 @@ def onnx2paddle(model_path, save_dir, params_merge=False):
mapper.save_inference_model(save_dir, params_merge)
def paddle2onnx(model_path, save_dir):
from x2paddle.decoder.paddle_decoder import PaddleDecoder
from x2paddle.op_mapper.paddle_op_mapper import PaddleOpMapper
model = PaddleDecoder(model_path, '__model__', '__params__')
mapper = PaddleOpMapper()
mapper.convert(model.program, save_dir)
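# CLI equivalent of this new path (cf. the README example; the directory
# names are placeholders, not fixed values):
#   x2paddle --framework=paddle2onnx --model=paddle_infer_model_dir --save_dir=onnx_model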
def main():
if len(sys.argv) < 2:
print("Use \"x2paddle -h\" to print the help information")
......@@ -249,8 +264,14 @@ def main():
if args.params_merge:
params_merge = True
onnx2paddle(args.model, args.save_dir, params_merge)
elif args.framework == "paddle2onnx":
assert args.model is not None, "--model should be defined while translating paddle model to onnx"
paddle2onnx(args.model, args.save_dir)
else:
raise Exception("--framework only support tensorflow/caffe/onnx now")
raise Exception(
"--framework only support tensorflow/caffe/onnx/paddle2onnx now")
if __name__ == "__main__":
......
......@@ -46,8 +46,9 @@ class Layer(object):
for input in self.inputs:
if isinstance(input, GraphNode):
if hasattr(input, "index"):
in_list += (input.layer_name +
"[{}]".format(input.index) + ", ")
in_list += (
input.layer_name + "[{}]".format(input.index) + ", "
)
else:
in_list += (input.layer_name + ", ")
elif isinstance(input, six.string_types):
......@@ -71,8 +72,8 @@ class Layer(object):
layer_code = layer_code + key + "={}, ".format(input)
elif isinstance(self.inputs, GraphNode):
if hasattr(self.inputs, "index"):
layer_code += (self.inputs.layer_name +
"[{}]".format(self.inputs.index))
layer_code += (
self.inputs.layer_name + "[{}]".format(self.inputs.index))
else:
layer_code += (self.inputs.layer_name)
if self.op != "=":
......@@ -88,6 +89,8 @@ class Layer(object):
for key, value in param_attr.items():
if '\n' in str(value):
value = string(str(value).replace('\n', ','))
if str(key) == 'attr':
value = 'ParamAttr(' + str(value) + ')'
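# e.g. a param_attr entry attr='conv1_weights' (name illustrative) is
# emitted into the generated fluid code as attr=ParamAttr('conv1_weights')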
layer_code = layer_code + key + "={}, ".format(value)
layer_code = layer_code.strip(", ")
......
......@@ -64,10 +64,8 @@ def run_net(param_dir="./"):
b = os.path.exists(os.path.join(param_dir, var.name))
return b
fluid.io.load_vars(exe,
param_dir,
fluid.default_main_program(),
predicate=if_exist)
fluid.io.load_vars(
exe, param_dir, fluid.default_main_program(), predicate=if_exist)
class OpMapper(object):
......@@ -98,8 +96,8 @@ class OpMapper(object):
def add_codes(self, codes, indent=0):
if isinstance(codes, list):
for code in codes:
self.paddle_codes += (self.tab * indent + code.strip('\n') +
'\n')
self.paddle_codes += (
self.tab * indent + code.strip('\n') + '\n')
elif isinstance(codes, str):
self.paddle_codes += (self.tab * indent + codes.strip('\n') + '\n')
else:
......@@ -135,20 +133,21 @@ class OpMapper(object):
os.path.join(os.path.join(py_code_dir, var.name)))
return b
fluid.io.load_vars(exe,
fluid.io.load_vars(
exe,
py_code_dir,
fluid.default_main_program(),
predicate=if_exist)
if params_merge:
fluid.io.save_inference_model(dirname=os.path.join(
save_dir, "inference_model"),
fluid.io.save_inference_model(
dirname=os.path.join(save_dir, "inference_model"),
feeded_var_names=input_names,
target_vars=outputs,
executor=exe,
params_filename="__params__")
else:
fluid.io.save_inference_model(dirname=os.path.join(
save_dir, "inference_model"),
fluid.io.save_inference_model(
dirname=os.path.join(save_dir, "inference_model"),
feeded_var_names=input_names,
target_vars=outputs,
executor=exe,
......
......@@ -49,13 +49,11 @@ class CaffeResolver(object):
class CaffeGraphNode(GraphNode):
def __init__(self, layer, type_str, layer_name=None):
if layer_name is None:
super(CaffeGraphNode,
self).__init__(layer,
layer.name.replace('/', '_').replace('-', '_'))
super(CaffeGraphNode, self).__init__(
layer, layer.name.replace('/', '_').replace('-', '_'))
else:
super(CaffeGraphNode,
self).__init__(layer,
layer_name.replace('/', '_').replace('-', '_'))
super(CaffeGraphNode, self).__init__(
layer, layer_name.replace('/', '_').replace('-', '_'))
self.layer_type = type_str
self.fluid_code = FluidCode()
self.data = None
......@@ -268,8 +266,8 @@ class CaffeDecoder(object):
c_i = blob.channels
h = blob.height
w = blob.width
data = np.asarray(list(blob.data),
dtype=np.float32).reshape(c_o, c_i, h, w)
data = np.asarray(
list(blob.data), dtype=np.float32).reshape(c_o, c_i, h, w)
transformed.append(data)
return transformed
Source diff not shown: the file is too large to display. You can view the blob instead.
......@@ -71,9 +71,8 @@ class ONNXGraphNode(GraphNode):
if attr.type == onnx.AttributeProto.TENSOR:
dtype = np.dtype(TENSOR_TYPE_TO_NP_TYPE[attr.t.data_type])
data = attr.t.raw_data
value = np.frombuffer(data,
dtype=dtype,
count=(len(data) // dtype.itemsize))
value = np.frombuffer(
data, dtype=dtype, count=(len(data) // dtype.itemsize))
elif attr.type == onnx.AttributeProto.STRING:
value = attr.s
value = value.decode() if isinstance(value, bytes) else value
......@@ -205,9 +204,8 @@ class ONNXGraph(Graph):
self.node_map[name].weight = weight
self.node_map[name].embeded_as = []
else:
self.node_map[name] = ONNXGraphDataNode(initializer,
layer_name=name,
is_global_input=False)
self.node_map[name] = ONNXGraphDataNode(
initializer, layer_name=name, is_global_input=False)
self.node_map[name].weight = weight
self.node_map[name].embeded_as = []
......@@ -494,8 +492,8 @@ class ONNXDecoder(object):
sess = rt.InferenceSession(model_path)
for ipt in sess.get_inputs():
datatype = datatype_map[ipt.type]
input_dict[ipt.name] = np.random.random(
ipt.shape).astype(datatype)
input_dict[ipt.name] = np.random.random(ipt.shape).astype(
datatype)
res = sess.run(None, input_feed=input_dict)
except:
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
class PaddleDecoder(object):
def __init__(self,
model_dir,
model_filename='__model__',
params_filename=None):
exe = fluid.Executor(fluid.CPUPlace())
[self.program, feed, fetchs] = fluid.io.load_inference_model(
model_dir,
exe,
model_filename=model_filename,
params_filename=params_filename)
......@@ -120,13 +120,13 @@ class TFGraph(Graph):
def build(self):
for layer in self.model.node:
self.node_map[layer.name.replace('/', '_').replace(
'-', '_')] = TFGraphNode(layer, data_format=self.tf_data_format)
'-', '_')] = TFGraphNode(
layer, data_format=self.tf_data_format)
for layer_name, node in self.node_map.items():
for in_node in node.layer.input:
in_node = in_node.replace('/',
'_').replace('-',
'_').replace('^', '')
in_node = in_node.replace('/', '_').replace('-', '_').replace(
'^', '')
if in_node not in self.node_map:
if in_node.strip().split(':')[0] in self.node_map:
self.connect(in_node.strip().split(':')[0], layer_name)
......@@ -390,10 +390,10 @@ class TFDecoder(object):
shape=shape,
name="x2paddle_{}".format(layer.name))
except:
x2paddle_input = tf.placeholder(dtype=dtype,
x2paddle_input = tf.placeholder(
dtype=dtype,
shape=shape,
name="x2paddle_{}".format(
layer.name))
name="x2paddle_{}".format(layer.name))
input_map["{}:0".format(layer.name)] = x2paddle_input
if shape.count(None) > 0:
......
......@@ -122,7 +122,8 @@ def convolutiondepthwise_layer(inputs,
c_out = num_output if num_output is not None else input_shape[0][1]
group = int(c_in / (c_in / c_out)) if c_in > c_out else int(c_in /
(c_out / c_in))
out = fluid.layers.conv2d(input,
out = fluid.layers.conv2d(
input,
dilation=[dila_h, dila_w],
filter_size=[k_h, k_w],
stride=[s_h, s_w],
......@@ -142,7 +143,8 @@ def convolutiondepthwise_weights(name, data=None):
return weights_name
register(kind='ConvolutionDepthwise',
register(
kind='ConvolutionDepthwise',
shape=convolutiondepthwise_shape,
layer=convolutiondepthwise_layer,
weights=convolutiondepthwise_weights)
......@@ -37,8 +37,8 @@ def detectionoutput_layer(inputs,
pbv = fluid.layers.reshape(x=pbv, shape=[-1, 4])
mbox_loc = inputs[0]
mbox_loc = fluid.layers.reshape(x=mbox_loc, shape=[-1, pb.shape[0], 4])
mbox_conf_flatten = fluid.layers.reshape(x=mbox_conf_flatten,
shape=[0, pb.shape[0], -1])
mbox_conf_flatten = fluid.layers.reshape(
x=mbox_conf_flatten, shape=[0, pb.shape[0], -1])
default = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
fields = ['eta', 'top_k', 'nms_threshold']
......@@ -64,7 +64,8 @@ def detectionoutput_weights(name, data=None):
return weights_name
register(kind='DetectionOutput',
register(
kind='DetectionOutput',
shape=detectionoutput_shape,
layer=detectionoutput_layer,
weights=detectionoutput_weights)
......@@ -20,9 +20,8 @@ def normalize_layer(inputs,
attr=name + '_scale')
scale_param = fluid.layers.reshape(x=scale_param, \
shape=[1] if channel_shared else [input_shape[0][1]])
out = fluid.layers.elementwise_mul(x=l2_norm,
y=scale_param,
axis=-1 if channel_shared else 1)
out = fluid.layers.elementwise_mul(
x=l2_norm, y=scale_param, axis=-1 if channel_shared else 1)
return out
......@@ -31,7 +30,8 @@ def normalize_weights(name, data=None):
return weights_name
register(kind='Normalize',
register(
kind='Normalize',
shape=normalize_shape,
layer=normalize_layer,
weights=normalize_weights)
......@@ -23,7 +23,8 @@ def permute_weights(name, data=None):
return weights_name
register(kind='Permute',
register(
kind='Permute',
shape=permute_shape,
layer=permute_layer,
weights=permute_weights)
......@@ -30,7 +30,8 @@ def priorbox_layer(inputs,
steps = tuple(step) if type(step) is list or type(step) is tuple else (step,
step)
box, variance_ = fluid.layers.prior_box(input,
box, variance_ = fluid.layers.prior_box(
input,
image,
min_sizes=min_size,
max_sizes=max_size,
......@@ -53,7 +54,8 @@ def priorbox_weights(name, data=None):
return weights_name
register(kind='PriorBox',
register(
kind='PriorBox',
shape=priorbox_shape,
layer=priorbox_layer,
weights=priorbox_weights)
......@@ -23,8 +23,7 @@ def register(kind, shape, layer, weights):
kind = [kind]
else:
assert type(
kind
) is list, 'invalid param "kind" for register, not a list or str'
kind) is list, 'invalid param "kind" for register, not a list or str'
for k in kind:
assert type(
......
......@@ -21,7 +21,8 @@ def roipooling_layer(inputs,
input = inputs[0]
roi = inputs[1]
roi = fluid.layers.slice(roi, axes=[1], starts=[1], ends=[5])
out = fluid.layers.roi_pool(input,
out = fluid.layers.roi_pool(
input,
roi,
pooled_height=pooled_h,
pooled_width=pooled_w,
......@@ -34,7 +35,8 @@ def roipooling_weights(name, data=None):
return weights_name
register(kind='ROIPooling',
register(
kind='ROIPooling',
shape=roipooling_shape,
layer=roipooling_layer,
weights=roipooling_weights)
......@@ -30,7 +30,8 @@ def select_layer(inputs,
out = []
for i in range(len(slice_point)):
out.append(
fluid.layers.slice(input,
fluid.layers.slice(
input,
axes=[axis],
starts=[slice_point[i]],
ends=[slice_point[i + 1]],
......@@ -45,7 +46,8 @@ def select_weights(name, data=None):
return weights_name
register(kind='Select',
register(
kind='Select',
shape=select_shape,
layer=select_layer,
weights=select_weights)
......@@ -17,7 +17,8 @@ def shufflechannel_weights(name, data=None):
return weights_name
register(kind='ShuffleChannel',
register(
kind='ShuffleChannel',
shape=shufflechannel_shape,
layer=shufflechannel_layer,
weights=shufflechannel_weights)
......@@ -144,8 +144,8 @@ class CaffeOpMapper(OpMapper):
[s_h, s_w] = [params.stride] * 2
elif len(params.stride) > 0:
s_h = params.stride_h if params.stride_h > 0 else params.stride[0]
s_w = params.stride_w if params.stride_w > 0 else params.stride[
len(params.stride) - 1]
s_w = params.stride_w if params.stride_w > 0 else params.stride[len(
params.stride) - 1]
elif params.stride_h > 0 or params.stride_w > 0:
s_h = params.stride_h
s_w = params.stride_w
......@@ -154,8 +154,8 @@ class CaffeOpMapper(OpMapper):
[p_h, p_w] = [params.pad] * 2
elif len(params.pad) > 0:
p_h = params.pad_h if params.pad_h > 0 else params.pad[0]
p_w = params.pad_w if params.pad_w > 0 else params.pad[
len(params.pad) - 1]
p_w = params.pad_w if params.pad_w > 0 else params.pad[len(
params.pad) - 1]
elif params.pad_h > 0 or params.pad_w > 0:
p_h = params.pad_h
p_w = params.pad_w
......@@ -195,10 +195,8 @@ class CaffeOpMapper(OpMapper):
'shape': shape,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("data",
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"data", inputs=None, output=node, param_attr=attr)
def MemoryData(self, node):
# TODO(syf): Paddlepaddle can't fully support
......@@ -209,10 +207,8 @@ class CaffeOpMapper(OpMapper):
'shape': shape,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("data",
inputs=None,
output=node.layer_name + '0',
param_attr=attr)
node.fluid_code.add_layer(
"data", inputs=None, output=node.layer_name + '0', param_attr=attr)
node.fluid_code.add_note('{} = [{}]'.format(node.layer_name,
node.layer_name + '0'))
......@@ -229,11 +225,9 @@ class CaffeOpMapper(OpMapper):
input_c = node.input_shape[0][1]
output_c = channel
data.append(
np.zeros([output_c, input_c, kernel[0],
kernel[1]]).astype('float32'))
data.append(np.zeros([
output_c,
]).astype('float32'))
np.zeros([output_c, input_c, kernel[0], kernel[1]]).astype(
'float32'))
data.append(np.zeros([output_c, ]).astype('float32'))
else:
data = self.adjust_parameters(node)
self.weights[node.layer_name + '_weights'] = data[0]
......@@ -244,29 +238,19 @@ class CaffeOpMapper(OpMapper):
input = self.graph.get_bottom_node(node, idx=0, copy=True)
attr = {
'filter_size':
kernel,
'num_filters':
channel,
'stride':
stride,
'padding':
pad,
'dilation':
dilation,
'groups':
group,
'name':
string(node.layer_name),
'param_attr':
string(node.layer_name + '_weights'),
'bias_attr':
False if len(data) == 1 else string(node.layer_name + '_bias'),
'filter_size': kernel,
'num_filters': channel,
'stride': stride,
'padding': pad,
'dilation': dilation,
'groups': group,
'name': string(node.layer_name),
'param_attr': string(node.layer_name + '_weights'),
'bias_attr': False
if len(data) == 1 else string(node.layer_name + '_bias'),
}
node.fluid_code.add_layer("conv2d",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"conv2d", inputs=input, output=node, param_attr=attr)
def Deconvolution(self, node):
data = node.data
......@@ -281,11 +265,9 @@ class CaffeOpMapper(OpMapper):
input_c = node.input_shape[0][1]
output_c = channel
data.append(
np.zeros([output_c, input_c, kernel[0],
kernel[1]]).astype('float32'))
data.append(np.zeros([
output_c,
]).astype('float32'))
np.zeros([output_c, input_c, kernel[0], kernel[1]]).astype(
'float32'))
data.append(np.zeros([output_c, ]).astype('float32'))
else:
data = self.adjust_parameters(node)
self.weights[node.layer_name + '_weights'] = data[0]
......@@ -295,31 +277,20 @@ class CaffeOpMapper(OpMapper):
) == 1, 'The count of Deconvolution node\'s input is not 1.'
input = self.graph.get_bottom_node(node, idx=0, copy=True)
attr = {
'output_size':
None,
'filter_size':
kernel,
'num_filters':
channel,
'stride':
stride,
'padding':
pad,
'dilation':
dilation,
'groups':
group,
'name':
string(node.layer_name),
'param_attr':
string(node.layer_name + '_weights'),
'bias_attr':
False if len(data) == 1 else string(node.layer_name + '_bias')
'output_size': None,
'filter_size': kernel,
'num_filters': channel,
'stride': stride,
'padding': pad,
'dilation': dilation,
'groups': group,
'name': string(node.layer_name),
'param_attr': string(node.layer_name + '_weights'),
'bias_attr': False
if len(data) == 1 else string(node.layer_name + '_bias')
}
node.fluid_code.add_layer("conv2d_transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"conv2d_transpose", inputs=input, output=node, param_attr=attr)
def Pooling(self, node):
params = node.layer.pooling_param
......@@ -345,10 +316,8 @@ class CaffeOpMapper(OpMapper):
'global_pooling': global_pool,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("pool2d",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"pool2d", inputs=input, output=node, param_attr=attr)
def LRN(self, node):
assert len(node.inputs) == 1, 'The count of LRN node\'s input is not 1.'
......@@ -368,10 +337,8 @@ class CaffeOpMapper(OpMapper):
'beta': params.beta,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("lrn",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"lrn", inputs=input, output=node, param_attr=attr)
def InnerProduct(self, node):
data = node.data
......@@ -384,8 +351,8 @@ class CaffeOpMapper(OpMapper):
output_c = params.num_output
data = []
data.append(
np.zeros([input_c,
output_c]).astype('float32').astype('float32'))
np.zeros([input_c, output_c]).astype('float32').astype(
'float32'))
data.append(
np.zeros([output_c]).astype('float32').astype('float32'))
else:
......@@ -409,21 +376,15 @@ class CaffeOpMapper(OpMapper):
assert params.bias_term == True
input = self.graph.get_bottom_node(node, idx=0, copy=True)
attr = {
'size':
params.num_output,
'name':
string(node.layer_name),
'act':
None,
'param_attr':
string(node.layer_name + '_weights'),
'bias_attr':
False if len(data) == 1 else string(node.layer_name + '_bias')
'size': params.num_output,
'name': string(node.layer_name),
'act': None,
'param_attr': string(node.layer_name + '_weights'),
'bias_attr': False
if len(data) == 1 else string(node.layer_name + '_bias')
}
node.fluid_code.add_layer("fc",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"fc", inputs=input, output=node, param_attr=attr)
def Softmax(self, node):
assert len(
......@@ -435,10 +396,8 @@ class CaffeOpMapper(OpMapper):
dims = len(shape)
axis = axis + dims if axis < 0 else axis
attr = {'axis': axis, 'name': string(node.layer_name + '_softmax')}
node.fluid_code.add_layer("softmax",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"softmax", inputs=input, output=node, param_attr=attr)
def Slice(self, node):
assert len(
......@@ -459,10 +418,8 @@ class CaffeOpMapper(OpMapper):
'dim': axis,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("split",
inputs=input,
output=node.layer_name,
param_attr=attr)
node.fluid_code.add_layer(
"split", inputs=input, output=node.layer_name, param_attr=attr)
def Concat(self, node):
assert len(
......@@ -475,10 +432,8 @@ class CaffeOpMapper(OpMapper):
params = node.layer.concat_param
axis = params.axis
attr = {'axis': axis, 'name': string(node.layer_name)}
node.fluid_code.add_layer("concat",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"concat", inputs=inputs, output=node, param_attr=attr)
def PReLU(self, node):
assert len(
......@@ -499,10 +454,8 @@ class CaffeOpMapper(OpMapper):
'param_attr': string(node.layer_name + '_weights'),
'name': string(node.layer_name)
}
node.fluid_code.add_layer("prelu",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"prelu", inputs=input, output=node, param_attr=attr)
def Accuracy(self, node):
assert len(
......@@ -526,10 +479,8 @@ class CaffeOpMapper(OpMapper):
assert axis == 1, 'PaddlePaddle can not support the situation when the axis is not 1.'
assert not ignore_label >= 0, 'PaddlePaddle can not support the situation when the model has ignore label.'
attr = {'k': top_k}
node.fluid_code.add_layer("accuracy",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"accuracy", inputs=inputs, output=node, param_attr=attr)
def Eltwise(self, node):
assert len(
......@@ -546,7 +497,8 @@ class CaffeOpMapper(OpMapper):
inputs_dict['x'] = inputs[0]
inputs_dict['y'] = inputs[1]
attr = {'act': None, 'name': string(node.layer_name)}
node.fluid_code.add_layer("elementwise_mul",
node.fluid_code.add_layer(
"elementwise_mul",
inputs=inputs_dict,
output=node,
param_attr=attr)
......@@ -559,14 +511,15 @@ class CaffeOpMapper(OpMapper):
'value': coeff[0],
'dtype': '{}.dtype'.format(input1_name)
}
node.fluid_code.add_layer("fill_constant",
node.fluid_code.add_layer(
"fill_constant",
inputs=None,
output=node.layer_name + '_const1',
param_attr=attr)
attr = {'act': None, 'name': string(node.layer_name + '_mul1')}
node.fluid_code.add_layer("elementwise_mul",
inputs=input1_name + ', ' +
node.layer_name + '_const1',
node.fluid_code.add_layer(
"elementwise_mul",
inputs=input1_name + ', ' + node.layer_name + '_const1',
output=node.layer_name + '_mul1',
param_attr=attr)
input2_name = self.get_input_name(inputs[1])
......@@ -575,21 +528,23 @@ class CaffeOpMapper(OpMapper):
'value': coeff[1],
'dtype': '{}.dtype'.format(input2_name)
}
node.fluid_code.add_layer("fill_constant",
node.fluid_code.add_layer(
"fill_constant",
inputs=None,
output=node.layer_name + '_const2',
param_attr=attr)
attr = {'act': None, 'name': string(node.layer_name + '_mul2')}
node.fluid_code.add_layer("elementwise_mul",
inputs=input2_name + ', ' +
node.layer_name + '_const2',
node.fluid_code.add_layer(
"elementwise_mul",
inputs=input2_name + ', ' + node.layer_name + '_const2',
output=node.layer_name + '_mul2',
param_attr=attr)
attr = {'act': None, 'name': string(node.layer_name)}
node.fluid_code.add_layer("elementwise_add",
inputs='{}_mul1, {}_mul2'.format(
node.layer_name, node.layer_name),
node.fluid_code.add_layer(
"elementwise_add",
inputs='{}_mul1, {}_mul2'.format(node.layer_name,
node.layer_name),
output=node,
param_attr=attr)
else:
......@@ -597,7 +552,8 @@ class CaffeOpMapper(OpMapper):
inputs_dict['x'] = inputs[0]
inputs_dict['y'] = inputs[1]
attr = {'act': None, 'name': string(node.layer_name)}
node.fluid_code.add_layer("elementwise_add",
node.fluid_code.add_layer(
"elementwise_add",
inputs=inputs_dict,
output=node,
param_attr=attr)
......@@ -606,7 +562,8 @@ class CaffeOpMapper(OpMapper):
inputs_dict['x'] = inputs[0]
inputs_dict['y'] = inputs[1]
attr = {'act': None, 'name': string(node.layer_name)}
node.fluid_code.add_layer("elementwise_max",
node.fluid_code.add_layer(
"elementwise_max",
inputs=inputs_dict,
output=node,
param_attr=attr)
......@@ -625,12 +582,8 @@ class CaffeOpMapper(OpMapper):
'The parameter of {} (type is {}) is not set. So we set the parameters as 0'
.format(node.layer_name, node.layer_type))
input_c = node.input_shape[0][1]
mean = np.zeros([
input_c,
]).astype('float32')
variance = np.zeros([
input_c,
]).astype('float32')
mean = np.zeros([input_c, ]).astype('float32')
variance = np.zeros([input_c, ]).astype('float32')
scale = 0
else:
......@@ -651,10 +604,8 @@ class CaffeOpMapper(OpMapper):
'epsilon': eps,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("batch_norm",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"batch_norm", inputs=input, output=node, param_attr=attr)
def Scale(self, node):
if node.data is None:
......@@ -669,10 +620,10 @@ class CaffeOpMapper(OpMapper):
input_c,
]).astype('float32')
else:
self.weights[node.layer_name + '_scale'] = np.squeeze(
node.data[0]).astype('float32')
self.weights[node.layer_name + '_offset'] = np.squeeze(
node.data[1]).astype('float32')
self.weights[node.layer_name + '_scale'] = np.squeeze(node.data[
0]).astype('float32')
self.weights[node.layer_name + '_offset'] = np.squeeze(node.data[
1]).astype('float32')
params = node.layer.scale_param
axis = params.axis
num_axes = params.num_axes
......@@ -687,7 +638,8 @@ class CaffeOpMapper(OpMapper):
inputs_dict['x'] = input0
inputs_dict['y'] = input1
attr = {'axis': axis, 'name': string(node.layer_name + '_mul')}
node.fluid_code.add_layer("elementwise_mul",
node.fluid_code.add_layer(
"elementwise_mul",
inputs=inputs_dict,
output=node.layer_name + '_mul',
param_attr=attr)
......@@ -703,15 +655,14 @@ class CaffeOpMapper(OpMapper):
'is_bias': True,
'default_initializer': 'Constant(value=1.0)'
}
node.fluid_code.add_layer("create_parameter",
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"create_parameter", inputs=None, output=node, param_attr=attr)
inputs_dict = {}
inputs_dict['x'] = input0
inputs_dict['y'] = node
attr = {'axis': axis, 'name': string(node.layer_name + '_mul')}
node.fluid_code.add_layer("elementwise_mul",
node.fluid_code.add_layer(
"elementwise_mul",
inputs=inputs_dict,
output=node.layer_name + '_mul',
param_attr=attr)
......@@ -725,14 +676,16 @@ class CaffeOpMapper(OpMapper):
'is_bias': True,
'default_initializer': 'Constant(value=1.0)'
}
node.fluid_code.add_layer("create_parameter",
node.fluid_code.add_layer(
"create_parameter",
inputs=None,
output=node.layer_name + '_offset_param',
param_attr=attr)
attr = {'axis': axis, 'name': string(node.layer_name + '_add')}
node.fluid_code.add_layer("elementwise_add",
inputs='{}_mul, {}_offset_param'.format(
node.layer_name, node.layer_name),
node.fluid_code.add_layer(
"elementwise_add",
inputs='{}_mul, {}_offset_param'.format(node.layer_name,
node.layer_name),
output=node,
param_attr=attr)
......@@ -747,10 +700,8 @@ class CaffeOpMapper(OpMapper):
'act': None,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("reshape",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reshape", inputs=input, output=node, param_attr=attr)
def ArgMax(self, node):
assert len(node.inputs) == 1 and len(
......@@ -767,10 +718,11 @@ class CaffeOpMapper(OpMapper):
axis += len(input_shape)
if out_max_val is True:
attr = {'k': top_k, 'name': string(node.layer_name + '_topk')}
node.fluid_code.add_layer("topk",
node.fluid_code.add_layer(
"topk",
inputs=input,
output='{}_topk_var, {}_index_var'.format(
node.layer_name, node.layer_name),
output='{}_topk_var, {}_index_var'.format(node.layer_name,
node.layer_name),
param_attr=attr)
attr = {'dtype': '{}_topk_var.dtype'.format(node.layer_name)}
node.fluid_code.add_layer(
......@@ -779,14 +731,16 @@ class CaffeOpMapper(OpMapper):
output='{}_index_var'.format(node.layer_name),
param_attr=attr)
attr = {'axis': axis, 'name': string(node.layer_name)}
node.fluid_code.add_layer("concat",
inputs='{}_topk_var, {}_index_var'.format(
node.layer_name, node.layer_name),
node.fluid_code.add_layer(
"concat",
inputs='{}_topk_var, {}_index_var'.format(node.layer_name,
node.layer_name),
output=node,
param_attr=attr)
else:
attr = {'k': top_k, 'name': string(node.layer_name)}
node.fluid_code.add_layer("topk",
node.fluid_code.add_layer(
"topk",
inputs=input,
output='_, {}'.format(node.layer_name),
param_attr=attr)
......@@ -804,29 +758,27 @@ class CaffeOpMapper(OpMapper):
offset_real = [0] * len(input_shape)
if hasattr(params, "offset") and len(params.offset) > 0:
offset = list(params.offset)
assert (len(input_shape) - axis) == len(
offset), "invalid offset[%s] in crop layer" % (str(offset))
assert (len(input_shape) - axis
) == len(offset), "invalid offset[%s] in crop layer" % (
str(offset))
offset_real = [0] * axis + offset
attr = {'offsets': list(offset_real), 'name': string(node.layer_name)}
node.fluid_code.add_layer("crop",
inputs={
'x': input,
'shape': node.input_shape[1]
},
node.fluid_code.add_layer(
"crop",
inputs={'x': input,
'shape': node.input_shape[1]},
output=node,
param_attr=attr)
def Flatten(self, node):
assert len(
node.inputs
) == 1, 'The count of Flatten node\'s input is not 1.'
node.
inputs) == 1, 'The count of Flatten node\'s input is not 1.'
input = self.graph.get_bottom_node(node, idx=0, copy=True)
shape = node.output_shape[0]
attr = {'shape': shape, 'name': string(node.layer_name)}
node.fluid_code.add_layer("reshape",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reshape", inputs=input, output=node, param_attr=attr)
def Power(self, node):
assert len(
......@@ -842,15 +794,11 @@ class CaffeOpMapper(OpMapper):
'bias_after_scale': True,
'name': string(node.layer_name + '_scale')
}
node.fluid_code.add_layer("scale",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"scale", inputs=input, output=node, param_attr=attr)
attr = {'factor': power, 'name': string(node.layer_name)}
node.fluid_code.add_layer("pow",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"pow", inputs=node, output=node, param_attr=attr)
def Reduction(self, node):
assert len(
......@@ -872,55 +820,41 @@ class CaffeOpMapper(OpMapper):
'keep_dim': False,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("reduce_sum",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reduce_sum", inputs=input, output=node, param_attr=attr)
elif operation == 2: ## operation = ASUM
attr = {'name': string(node.layer_name + '_abs')}
node.fluid_code.add_layer("abs",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"abs", inputs=input, output=node, param_attr=attr)
attr = {
'dim': dim[axis:],
'keep_dim': False,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("reduce_sum",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reduce_sum", inputs=node, output=node, param_attr=attr)
elif operation == 3: ## operation = SUMSQ
attr = {'factor': 2.0, 'name': string(node.layer_name + '_pow')}
node.fluid_code.add_layer("pow",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"pow", inputs=input, output=node, param_attr=attr)
attr = {
'dim': dim[axis:],
'keep_dim': False,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("reduce_sum",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reduce_sum", inputs=node, output=node, param_attr=attr)
else: ## operation = MEAN
attr = {
'dim': dim[axis:],
'keep_dim': False,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("reduce_mean",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reduce_mean", inputs=node, output=node, param_attr=attr)
attr = {'scale': coeff}
node.fluid_code.add_layer("scale",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"scale", inputs=node, output=node, param_attr=attr)
def deal_custom_layer(self, node):
op = node.layer_type
......@@ -947,7 +881,8 @@ class CaffeOpMapper(OpMapper):
assert input is not None, 'This kind of DetectionOutput is not supported!'
input = self.graph.get_bottom_node(input, idx=0, copy=True)
inputs_node.append(input)
node.fluid_code.add_layer(func.__code__.co_name,
node.fluid_code.add_layer(
func.__code__.co_name,
inputs=inputs_node,
output=node,
param_attr=kwargs,
......@@ -960,7 +895,5 @@ class CaffeOpMapper(OpMapper):
op_info = self.directly_map_ops[node.layer_type]
input = self.graph.get_bottom_node(node, idx=0, copy=True)
attr = {'name': string(node.layer_name)}
node.fluid_code.add_layer(op_info,
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
op_info, inputs=input, output=node, param_attr=attr)
......@@ -33,8 +33,8 @@ def get_kernel_parameters(params):
[s_h, s_w] = [params.stride] * 2
elif len(params.stride) > 0:
s_h = params.stride_h if params.stride_h > 0 else params.stride[0]
s_w = params.stride_w if params.stride_w > 0 else params.stride[
len(params.stride) - 1]
s_w = params.stride_w if params.stride_w > 0 else params.stride[len(
params.stride) - 1]
elif params.stride_h > 0 or params.stride_w > 0:
s_h = params.stride_h
s_w = params.stride_w
......
......@@ -24,21 +24,18 @@ def InstanceNormalization_layer(inputs, name=None):
epsilon = 1e-5
input_ = inputs[0]
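# instance normalization: (x - mean) / sqrt(var + epsilon) computed per
# sample and channel over the spatial dims, then scaled and shifted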
mean = fluid.layers.reduce_mean(input_, dim=[2, 3], keep_dim=True)
var = fluid.layers.reduce_mean(fluid.layers.square(input_ - mean),
dim=[2, 3],
keep_dim=True)
var = fluid.layers.reduce_mean(
fluid.layers.square(input_ - mean), dim=[2, 3], keep_dim=True)
if name is not None:
scale_name = name + "_scale"
offset_name = name + "_offset"
scale_param = inputs[1]
offset_param = inputs[2]
scale = fluid.layers.create_parameter(name=scale_param.name,
shape=input_.shape[1:2],
dtype="float32")
offset = fluid.layers.create_parameter(name=offset_param.name,
shape=input_.shape[1:2],
dtype="float32")
scale = fluid.layers.create_parameter(
name=scale_param.name, shape=input_.shape[1:2], dtype="float32")
offset = fluid.layers.create_parameter(
name=offset_param.name, shape=input_.shape[1:2], dtype="float32")
tmp = fluid.layers.elementwise_mul(x=(input_ - mean), y=scale, axis=1)
tmp = tmp / fluid.layers.sqrt(var + epsilon)
......@@ -51,7 +48,8 @@ def InstanceNormalization_weights(name, data=None):
return weights_name
register(kind='InstanceNormalization',
register(
kind='InstanceNormalization',
shape=InstanceNormalization_shape,
layer=InstanceNormalization_layer,
child_func=None,
......
......@@ -36,8 +36,7 @@ def register(kind, shape, layer, child_func, weights):
kind = [kind]
else:
assert type(
kind
) is list, 'invalid param "kind" for register, not a list or str'
kind) is list, 'invalid param "kind" for register, not a list or str'
for k in kind:
assert type(
......
......@@ -28,61 +28,55 @@ default_op_mapping_field_values['FILL_NAME_FIELD'] = True
default_op_mapping = {
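# each entry: [fluid_op, input keys, output keys], optionally followed by an
# attribute-rename dict and a default-attribute dict; fields not given fall
# back to default_op_mapping_field_values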
'Shape': ['shape', ['X'], ['Out']],
'Clip': [
'clip', ['X'], ['Out'],
dict(),
dict(
min=(_np.asarray([255, 255, 127, 255],
dtype=_np.uint8).view(_np.float32)[0]),
max=(_np.asarray([255, 255, 127, 127],
dtype=_np.uint8).view(_np.float32)[0]),
)
'clip', ['X'], ['Out'], dict(), dict(
min=(_np.asarray(
[255, 255, 127, 255], dtype=_np.uint8).view(_np.float32)[0]),
max=(_np.asarray(
[255, 255, 127, 127], dtype=_np.uint8).view(_np.float32)[0]), )
],
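# the uint8 byte patterns in 'Clip' above decode (on little-endian hosts) to
# the float32 extremes: min = -FLT_MAX, max = +FLT_MAX (about 3.4028235e38)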
'Erf': ['erf', ['X'], ['Out']],
'Ceil': ['ceil', ['X'], ['Out']],
'ReduceMean': [
'reduce_mean', ['X'], ['Out'],
dict(axes='dim', keepdims='keep_dim'),
dict(keep_dim=1)
'reduce_mean', ['X'], ['Out'], dict(
axes='dim', keepdims='keep_dim'), dict(keep_dim=1)
],
'ReduceSum': [
'reduce_sum', ['X'], ['Out'],
dict(axes='dim', keepdims='keep_dim'),
dict(keep_dim=1)
'reduce_sum', ['X'], ['Out'], dict(
axes='dim', keepdims='keep_dim'), dict(keep_dim=1)
],
'ReduceMin': [
'reduce_min', ['X'], ['Out'],
dict(axes='dim', keepdims='keep_dim'),
dict(keep_dim=1)
'reduce_min', ['X'], ['Out'], dict(
axes='dim', keepdims='keep_dim'), dict(keep_dim=1)
],
'ReduceMax': [
'reduce_max', ['X'], ['Out'], dict(
axes='dim', keepdims='keep_dim'), dict(keep_dim=1)
],
# activation functions
'Relu': ['relu', ['X'], ['Out']],
'LeakyRelu': ['leaky_relu', ['X'], ['Out'],
dict(), dict(alpha=.01)],
'Elu': ['elu', ['X'], ['Out'],
dict(), dict(alpha=1.)],
'LeakyRelu': ['leaky_relu', ['X'], ['Out'], dict(), dict(alpha=.01)],
'Elu': ['elu', ['X'], ['Out'], dict(), dict(alpha=1.)],
'ThresholdedRelu': [
'thresholded_relu', ['X'], ['Out'],
dict(alpha='threshold'),
'thresholded_relu', ['X'], ['Out'], dict(alpha='threshold'),
dict(alpha=1.)
],
'Tanh': ['tanh', ['X'], ['Out']],
'Sigmoid': ['sigmoid', ['X'], ['Out']],
'HardSigmoid': [
'hard_sigmoid', ['X'], ['Out'],
dict(alpha='slope', beta='offset'),
dict(slope=.2, offset=.5)
'hard_sigmoid', ['X'], ['Out'], dict(
alpha='slope', beta='offset'), dict(
slope=.2, offset=.5)
],
'Softsign': ['softsign', ['X'], ['Out']],
'Softplus': ['softplus', ['X'], ['Out']],
'Exp': ['exp', ['X'], ['Out']],
'Softmax': ['softmax', ['X'], ['Out'],
dict(), dict(axis=1)],
'Softmax': ['softmax', ['X'], ['Out'], dict(), dict(axis=1)],
'Sqrt': ['sqrt', ['X'], ['Out']],
'Floor': ['floor', ['X'], ['Out']],
'Abs': ['abs', ['X'], ['Out']],
}
default_ioa_constraint = {
'Gather':
[(lambda i, o, a: a.get('axis', 0) == 0, 'only axis = 0 is supported')],
'Gather': [(lambda i, o, a: a.get('axis', 0) == 0,
'only axis = 0 is supported')],
}
......@@ -140,8 +140,8 @@ class ONNXOpMapper(OpMapper):
model.graph.ClearField('output')
model.graph.output.MergeFrom(model.graph.value_info)
onnx.save(model, os.path.join(self.tmp_data_dir,
'onnx_model_infer.onnx'))
onnx.save(model,
os.path.join(self.tmp_data_dir, 'onnx_model_infer.onnx'))
sess = rt.InferenceSession(
os.path.join(self.tmp_data_dir, 'onnx_model_infer.onnx'))
res = sess.run(None, input_feed=inputs_dict)
......@@ -217,8 +217,7 @@ class ONNXOpMapper(OpMapper):
default_attrs,
input_perm,
output_perm,
fill_name_field,
) = info
fill_name_field, ) = info
if fluid_op in default_ioa_constraint:
for predicate, message in default_ioa_constraint[fluid_op]:
......@@ -246,10 +245,8 @@ class ONNXOpMapper(OpMapper):
assert len(val_inps) == 1, 'directly_map error with multi inputs'
if fluid_op not in ['shape']:
attr['name'] = string(node.layer_name)
node.fluid_code.add_layer(fluid_op,
inputs=val_inps[0],
output=val_outs[0],
param_attr=attr)
node.fluid_code.add_layer(
fluid_op, inputs=val_inps[0], output=val_outs[0], param_attr=attr)
def deal_custom_layer(self, node):
op = node.layer_type
......@@ -258,7 +255,8 @@ class ONNXOpMapper(OpMapper):
params = get_params(node.layer, node.layer_type)
arg_names, kwargs = set_args(func, params)
kwargs['name'] = string(node.layer_name)
node.fluid_code.add_layer(func.__code__.co_name,
node.fluid_code.add_layer(
func.__code__.co_name,
inputs=node.inputs,
output=node,
param_attr=kwargs,
......@@ -299,21 +297,18 @@ class ONNXOpMapper(OpMapper):
'shape': val_y_reshaped,
'name': string(var_y_reshaped)
}
node.fluid_code.add_layer('reshape',
node.fluid_code.add_layer(
'reshape',
inputs=val_y,
output=var_y_reshaped,
param_attr=attr_reshaped)
inputs = {'x': val_x, 'y': var_y_reshaped}
node.fluid_code.add_layer(op_type,
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
op_type, inputs=inputs, output=node, param_attr=attr)
else:
inputs = {'x': val_x, 'y': val_y}
node.fluid_code.add_layer(op_type,
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
op_type, inputs=inputs, output=node, param_attr=attr)
def place_holder(self, node):
self.input_shapes.append(node.out_shapes[0])
......@@ -331,10 +326,8 @@ class ONNXOpMapper(OpMapper):
"append_batch_size": 'False'
}
node.fluid_code.add_layer("data",
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"data", inputs=None, output=node, param_attr=attr)
def create_parameter(self, node, parameter=None):
if parameter is not None:
......@@ -351,10 +344,8 @@ class ONNXOpMapper(OpMapper):
'attr': string(node.layer_name),
'default_initializer': 'Constant(0.0)'
}
node.fluid_code.add_layer("create_parameter",
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"create_parameter", inputs=None, output=node, param_attr=attr)
def _pad_if_asymmetric(self, node, pads, val_name): # pads: SSEE
assert len(pads) & 1 == 0
......@@ -418,10 +409,8 @@ class ONNXOpMapper(OpMapper):
else:
attr['out_shape'] = out_shape
node.fluid_code.add_layer(fluid_op,
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
fluid_op, inputs=val_x, output=node, param_attr=attr)
def RoiAlign(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -437,11 +426,10 @@ class ONNXOpMapper(OpMapper):
'spatial_scale': spatial_scale,
'sampling_ratio': sampling_ratio,
}
node.fluid_code.add_layer('roi_align',
inputs={
'input': val_x,
'rois': val_rois
},
node.fluid_code.add_layer(
'roi_align',
inputs={'input': val_x,
'rois': val_rois},
output=node,
param_attr=attr)
......@@ -456,11 +444,10 @@ class ONNXOpMapper(OpMapper):
'pooled_width': pooled_width,
'spatial_scale': spatial_scale,
}
node.fluid_code.add_layer('roi_pool',
inputs={
'input': val_x,
'rois': val_rois
},
node.fluid_code.add_layer(
'roi_pool',
inputs={'input': val_x,
'rois': val_rois},
output=node,
param_attr=attr)
......@@ -499,13 +486,12 @@ class ONNXOpMapper(OpMapper):
attr['paddings'] = paddings
if op_independent:
attr['name'] = string(node.layer_name)
node.fluid_code.add_layer(fluid_op,
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
fluid_op, inputs=val_x, output=node, param_attr=attr)
else:
attr['name'] = string(node.layer_name + '_paded')
node.fluid_code.add_layer(fluid_op,
node.fluid_code.add_layer(
fluid_op,
inputs=val_x,
output=node.layer_name + '_paded',
param_attr=attr)
......@@ -515,16 +501,12 @@ class ONNXOpMapper(OpMapper):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
axes = node.get_attr('axes')
if len(val_x.out_shapes[0]) == 0:
node.fluid_code.add_layer('assign',
inputs=val_x,
output=node,
param_attr=None)
node.fluid_code.add_layer(
'assign', inputs=val_x, output=node, param_attr=None)
else:
attr = {'axes': axes, 'name': string(node.layer_name)}
node.fluid_code.add_layer('unsqueeze',
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
'unsqueeze', inputs=val_x, output=node, param_attr=attr)
def Shrink(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -532,10 +514,18 @@ class ONNXOpMapper(OpMapper):
lambd = node.get_attr('lambd')
assert bias == 0.0, 'not support bias!=0'
attr = {'threshold': lambd, 'name': node.layer_name}
node.fluid_code.add_layer('hard_shrink',
inputs=val_x,
node.fluid_code.add_layer(
'hard_shrink', inputs=val_x, output=node, param_attr=attr)
def Greater(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_input_node(node, idx=1, copy=True)
node.fluid_code.add_layer(
'greater_than',
inputs={'x': val_x,
'y': val_y},
output=node,
param_attr=attr)
param_attr=None)
def Constant(self, node):
val_output = self.graph.get_node(node.layer.output[0], copy=True)
......@@ -552,8 +542,7 @@ class ONNXOpMapper(OpMapper):
shape = val_output.out_shapes[0]
if shape is None:
shape = list(value.shape)
_logger.warning(
'in (Constant -> %s): '
_logger.warning('in (Constant -> %s): '
'attribute "shape" of %s not inferred, '
'using value as 1-D tensor may lead to fails',
val_output.layer_name, val_output.layer_name)
......@@ -565,10 +554,8 @@ class ONNXOpMapper(OpMapper):
if dtype.name == 'int64':
dtype = 'int32'
attr = {'shape': shape, 'dtype': string(dtype), 'value': value}
node.fluid_code.add_layer('fill_constant',
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
'fill_constant', inputs=None, output=node, param_attr=attr)
else:
value = np.reshape(value, shape)
self.weights[node.layer_name] = value
......@@ -579,10 +566,8 @@ class ONNXOpMapper(OpMapper):
'attr': string(node.layer_name),
'default_initializer': 'Constant(0.0)'
}
node.fluid_code.add_layer("create_parameter",
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"create_parameter", inputs=None, output=node, param_attr=attr)
def Resize(self, node):
self._interpolate(node)
......@@ -603,13 +588,12 @@ class ONNXOpMapper(OpMapper):
name_ones = node.layer_name + '_ones'
attr_ones = {'shape': out_shape, 'dtype': string(val_x_dtype)}
node.fluid_code.add_layer('ones',
inputs=None,
output=name_ones,
param_attr=attr_ones)
node.fluid_code.add_layer(
'ones', inputs=None, output=name_ones, param_attr=attr_ones)
inputs = {'x': name_ones, 'y': val_x}
attr = {'name': string(node.layer_name)}
node.fluid_code.add_layer('elementwise_mul',
node.fluid_code.add_layer(
'elementwise_mul',
inputs=inputs,
output=node.layer_name,
param_attr=attr)
......@@ -622,11 +606,10 @@ class ONNXOpMapper(OpMapper):
assert len(
indices_shape) <= 2, "Gather op doesn't support indices with rank > 2"
if axis == 0 and len(indices_shape) <= 1:
node.fluid_code.add_layer('gather',
inputs={
'input': val_x,
'index': indices
},
node.fluid_code.add_layer(
'gather',
inputs={'input': val_x,
'index': indices},
output=node,
param_attr=None)
elif axis > 0 and len(indices_shape) <= 1:
......@@ -634,57 +617,53 @@ class ONNXOpMapper(OpMapper):
perm = [axis] + perm[:axis] + perm[axis + 1:]
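# move `axis` to the front so fluid's gather (which works on dim 0 only)
# applies, then transpose the result back afterwards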
attr_trans = {'perm': perm}
name_trans = val_x.layer_name + '_trans'
node.fluid_code.add_layer('transpose',
node.fluid_code.add_layer(
'transpose',
inputs=val_x,
output=name_trans,
param_attr=attr_trans)
node.fluid_code.add_layer('gather',
inputs={
'input': name_trans,
'index': indices
},
node.fluid_code.add_layer(
'gather',
inputs={'input': name_trans,
'index': indices},
output=node,
param_attr=None)
node.fluid_code.add_layer('transpose',
inputs=node,
output=node,
param_attr=attr_trans)
node.fluid_code.add_layer(
'transpose', inputs=node, output=node, param_attr=attr_trans)
elif len(indices_shape) > 1:
from functools import reduce
reshape_shape = reduce(lambda x, y: x * y, indices_shape)
node.fluid_code.add_layer('reshape',
node.fluid_code.add_layer(
'reshape',
inputs=indices,
output=indices,
param_attr={'shape': [
reshape_shape,
]})
param_attr={'shape': [reshape_shape, ]})
perm = list(range(len(val_x.out_shapes[0])))
perm = [axis] + perm[:axis] + perm[axis + 1:]
attr_trans = {'perm': perm}
name_trans = val_x.layer_name + '_trans'
node.fluid_code.add_layer('transpose',
node.fluid_code.add_layer(
'transpose',
inputs=val_x,
output=name_trans,
param_attr=attr_trans)
node.fluid_code.add_layer('gather',
inputs={
'input': name_trans,
'index': indices
},
node.fluid_code.add_layer(
'gather',
inputs={'input': name_trans,
'index': indices},
output=node,
param_attr=None)
node.fluid_code.add_layer('transpose',
inputs=node,
output=node,
param_attr=attr_trans)
node.fluid_code.add_layer(
'transpose', inputs=node, output=node, param_attr=attr_trans)
val_x_shape = val_x.out_shapes[0]
reshaped_shape = []
for i in perm:
reshaped_shape.append(indices_shape[i])
for i in val_x_shape[:axis] + val_x_shape[axis + 1:]:
reshaped_shape.append(i)
node.fluid_code.add_layer('reshape',
node.fluid_code.add_layer(
'reshape',
inputs=node,
output=node,
param_attr={'shape': reshaped_shape})
......@@ -725,10 +704,8 @@ class ONNXOpMapper(OpMapper):
if value > shape[axes[idx]]:
ends[idx] = shape[axes[idx]]
attr = {"axes": axes, "starts": starts, "ends": ends}
node.fluid_code.add_layer('slice',
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
'slice', inputs=val_x, output=node, param_attr=attr)
def ConstantOfShape(self, node):
val_shape = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -751,10 +728,8 @@ class ONNXOpMapper(OpMapper):
if dtype.name == 'int64':
dtype = 'int32'
attr = {'shape': shape, 'dtype': string(dtype), 'value': value}
node.fluid_code.add_layer('fill_constant',
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
'fill_constant', inputs=None, output=node, param_attr=attr)
def Split(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -769,10 +744,8 @@ class ONNXOpMapper(OpMapper):
'name': string(node.layer_name)
}
node.fluid_code.add_layer('split',
inputs=val_x,
output=val_y,
param_attr=attr)
node.fluid_code.add_layer(
'split', inputs=val_x, output=val_y, param_attr=attr)
def Reshape(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -789,7 +762,8 @@ class ONNXOpMapper(OpMapper):
shape, _, _ = self.get_dynamic_shape(val_shape.layer_name)
if val_shape.dtype == 'int64':
val_shape_cast = val_shape.layer_name + '_cast'
node.fluid_code.add_layer('cast',
node.fluid_code.add_layer(
'cast',
inputs=val_shape,
output=val_shape_cast,
param_attr={'dtype': string('int32')})
......@@ -810,10 +784,8 @@ class ONNXOpMapper(OpMapper):
val_x.layer_name, val_reshaped.layer_name)
attr['shape'] = shape
node.fluid_code.add_layer('reshape',
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
'reshape', inputs=val_x, output=node, param_attr=attr)
def Cast(self, node):
val_input = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -827,10 +799,8 @@ class ONNXOpMapper(OpMapper):
if output_dtype:
assert dtype == output_dtype, 'dtype of to unmatches output'
attr = {'dtype': string(dtype)}
node.fluid_code.add_layer('cast',
inputs=val_input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
'cast', inputs=val_input, output=node, param_attr=attr)
def AveragePool(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -865,10 +835,8 @@ class ONNXOpMapper(OpMapper):
"name": string(node.layer_name)
}
node.fluid_code.add_layer(fluid_op,
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
fluid_op, inputs=val_x, output=node, param_attr=attr)
def Concat(self, node):
inputs = []
......@@ -880,19 +848,15 @@ class ONNXOpMapper(OpMapper):
inputs.append(ipt.layer_name)
axis = node.get_attr('axis')
attr = {'axis': axis}
node.fluid_code.add_layer('concat',
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
'concat', inputs=inputs, output=node, param_attr=attr)
def Flatten(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
axis = node.get_attr('axis', 1)
attr = {"axis": str(axis), "name": string(node.layer_name)}
node.fluid_code.add_layer('flatten',
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
'flatten', inputs=val_x, output=node, param_attr=attr)
def Gemm(self, node):
val_a = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -911,7 +875,8 @@ class ONNXOpMapper(OpMapper):
"alpha": alpha,
"name": string(val_mm)
}
node.fluid_code.add_layer('matmul',
node.fluid_code.add_layer(
'matmul',
inputs=matmul_inputs,
output=val_mm,
param_attr=attr_matmul)
......@@ -920,21 +885,24 @@ class ONNXOpMapper(OpMapper):
if beta == 1.:
add_inputs = {"x": val_mm, "y": val_c}
attr = {"name": string(node.layer_name)}
node.fluid_code.add_layer("elementwise_add",
node.fluid_code.add_layer(
"elementwise_add",
inputs=add_inputs,
output=node,
param_attr=attr)
else:
var_beta = node.layer_name + '_beta'
matmul_beta_inputs = {"x": val_c, "y": var_beta}
node.fluid_code.add_layer("Constant",
node.fluid_code.add_layer(
"Constant",
inputs=matmul_beta_inputs,
output=var_beta,
param_attr={'value': beta})
add_inputs = {"x": val_mm, "y": var_beta}
attr = {"name": string(node.layer_name)}
node.fluid_code.add_layer("elementwise_add",
node.fluid_code.add_layer(
"elementwise_add",
inputs=add_inputs,
output=node,
param_attr=attr)
......@@ -942,8 +910,10 @@ class ONNXOpMapper(OpMapper):
def Sum(self, node):
val_inps = node.layer.input
inputs = {
"x": self.graph.get_input_node(node, idx=0, copy=True),
"y": self.graph.get_input_node(node, idx=1, copy=True),
"x": self.graph.get_input_node(
node, idx=0, copy=True),
"y": self.graph.get_input_node(
node, idx=1, copy=True),
}
node.fluid_code.add_layer("elementwise_add", inputs=inputs, output=node)
......@@ -953,19 +923,16 @@ class ONNXOpMapper(OpMapper):
"x": node.layer_name,
"y": y,
}
node.fluid_code.add_layer("elementwise_add",
inputs=inputs,
output=node)
node.fluid_code.add_layer(
"elementwise_add", inputs=inputs, output=node)
def MatMul(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_input_node(node, idx=1, copy=True)
inputs = {"x": val_x, "y": val_y}
attr = {"name": string(node.layer_name)}
node.fluid_code.add_layer("matmul",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"matmul", inputs=inputs, output=node, param_attr=attr)
def BatchNormalization(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -996,27 +963,21 @@ class ONNXOpMapper(OpMapper):
"use_global_stats": spatial,
"name": string(node.layer_name)
}
node.fluid_code.add_layer("batch_norm",
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"batch_norm", inputs=val_x, output=node, param_attr=attr)
def Transpose(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
perm = node.get_attr('perm')
attr = {'perm': perm, "name": string(node.layer_name)}
node.fluid_code.add_layer("transpose",
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=val_x, output=node, param_attr=attr)
def Relu(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
attr = {"name": string(node.layer_name)}
node.fluid_code.add_layer("relu",
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"relu", inputs=val_x, output=node, param_attr=attr)
def PRelu(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -1032,28 +993,23 @@ class ONNXOpMapper(OpMapper):
"param_attr": string(val_slope.layer_name),
'mode': string(mode)
}
node.fluid_code.add_layer("prelu",
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"prelu", inputs=val_x, output=node, param_attr=attr)
def Squeeze(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
axes = node.get_attr('axes')
attr = {'axes': axes, "name": string(node.layer_name)}
node.fluid_code.add_layer("squeeze",
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"squeeze", inputs=val_x, output=node, param_attr=attr)
def Equal(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_input_node(node, idx=1, copy=True)
node.fluid_code.add_layer("equal",
inputs={
'x': val_x,
'y': val_y
},
node.fluid_code.add_layer(
"equal",
inputs={'x': val_x,
'y': val_y},
output=node,
param_attr=None)
......@@ -1063,52 +1019,51 @@ class ONNXOpMapper(OpMapper):
val_y = self.graph.get_input_node(node, idx=2, copy=True)
not_condition = condition.layer_name + '_not'
node.fluid_code.add_layer("logical_not",
node.fluid_code.add_layer(
"logical_not",
inputs=condition,
output=not_condition,
param_attr=None)
cast_not_condition = not_condition + '_cast'
node.fluid_code.add_layer("cast",
node.fluid_code.add_layer(
"cast",
inputs=not_condition,
output=cast_not_condition,
param_attr={'dtype': string(val_x.dtype)})
cast_condition = condition.layer_name + '_cast'
node.fluid_code.add_layer("cast",
node.fluid_code.add_layer(
"cast",
inputs=condition,
output=cast_condition,
param_attr={'dtype': string(val_x.dtype)})
mul_val_x = val_x.layer_name + '_mul'
node.fluid_code.add_layer("elementwise_mul",
inputs={
'x': val_x,
'y': cast_condition
},
node.fluid_code.add_layer(
"elementwise_mul",
inputs={'x': val_x,
'y': cast_condition},
output=mul_val_x,
param_attr=None)
mul_val_y = val_y.layer_name + '_mul'
node.fluid_code.add_layer("elementwise_mul",
inputs={
'x': val_y,
'y': cast_not_condition
},
node.fluid_code.add_layer(
"elementwise_mul",
inputs={'x': val_y,
'y': cast_not_condition},
output=mul_val_y,
param_attr=None)
node.fluid_code.add_layer("elementwise_add",
inputs={
'x': mul_val_x,
'y': mul_val_y
},
node.fluid_code.add_layer(
"elementwise_add",
inputs={'x': mul_val_x,
'y': mul_val_y},
output=node,
param_attr=None)
def NonZero(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
where_name = node.layer_name + '_where'
node.fluid_code.add_layer("where",
inputs=val_x.layer_name + '!=0',
output=where_name)
node.fluid_code.add_layer(
"where", inputs=val_x.layer_name + '!=0', output=where_name)
dims = len(val_x.out_shapes[0])
elements_count_val_x = reduce(lambda x, y: x * y, val_x.out_shapes[0])
flatten_names = []
......@@ -1121,17 +1076,15 @@ class ONNXOpMapper(OpMapper):
'starts': [0, dim],
'ends': [elements_count_val_x, dim + 1]
}
node.fluid_code.add_layer("slice",
inputs=where_name,
output=slice_name,
param_attr=attr)
node.fluid_code.add_layer("flatten",
node.fluid_code.add_layer(
"slice", inputs=where_name, output=slice_name, param_attr=attr)
node.fluid_code.add_layer(
"flatten",
inputs=slice_name,
output=flatten_name,
param_attr={'axis': 0})
node.fluid_code.add_layer("concat",
inputs=flatten_names,
output=node,
node.fluid_code.add_layer(
"concat", inputs=flatten_names, output=node,
param_attr={'axis': 0})
def Identity(self, node):
......@@ -1151,10 +1104,8 @@ class ONNXOpMapper(OpMapper):
'expand_times': repeats,
"name": string(node.layer_name),
}
node.fluid_code.add_layer("expand",
inputs=val_x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"expand", inputs=val_x, output=node, param_attr=attr)
def MaxPool(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -1191,10 +1142,8 @@ class ONNXOpMapper(OpMapper):
"name": string(node.layer_name),
"exclusive": False
}
        node.fluid_code.add_layer(
            fluid_op, inputs=val_x, output=node, param_attr=attr)
def _global_pool(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -1220,10 +1169,8 @@ class ONNXOpMapper(OpMapper):
"global_pooling": True,
"name": string(node.layer_name)
}
        node.fluid_code.add_layer(
            fluid_op, inputs=val_x, output=node, param_attr=attr)
def GlobalMaxPool(self, node):
self._global_pool(node)
......@@ -1279,10 +1226,8 @@ class ONNXOpMapper(OpMapper):
attr["bias_attr"] = string(val_b.layer_name)
else:
attr["bias_attr"] = False
        node.fluid_code.add_layer(
            fluid_op, inputs=val_x, output=node, param_attr=attr)
def ConvTranspose(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -1314,11 +1259,11 @@ class ONNXOpMapper(OpMapper):
output_size = [0, 0]
        output_size[0] = (val_x.out_shapes[0][2] - 1
                          ) * strides[0] - 2 * paddings[0] + dilations[0] * (
                              kernel_shape[0] - 1) + 1 + out_padding[0]
        output_size[1] = (val_x.out_shapes[0][3] - 1
                          ) * strides[1] - 2 * paddings[1] + dilations[1] * (
                              kernel_shape[1] - 1) + 1 + out_padding[1]
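        # Illustrative sketch (hypothetical numbers, not part of the
        # converter): for input height 7, stride 2, padding 1, dilation 1,
        # kernel 3 and out_padding 0, the formula above gives
        # (7 - 1) * 2 - 2 * 1 + 1 * (3 - 1) + 1 + 0 = 13, the standard
        # deconvolution output-size rule.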
attr = {
'num_filters': num_out_channels,
......@@ -1332,10 +1277,8 @@ class ONNXOpMapper(OpMapper):
'bias_attr': None if val_b is None else string(val_b.layer_name),
'name': string(node.layer_name),
}
        node.fluid_code.add_layer(
            fluid_op, inputs=val_x, output=node, param_attr=attr)
def GRU(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -1352,15 +1295,13 @@ class ONNXOpMapper(OpMapper):
else:
miss_arg_num += 1
if num_ipt > 4 and node.layer.input[4] != '':
            val_len = self.graph.get_input_node(
                node, idx=4 - miss_arg_num, copy=True)
else:
miss_arg_num += 1
if num_ipt > 5 and node.layer.input[5] != '':
            val_xh = self.graph.get_input_node(
                node, idx=5 - miss_arg_num, copy=True)
data, dtype, shape = self.get_dynamic_shape(val_x.layer_name)
......@@ -1401,30 +1342,27 @@ class ONNXOpMapper(OpMapper):
is_reverse = direction == 'reverse'
var_x0 = node.layer_name + '_x0'
        node.fluid_code.add_layer(
            'squeeze',
            inputs=val_x,
            output=var_x0,
            param_attr={'axes': [1],
                        'name': string(var_x0)})
var_w0 = node.layer_name + '_w0'
        node.fluid_code.add_layer(
            'squeeze',
            inputs=val_w,
            output=var_w0,
            param_attr={'axes': [0],
                        'name': string(var_w0)})
var_fc = node.layer_name + '_fc'
var_mm = (node.layer_name + '_mm') if val_b else var_fc
        node.fluid_code.add_layer(
            'matmul',
            inputs={'x': var_x0,
                    'y': var_w0},
            output=var_mm,
param_attr={
'transpose_x': 0,
......@@ -1433,65 +1371,58 @@ class ONNXOpMapper(OpMapper):
})
var_r0 = node.layer_name + '_r0'
        node.fluid_code.add_layer(
            'squeeze',
            inputs=val_r,
            output=var_r0,
            param_attr={'axes': [0],
                        'name': string(var_r0)})
var_r0t = node.layer_name + '_r0t'
        node.fluid_code.add_layer(
            'transpose',
            inputs=var_r0,
            output=var_r0t,
            param_attr={'perm': [1, 0],
                        'name': string(var_r0t)})
if val_b:
var_bi = node.layer_name + '_bi'
var_bh = node.layer_name + '_bh'
            node.fluid_code.add_layer(
                'split',
                inputs=val_b,
                output=var_bi + ',' + var_bh,
                param_attr={
                    'axis': 1,
                    'split': [hidden_size * 3, hidden_size * 3],
                    'name': string(node.layer_name + '.b/split')
                })
var_bi0 = node.layer_name + '_bi0'
            node.fluid_code.add_layer(
                'squeeze',
                inputs=var_bi,
                output=var_bi0,
                param_attr={'axes': [0],
                            'name': string(var_bi0)})
            node.fluid_code.add_layer(
                'elementwise_add',
                inputs=[var_mm, var_bi0],
                output=var_fc,
                param_attr={
                    'axes': 1,
                    'name': string(node.layer_name + '.i/bias')
                })
if val_xh:
var_xh0 = node.layer_name + '_xh0'
            node.fluid_code.add_layer(
                'squeeze',
                inputs=val_xh,
                output=var_xh0,
                param_attr={'axes': [1],
                            'name': string(var_xh0)})
var_y00 = node.layer_name + '_y00'
attr = {
......@@ -1503,7 +1434,8 @@ class ONNXOpMapper(OpMapper):
'param_attr': string(var_r0t),
'bias_attr': string(var_bh) if val_b else False,
}
        node.fluid_code.add_layer(
            'dynamic_gru',
inputs=var_fc + ',' + str(hidden_size),
output=var_y00,
param_attr=attr)
......@@ -1511,7 +1443,8 @@ class ONNXOpMapper(OpMapper):
num_opt = len(node.layer.output)
if num_opt > 0 and node.layer.output[0] != '':
            node.fluid_code.add_layer(
                'unsqueeze',
inputs=var_y00,
output=node.layer.output[0],
param_attr={
......@@ -1519,7 +1452,8 @@ class ONNXOpMapper(OpMapper):
'name': string(node.layer.output[0])
})
if num_opt > 1 and node.layer.output[1] != '':
            node.fluid_code.add_layer(
                'unsqueeze',
inputs=var_y00,
output=node.layer.output[1],
param_attr={
......
import onnx
import numpy as np
from onnx import onnx_pb, helper
im2seq_counter = 0
def im2sequence(op, block):
    global im2seq_counter
n, c, h, w = block.var(op.input('X')[0]).shape
assert h > 0 and w > 0, "Only supported fixed input shape for im2sequence operator."
stride_h, stride_w = op.attr('strides')
paddings = op.attr('paddings')
    assert op.attr(
        'out_stride'
    ) == 1, "Only out_stride==1 is supported for im2sequence operator."
h = h + paddings[0] + paddings[1]
w = w + paddings[1] + paddings[2]
kernel_h, kernel_w = op.attr('kernels')
out_h = 1 + (h - kernel_h + stride_h - 1) // stride_h
out_w = 1 + (w - kernel_w + stride_w - 1) // stride_w
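    # Illustrative sketch (hypothetical numbers): the expression
    # 1 + (h - kernel_h + stride_h - 1) // stride_h is a ceiling division;
    # e.g. with h=8, kernel_h=3 and stride_h=2 it gives 1 + 6 // 2 = 4
    # sliding positions along the height axis.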
h_steps = list()
for i in range(out_h):
h_steps.append([i * stride_h, i * stride_h + kernel_h])
w_steps = list()
for i in range(out_w):
w_steps.append([i * stride_w, i * stride_w + kernel_w])
nodes = list()
slice_blocks = list()
for i in range(out_h):
for j in range(out_w):
starts_name = "im2sequence.starts.{}.{}.{}".format(im2seq_counter,
i, j)
starts_tensor = helper.make_tensor(
name=starts_name,
data_type=onnx_pb.TensorProto.INT64,
dims=[4],
vals=[0, 0, h_steps[i][0], w_steps[j][0]])
ends_name = "im2sequence.ends.{}.{}.{}".format(im2seq_counter, i, j)
ends_tensor = helper.make_tensor(
name=ends_name,
data_type=onnx_pb.TensorProto.INT64,
dims=[4],
vals=[999999, 999999, h_steps[i][1], w_steps[j][1]])
starts_node = helper.make_node(
'Constant',
inputs=[],
outputs=[starts_name],
value=starts_tensor)
ends_node = helper.make_node(
'Constant', inputs=[], outputs=[ends_name], value=ends_tensor)
nodes.extend([starts_node, ends_node])
slice_block_name = "im2sequence.slice.{}.{}.{}".format(
im2seq_counter, i, j)
slice_block_node = helper.make_node(
'Slice',
inputs=[op.input('X')[0], starts_name, ends_name],
outputs=[slice_block_name])
flatten_block_name = "im2sequence.flatten.{}.{}.{}".format(
im2seq_counter, i, j)
flatten_block_node = helper.make_node(
"Flatten",
inputs=[slice_block_name],
outputs=[flatten_block_name],
axis=0)
nodes.extend([slice_block_node, flatten_block_node])
slice_blocks.append(flatten_block_name)
concat_block_name = "im2sequence.concat_block.{}".format(im2seq_counter)
# concat_block_node = helper.make_node("Concat", inputs=slice_blocks, outputs=[concat_block_name], axis=0)
concat_block_node = helper.make_node(
"Concat", inputs=slice_blocks, outputs=op.output('Out'), axis=0)
nodes.append(concat_block_node)
print("\n\n==========Importance Notice===========")
print(
"Since im2sequence operator is used in your paddlepaddle model, the translated onnx model only support input data with batch_size=1."
)
print("======================================\n")
return nodes
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
import warnings
from onnx import helper, onnx_pb
def multiclass_nms(op, block):
"""
    Convert the paddle multiclass_nms op to ONNX ops.
    This op selects the kept boxes from the original boxes via NMS.
"""
inputs = dict()
outputs = dict()
attrs = dict()
for name in op.input_names:
inputs[name] = op.input(name)
for name in op.output_names:
outputs[name] = op.output(name)
for name in op.attr_names:
attrs[name] = op.attr(name)
result_name = outputs['Out'][0]
background = attrs['background_label']
normalized = attrs['normalized']
if normalized == False:
warnings.warn(
        'The parameter normalized of multiclass_nms OP of Paddle is False, which differs from ONNX. \
Please set normalized=True in multiclass_nms of Paddle')
#convert the paddle attribute to onnx tensor
name_score_threshold = [outputs['Out'][0] + "@score_threshold"]
name_iou_threshold = [outputs['Out'][0] + "@iou_threshold"]
name_keep_top_k = [outputs['Out'][0] + '@keep_top_k']
name_keep_top_k_2D = [outputs['Out'][0] + '@keep_top_k_1D']
node_score_threshold = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_score_threshold,
value=onnx.helper.make_tensor(
name=name_score_threshold[0] + "@const",
data_type=onnx.TensorProto.FLOAT,
dims=(),
vals=[float(attrs['score_threshold'])]))
node_iou_threshold = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_iou_threshold,
value=onnx.helper.make_tensor(
name=name_iou_threshold[0] + "@const",
data_type=onnx.TensorProto.FLOAT,
dims=(),
vals=[float(attrs['nms_threshold'])]))
node_keep_top_k = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_keep_top_k,
value=onnx.helper.make_tensor(
name=name_keep_top_k[0] + "@const",
data_type=onnx.TensorProto.INT64,
dims=(),
vals=[np.int64(attrs['keep_top_k'])]))
node_keep_top_k_2D = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_keep_top_k_2D,
value=onnx.helper.make_tensor(
name=name_keep_top_k_2D[0] + "@const",
data_type=onnx.TensorProto.INT64,
dims=[1, 1],
vals=[np.int64(attrs['keep_top_k'])]))
# the paddle data format is x1,y1,x2,y2
kwargs = {'center_point_box': 0}
name_select_nms = [outputs['Out'][0] + "@select_index"]
    node_select_nms = onnx.helper.make_node(
'NonMaxSuppression',
inputs=inputs['BBoxes'] + inputs['Scores'] + name_keep_top_k +\
name_iou_threshold + name_score_threshold,
outputs=name_select_nms)
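    # Note (sketch): ONNX NonMaxSuppression returns selected_indices of
    # shape [num_selected, 3], where each row is
    # [batch_index, class_index, box_index]; the Gather ops below extract
    # the class column (index 1) and the box column (index 2).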
# step 1 nodes select the nms class
node_list = [
node_score_threshold, node_iou_threshold, node_keep_top_k,
node_keep_top_k_2D, node_select_nms
]
# create some const value to use
name_const_value = [result_name+"@const_0",
result_name+"@const_1",\
result_name+"@const_2",\
result_name+"@const_-1"]
value_const_value = [0, 1, 2, -1]
for name, value in zip(name_const_value, value_const_value):
node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=[name],
value=onnx.helper.make_tensor(
name=name + "@const",
data_type=onnx.TensorProto.INT64,
dims=[1],
vals=[value]))
node_list.append(node)
    # In this code block, we decode the raw score data, reshaping it from
    # N * C * M to 1 * (N*C*M); at the same time, we decode the select
    # indices to 1 * D and gather the selected indices.
outputs_gather_1 = [result_name + "@gather_1"]
node_gather_1 = onnx.helper.make_node(
'Gather',
inputs=name_select_nms + [result_name + "@const_1"],
outputs=outputs_gather_1,
axis=1)
node_list.append(node_gather_1)
outputs_squeeze_gather_1 = [result_name + "@sequeeze_gather_1"]
node_squeeze_gather_1 = onnx.helper.make_node(
'Squeeze',
inputs=outputs_gather_1,
outputs=outputs_squeeze_gather_1,
axes=[1])
node_list.append(node_squeeze_gather_1)
outputs_gather_2 = [result_name + "@gather_2"]
node_gather_2 = onnx.helper.make_node(
'Gather',
inputs=name_select_nms + [result_name + "@const_2"],
outputs=outputs_gather_2,
axis=1)
node_list.append(node_gather_2)
    # keep only detections whose class id is not the background class (0)
if background == 0:
outputs_nonzero = [result_name + "@nonzero"]
node_nonzero = onnx.helper.make_node(
'NonZero', inputs=outputs_squeeze_gather_1, outputs=outputs_nonzero)
node_list.append(node_nonzero)
else:
name_thresh = [result_name + "@thresh"]
node_thresh = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_thresh,
value=onnx.helper.make_tensor(
name=name_thresh[0] + "@const",
data_type=onnx.TensorProto.INT32,
dims=[1],
vals=[-1]))
node_list.append(node_thresh)
outputs_cast = [result_name + "@cast"]
node_cast = onnx.helper.make_node(
'Cast', inputs=outputs_squeeze_gather_1, outputs=outputs_cast, to=6)
node_list.append(node_cast)
outputs_greater = [result_name + "@greater"]
node_greater = onnx.helper.make_node(
'Greater',
inputs=outputs_cast + name_thresh,
outputs=outputs_greater)
node_list.append(node_greater)
outputs_nonzero = [result_name + "@nonzero"]
node_nonzero = onnx.helper.make_node(
'NonZero', inputs=outputs_greater, outputs=outputs_nonzero)
node_list.append(node_nonzero)
outputs_gather_1_nonzero = [result_name + "@gather_1_nonzero"]
node_gather_1_nonzero = onnx.helper.make_node(
'Gather',
inputs=outputs_gather_1 + outputs_nonzero,
outputs=outputs_gather_1_nonzero,
axis=0)
node_list.append(node_gather_1_nonzero)
outputs_gather_2_nonzero = [result_name + "@gather_2_nonzero"]
node_gather_2_nonzero = onnx.helper.make_node(
'Gather',
inputs=outputs_gather_2 + outputs_nonzero,
outputs=outputs_gather_2_nonzero,
axis=0)
node_list.append(node_gather_2_nonzero)
# reshape scores N * C * M to (N*C*M) * 1
outputs_reshape_scores_rank1 = [result_name + "@reshape_scores_rank1"]
node_reshape_scores_rank1 = onnx.helper.make_node(
"Reshape",
inputs=inputs['Scores'] + [result_name + "@const_-1"],
outputs=outputs_reshape_scores_rank1)
node_list.append(node_reshape_scores_rank1)
# get the shape of scores
outputs_shape_scores = [result_name + "@shape_scores"]
node_shape_scores = onnx.helper.make_node(
'Shape', inputs=inputs['Scores'], outputs=outputs_shape_scores)
node_list.append(node_shape_scores)
    # gather dim 2 (the number of boxes M) from the shape of scores
outputs_gather_scores_dim1 = [result_name + "@gather_scores_dim1"]
node_gather_scores_dim1 = onnx.helper.make_node(
'Gather',
inputs=outputs_shape_scores + [result_name + "@const_2"],
outputs=outputs_gather_scores_dim1,
axis=0)
node_list.append(node_gather_scores_dim1)
    # multiply class_id by M (the number of boxes per class)
outputs_mul_classnum_boxnum = [result_name + "@mul_classnum_boxnum"]
node_mul_classnum_boxnum = onnx.helper.make_node(
'Mul',
inputs=outputs_gather_1_nonzero + outputs_gather_scores_dim1,
outputs=outputs_mul_classnum_boxnum)
node_list.append(node_mul_classnum_boxnum)
    # flat_index = class_id * M + box_id
outputs_add_class_M_index = [result_name + "@add_class_M_index"]
node_add_class_M_index = onnx.helper.make_node(
'Add',
inputs=outputs_mul_classnum_boxnum + outputs_gather_2_nonzero,
outputs=outputs_add_class_M_index)
node_list.append(node_add_class_M_index)
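    # Illustrative sketch (hypothetical numbers): with M = 100 boxes per
    # class, a detection with class_id 2 and box_id 7 maps to flat index
    # 2 * 100 + 7 = 207 in the scores tensor flattened to (N*C*M,).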
# Squeeze the indices to 1 dim
outputs_squeeze_select_index = [result_name + "@squeeze_select_index"]
node_squeeze_select_index = onnx.helper.make_node(
'Squeeze',
inputs=outputs_add_class_M_index,
outputs=outputs_squeeze_select_index,
axes=[0, 2])
node_list.append(node_squeeze_select_index)
# gather the data from flatten scores
outputs_gather_select_scores = [result_name + "@gather_select_scores"]
node_gather_select_scores = onnx.helper.make_node('Gather',
inputs=outputs_reshape_scores_rank1 + \
outputs_squeeze_select_index,
outputs=outputs_gather_select_scores,
axis=0)
node_list.append(node_gather_select_scores)
    # get the number of selected boxes, which is fed to TopK
outputs_shape_select_num = [result_name + "@shape_select_num"]
node_shape_select_num = onnx.helper.make_node(
'Shape',
inputs=outputs_gather_select_scores,
outputs=outputs_shape_select_num)
node_list.append(node_shape_select_num)
outputs_gather_select_num = [result_name + "@gather_select_num"]
node_gather_select_num = onnx.helper.make_node(
'Gather',
inputs=outputs_shape_select_num + [result_name + "@const_0"],
outputs=outputs_gather_select_num,
axis=0)
node_list.append(node_gather_select_num)
outputs_unsqueeze_select_num = [result_name + "@unsqueeze_select_num"]
node_unsqueeze_select_num = onnx.helper.make_node(
'Unsqueeze',
inputs=outputs_gather_select_num,
outputs=outputs_unsqueeze_select_num,
axes=[0])
node_list.append(node_unsqueeze_select_num)
outputs_concat_topK_select_num = [result_name + "@conat_topK_select_num"]
node_conat_topK_select_num = onnx.helper.make_node(
'Concat',
inputs=outputs_unsqueeze_select_num + name_keep_top_k_2D,
outputs=outputs_concat_topK_select_num,
axis=0)
node_list.append(node_conat_topK_select_num)
outputs_cast_concat_topK_select_num = [
result_name + "@concat_topK_select_num"
]
node_outputs_cast_concat_topK_select_num = onnx.helper.make_node(
'Cast',
inputs=outputs_concat_topK_select_num,
outputs=outputs_cast_concat_topK_select_num,
to=6)
node_list.append(node_outputs_cast_concat_topK_select_num)
# get min(topK, num_select)
outputs_compare_topk_num_select = [result_name + "@compare_topk_num_select"]
node_compare_topk_num_select = onnx.helper.make_node(
'ReduceMin',
inputs=outputs_cast_concat_topK_select_num,
outputs=outputs_compare_topk_num_select,
keepdims=0)
node_list.append(node_compare_topk_num_select)
# unsqueeze the indices to 1D tensor
outputs_unsqueeze_topk_select_indices = [
result_name + "@unsqueeze_topk_select_indices"
]
node_unsqueeze_topk_select_indices = onnx.helper.make_node(
'Unsqueeze',
inputs=outputs_compare_topk_num_select,
outputs=outputs_unsqueeze_topk_select_indices,
axes=[0])
node_list.append(node_unsqueeze_topk_select_indices)
# cast the indices to INT64
outputs_cast_topk_indices = [result_name + "@cast_topk_indices"]
node_cast_topk_indices = onnx.helper.make_node(
'Cast',
inputs=outputs_unsqueeze_topk_select_indices,
outputs=outputs_cast_topk_indices,
to=7)
node_list.append(node_cast_topk_indices)
# select topk scores indices
outputs_topk_select_topk_indices = [result_name + "@topk_select_topk_values",\
result_name + "@topk_select_topk_indices"]
node_topk_select_topk_indices = onnx.helper.make_node(
'TopK',
inputs=outputs_gather_select_scores + outputs_cast_topk_indices,
outputs=outputs_topk_select_topk_indices)
node_list.append(node_topk_select_topk_indices)
# gather topk label, scores, boxes
outputs_gather_topk_scores = [result_name + "@gather_topk_scores"]
node_gather_topk_scores = onnx.helper.make_node(
'Gather',
inputs=outputs_gather_select_scores +
[outputs_topk_select_topk_indices[1]],
outputs=outputs_gather_topk_scores,
axis=0)
node_list.append(node_gather_topk_scores)
outputs_gather_topk_class = [result_name + "@gather_topk_class"]
node_gather_topk_class = onnx.helper.make_node(
'Gather',
inputs=outputs_gather_1_nonzero +
[outputs_topk_select_topk_indices[1]],
outputs=outputs_gather_topk_class,
axis=1)
node_list.append(node_gather_topk_class)
    # to gather the boxes, first gather the box ids, then gather the boxes
outputs_gather_topk_boxes_id = [result_name + "@gather_topk_boxes_id"]
node_gather_topk_boxes_id = onnx.helper.make_node(
'Gather',
inputs=outputs_gather_2_nonzero +
[outputs_topk_select_topk_indices[1]],
outputs=outputs_gather_topk_boxes_id,
axis=1)
node_list.append(node_gather_topk_boxes_id)
# squeeze the gather_topk_boxes_id to 1 dim
outputs_squeeze_topk_boxes_id = [result_name + "@squeeze_topk_boxes_id"]
node_squeeze_topk_boxes_id = onnx.helper.make_node(
'Squeeze',
inputs=outputs_gather_topk_boxes_id,
outputs=outputs_squeeze_topk_boxes_id,
axes=[0, 2])
node_list.append(node_squeeze_topk_boxes_id)
outputs_gather_select_boxes = [result_name + "@gather_select_boxes"]
node_gather_select_boxes = onnx.helper.make_node(
'Gather',
inputs=inputs['BBoxes'] + outputs_squeeze_topk_boxes_id,
outputs=outputs_gather_select_boxes,
axis=1)
node_list.append(node_gather_select_boxes)
# concat the final result
# before concat need to cast the class to float
outputs_cast_topk_class = [result_name + "@cast_topk_class"]
node_cast_topk_class = onnx.helper.make_node(
'Cast',
inputs=outputs_gather_topk_class,
outputs=outputs_cast_topk_class,
to=1)
node_list.append(node_cast_topk_class)
outputs_unsqueeze_topk_scores = [result_name + "@unsqueeze_topk_scores"]
node_unsqueeze_topk_scores = onnx.helper.make_node(
'Unsqueeze',
inputs=outputs_gather_topk_scores,
outputs=outputs_unsqueeze_topk_scores,
axes=[0, 2])
node_list.append(node_unsqueeze_topk_scores)
inputs_concat_final_results = outputs_cast_topk_class + outputs_unsqueeze_topk_scores +\
outputs_gather_select_boxes
outputs_concat_final_results = outputs['Out']
node_concat_final_results = onnx.helper.make_node(
'Concat',
inputs=inputs_concat_final_results,
outputs=outputs_concat_final_results,
axis=2)
node_list.append(node_concat_final_results)
return node_list
import onnx
import numpy as np
from onnx import onnx_pb, helper
def get_old_name(arg, name_prefix=''):
prefix_index = arg.find(name_prefix)
if prefix_index != -1:
last_prefix = arg[len(name_prefix):]
else:
last_prefix = arg
idx = last_prefix.find('@')
if idx != -1:
last_prefix = last_prefix[:idx]
return name_prefix + last_prefix
def yolo_box(op, block):
inputs = dict()
outputs = dict()
attrs = dict()
for name in op.input_names:
inputs[name] = op.input(name)
for name in op.output_names:
outputs[name] = op.output(name)
for name in op.attr_names:
attrs[name] = op.attr(name)
model_name = outputs['Boxes'][0]
input_shape = block.vars[get_old_name(inputs['X'][0])].shape
image_size = inputs['ImgSize']
input_height = input_shape[2]
input_width = input_shape[3]
class_num = attrs['class_num']
anchors = attrs['anchors']
num_anchors = int(len(anchors)) // 2
downsample_ratio = attrs['downsample_ratio']
input_size = input_height * downsample_ratio
conf_thresh = attrs['conf_thresh']
conf_thresh_mat = np.ones([num_anchors * input_height *
input_width]) * conf_thresh
node_list = []
im_outputs = []
x_shape = [1, num_anchors, 5 + class_num, input_height, input_width]
name_x_shape = [model_name + "@x_shape"]
node_x_shape = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_x_shape,
value=onnx.helper.make_tensor(
name=name_x_shape[0] + "@const",
data_type=onnx.TensorProto.INT64,
dims=[5],
vals=x_shape))
node_list.append(node_x_shape)
outputs_x_reshape = [model_name + "@reshape"]
node_x_reshape = onnx.helper.make_node(
'Reshape', inputs=inputs['X'] + name_x_shape, outputs=outputs_x_reshape)
node_list.append(node_x_reshape)
outputs_x_transpose = [model_name + "@x_transpose"]
node_x_transpose = onnx.helper.make_node(
'Transpose',
inputs=outputs_x_reshape,
outputs=outputs_x_transpose,
perm=[0, 1, 3, 4, 2])
node_list.append(node_x_transpose)
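    # Note (sketch): the Reshape/Transpose above rearranges the raw head
    # output from [N, num_anchors * (5 + class_num), H, W] to
    # [N, num_anchors, H, W, 5 + class_num], so the last axis can be split
    # into x, y, w, h, objectness and the class_num class probabilities.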
range_x = []
range_y = []
for i in range(0, input_width):
range_x.append(i)
for j in range(0, input_height):
range_y.append(j)
name_range_x = [model_name + "@range_x"]
node_range_x = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_range_x,
value=onnx.helper.make_tensor(
name=name_range_x[0] + "@const",
data_type=onnx.TensorProto.FLOAT,
dims=[input_width],
vals=range_x))
node_list.append(node_range_x)
name_range_y = [model_name + "@range_y"]
node_range_y = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_range_y,
value=onnx.helper.make_tensor(
name=name_range_y[0] + "@const",
data_type=onnx.TensorProto.FLOAT,
dims=[input_height],
vals=range_y))
node_list.append(node_range_y)
range_x_new_shape = [1, input_width]
range_y_new_shape = [input_height, 1]
name_range_x_new_shape = [model_name + "@range_x_new_shape"]
node_range_x_new_shape = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_range_x_new_shape,
value=onnx.helper.make_tensor(
name=name_range_x_new_shape[0] + "@const",
data_type=onnx.TensorProto.INT64,
dims=[len(range_x_new_shape)],
vals=range_x_new_shape))
node_list.append(node_range_x_new_shape)
name_range_y_new_shape = [model_name + "@range_y_new_shape"]
node_range_y_new_shape = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_range_y_new_shape,
value=onnx.helper.make_tensor(
name=name_range_y_new_shape[0] + "@const",
data_type=onnx.TensorProto.INT64,
dims=[len(range_y_new_shape)],
vals=range_y_new_shape))
node_list.append(node_range_y_new_shape)
outputs_range_x_reshape = [model_name + "@range_x_reshape"]
node_range_x_reshape = onnx.helper.make_node(
'Reshape',
inputs=name_range_x + name_range_x_new_shape,
outputs=outputs_range_x_reshape)
node_list.append(node_range_x_reshape)
outputs_range_y_reshape = [model_name + "@range_y_reshape"]
node_range_y_reshape = onnx.helper.make_node(
'Reshape',
inputs=name_range_y + name_range_y_new_shape,
outputs=outputs_range_y_reshape)
node_list.append(node_range_y_reshape)
outputs_grid_x = [model_name + "@grid_x"]
node_grid_x = onnx.helper.make_node(
"Tile",
inputs=outputs_range_x_reshape + name_range_y_new_shape,
outputs=outputs_grid_x)
node_list.append(node_grid_x)
outputs_grid_y = [model_name + "@grid_y"]
node_grid_y = onnx.helper.make_node(
"Tile",
inputs=outputs_range_y_reshape + name_range_x_new_shape,
outputs=outputs_grid_y)
node_list.append(node_grid_y)
outputs_box_x = [model_name + "@box_x"]
outputs_box_y = [model_name + "@box_y"]
outputs_box_w = [model_name + "@box_w"]
outputs_box_h = [model_name + "@box_h"]
outputs_conf = [model_name + "@conf"]
outputs_prob = [model_name + "@prob"]
node_split_input = onnx.helper.make_node(
"Split",
inputs=outputs_x_transpose,
outputs=outputs_box_x + outputs_box_y + outputs_box_w\
+ outputs_box_h + outputs_conf + outputs_prob,
axis=-1,
split=[1, 1, 1, 1, 1, class_num])
node_list.append(node_split_input)
outputs_box_x_sigmoid = [model_name + "@box_x_sigmoid"]
outputs_box_y_sigmoid = [model_name + "@box_y_sigmoid"]
node_box_x_sigmoid = onnx.helper.make_node(
"Sigmoid", inputs=outputs_box_x, outputs=outputs_box_x_sigmoid)
node_list.append(node_box_x_sigmoid)
node_box_y_sigmoid = onnx.helper.make_node(
"Sigmoid", inputs=outputs_box_y, outputs=outputs_box_y_sigmoid)
node_list.append(node_box_y_sigmoid)
outputs_box_x_squeeze = [model_name + "@box_x_squeeze"]
outputs_box_y_squeeze = [model_name + "@box_y_squeeze"]
node_box_x_squeeze = onnx.helper.make_node(
'Squeeze',
inputs=outputs_box_x_sigmoid,
outputs=outputs_box_x_squeeze,
axes=[4])
node_list.append(node_box_x_squeeze)
node_box_y_squeeze = onnx.helper.make_node(
'Squeeze',
inputs=outputs_box_y_sigmoid,
outputs=outputs_box_y_squeeze,
axes=[4])
node_list.append(node_box_y_squeeze)
outputs_box_x_add_grid = [model_name + "@box_x_add_grid"]
outputs_box_y_add_grid = [model_name + "@box_y_add_grid"]
node_box_x_add_grid = onnx.helper.make_node(
"Add",
inputs=outputs_grid_x + outputs_box_x_squeeze,
outputs=outputs_box_x_add_grid)
node_list.append(node_box_x_add_grid)
node_box_y_add_grid = onnx.helper.make_node(
"Add",
inputs=outputs_grid_y + outputs_box_y_squeeze,
outputs=outputs_box_y_add_grid)
node_list.append(node_box_y_add_grid)
name_input_h = [model_name + "@input_h"]
name_input_w = [model_name + "@input_w"]
node_input_h = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_input_h,
value=onnx.helper.make_tensor(
name=name_input_w[0] + "@const",
data_type=onnx.TensorProto.FLOAT,
dims=(),
vals=[input_height]))
node_list.append(node_input_h)
node_input_w = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_input_w,
value=onnx.helper.make_tensor(
name=name_input_w[0] + "@const",
data_type=onnx.TensorProto.FLOAT,
dims=(),
vals=[input_width]))
node_list.append(node_input_w)
outputs_box_x_encode = [model_name + "@box_x_encode"]
outputs_box_y_encode = [model_name + "@box_y_encode"]
node_box_x_encode = onnx.helper.make_node(
'Div',
inputs=outputs_box_x_add_grid + name_input_w,
outputs=outputs_box_x_encode)
node_list.append(node_box_x_encode)
node_box_y_encode = onnx.helper.make_node(
'Div',
inputs=outputs_box_y_add_grid + name_input_h,
outputs=outputs_box_y_encode)
node_list.append(node_box_y_encode)
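    # Sketch of the decode rule built by the nodes above (standard YOLO
    # box decoding): box_x = (sigmoid(tx) + grid_x) / input_width and
    # box_y = (sigmoid(ty) + grid_y) / input_height, giving normalized
    # center coordinates in [0, 1].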
name_anchor_tensor = [model_name + "@anchor_tensor"]
node_anchor_tensor = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=name_anchor_tensor,
value=onnx.helper.make_tensor(
name=name_anchor_tensor[0] + "@const",
data_type=onnx.TensorProto.FLOAT,
dims=[len(anchors)],
vals=anchors))
node_list.append(node_anchor_tensor)
anchor_shape = [int(num_anchors), 2]
name_anchor_shape = [model_name + "@anchor_shape"]
node_anchor_shape = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=name_anchor_shape,
value=onnx.helper.make_tensor(
name=name_anchor_shape[0] + "@const",
data_type=onnx.TensorProto.INT64,
dims=[2],
vals=anchor_shape))
node_list.append(node_anchor_shape)
outputs_anchor_tensor_reshape = [model_name + "@anchor_tensor_reshape"]
node_anchor_tensor_reshape = onnx.helper.make_node(
"Reshape",
inputs=name_anchor_tensor + name_anchor_shape,
outputs=outputs_anchor_tensor_reshape)
node_list.append(node_anchor_tensor_reshape)
name_input_size = [model_name + "@input_size"]
node_input_size = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=name_input_size,
value=onnx.helper.make_tensor(
name=name_input_size[0] + "@const",
data_type=onnx.TensorProto.FLOAT,
dims=(),
vals=[input_size]))
node_list.append(node_input_size)
outputs_anchors_div_input_size = [model_name + "@anchors_div_input_size"]
node_anchors_div_input_size = onnx.helper.make_node(
"Div",
inputs=outputs_anchor_tensor_reshape + name_input_size,
outputs=outputs_anchors_div_input_size)
node_list.append(node_anchors_div_input_size)
outputs_anchor_w = [model_name + "@anchor_w"]
outputs_anchor_h = [model_name + "@anchor_h"]
node_anchor_split = onnx.helper.make_node(
'Split',
inputs=outputs_anchors_div_input_size,
outputs=outputs_anchor_w + outputs_anchor_h,
axis=1,
split=[1, 1])
node_list.append(node_anchor_split)
new_anchor_shape = [1, int(num_anchors), 1, 1]
name_new_anchor_shape = [model_name + "@new_anchor_shape"]
node_new_anchor_shape = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_new_anchor_shape,
value=onnx.helper.make_tensor(
name=name_new_anchor_shape[0] + "@const",
data_type=onnx.TensorProto.INT64,
dims=[len(new_anchor_shape)],
vals=new_anchor_shape))
node_list.append(node_new_anchor_shape)
outputs_anchor_w_reshape = [model_name + "@anchor_w_reshape"]
outputs_anchor_h_reshape = [model_name + "@anchor_h_reshape"]
node_anchor_w_reshape = onnx.helper.make_node(
'Reshape',
inputs=outputs_anchor_w + name_new_anchor_shape,
outputs=outputs_anchor_w_reshape)
node_list.append(node_anchor_w_reshape)
node_anchor_h_reshape = onnx.helper.make_node(
'Reshape',
inputs=outputs_anchor_h + name_new_anchor_shape,
outputs=outputs_anchor_h_reshape)
node_list.append(node_anchor_h_reshape)
outputs_box_w_squeeze = [model_name + "@box_w_squeeze"]
node_box_w_squeeze = onnx.helper.make_node(
'Squeeze',
inputs=outputs_box_w,
outputs=outputs_box_w_squeeze,
axes=[4])
node_list.append(node_box_w_squeeze)
outputs_box_h_squeeze = [model_name + "@box_h_squeeze"]
node_box_h_squeeze = onnx.helper.make_node(
'Squeeze',
inputs=outputs_box_h,
outputs=outputs_box_h_squeeze,
axes=[4])
node_list.append(node_box_h_squeeze)
outputs_box_w_exp = [model_name + "@box_w_exp"]
node_box_w_exp = onnx.helper.make_node(
"Exp", inputs=outputs_box_w_squeeze, outputs=outputs_box_w_exp)
node_list.append(node_box_w_exp)
outputs_box_h_exp = [model_name + "@box_h_exp"]
node_box_h_exp = onnx.helper.make_node(
"Exp", inputs=outputs_box_h_squeeze, outputs=outputs_box_h_exp)
node_list.append(node_box_h_exp)
outputs_box_w_encode = [model_name + "box_w_encode"]
outputs_box_h_encode = [model_name + "box_h_encode"]
node_box_w_encode = onnx.helper.make_node(
'Mul',
inputs=outputs_box_w_exp + outputs_anchor_w_reshape,
outputs=outputs_box_w_encode)
node_list.append(node_box_w_encode)
node_box_h_encode = onnx.helper.make_node(
'Mul',
inputs=outputs_box_h_exp + outputs_anchor_h_reshape,
outputs=outputs_box_h_encode)
node_list.append(node_box_h_encode)
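    # Sketch of the decode rule built by the nodes above:
    # box_w = exp(tw) * anchor_w / input_size and
    # box_h = exp(th) * anchor_h / input_size, where the anchors were
    # pre-divided by input_size in the earlier Div node.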
outputs_conf_sigmoid = [model_name + "@conf_sigmoid"]
node_conf_sigmoid = onnx.helper.make_node(
'Sigmoid', inputs=outputs_conf, outputs=outputs_conf_sigmoid)
node_list.append(node_conf_sigmoid)
name_conf_thresh = [model_name + "@conf_thresh"]
node_conf_thresh = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_conf_thresh,
value=onnx.helper.make_tensor(
name=name_conf_thresh[0] + "@const",
data_type=onnx.TensorProto.FLOAT,
dims=[num_anchors * input_height * input_width],
vals=conf_thresh_mat))
node_list.append(node_conf_thresh)
conf_shape = [1, int(num_anchors), input_height, input_width, 1]
name_conf_shape = [model_name + "@conf_shape"]
node_conf_shape = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_conf_shape,
value=onnx.helper.make_tensor(
name=name_conf_shape[0] + "@const",
data_type=onnx.TensorProto.INT64,
dims=[len(conf_shape)],
vals=conf_shape))
node_list.append(node_conf_shape)
outputs_conf_thresh_reshape = [model_name + "@conf_thresh_reshape"]
node_conf_thresh_reshape = onnx.helper.make_node(
'Reshape',
inputs=name_conf_thresh + name_conf_shape,
outputs=outputs_conf_thresh_reshape)
node_list.append(node_conf_thresh_reshape)
outputs_conf_sub = [model_name + "@conf_sub"]
node_conf_sub = onnx.helper.make_node(
'Sub',
inputs=outputs_conf_sigmoid + outputs_conf_thresh_reshape,
outputs=outputs_conf_sub)
node_list.append(node_conf_sub)
outputs_conf_clip = [model_name + "@conf_clip"]
node_conf_clip = onnx.helper.make_node(
'Clip', inputs=outputs_conf_sub, outputs=outputs_conf_clip)
node_list.append(node_conf_clip)
zeros = [0]
name_zeros = [model_name + "@zeros"]
node_zeros = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_zeros,
value=onnx.helper.make_tensor(
name=name_zeros[0] + "@const",
data_type=onnx.TensorProto.FLOAT,
dims=(),
vals=zeros))
node_list.append(node_zeros)
outputs_conf_clip_bool = [model_name + "@conf_clip_bool"]
node_conf_clip_bool = onnx.helper.make_node(
'Greater',
inputs=outputs_conf_clip + name_zeros,
outputs=outputs_conf_clip_bool)
node_list.append(node_conf_clip_bool)
outputs_conf_clip_cast = [model_name + "@conf_clip_cast"]
node_conf_clip_cast = onnx.helper.make_node(
'Cast',
inputs=outputs_conf_clip_bool,
outputs=outputs_conf_clip_cast,
to=1)
node_list.append(node_conf_clip_cast)
outputs_conf_set_zero = [model_name + "@conf_set_zero"]
node_conf_set_zero = onnx.helper.make_node(
'Mul',
inputs=outputs_conf_sigmoid + outputs_conf_clip_cast,
outputs=outputs_conf_set_zero)
node_list.append(node_conf_set_zero)
outputs_prob_sigmoid = [model_name + "@prob_sigmoid"]
node_prob_sigmoid = onnx.helper.make_node(
'Sigmoid', inputs=outputs_prob, outputs=outputs_prob_sigmoid)
node_list.append(node_prob_sigmoid)
new_shape = [1, int(num_anchors), input_height, input_width, 1]
name_new_shape = [model_name + "@new_shape"]
node_new_shape = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_new_shape,
value=onnx.helper.make_tensor(
name=name_new_shape[0] + "@const",
data_type=onnx.TensorProto.INT64,
dims=[len(new_shape)],
vals=new_shape))
node_list.append(node_new_shape)
outputs_conf_new_shape = [model_name + "@_conf_new_shape"]
node_conf_new_shape = onnx.helper.make_node(
'Reshape',
inputs=outputs_conf_set_zero + name_new_shape,
outputs=outputs_conf_new_shape)
node_list.append(node_conf_new_shape)
outputs_score = [model_name + "@score"]
node_score = onnx.helper.make_node(
'Mul',
inputs=outputs_prob_sigmoid + outputs_conf_new_shape,
outputs=outputs_score)
node_list.append(node_score)
outputs_conf_bool = [model_name + "@conf_bool"]
node_conf_bool = onnx.helper.make_node(
'Greater',
inputs=outputs_conf_new_shape + name_zeros,
outputs=outputs_conf_bool)
node_list.append(node_conf_bool)
outputs_box_x_new_shape = [model_name + "@box_x_new_shape"]
node_box_x_new_shape = onnx.helper.make_node(
'Reshape',
inputs=outputs_box_x_encode + name_new_shape,
outputs=outputs_box_x_new_shape)
node_list.append(node_box_x_new_shape)
outputs_box_y_new_shape = [model_name + "@box_y_new_shape"]
node_box_y_new_shape = onnx.helper.make_node(
'Reshape',
inputs=outputs_box_y_encode + name_new_shape,
outputs=outputs_box_y_new_shape)
node_list.append(node_box_y_new_shape)
outputs_box_w_new_shape = [model_name + "@box_w_new_shape"]
node_box_w_new_shape = onnx.helper.make_node(
'Reshape',
inputs=outputs_box_w_encode + name_new_shape,
outputs=outputs_box_w_new_shape)
node_list.append(node_box_w_new_shape)
outputs_box_h_new_shape = [model_name + "@box_h_new_shape"]
node_box_h_new_shape = onnx.helper.make_node(
'Reshape',
inputs=outputs_box_h_encode + name_new_shape,
outputs=outputs_box_h_new_shape)
node_list.append(node_box_h_new_shape)
outputs_pred_box = [model_name + "@pred_box"]
node_pred_box = onnx.helper.make_node(
'Concat',
inputs=outputs_box_x_new_shape + outputs_box_y_new_shape + \
outputs_box_w_new_shape + outputs_box_h_new_shape,
outputs=outputs_pred_box,
axis=4)
node_list.append(node_pred_box)
outputs_conf_cast = [model_name + "conf_cast"]
node_conf_cast = onnx.helper.make_node(
'Cast', inputs=outputs_conf_bool, outputs=outputs_conf_cast, to=1)
node_list.append(node_conf_cast)
outputs_pred_box_mul_conf = [model_name + "@pred_box_mul_conf"]
node_pred_box_mul_conf = onnx.helper.make_node(
'Mul',
inputs=outputs_pred_box + outputs_conf_cast,
outputs=outputs_pred_box_mul_conf)
node_list.append(node_pred_box_mul_conf)
box_shape = [1, int(num_anchors) * input_height * input_width, 4]
name_box_shape = [model_name + "@box_shape"]
node_box_shape = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_box_shape,
value=onnx.helper.make_tensor(
name=name_box_shape[0] + "@const",
data_type=onnx.TensorProto.INT64,
dims=[len(box_shape)],
vals=box_shape))
node_list.append(node_box_shape)
outputs_pred_box_new_shape = [model_name + "@pred_box_new_shape"]
node_pred_box_new_shape = onnx.helper.make_node(
'Reshape',
inputs=outputs_pred_box_mul_conf + name_box_shape,
outputs=outputs_pred_box_new_shape)
node_list.append(node_pred_box_new_shape)
outputs_pred_box_x = [model_name + "@_pred_box_x"]
outputs_pred_box_y = [model_name + "@_pred_box_y"]
outputs_pred_box_w = [model_name + "@_pred_box_w"]
outputs_pred_box_h = [model_name + "@_pred_box_h"]
node_pred_box_split = onnx.helper.make_node(
'Split',
inputs=outputs_pred_box_new_shape,
outputs=outputs_pred_box_x + outputs_pred_box_y + outputs_pred_box_w +
outputs_pred_box_h,
axis=2)
node_list.append(node_pred_box_split)
name_number_two = [model_name + "@number_two"]
node_number_two = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=name_number_two,
value=onnx.helper.make_tensor(
name=name_number_two[0] + "@const",
data_type=onnx.TensorProto.FLOAT,
dims=(),
vals=[2]))
node_list.append(node_number_two)
outputs_half_w = [model_name + "@half_w"]
node_half_w = onnx.helper.make_node(
"Div",
inputs=outputs_pred_box_w + name_number_two,
outputs=outputs_half_w)
node_list.append(node_half_w)
outputs_half_h = [model_name + "@half_h"]
node_half_h = onnx.helper.make_node(
"Div",
inputs=outputs_pred_box_h + name_number_two,
outputs=outputs_half_h)
node_list.append(node_half_h)
outputs_pred_box_x1 = [model_name + "@pred_box_x1"]
node_pred_box_x1 = onnx.helper.make_node(
'Sub',
inputs=outputs_pred_box_x + outputs_half_w,
outputs=outputs_pred_box_x1)
node_list.append(node_pred_box_x1)
outputs_pred_box_y1 = [model_name + "@pred_box_y1"]
node_pred_box_y1 = onnx.helper.make_node(
'Sub',
inputs=outputs_pred_box_y + outputs_half_h,
outputs=outputs_pred_box_y1)
node_list.append(node_pred_box_y1)
outputs_pred_box_x2 = [model_name + "@pred_box_x2"]
node_pred_box_x2 = onnx.helper.make_node(
'Add',
inputs=outputs_pred_box_x + outputs_half_w,
outputs=outputs_pred_box_x2)
node_list.append(node_pred_box_x2)
outputs_pred_box_y2 = [model_name + "@pred_box_y2"]
node_pred_box_y2 = onnx.helper.make_node(
'Add',
inputs=outputs_pred_box_y + outputs_half_h,
outputs=outputs_pred_box_y2)
node_list.append(node_pred_box_y2)
outputs_sqeeze_image_size = [model_name + "@sqeeze_image_size"]
node_sqeeze_image_size = onnx.helper.make_node(
"Squeeze",
axes=[0],
inputs=image_size,
outputs=outputs_sqeeze_image_size)
node_list.append(node_sqeeze_image_size)
output_img_height = [model_name + "@img_height"]
output_img_width = [model_name + "@img_width"]
node_image_size_split = onnx.helper.make_node(
"Split",
inputs=outputs_sqeeze_image_size,
outputs=output_img_height + output_img_width,
axis=-1,
split=[1, 1])
node_list.append(node_image_size_split)
output_img_width_cast = [model_name + "@img_width_cast"]
node_img_width_cast = onnx.helper.make_node(
'Cast', inputs=output_img_width, outputs=output_img_width_cast, to=1)
node_list.append(node_img_width_cast)
output_img_height_cast = [model_name + "@img_height_cast"]
node_img_height_cast = onnx.helper.make_node(
'Cast', inputs=output_img_height, outputs=output_img_height_cast, to=1)
node_list.append(node_img_height_cast)
outputs_pred_box_x1_decode = [model_name + "@pred_box_x1_decode"]
outputs_pred_box_y1_decode = [model_name + "@pred_box_y1_decode"]
outputs_pred_box_x2_decode = [model_name + "@pred_box_x2_decode"]
outputs_pred_box_y2_decode = [model_name + "@pred_box_y2_decode"]
node_pred_box_x1_decode = onnx.helper.make_node(
'Mul',
inputs=outputs_pred_box_x1 + output_img_width_cast,
outputs=outputs_pred_box_x1_decode)
node_list.append(node_pred_box_x1_decode)
node_pred_box_y1_decode = onnx.helper.make_node(
'Mul',
inputs=outputs_pred_box_y1 + output_img_height_cast,
outputs=outputs_pred_box_y1_decode)
node_list.append(node_pred_box_y1_decode)
node_pred_box_x2_decode = onnx.helper.make_node(
'Mul',
inputs=outputs_pred_box_x2 + output_img_width_cast,
outputs=outputs_pred_box_x2_decode)
node_list.append(node_pred_box_x2_decode)
node_pred_box_y2_decode = onnx.helper.make_node(
'Mul',
inputs=outputs_pred_box_y2 + output_img_height_cast,
outputs=outputs_pred_box_y2_decode)
node_list.append(node_pred_box_y2_decode)
name_number_one = [model_name + "@one"]
node_number_one = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=name_number_one,
value=onnx.helper.make_tensor(
name=name_number_one[0] + "@const",
data_type=onnx.TensorProto.FLOAT,
dims=(),
vals=[1]))
node_list.append(node_number_one)
output_new_img_height = [model_name + "@new_img_height"]
node_new_img_height = onnx.helper.make_node(
'Sub',
inputs=output_img_height_cast + name_number_one,
outputs=output_new_img_height)
node_list.append(node_new_img_height)
output_new_img_width = [model_name + "@new_img_width"]
node_new_img_width = onnx.helper.make_node(
'Sub',
inputs=output_img_width_cast + name_number_one,
outputs=output_new_img_width)
node_list.append(node_new_img_width)
outputs_pred_box_x2_sub_w = [model_name + "@pred_box_x2_sub_w"]
node_pred_box_x2_sub_w = onnx.helper.make_node(
'Sub',
inputs=outputs_pred_box_x2_decode + output_new_img_width,
outputs=outputs_pred_box_x2_sub_w)
node_list.append(node_pred_box_x2_sub_w)
outputs_pred_box_y2_sub_h = [model_name + "@pred_box_y2_sub_h"]
node_pred_box_y2_sub_h = onnx.helper.make_node(
'Sub',
inputs=outputs_pred_box_y2_decode + output_new_img_height,
outputs=outputs_pred_box_y2_sub_h)
node_list.append(node_pred_box_y2_sub_h)
outputs_pred_box_x1_clip = [model_name + "@pred_box_x1_clip"]
outputs_pred_box_y1_clip = [model_name + "@pred_box_y1_clip"]
outputs_pred_box_x2_clip = [model_name + "@pred_box_x2_clip"]
outputs_pred_box_y2_clip = [model_name + "@pred_box_y2_clip"]
node_pred_box_x1_clip = onnx.helper.make_node(
'Clip',
inputs=outputs_pred_box_x1_decode,
outputs=outputs_pred_box_x1_clip,
min=0.0,
max=float(np.inf))
node_list.append(node_pred_box_x1_clip)
node_pred_box_y1_clip = onnx.helper.make_node(
'Clip',
inputs=outputs_pred_box_y1_decode,
outputs=outputs_pred_box_y1_clip,
min=0.0,
max=float(np.inf))
node_list.append(node_pred_box_y1_clip)
node_pred_box_x2_clip = onnx.helper.make_node(
'Clip',
inputs=outputs_pred_box_x2_sub_w,
outputs=outputs_pred_box_x2_clip,
min=0.0,
max=float(np.inf))
node_list.append(node_pred_box_x2_clip)
node_pred_box_y2_clip = onnx.helper.make_node(
'Clip',
inputs=outputs_pred_box_y2_sub_h,
outputs=outputs_pred_box_y2_clip,
min=0.0,
max=float(np.inf))
node_list.append(node_pred_box_y2_clip)
outputs_pred_box_x2_res = [model_name + "@box_x2_res"]
node_pred_box_x2_res = onnx.helper.make_node(
'Sub',
inputs=outputs_pred_box_x2_decode + outputs_pred_box_x2_clip,
outputs=outputs_pred_box_x2_res)
node_list.append(node_pred_box_x2_res)
outputs_pred_box_y2_res = [model_name + "@box_y2_res"]
node_pred_box_y2_res = onnx.helper.make_node(
'Sub',
inputs=outputs_pred_box_y2_decode + outputs_pred_box_y2_clip,
outputs=outputs_pred_box_y2_res)
node_list.append(node_pred_box_y2_res)
node_pred_box_result = onnx.helper.make_node(
'Concat',
inputs=outputs_pred_box_x1_clip + outputs_pred_box_y1_clip +
outputs_pred_box_x2_res + outputs_pred_box_y2_res,
outputs=outputs['Boxes'],
axis=-1)
node_list.append(node_pred_box_result)
score_shape = [1, input_height * input_width * int(num_anchors), class_num]
name_score_shape = [model_name + "@score_shape"]
node_score_shape = onnx.helper.make_node(
"Constant",
inputs=[],
outputs=name_score_shape,
value=onnx.helper.make_tensor(
name=name_score_shape[0] + "@const",
data_type=onnx.TensorProto.INT64,
dims=[len(score_shape)],
vals=score_shape))
node_list.append(node_score_shape)
node_score_new_shape = onnx.helper.make_node(
'Reshape',
inputs=outputs_score + name_score_shape,
outputs=outputs['Scores'])
node_list.append(node_score_new_shape)
return node_list
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import sys
import x2paddle
import os
import numpy as np
import paddle.fluid.core as core
import paddle.fluid as fluid
import onnx
from onnx import helper, onnx_pb
class PaddleOpMapper(object):
def __init__(self):
self.paddle_onnx_dtype_map = {
core.VarDesc.VarType.FP32: onnx_pb.TensorProto.FLOAT,
core.VarDesc.VarType.FP64: onnx_pb.TensorProto.DOUBLE,
core.VarDesc.VarType.INT32: onnx_pb.TensorProto.INT32,
            core.VarDesc.VarType.INT16: onnx_pb.TensorProto.INT16,
core.VarDesc.VarType.INT64: onnx_pb.TensorProto.INT64,
core.VarDesc.VarType.BOOL: onnx_pb.TensorProto.BOOL
}
self.name_counter = dict()
def convert(self, program, save_dir):
weight_nodes = self.convert_weights(program)
op_nodes = list()
input_nodes = list()
output_nodes = list()
unsupported_ops = set()
print("Translating PaddlePaddle to ONNX...\n")
for block in program.blocks:
for i, op in enumerate(block.ops):
sys.stdout.write(
"\rTotal:{}, Current:{} : {} ".format(
len(block.ops), i + 1, op.type))
sys.stdout.flush()
if not hasattr(self, op.type):
unsupported_ops.add(op.type)
continue
if len(unsupported_ops) > 0:
continue
node = getattr(self, op.type)(op, block)
if op.type == 'feed':
input_nodes.append(node)
elif op.type == 'fetch':
output_nodes.append(node)
else:
if isinstance(node, list):
op_nodes = op_nodes + node
else:
op_nodes.append(node)
if len(unsupported_ops) > 0:
print("\nThere's {} ops are not supported yet".format(
len(unsupported_ops)))
for op in unsupported_ops:
print("=========== {} ===========".format(op))
return
graph = helper.make_graph(
nodes=weight_nodes + op_nodes,
name='onnx_model_from_paddle',
initializer=[],
inputs=input_nodes,
outputs=output_nodes)
model = helper.make_model(graph, producer_name='X2Paddle')
onnx.checker.check_model(model)
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
with open(os.path.join(save_dir, 'x2paddle_model.onnx'), 'wb') as f:
f.write(model.SerializeToString())
print("\nTranslated model saved in {}".format(
os.path.join(save_dir, 'x2paddle_model.onnx')))
def get_name(self, op_name, var_name):
name = 'p2o.{}.{}'.format(op_name, var_name)
if name not in self.name_counter:
self.name_counter[name] = 0
else:
self.name_counter[name] += 1
return name + '.{}'.format(self.name_counter[name])
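    # Usage sketch (hypothetical arguments): repeated calls such as
    #   self.get_name('conv2d', 'shape')  # -> 'p2o.conv2d.shape.0'
    #   self.get_name('conv2d', 'shape')  # -> 'p2o.conv2d.shape.1'
    # yield unique, readable tensor names per op/var pair.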
def convert_weights(self, program):
var_names = program.global_block().vars
nodes = list()
for name in var_names:
var = program.global_block().var(name)
if name.endswith('feed') or name.endswith('fetch'):
continue
if not var.persistable:
continue
weight = np.array(fluid.global_scope().find_var(name).get_tensor())
tensor = helper.make_tensor(
name=name,
dims=var.shape,
data_type=self.paddle_onnx_dtype_map[var.dtype],
vals=weight.flatten().tolist())
node = helper.make_node(
'Constant', inputs=[], outputs=[name], value=tensor)
nodes.append(node)
return nodes
def make_constant_node(self, name, dtype, value=None):
if isinstance(value, list):
dims = (len(value), )
elif value is None:
dims = ()
value = []
else:
dims = ()
value = [value]
tensor = helper.make_tensor(
name=name, data_type=dtype, dims=dims, vals=value)
node = helper.make_node(
'Constant', inputs=[], outputs=[name], value=tensor)
return node
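    # Usage sketch (hypothetical arguments):
    # make_constant_node('p2o.example.0', onnx_pb.TensorProto.INT64,
    # [1, 224, 224]) emits a Constant node holding a 1-D int64 tensor with
    # dims (3,); a scalar value is wrapped in a rank-0 tensor with dims ().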
def conv2d(self, op, block):
kernel_shape = block.var(op.input('Filter')[0]).shape
node = helper.make_node(
'Conv',
inputs=op.input('Input') + op.input('Filter'),
outputs=op.output('Output'),
dilations=op.attr('dilations'),
kernel_shape=kernel_shape[-2:],
strides=op.attr('strides'),
group=op.attr('groups'),
pads=op.attr('paddings') + op.attr('paddings'))
return node
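    # Note (sketch): ONNX expects pads as
    # [x1_begin, x2_begin, ..., x1_end, x2_end, ...], so duplicating the
    # symmetric Paddle paddings [p_h, p_w] yields [p_h, p_w, p_h, p_w].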
def conv2d_transpose(self, op, block):
kernel_shape = block.var(op.input('Filter')[0]).shape
node = helper.make_node(
'ConvTranspose',
inputs=op.input('Input') + op.input('Filter'),
outputs=op.output('Output'),
dilations=op.attr('dilations'),
kernel_shape=kernel_shape[-2:],
strides=op.attr('strides'),
group=1,
pads=op.attr('paddings') + op.attr('paddings'))
return node
def relu(self, op, block):
node = helper.make_node(
'Relu', inputs=op.input('X'), outputs=op.output('Out'))
return node
def sigmoid(self, op, block):
node = helper.make_node(
'Sigmoid', inputs=op.input('X'), outputs=op.output('Out'))
return node
def exp(self, op, block):
node = helper.make_node(
'Exp', inputs=op.input('X'), outputs=op.output('Out'))
return node
def leaky_relu(self, op, block):
node = helper.make_node(
'LeakyRelu',
inputs=op.input('X'),
outputs=op.output('Out'),
alpha=op.attr('alpha'))
return node
def elementwise_add(self, op, block):
axis = op.attr('axis')
x_shape = block.var(op.input('X')[0]).shape
y_shape = block.var(op.input('Y')[0]).shape
if len(y_shape) == 1 and axis == 1:
shape_name = self.get_name(op.type, 'shape')
shape_value = [1] * len(x_shape)
shape_value[axis] = y_shape[0]
shape_node = self.make_constant_node(
shape_name, onnx_pb.TensorProto.INT64, shape_value)
temp_value = self.get_name(op.type, 'temp')
y_node = helper.make_node(
'Reshape',
inputs=[op.input('Y')[0], shape_name],
outputs=[temp_value])
node = helper.make_node(
'Add',
inputs=[op.input('X')[0], temp_value],
outputs=op.output('Out'))
return [shape_node, y_node, node]
elif len(x_shape) == len(y_shape):
node = helper.make_node(
'Add',
inputs=[op.input('X')[0], op.input('Y')[0]],
outputs=op.output('Out'))
return node
else:
raise Excpetion("Unexpected situation happend in elementwise_add")
def elementwise_sub(self, op, block):
axis = op.attr('axis')
x_shape = block.var(op.input('X')[0]).shape
y_shape = block.var(op.input('Y')[0]).shape
if len(y_shape) == 1 and axis == 1:
shape_name = self.get_name(op.type, 'shape')
shape_value = [1] * len(x_shape)
shape_value[axis] = y_shape[0]
shape_node = self.make_constant_node(
shape_name, onnx_pb.TensorProto.INT64, shape_value)
temp_value = self.get_name(op.type, 'temp')
y_node = helper.make_node(
'Reshape',
inputs=[op.input('Y')[0], shape_name],
outputs=[temp_value])
node = helper.make_node(
'Sub',
inputs=[op.input('X')[0], temp_value],
outputs=op.output('Out'))
return [shape_node, y_node, node]
elif len(x_shape) == len(y_shape):
node = helper.make_node(
'Sub',
inputs=[op.input('X')[0], op.input('Y')[0]],
outputs=op.output('Out'))
return node
else:
raise Excpetion("Unexpected situation happend in elementwise_sub")
def pool2d(self, op, block):
pool_type = {
'max': ('MaxPool', 'GlobalMaxPool'),
'avg': ('AveragePool', 'GlobalAveragePool')
}
if op.attr('global_pooling'):
node = helper.make_node(
pool_type[op.attr('pooling_type')][1],
inputs=op.input('X'),
outputs=op.output('Out'), )
else:
input_shape = block.var(op.input('X')[0]).shape
k_size = op.attr('ksize')
paddings = op.attr('paddings')
if input_shape[2] > 0 and input_shape[2] + paddings[0] < k_size[0]:
k_size[0] = input_shape[2] + paddings[0]
if input_shape[3] > 0 and input_shape[3] + paddings[1] < k_size[1]:
k_size[1] = input_shape[3] + paddings[1]
node = helper.make_node(
pool_type[op.attr('pooling_type')][0],
inputs=op.input('X'),
outputs=op.output('Out'),
kernel_shape=k_size,
strides=op.attr('strides'),
pads=op.attr('paddings') + op.attr('paddings'))
return node
def softmax(self, op, block):
axis = op.attr('axis')
shape = block.var(op.output('Out')[0]).shape
if axis < 0:
axis += len(shape)
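        # ONNX Softmax (opset <= 11) flattens to 2-D at `axis`, which matches
        # Paddle's per-axis softmax only for the last axis; otherwise swap the
        # target axis to the end, apply Softmax, and swap back.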
if axis == len(shape) - 1:
node = helper.make_node(
'Softmax',
inputs=op.input('X'),
outputs=op.output('Out'),
axis=op.attr('axis'))
return node
else:
perm = [i for i in range(len(shape))]
perm[-1] = axis
perm[axis] = len(shape) - 1
transpose_name0 = self.get_name(op.type, 'transpose')
transpose_node0 = helper.make_node(
'Transpose',
inputs=op.input('X'),
outputs=[transpose_name0],
perm=perm)
softmax_name = self.get_name(op.type, 'softmax')
softmax_node = helper.make_node(
'Softmax',
inputs=[transpose_name0],
outputs=[softmax_name],
axis=-1)
transpose_name1 = self.get_name(op.type, 'transpose')
transpose_node1 = helper.make_node(
'Transpose',
inputs=[softmax_name],
outputs=op.output('Out'),
perm=perm)
return [transpose_node0, softmax_node, transpose_node1]
def scale(self, op, block):
scale = op.attr('scale')
bias = op.attr('bias')
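        # A no-op scale (scale ~= 1, bias ~= 0) maps to Identity; otherwise
        # emit constant Mul/Add nodes ordered by `bias_after_scale`.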
if math.fabs(scale - 1.0) < 1e-06 and math.fabs(bias - 0.0) < 1e-06:
node = helper.make_node(
'Identity', inputs=op.input('X'), outputs=op.output('Out'))
return node
else:
scale_name = self.get_name(op.type, 'scale')
bias_name = self.get_name(op.type, 'bias')
scale_node = self.make_constant_node(
scale_name, onnx_pb.TensorProto.FLOAT, scale)
bias_node = self.make_constant_node(bias_name,
onnx_pb.TensorProto.FLOAT, bias)
temp_tensor_name = self.get_name(op.type, 'temporary')
if op.attr('bias_after_scale'):
node1 = helper.make_node(
'Mul',
inputs=[scale_name, op.input('X')[0]],
outputs=[temp_tensor_name])
node2 = helper.make_node(
'Add',
inputs=[bias_name, temp_tensor_name],
outputs=op.output('Out'))
else:
node1 = helper.make_node(
'Add',
inputs=[bias_name, op.input('X')[0]],
                    outputs=[temp_tensor_name])
node2 = helper.make_node(
'Mul',
inputs=[scale_name, temp_tensor_name],
                    outputs=op.output('Out'))
return [scale_node, bias_node, node1, node2]
def mul(self, op, block):
x_shape = block.var(op.input('X')[0]).shape
y_shape = block.var(op.input('Y')[0]).shape
out_shape = list(block.var(op.output('Out')[0]).shape)
x_num_col_dims = op.attr('x_num_col_dims')
y_num_col_dims = op.attr('y_num_col_dims')
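        # Paddle's mul flattens X at x_num_col_dims and Y at y_num_col_dims
        # into 2-D matrices, runs MatMul, then reshapes back to the full
        # output shape.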
flatten_x_name = 'flatten_{}'.format(op.input('X')[0])
flatten_y_name = 'flatten_{}'.format(op.input('Y')[0])
shape_name = 'temp_shape_{}'.format(op.output('Out')[0])
temp_out_name = 'temp_{}'.format(op.output('Out')[0])
flatten_x = helper.make_node(
'Flatten',
inputs=op.input('X'),
outputs=[flatten_x_name],
axis=x_num_col_dims)
flatten_y = helper.make_node(
'Flatten',
inputs=op.input('Y'),
outputs=[flatten_y_name],
axis=y_num_col_dims)
shape_node = self.make_constant_node(
shape_name, onnx_pb.TensorProto.INT64, out_shape)
node = helper.make_node(
'MatMul',
inputs=[flatten_x_name, flatten_y_name],
outputs=[temp_out_name])
reshape_out = helper.make_node(
'Reshape',
inputs=[temp_out_name, shape_name],
outputs=op.output('Out'))
return [flatten_x, flatten_y, shape_node, node, reshape_out]
def batch_norm(self, op, block):
kwargs = {
'epsilon': op.attr('epsilon'),
'momentum': op.attr('momentum')
}
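        # ONNX BatchNormalization expects inputs ordered [X, scale, B, mean,
        # var], which is exactly the concatenation built here.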
inputs = op.input('X') + op.input('Scale') + op.input(
'Bias') + op.input('Mean') + op.input('Variance')
node = helper.make_node(
'BatchNormalization',
inputs=inputs,
outputs=op.output('Y'),
**kwargs)
return node
def concat(self, op, block):
node = helper.make_node(
'Concat',
inputs=op.input('X'),
outputs=op.output('Out'),
axis=op.attr('axis'))
return node
def depthwise_conv2d(self, op, block):
return self.conv2d(op, block)
def relu6(self, op, block):
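        # ONNX has no Relu6; lower it to Clip(x, 0, threshold) with the bounds
        # supplied as constant inputs (opset-11 style Clip).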
min_name = self.get_name(op.type, 'min')
max_name = self.get_name(op.type, 'max')
min_node = self.make_constant_node(min_name, onnx_pb.TensorProto.FLOAT,
0)
max_node = self.make_constant_node(max_name, onnx_pb.TensorProto.FLOAT,
op.attr('threshold'))
node = helper.make_node(
'Clip',
inputs=[op.input('X')[0], min_name, max_name],
outputs=op.output('Out'), )
return [min_node, max_node, node]
def shape(self, op, block):
node = helper.make_node(
'Shape', inputs=op.input('Input'), outputs=op.output('Out'))
return node
def split(self, op, block):
sections = op.attr('sections')
if len(sections) > 0:
node = helper.make_node(
'Split',
inputs=op.input('X'),
outputs=op.output('Out'),
axis=op.attr('axis'),
split=sections)
else:
node = helper.make_node(
'Split',
inputs=op.input('X'),
outputs=op.output('Out'),
axis=op.attr('axis'))
return node
def slice(self, op, block):
axes = op.attr('axes')
starts = op.attr('starts')
ends = op.attr('ends')
axes_name = self.get_name(op.type, 'axes')
starts_name = self.get_name(op.type, 'starts')
ends_name = self.get_name(op.type, 'ends')
axes_node = self.make_constant_node(axes_name,
onnx_pb.TensorProto.INT64, axes)
starts_node = self.make_constant_node(starts_name,
onnx_pb.TensorProto.INT64, starts)
ends_node = self.make_constant_node(ends_name,
onnx_pb.TensorProto.INT64, ends)
node = helper.make_node(
"Slice",
inputs=[op.input('Input')[0], starts_name, ends_name, axes_name],
outputs=op.output('Out'), )
return [starts_node, ends_node, axes_node, node]
def fill_constant(self, op, block):
value = op.attr('value')
dtype = op.attr('dtype')
shape = op.attr('shape')
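        # dtype is Paddle's VarType enum code; 2 denotes INT32, so the filled
        # values are cast before being serialized into the Constant tensor.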
value = np.ones(shape) * value
if dtype == 2:
value = value.astype('int32')
node = helper.make_node(
'Constant',
inputs=[],
outputs=op.output('Out'),
value=helper.make_tensor(
name=op.output('Out')[0],
data_type=self.paddle_onnx_dtype_map[dtype],
dims=shape,
vals=value.tolist()))
return node
def transpose2(self, op, block):
node = helper.make_node(
'Transpose',
inputs=op.input('X'),
outputs=op.output('Out'),
perm=op.attr('axis'))
return node
def reshape2(self, op, block):
input_names = op.input_names
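        # The target shape may arrive as a list of scalar tensors
        # (ShapeTensor); cast each to INT64 and Concat them into the 1-D
        # shape input that ONNX Reshape requires.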
if len(op.input('ShapeTensor')) > 1:
cast_shape_nodes = list()
cast_shape_names = list()
for i in range(len(op.input('ShapeTensor'))):
dim = op.input('ShapeTensor')[i]
temp_name = self.get_name(op.type, 'shape.cast')
node = helper.make_node(
'Cast',
inputs=[dim],
outputs=[temp_name],
to=onnx_pb.TensorProto.INT64)
cast_shape_nodes.append(node)
cast_shape_names.append(temp_name)
temp_name = self.get_name(op.type, 'shape.concat')
shape_node = helper.make_node(
'Concat', inputs=cast_shape_names, outputs=[temp_name], axis=-1)
node = helper.make_node(
'Reshape',
inputs=[op.input('X')[0], temp_name],
outputs=op.output('Out'))
return cast_shape_nodes + [shape_node, node]
else:
temp_name = self.get_name(op.type, 'shape.cast')
cast_shape_node = helper.make_node(
'Cast',
inputs=op.input('ShapeTensor'),
outputs=[temp_name],
to=onnx_pb.TensorProto.INT64)
node = helper.make_node(
'Reshape',
inputs=[op.input('X')[0], temp_name],
outputs=op.output('Out'))
return [cast_shape_node, node]
def dropout(self, op, block):
dropout_mode = op.attr('dropout_implementation')
dropout_prob = op.attr('dropout_prob')
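        # 'upscale_in_train' needs no inference-time scaling (Identity);
        # 'downgrade_in_infer' multiplies activations by (1 - dropout_prob).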
if dropout_mode == 'upscale_in_train':
node = helper.make_node(
'Identity', inputs=op.input('X'), outputs=op.output('Out'))
return node
elif dropout_mode == 'downgrade_in_infer':
scale_name = self.get_name(op.type, 'scale')
scale_node = self.make_constant_node(
scale_name, onnx_pb.TensorProto.FLOAT, 1 - dropout_prob)
node = helper.make_node(
"Mul",
inputs=[op.input('X')[0], scale_name],
outputs=op.output('Out'))
return [scale_node, node]
else:
raise Exception("Unexpected situation happend")
def reduce_mean(self, op, block):
node = helper.make_node(
'ReduceMean',
inputs=op.input('X'),
outputs=op.output('Out'),
axes=op.attr('dim'),
keepdims=op.attr('keep_dim'))
return node
def bilinear_interp(self, op, block):
input_names = op.input_names
coordinate_transformation_mode = 'half_pixel'
if op.attr('align_corners'):
coordinate_transformation_mode = 'align_corners'
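        # Opset-11 Resize takes inputs [X, roi, scales, sizes]; when a target
        # size is supplied, 'scales' must be an empty tensor, hence the dummy
        # roi and empty constants built below.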
if ('OutSize' in input_names and len(op.input('OutSize')) > 0) or (
'SizeTensor' in input_names and
len(op.input('SizeTensor')) > 0):
node_list = list()
roi_name = self.get_name(op.type, 'roi')
roi_node = self.make_constant_node(
roi_name, onnx_pb.TensorProto.FLOAT, [1, 1, 1, 1, 1, 1, 1, 1])
empty_name = self.get_name(op.type, 'empty')
empty_tensor = helper.make_tensor(
empty_name,
onnx_pb.TensorProto.FLOAT, (0, ),
np.array([]).astype('float32'),
raw=False)
empty_node = helper.make_node(
'Constant', [], outputs=[empty_name], value=empty_tensor)
shape_name0 = self.get_name(op.type, 'shape')
shape_node0 = helper.make_node(
'Shape', inputs=op.input('X'), outputs=[shape_name0])
starts_name = self.get_name(op.type, 'slice.starts')
starts_node = self.make_constant_node(
starts_name, onnx_pb.TensorProto.INT64, [0])
ends_name = self.get_name(op.type, 'slice.ends')
ends_node = self.make_constant_node(ends_name,
onnx_pb.TensorProto.INT64, [2])
shape_name1 = self.get_name(op.type, 'shape')
shape_node1 = helper.make_node(
'Slice',
inputs=[shape_name0, starts_name, ends_name],
outputs=[shape_name1])
node_list.extend([
roi_node, empty_node, shape_node0, starts_node, ends_node,
shape_node1
])
# shape_name2 = self.get_name(op.type, "shape.cast")
# shape_node2 = helper.make_node(
# 'Cast',
# inputs=op.input('OutSize'),
# outputs=[shape_name2],
# to=onnx_pb.TensorProto.INT64)
if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
cast_shape_name = self.get_name(op.type, "shape.cast")
cast_shape_node = helper.make_node(
'Cast',
inputs=op.input('OutSize'),
outputs=[cast_shape_name],
to=onnx_pb.TensorProto.INT64)
node_list.append(cast_shape_node)
else:
concat_shape_name = self.get_name(op.type, "shape.concat")
concat_shape_node = helper.make_node(
"Concat",
inputs=op.input('SizeTensor'),
outputs=[concat_shape_name],
axis=0)
cast_shape_name = self.get_name(op.type, "shape.cast")
cast_shape_node = helper.make_node(
'Cast',
inputs=[concat_shape_name],
outputs=[cast_shape_name],
to=onnx_pb.TensorProto.INT64)
node_list.extend([concat_shape_node, cast_shape_node])
shape_name3 = self.get_name(op.type, "shape.concat")
shape_node3 = helper.make_node(
'Concat',
inputs=[shape_name1, cast_shape_name],
outputs=[shape_name3],
axis=0)
result_node = helper.make_node(
'Resize',
inputs=[op.input('X')[0], roi_name, empty_name, shape_name3],
outputs=op.output('Out'),
mode='linear',
coordinate_transformation_mode=coordinate_transformation_mode)
node_list.extend([shape_node3, result_node])
return node_list
elif 'Scale' in input_names and len(op.input('Scale')) > 0:
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0], op.input('Scale')[0]],
outputs=op.output('Out'),
mode='linear',
coordinate_transformation_mode=coordinate_transformation_mode)
else:
out_shape = [op.attr('out_h'), op.attr('out_w')]
scale = op.attr('scale')
if out_shape.count(-1) > 0:
scale_name = self.get_name(op.type, 'scale')
scale_node = self.make_constant_node(scale_name,
onnx_pb.TensorProto.FLOAT,
[1, 1, scale, scale])
roi_name = self.get_name(op.type, 'roi')
roi_node = self.make_constant_node(roi_name,
onnx_pb.TensorProto.FLOAT,
[1, 1, 1, 1, 1, 1, 1, 1])
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0], roi_name, scale_name],
outputs=op.output('Out'),
mode='nearest',
coordinate_transformation_mode=coordinate_transformation_mode
)
return [scale_node, roi_node, node]
else:
raise Exception("Unexpected situation happend")
return node
def nearest_interp(self, op, block):
input_names = op.input_names
coordinate_transformation_mode = 'half_pixel'
if op.attr('align_corners'):
coordinate_transformation_mode = 'align_corners'
if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0], '', op.input('OutSize')[0]],
outputs=op.output('Out'),
mode='nearest',
coordinate_transformation_mode=coordinate_transformation_mode)
elif 'Scale' in input_names and len(op.input('Scale')) > 0:
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0], op.input('Scale')[0]],
outputs=op.output('Out'),
mode='nearest',
coordinate_transformation_mode=coordinate_transformation_mode)
else:
out_shape = [op.attr('out_h'), op.attr('out_w')]
scale = op.attr('scale')
if out_shape.count(-1) > 0:
scale_name = self.get_name(op.type, 'scale')
scale_node = self.make_constant_node(scale_name,
onnx_pb.TensorProto.FLOAT,
[1, 1, scale, scale])
roi_name = self.get_name(op.type, 'roi')
roi_node = self.make_constant_node(roi_name,
onnx_pb.TensorProto.FLOAT,
[1, 1, 1, 1, 1, 1, 1, 1])
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0], roi_name, scale_name],
outputs=op.output('Out'),
mode='nearest',
coordinate_transformation_mode=coordinate_transformation_mode
)
return [scale_node, roi_node, node]
else:
raise Exception("Unexpected situation happend")
return node
def hard_sigmoid(self, op, block):
slope = op.attr('slope')
offset = op.attr('offset')
node = helper.make_node(
'HardSigmoid',
inputs=op.input('X'),
outputs=op.output('Out'),
alpha=slope,
beta=offset)
return node
def hard_swish(self, op, block):
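        # hard_swish(x) = x * clip(x + offset, 0, threshold) / scale, composed
        # here from primitive ONNX ops.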
min_name = self.get_name(op.type, 'min')
max_name = self.get_name(op.type, 'max')
scale_name = self.get_name(op.type, 'scale')
offset_name = self.get_name(op.type, 'offset')
min_node = self.make_constant_node(min_name, onnx_pb.TensorProto.FLOAT,
0)
max_node = self.make_constant_node(max_name, onnx_pb.TensorProto.FLOAT,
op.attr('threshold'))
scale_node = self.make_constant_node(scale_name,
onnx_pb.TensorProto.FLOAT,
op.attr('scale'))
offset_node = self.make_constant_node(offset_name,
onnx_pb.TensorProto.FLOAT,
op.attr('offset'))
name0 = self.get_name(op.type, 'add')
node0 = helper.make_node(
'Add', inputs=[op.input('X')[0], offset_name], outputs=[name0])
name1 = self.get_name(op.type, 'relu')
node1 = helper.make_node(
'Clip',
inputs=[name0, min_name, max_name],
outputs=[name1], )
name2 = self.get_name(op.type, 'mul')
node2 = helper.make_node(
'Mul', inputs=[op.input('X')[0], name1], outputs=[name2])
node3 = helper.make_node(
'Div', inputs=[name2, scale_name], outputs=op.output('Out'))
return [
min_node, max_node, scale_node, offset_node, node0, node1, node2,
node3
]
def elementwise_mul(self, op, block):
axis = op.attr('axis')
x_shape = block.var(op.input('X')[0]).shape
y_shape = block.var(op.input('Y')[0]).shape
if len(y_shape) == 1 and axis == 1:
shape_name = self.get_name(op.type, 'shape')
shape_value = [1] * len(x_shape)
shape_value[axis] = y_shape[0]
shape_node = self.make_constant_node(
shape_name, onnx_pb.TensorProto.INT64, shape_value)
temp_value = self.get_name(op.type, 'temp')
y_node = helper.make_node(
'Reshape',
inputs=[op.input('Y')[0], shape_name],
outputs=[temp_value])
node = helper.make_node(
'Mul',
inputs=[op.input('X')[0], temp_value],
outputs=op.output('Out'))
return [shape_node, y_node, node]
elif len(x_shape) == len(y_shape):
node = helper.make_node(
'Mul',
inputs=[op.input('X')[0], op.input('Y')[0]],
outputs=op.output('Out'))
return node
else:
raise Excpetion("Unexpected situation happend in elementwise_add")
return node
def feed(self, op, block):
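        # feed/fetch are Paddle's I/O pseudo-ops: emit ValueInfo protos for
        # graph inputs/outputs instead of computation nodes.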
name = op.output('Out')[0]
var = block.var(name)
tensor_info = helper.make_tensor_value_info(
name=name,
shape=var.shape,
elem_type=self.paddle_onnx_dtype_map[var.dtype])
return tensor_info
def fetch(self, op, block):
name = op.input('X')[0]
var = block.var(name)
tensor_info = helper.make_tensor_value_info(
name=name,
shape=var.shape,
elem_type=self.paddle_onnx_dtype_map[var.dtype])
return tensor_info
def unsqueeze2(self, op, block):
node = helper.make_node(
'Unsqueeze',
inputs=op.input('X'),
outputs=op.output('Out'),
axes=op.attr('axes'))
return node
def arg_max(self, op, block):
node = helper.make_node(
'ArgMax',
inputs=op.input('X'),
outputs=op.output('Out'),
axis=op.attr('axis'),
keepdims=0)
return node
def reciprocal(self, op, block):
inputs = op.input(op.input_names[0])
outputs = op.output(op.output_names[0])
node = helper.make_node('Reciprocal', inputs=inputs, outputs=outputs)
return node
def im2sequence(self, op, block):
from .paddle_custom_layer.im2sequence import im2sequence
return im2sequence(op, block)
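    # Minimal dispatch sketch (illustrative only; names are assumptions, not
    # the exact converter API): the surrounding exporter looks up each Paddle
    # op by type and collects the returned NodeProto(s), e.g.
    #
    #   handler = getattr(mapper, op.type, None)
    #   nodes = handler(op, block)  # a NodeProto or a list of NodeProtos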
......@@ -85,7 +85,8 @@ class TFOpMapper(OpMapper):
not_placeholder = list()
for name in self.graph.input_nodes:
if self.graph.get_node(name).layer_type != "Placeholder" and self.graph.get_node(name).layer_type != "OneShotIterator":
if self.graph.get_node(name).layer_type != "Placeholder" \
and self.graph.get_node(name).layer_type != "OneShotIterator":
not_placeholder.append(name)
for name in not_placeholder:
idx = self.graph.input_nodes.index(name)
......@@ -113,9 +114,8 @@ class TFOpMapper(OpMapper):
else:
unsupported_ops.add(op)
if len(unsupported_ops) > 0:
sys.stderr.write("=========={} Ops are not supported yet======\n".
format(len(unsupported_ops)))
for op in unsupported_ops:
sys.stderr.write("========== {} ==========\n".format(op))
sys.exit(-1)
......@@ -140,10 +140,8 @@ class TFOpMapper(OpMapper):
pd_param_name = list(param.values())[0]
tf_param = node.get_attr(tf_param_name)
attr[pd_param_name] = tf_param
node.fluid_code.add_layer(
op_info[0], inputs=input, output=node, param_attr=attr)
def elementwise_map(self, node):
assert node.layer_type in self.elementwise_ops
......@@ -178,21 +176,21 @@ class TFOpMapper(OpMapper):
0] == y_shape[-1] and y_shape.count(-1) < 1:
shape = [1, x_shape[0], 1, 1]
attr = {"shape": shape}
node.fluid_code.add_layer("reshape",
node.fluid_code.add_layer(
"reshape",
inputs=x_input,
output="reshape_x",
param_attr=attr)
if y_shape[0] != 1:
attr = {"expand_times": [y_shape[0], 1, 1, 1]}
node.fluid_code.add_layer("expand",
node.fluid_code.add_layer(
"expand",
inputs="reshape_x",
output="reshape_x",
param_attr=attr)
inputs = {"x": "reshape_x", "y": y_input}
node.fluid_code.add_layer(
op_type, inputs=inputs, output=node, param_attr=None)
return
else:
raise Exception("Unexpected situation happend")
......@@ -204,10 +202,8 @@ class TFOpMapper(OpMapper):
axis = -1
attr = {"axis": axis}
inputs = {"x": x_input, "y": y_input}
node.fluid_code.add_layer(
op_type, inputs=inputs, output=node, param_attr=attr)
return
is_sub_seq = True
......@@ -241,10 +237,8 @@ class TFOpMapper(OpMapper):
if len(x_expand_times) == 4 and x.tf_data_format == "NHWC":
x_expand_times = [x_expand_times[i] for i in [0, 3, 1, 2]]
attr = {"expand_times": x_expand_times}
node.fluid_code.add_layer("expand",
inputs=x_input,
output="x_tmp",
param_attr=attr)
node.fluid_code.add_layer(
"expand", inputs=x_input, output="x_tmp", param_attr=attr)
x_input = "x_tmp"
if y_need_expand:
if len(y_expand_times) == 3 and y.tf_data_format == "NHWC":
......@@ -252,16 +246,12 @@ class TFOpMapper(OpMapper):
if len(y_expand_times) == 4 and y.tf_data_format == "NHWC":
y_expand_times = [y_expand_times[i] for i in [0, 3, 1, 2]]
attr = {"expand_times": y_expand_times}
node.fluid_code.add_layer("expand",
inputs=y_input,
output="y_tmp",
param_attr=attr)
node.fluid_code.add_layer(
"expand", inputs=y_input, output="y_tmp", param_attr=attr)
y_input = "y_tmp"
inputs = {"x": x_input, "y": y_input}
node.fluid_code.add_layer(
op_type, inputs=inputs, output=node, param_attr=None)
def Placeholder(self, node):
shape = node.out_shapes[0]
......@@ -282,10 +272,8 @@ class TFOpMapper(OpMapper):
if shape[0] < 0:
self.batch_node = node
node.fluid_code.add_layer("data",
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"data", inputs=None, output=node, param_attr=attr)
def OneShotIterator(self, node):
return self.Placeholder(node)
......@@ -307,8 +295,8 @@ class TFOpMapper(OpMapper):
shape = [shape[i] for i in [0, 3, 1, 2]]
if len(shape) == 3:
shape = [shape[i] for i in [2, 0, 1]]
self.weights[node.layer_name] = numpy.transpose(node.value,
(2, 0, 1))
elif node.tf_data_format == "NCHW":
if len(shape) == 4:
self.graph.data_format_propagation(node)
......@@ -319,10 +307,8 @@ class TFOpMapper(OpMapper):
'name': string(node.layer_name),
'default_initializer': initializer
}
node.fluid_code.add_layer("create_parameter",
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"create_parameter", inputs=None, output=node, param_attr=attr)
def Transpose(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -361,16 +347,12 @@ class TFOpMapper(OpMapper):
node.tf_data_format = [tf_data_format[i] for i in perm]
node.pd_data_format = [pd_data_format[i] for i in perm]
attr = {'perm': new_perm}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
elif len(node.out_shapes[0]) != 4:
attr = {'perm': perm}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
else:
raise Exception("Unexpected situation happend in Transpose OP")
......@@ -400,10 +382,8 @@ class TFOpMapper(OpMapper):
"pool_padding": string(pad_mode),
"pool_stride": strides[2:4]
}
node.fluid_code.add_layer("pool2d",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"pool2d", inputs=input, output=node, param_attr=attr)
def Conv2D(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -443,10 +423,8 @@ class TFOpMapper(OpMapper):
"dilation": dilations[2:4],
"padding": string(pad_mode)
}
node.fluid_code.add_layer("conv2d",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"conv2d", inputs=input, output=node, param_attr=attr)
def BiasAdd(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -456,10 +434,8 @@ class TFOpMapper(OpMapper):
axis = 1
inputs = {"x": input, "y": bias}
attr = {"axis": axis}
node.fluid_code.add_layer("elementwise_add",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"elementwise_add", inputs=inputs, output=node, param_attr=attr)
def FusedBatchNorm(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -490,10 +466,8 @@ class TFOpMapper(OpMapper):
"is_test": True
}
node.fluid_code.add_layer("batch_norm",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"batch_norm", inputs=input, output=node, param_attr=attr)
def FusedBatchNormV3(self, node):
return self.FusedBatchNorm(node)
......@@ -538,10 +512,8 @@ class TFOpMapper(OpMapper):
"use_cudnn": False,
"padding": string(pad_mode)
}
node.fluid_code.add_layer("conv2d",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"conv2d", inputs=input, output=node, param_attr=attr)
def Reshape(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -561,18 +533,17 @@ class TFOpMapper(OpMapper):
attr = {"shape": shape}
self.add_omit_nodes(param.layer_name, node.layer_name)
else:
assert len(param.out_shapes[
0]) == 1, "Unexpected situation of shape parameter"
attr = {"shape": [-1]}
node.fluid_code.add_layer("reshape",
node.fluid_code.add_layer(
"reshape",
inputs=param,
output="shape_param",
param_attr=attr)
attr = {"num_or_sections": param.out_shapes[0][0], "dim": 0}
node.fluid_code.add_layer("split",
inputs="shape_param",
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"split", inputs="shape_param", output=node, param_attr=attr)
new_param = "["
for i in range(param.out_shapes[0][0]):
new_param += (node.layer_name + "[{}]".format(i) + ", ")
......@@ -600,14 +571,10 @@ class TFOpMapper(OpMapper):
if len(input.out_shapes[0]) == 4 and node.tf_data_format == "NHWC":
if len(attr["shape"]) < 3:
perm = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=perm)
node.fluid_code.add_layer("reshape",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=perm)
node.fluid_code.add_layer(
"reshape", inputs=node, output=node, param_attr=attr)
return
if len(attr["shape"]) == 4 and node.tf_data_format == "NHWC":
......@@ -616,27 +583,19 @@ class TFOpMapper(OpMapper):
attr["shape"] = [attr["shape"][i] for i in [0, 3, 1, 2]]
else:
perm = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=perm)
node.fluid_code.add_layer("reshape",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=perm)
node.fluid_code.add_layer(
"reshape", inputs=node, output=node, param_attr=attr)
perm = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=perm)
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=perm)
return
if len(attr["shape"]) == 5:
attr["shape"] = [attr["shape"][i] for i in [0, 1, 4, 2, 3]]
node.fluid_code.add_layer("reshape",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reshape", inputs=input, output=node, param_attr=attr)
def AvgPool(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -664,10 +623,8 @@ class TFOpMapper(OpMapper):
"pool_stride": strides[2:4],
"pool_padding": string(pad_mode)
}
node.fluid_code.add_layer("pool2d",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"pool2d", inputs=input, output=node, param_attr=attr)
def SplitV(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -684,28 +641,24 @@ class TFOpMapper(OpMapper):
"num_or_sections": num_sections.value.tolist(),
"dim": dim.value
}
node.fluid_code.add_layer("split",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"split", inputs=input, output=node, param_attr=attr)
def ConcatV2(self, node):
inputs = [
self.graph.get_node(
name, copy=True) for name in node.layer.input[:-1]
]
axis = self.graph.get_node(node.layer.input[-1], copy=True)
assert axis.layer_type == "Const"
self.add_omit_nodes(axis.layer_name, node.layer_name)
axis = axis.value
if inputs[0].tf_data_format == "NHWC" and len(
inputs[0].out_shapes[0]) == 4:
if inputs[0].tf_data_format == "NHWC" and len(inputs[0].out_shapes[
0]) == 4:
axis = nhwc_dim_to_nchw(inputs[0], axis)
attr = {"axis": axis}
node.fluid_code.add_layer("concat",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"concat", inputs=inputs, output=node, param_attr=attr)
def Tile(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -725,18 +678,17 @@ class TFOpMapper(OpMapper):
expand_times[i] = 1
attr = {"expand_times": expand_times}
node.fluid_code.add_layer("expand",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"expand", inputs=input, output=node, param_attr=attr)
def Pack(self, node):
inputs = [
self.graph.get_node(
name, copy=True) for name in node.layer.input
]
axis = node.get_attr("axis")
if inputs[0].tf_data_format == "NHWC" and len(
inputs[0].out_shapes[0]) == 4:
if inputs[0].tf_data_format == "NHWC" and len(inputs[0].out_shapes[
0]) == 4:
tf_data_format = list(inputs[0].tf_data_format)
tf_data_format.insert(axis, str(len(tf_data_format)))
axis = nhwc_dim_to_nchw(inputs[0], axis)
......@@ -746,10 +698,8 @@ class TFOpMapper(OpMapper):
node.pd_data_format = "".join(pd_data_format)
attr = {"axis": axis}
node.fluid_code.add_layer("stack",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"stack", inputs=inputs, output=node, param_attr=attr)
def Pad(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -766,10 +716,8 @@ class TFOpMapper(OpMapper):
paddings = paddings[4:]
pad_op = "pad2d"
attr = {"paddings": paddings}
node.fluid_code.add_layer(
pad_op, inputs=input, output=node, param_attr=attr)
def MirrorPad(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -788,10 +736,8 @@ class TFOpMapper(OpMapper):
paddings = paddings[4:]
pad_op = "pad2d"
attr = {"paddings": paddings, "mode": string("reflect")}
node.fluid_code.add_layer(
pad_op, inputs=input, output=node, param_attr=attr)
def Range(self, node):
start = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -815,10 +761,8 @@ class TFOpMapper(OpMapper):
inputs = {"start": start, "end": limit, "step": delta}
attr = {"dtype": string(node.dtype)}
node.fluid_code.add_layer("range",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"range", inputs=inputs, output=node, param_attr=attr)
def Mean(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -832,10 +776,8 @@ class TFOpMapper(OpMapper):
dims[i] = nhwc_dim_to_nchw(input, dims[i])
attr = {"dim": dims, "keep_dim": keep_dims}
node.fluid_code.add_layer("reduce_mean",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reduce_mean", inputs=input, output=node, param_attr=attr)
def MatMul(self, node):
x = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -849,15 +791,11 @@ class TFOpMapper(OpMapper):
shape = x.out_shapes[0]
shape[-1] = y.out_shapes[0][0]
attr = {"shape": shape}
node.fluid_code.add_layer("reshape",
inputs=x,
output=x,
param_attr=attr)
node.fluid_code.add_layer(
"reshape", inputs=x, output=x, param_attr=attr)
attr = {"transpose_x": transpose_a, "transpose_y": transpose_b}
node.fluid_code.add_layer("matmul",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"matmul", inputs=inputs, output=node, param_attr=attr)
def ArgMax(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -868,10 +806,8 @@ class TFOpMapper(OpMapper):
if input.tf_data_format == "NHWC" and len(input.out_shapes[0]) == 4:
axis = nhwc_dim_to_nchw(input, axis)
attr = {"axis": axis}
node.fluid_code.add_layer("argmax",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"argmax", inputs=input, output=node, param_attr=attr)
def StridedSlice(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -909,16 +845,12 @@ class TFOpMapper(OpMapper):
x = shrink_axis_mask >> i & 1
if x == 1:
squeeze_dims.append(i)
node.fluid_code.add_layer("slice",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"slice", inputs=input, output=node, param_attr=attr)
if shrink_axis_mask > 0 and len(input.out_shapes[0]) == 5:
attr = {"axes": squeeze_dims}
node.fluid_code.add_layer("squeeze",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"squeeze", inputs=node, output=node, param_attr=attr)
def Slice(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -950,10 +882,8 @@ class TFOpMapper(OpMapper):
"starts": begin,
"ends": size
}
node.fluid_code.add_layer("slice",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"slice", inputs=input, output=node, param_attr=attr)
def Conv2DBackpropInput(self, node):
out_shape = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1003,10 +933,8 @@ class TFOpMapper(OpMapper):
"padding": string(pad_mode),
"output_size": out_shape[1:3]
}
node.fluid_code.add_layer("conv2d_transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"conv2d_transpose", inputs=input, output=node, param_attr=attr)
def Max(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1018,10 +946,8 @@ class TFOpMapper(OpMapper):
dim = nhwc_dim_to_nchw(input, dim)
attr = {"dim": dim, "keep_dim": keep_dims}
node.fluid_code.add_layer("reduce_max",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reduce_max", inputs=input, output=node, param_attr=attr)
def Sum(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1033,19 +959,15 @@ class TFOpMapper(OpMapper):
dim = nhwc_dim_to_nchw(input, dim)
attr = {"dim": dim, "keep_dim": keep_dims}
node.fluid_code.add_layer("reduce_sum",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reduce_sum", inputs=input, output=node, param_attr=attr)
def Cast(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
dtype = node.dtype_map[node.get_attr('DstT')]
attr = {"dtype": string(dtype)}
node.fluid_code.add_layer("cast",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"cast", inputs=input, output=node, param_attr=attr)
def Split(self, node):
dim = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1057,10 +979,8 @@ class TFOpMapper(OpMapper):
dim = nhwc_dim_to_nchw(input, dim)
attr = {"num_or_sections": num_split, "dim": dim}
node.fluid_code.add_layer("split",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"split", inputs=input, output=node, param_attr=attr)
def Squeeze(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1069,10 +989,8 @@ class TFOpMapper(OpMapper):
for i in range(len(squeeze_dims)):
squeeze_dims[i] = nhwc_dim_to_nchw(input, squeeze_dims[i])
attr = {"axes": squeeze_dims}
node.fluid_code.add_layer("squeeze",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"squeeze", inputs=input, output=node, param_attr=attr)
def Softmax(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1082,10 +1000,8 @@ class TFOpMapper(OpMapper):
if input.tf_data_format == "NHWC" and len(input.out_shapes[0]) == 4:
axis = nhwc_dim_to_nchw(input, axis)
attr = {"axis": axis}
node.fluid_code.add_layer("softmax",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"softmax", inputs=input, output=node, param_attr=attr)
def ResizeNearestNeighbor(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1094,14 +1010,12 @@ class TFOpMapper(OpMapper):
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
else:
resize_shape = self.decoder.infer_shape_tensor(resize_shape,
node.out_shapes[0])
align_corners = node.get_attr("align_corners")
attr = {"align_corners": align_corners, "out_shape": resize_shape}
node.fluid_code.add_layer("resize_nearest",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"resize_nearest", inputs=input, output=node, param_attr=attr)
def ResizeBilinear(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1110,27 +1024,23 @@ class TFOpMapper(OpMapper):
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
else:
resize_shape = self.decoder.infer_shape_tensor(resize_shape,
node.out_shapes[0])
align_corners = node.get_attr("align_corners")
attr = {
"align_corners": align_corners,
"out_shape": resize_shape,
"align_mode": 1
}
node.fluid_code.add_layer("resize_bilinear",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"resize_bilinear", inputs=input, output=node, param_attr=attr)
def GreaterEqual(self, node):
x = self.graph.get_node(node.layer.input[0], copy=True)
y = self.graph.get_node(node.layer.input[1], copy=True)
inputs = {"x": x, "y": y}
node.fluid_code.add_layer("greater_equal",
inputs=inputs,
output=node,
param_attr=None)
node.fluid_code.add_layer(
"greater_equal", inputs=inputs, output=node, param_attr=None)
def RandomUniform(self, node):
shape = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1144,26 +1054,21 @@ class TFOpMapper(OpMapper):
attr = {"shape": shape, "min": 0.0, "max": 0.9999}
if shape[0] < 0:
input = self.batch_node
node.fluid_code.add_layer("uniform_random_batch_size_like",
node.fluid_code.add_layer(
"uniform_random_batch_size_like",
inputs=input,
output=node,
param_attr=attr)
else:
node.fluid_code.add_layer("uniform_random",
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"uniform_random", inputs=None, output=node, param_attr=attr)
def SquaredDifference(self, node):
x = self.graph.get_node(node.layer.input[0], copy=True)
y = self.graph.get_node(node.layer.input[1], copy=True)
inputs = {"x": x, "y": y}
node.fluid_code.add_layer("elementwise_sub",
inputs=inputs,
output=node,
param_attr=None)
node.fluid_code.add_layer(
"elementwise_sub", inputs=inputs, output=node, param_attr=None)
inputs = {"x": node, "y": node}
node.fluid_code.add_layer("elementwise_mul",
inputs=inputs,
output=node,
param_attr=None)
node.fluid_code.add_layer(
"elementwise_mul", inputs=inputs, output=node, param_attr=None)
......@@ -43,6 +43,7 @@ class TFOpMapperNHWC(OpMapper):
'Sqrt': ['sqrt'],
'swish_f32': ['swish'],
'Tanh': ['tanh'],
'Softplus': ['softplus'],
'LeakyRelu': ['leaky_relu', {
'alpha': 'alpha'
}]
......@@ -128,26 +129,18 @@ class TFOpMapperNHWC(OpMapper):
if len(input.out_shapes[0]) == 4 and op_info[0] != 'shape':
attr1 = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer(
'transpose', inputs=input, output=node, param_attr=attr1)
input = node
node.fluid_code.add_layer(
op_info[0], inputs=input, output=node, param_attr=attr)
input = node
attr2 = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer(
'transpose', inputs=input, output=node, param_attr=attr2)
else:
node.fluid_code.add_layer(
op_info[0], inputs=input, output=node, param_attr=attr)
def elementwise_map(self, node):
assert node.layer_type in self.elementwise_ops
......@@ -208,42 +201,37 @@ class TFOpMapperNHWC(OpMapper):
raise Exception("Unexpected situation happend")
if x_need_expand:
attr = {"expand_times": x_expand_times}
node.fluid_code.add_layer("expand",
inputs=x_input,
output="x_tmp",
param_attr=attr)
node.fluid_code.add_layer(
"expand", inputs=x_input, output="x_tmp", param_attr=attr)
x_input = "x_tmp"
if y_need_expand:
attr = {"expand_times": y_expand_times}
node.fluid_code.add_layer("expand",
inputs=y_input,
output="y_tmp",
param_attr=attr)
node.fluid_code.add_layer(
"expand", inputs=y_input, output="y_tmp", param_attr=attr)
y_input = "y_tmp"
if len(x_shape) == 4 and len(y_shape) == 4:
node.fluid_code.add_layer("transpose",
node.fluid_code.add_layer(
"transpose",
inputs=x_input,
output=x_input,
param_attr={'perm': [0, 3, 1, 2]})
node.fluid_code.add_layer("transpose",
node.fluid_code.add_layer(
"transpose",
inputs=y_input,
output=y_input,
param_attr={'perm': [0, 3, 1, 2]})
inputs = {"x": x_input, "y": y_input}
node.fluid_code.add_layer(
op_type, inputs=inputs, output=node, param_attr=None)
node.fluid_code.add_layer(
"transpose",
inputs=node,
output=node,
param_attr={'perm': [0, 2, 3, 1]})
else:
inputs = {"x": x_input, "y": y_input}
node.fluid_code.add_layer(
op_type, inputs=inputs, output=node, param_attr=None)
def Placeholder(self, node):
shape = node.out_shapes[0]
......@@ -259,10 +247,8 @@ class TFOpMapperNHWC(OpMapper):
'append_batch_size': False
}
node.fluid_code.add_layer("data",
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"data", inputs=None, output=node, param_attr=attr)
def Const(self, node):
shape = node.out_shapes[0]
......@@ -282,10 +268,8 @@ class TFOpMapperNHWC(OpMapper):
'name': string(node.layer_name),
'default_initializer': initializer
}
node.fluid_code.add_layer("create_parameter",
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"create_parameter", inputs=None, output=node, param_attr=attr)
def Transpose(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -296,10 +280,8 @@ class TFOpMapperNHWC(OpMapper):
perm = perm.value.tolist()
attr = {'perm': perm}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
def MaxPool(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -316,10 +298,8 @@ class TFOpMapperNHWC(OpMapper):
if not channel_first:
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
strides = [strides[i] for i in [0, 3, 1, 2]]
k_size = [k_size[i] for i in [0, 3, 1, 2]]
......@@ -331,17 +311,13 @@ class TFOpMapperNHWC(OpMapper):
"pool_stride": strides[2:4],
"pool_padding": string(pad_mode)
}
node.fluid_code.add_layer("pool2d",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"pool2d", inputs=input, output=node, param_attr=attr)
if not channel_first:
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=attr)
def Conv2D(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -373,10 +349,8 @@ class TFOpMapperNHWC(OpMapper):
strides = [strides[i] for i in [0, 3, 1, 2]]
dilations = [dilations[i] for i in [0, 3, 1, 2]]
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
input = node
attr = {
......@@ -393,25 +367,19 @@ class TFOpMapperNHWC(OpMapper):
if len(node.dilation) == 1:
attr['dilation'] = [1, node.dilation[0]]
node.fluid_code.add_layer("conv2d",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"conv2d", inputs=input, output=node, param_attr=attr)
if not channel_first:
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=attr)
def BiasAdd(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
bias = self.graph.get_node(node.layer.input[1], copy=True)
inputs = {"x": input, "y": bias}
node.fluid_code.add_layer("elementwise_add",
inputs=inputs,
output=node,
param_attr=None)
node.fluid_code.add_layer(
"elementwise_add", inputs=inputs, output=node, param_attr=None)
def FusedBatchNorm(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -433,10 +401,8 @@ class TFOpMapperNHWC(OpMapper):
if not channel_first:
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
input = node
attr = {
......@@ -448,17 +414,13 @@ class TFOpMapperNHWC(OpMapper):
"is_test": True
}
node.fluid_code.add_layer("batch_norm",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"batch_norm", inputs=input, output=node, param_attr=attr)
if not channel_first:
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=attr)
def DepthwiseConv2dNative(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -487,10 +449,8 @@ class TFOpMapperNHWC(OpMapper):
strides = [strides[i] for i in [0, 3, 1, 2]]
dilations = [dilations[i] for i in [0, 3, 1, 2]]
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
input = node
attr = {
......@@ -504,17 +464,13 @@ class TFOpMapperNHWC(OpMapper):
"use_cudnn": False,
"padding": string(pad_mode)
}
node.fluid_code.add_layer("conv2d",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"conv2d", inputs=input, output=node, param_attr=attr)
if not channel_first:
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=attr)
def Reshape(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -530,18 +486,17 @@ class TFOpMapperNHWC(OpMapper):
attr = {"shape": shape}
self.add_omit_nodes(param.layer_name, node.layer_name)
else:
assert len(param.out_shapes[
0]) == 1, "Unexpected situation of shape parameter"
attr = {"shape": [-1]}
node.fluid_code.add_layer("reshape",
node.fluid_code.add_layer(
"reshape",
inputs=param,
output="shape_param",
param_attr=attr)
attr = {"num_or_sections": param.out_shapes[0][0], "dim": 0}
node.fluid_code.add_layer("split",
inputs="shape_param",
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"split", inputs="shape_param", output=node, param_attr=attr)
new_param = "["
for i in range(param.out_shapes[0][0]):
new_param += (node.layer_name + "[{}]".format(i) + ", ")
......@@ -565,10 +520,8 @@ class TFOpMapperNHWC(OpMapper):
attr["shape"][index] = int(total_size)
attr["shape"][0] = -1
node.fluid_code.add_layer("reshape",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reshape", inputs=input, output=node, param_attr=attr)
def AvgPool(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -588,10 +541,8 @@ class TFOpMapperNHWC(OpMapper):
strides = [strides[i] for i in [0, 3, 1, 2]]
k_size = [k_size[i] for i in [0, 3, 1, 2]]
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
input = node
attr = {
......@@ -600,17 +551,13 @@ class TFOpMapperNHWC(OpMapper):
"pool_stride": strides[2:4],
"pool_padding": string(pad_mode)
}
node.fluid_code.add_layer("pool2d",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"pool2d", inputs=input, output=node, param_attr=attr)
if not channel_first:
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=attr)
def SplitV(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -625,15 +572,13 @@ class TFOpMapperNHWC(OpMapper):
"num_or_sections": num_sections.value.tolist(),
"dim": dim.value
}
node.fluid_code.add_layer("split",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"split", inputs=input, output=node, param_attr=attr)
def ConcatV2(self, node):
inputs = [
self.graph.get_node(
name, copy=True) for name in node.layer.input[:-1]
]
axis = self.graph.get_node(node.layer.input[-1], copy=True)
assert axis.layer_type == "Const"
......@@ -643,10 +588,8 @@ class TFOpMapperNHWC(OpMapper):
axis += len(inputs[0].out_shapes[0])
attr = {"axis": axis}
node.fluid_code.add_layer("concat",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"concat", inputs=inputs, output=node, param_attr=attr)
def Tile(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -660,21 +603,18 @@ class TFOpMapperNHWC(OpMapper):
if expand_times[i] < 0:
expand_times[i] = 1
attr = {"expand_times": expand_times}
node.fluid_code.add_layer("expand",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"expand", inputs=input, output=node, param_attr=attr)
def Pack(self, node):
inputs = [
self.graph.get_node(
name, copy=True) for name in node.layer.input
]
axis = node.get_attr("axis")
attr = {"axis": axis}
node.fluid_code.add_layer("stack",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"stack", inputs=inputs, output=node, param_attr=attr)
def Pad(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -695,30 +635,22 @@ class TFOpMapperNHWC(OpMapper):
if new_padding is not None:
if input.tf_data_format == "NHWC":
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
input = node
attr = {"paddings": new_padding}
node.fluid_code.add_layer("pad2d",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"pad2d", inputs=input, output=node, param_attr=attr)
if input.tf_data_format == "NHWC":
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=attr)
return
attr = {"paddings": paddings}
node.fluid_code.add_layer("pad",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"pad", inputs=input, output=node, param_attr=attr)
def Range(self, node):
start = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -746,10 +678,8 @@ class TFOpMapperNHWC(OpMapper):
"step": delta,
}
attr = {"dtype": string(node.dtype)}
node.fluid_code.add_layer("range",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"range", inputs=inputs, output=node, param_attr=attr)
def Mean(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -759,10 +689,8 @@ class TFOpMapperNHWC(OpMapper):
keep_dims = node.get_attr("keep_dims")
attr = {"dim": dims, "keep_dim": keep_dims}
node.fluid_code.add_layer("reduce_mean",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reduce_mean", inputs=input, output=node, param_attr=attr)
def MatMul(self, node):
x = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -776,15 +704,11 @@ class TFOpMapperNHWC(OpMapper):
shape = x.out_shapes[0]
shape[-1] = y.out_shapes[0][0]
attr = {"shape": shape}
node.fluid_code.add_layer("reshape",
inputs=x,
output=x,
param_attr=attr)
node.fluid_code.add_layer(
"reshape", inputs=x, output=x, param_attr=attr)
attr = {"transpose_x": transpose_a, "transpose_y": transpose_b}
node.fluid_code.add_layer("matmul",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"matmul", inputs=inputs, output=node, param_attr=attr)
def ArgMax(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -793,10 +717,8 @@ class TFOpMapperNHWC(OpMapper):
self.add_omit_nodes(axis.layer_name, node.layer_name)
axis = axis.value
attr = {"axis": axis}
node.fluid_code.add_layer("argmax",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"argmax", inputs=input, output=node, param_attr=attr)
def StridedSlice(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -862,25 +784,19 @@ class TFOpMapperNHWC(OpMapper):
"starts": new_begin,
"ends": new_end
}
node.fluid_code.add_layer("slice",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"slice", inputs=input, output=node, param_attr=attr)
if len(new_axes) > 0:
attr = {"axes": new_axes}
node.fluid_code.add_layer("unsqueeze",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"unsqueeze", inputs=node, output=node, param_attr=attr)
if len(shrink_axes) > 0:
if len(input.out_shapes[0]) + len(new_axes) <= 1:
pass
else:
attr = {"axes": shrink_axes}
node.fluid_code.add_layer("squeeze",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"squeeze", inputs=node, output=node, param_attr=attr)
def Slice(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -909,10 +825,8 @@ class TFOpMapperNHWC(OpMapper):
"ends": size
}
node.fluid_code.add_layer("slice",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"slice", inputs=input, output=node, param_attr=attr)
def Conv2DBackpropInput(self, node):
out_shape = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -950,10 +864,8 @@ class TFOpMapperNHWC(OpMapper):
strides = [strides[i] for i in [0, 3, 1, 2]]
dilations = [dilations[i] for i in [0, 3, 1, 2]]
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
input = node
else:
self.data_format_propagation(node)
......@@ -968,17 +880,13 @@ class TFOpMapperNHWC(OpMapper):
"padding": string(pad_mode),
"output_size": out_shape[1:3]
}
node.fluid_code.add_layer("conv2d_transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"conv2d_transpose", inputs=input, output=node, param_attr=attr)
if not channel_first:
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=attr)
def Max(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -988,10 +896,8 @@ class TFOpMapperNHWC(OpMapper):
dim = reduce_idx.value.tolist()
attr = {"dim": dim, "keep_dim": keep_dims}
node.fluid_code.add_layer("reduce_max",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reduce_max", inputs=input, output=node, param_attr=attr)
def Sum(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1001,19 +907,15 @@ class TFOpMapperNHWC(OpMapper):
dim = reduce_idx.value.tolist()
attr = {"dim": dim, "keep_dim": keep_dims}
node.fluid_code.add_layer("reduce_sum",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"reduce_sum", inputs=input, output=node, param_attr=attr)
def Cast(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
dtype = node.dtype_map[node.get_attr('DstT')]
attr = {"dtype": string(dtype)}
node.fluid_code.add_layer("cast",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"cast", inputs=input, output=node, param_attr=attr)
def Split(self, node):
dim = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1024,28 +926,22 @@ class TFOpMapperNHWC(OpMapper):
dim = dim.value
attr = {"num_or_sections": num_split, "dim": dim}
node.fluid_code.add_layer("split",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"split", inputs=input, output=node, param_attr=attr)
def Squeeze(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
squeeze_dims = node.get_attr('squeeze_dims')
attr = {"axes": squeeze_dims}
node.fluid_code.add_layer("squeeze",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"squeeze", inputs=input, output=node, param_attr=attr)
def Softmax(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
axis = node.get_attr("axis")
attr = {"axis": axis}
node.fluid_code.add_layer("softmax",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"softmax", inputs=input, output=node, param_attr=attr)
def ResizeNearestNeighbor(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1054,24 +950,18 @@ class TFOpMapperNHWC(OpMapper):
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
else:
resize_shape = self.decoder.infer_shape_tensor(resize_shape,
node.out_shapes[0])
align_corners = node.get_attr("align_corners")
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
attr = {"align_corners": align_corners, "out_shape": resize_shape}
node.fluid_code.add_layer("resize_nearest",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"resize_nearest", inputs=node, output=node, param_attr=attr)
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=attr)
def ResizeBilinear(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1080,37 +970,29 @@ class TFOpMapperNHWC(OpMapper):
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
else:
resize_shape = self.decoder.infer_shape_tensor(
resize_shape, node.out_shapes[0])
resize_shape = self.decoder.infer_shape_tensor(resize_shape,
node.out_shapes[0])
align_corners = node.get_attr("align_corners")
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=input, output=node, param_attr=attr)
attr = {
"align_corners": align_corners,
"out_shape": resize_shape,
"align_mode": 1
}
node.fluid_code.add_layer("resize_bilinear",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"resize_bilinear", inputs=node, output=node, param_attr=attr)
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose", inputs=node, output=node, param_attr=attr)
def GreaterEqual(self, node):
x = self.graph.get_node(node.layer.input[0], copy=True)
y = self.graph.get_node(node.layer.input[1], copy=True)
inputs = {"x": x, "y": y}
node.fluid_code.add_layer("greater_equal",
inputs=inputs,
output=node,
param_attr=None)
node.fluid_code.add_layer(
"greater_equal", inputs=inputs, output=node, param_attr=None)
def RandomUniform(self, node):
shape = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1123,29 +1005,24 @@ class TFOpMapperNHWC(OpMapper):
if shape[0] < 0:
input = self.batch_node
node.fluid_code.add_layer("uniform_random_batch_size_like",
node.fluid_code.add_layer(
"uniform_random_batch_size_like",
inputs=input,
output=node,
param_attr=attr)
else:
node.fluid_code.add_layer("uniform_random",
inputs=None,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"uniform_random", inputs=None, output=node, param_attr=attr)
def SquaredDifference(self, node):
x = self.graph.get_node(node.layer.input[0], copy=True)
y = self.graph.get_node(node.layer.input[1], copy=True)
inputs = {"x": x, "y": y}
node.fluid_code.add_layer("elementwise_sub",
inputs=inputs,
output=node,
param_attr=None)
node.fluid_code.add_layer(
"elementwise_sub", inputs=inputs, output=node, param_attr=None)
inputs = {"x": node, "y": node}
node.fluid_code.add_layer("elementwise_mul",
inputs=inputs,
output=node,
param_attr=None)
node.fluid_code.add_layer(
"elementwise_mul", inputs=inputs, output=node, param_attr=None)
def ExpandDims(self, node):
x = self.graph.get_node(node.layer.input[0], copy=True)
......@@ -1156,19 +1033,15 @@ class TFOpMapperNHWC(OpMapper):
dim = self.decoder.infer_tensor(y)
self.add_omit_nodes(y.layer_name, node.layer_name)
attr = {'axes': [dim]}
node.fluid_code.add_layer("unsqueeze",
inputs=x,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"unsqueeze", inputs=x, output=node, param_attr=attr)
def BatchToSpaceND(self, node):
x = self.graph.get_node(node.layer.input[0], copy=True)
y = self.graph.get_node(node.layer.input[1], copy=True)
if hasattr(node, 'skip') and node.skip:
node.fluid_code.add_layer("=",
inputs=x,
output=node,
param_attr=None)
node.fluid_code.add_layer(
"=", inputs=x, output=node, param_attr=None)
else:
raise Exception("BatchToSpaceND is not supported")
......@@ -1176,9 +1049,7 @@ class TFOpMapperNHWC(OpMapper):
x = self.graph.get_node(node.layer.input[0], copy=True)
y = self.graph.get_node(node.layer.input[1], copy=True)
if hasattr(node, 'skip') and node.skip:
node.fluid_code.add_layer("=",
inputs=x,
output=node,
param_attr=None)
node.fluid_code.add_layer(
"=", inputs=x, output=node, param_attr=None)
else:
raise Exception("SpaceToBatchND is not supported")
......@@ -41,7 +41,8 @@ class CaffeOptimizer(object):
if is_delete_node:
parent_node.fluid_code.clear()
node.fluid_code.clear()
node.fluid_code.add_layer("batch_norm",
node.fluid_code.add_layer(
"batch_norm",
inputs=input,
output=node,
param_attr=parent_param_attr)
......@@ -62,7 +63,8 @@ class CaffeOptimizer(object):
if is_delete_node:
parent_node.fluid_code.clear()
node.fluid_code.clear()
node.fluid_code.add_layer(op,
node.fluid_code.add_layer(
op,
inputs=input,
output=node,
param_attr=parent_param_attr)
......@@ -554,7 +554,8 @@ class TFOptimizer(object):
node.fluid_code.layers[0].param_attr["shape"] = shape
node.fluid_code.layers[0].output = "nhwc_" + name
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
node.fluid_code.add_layer(
"transpose",
inputs="nhwc_" + name,
output=node,
param_attr=attr)
......@@ -767,8 +768,8 @@ class TFOptimizer(object):
is_prelu = False
continue
if len(in_nodes0[0].outputs) != 1 or len(
in_nodes0[1].outputs) != 1:
if len(in_nodes0[0].outputs) != 1 or len(in_nodes0[1]
.outputs) != 1:
is_prelu = False
continue
......@@ -777,8 +778,8 @@ class TFOptimizer(object):
self.graph.get_node(in_name)
for in_name in in_nodes0[1].inputs
]
if in_nodes2[1].layer_type != "Const" or numpy.fabs(
in_nodes2[1].value - 0.5) > 1e-06:
if in_nodes2[1].layer_type != "Const" or numpy.fabs(in_nodes2[
1].value - 0.5) > 1e-06:
is_prelu = False
continue
if in_nodes2[0].layer_type != "Mul":
......@@ -787,8 +788,8 @@ class TFOptimizer(object):
if exist_act(in_nodes2[0]):
is_prelu = False
continue
if len(in_nodes2[1].outputs) != 1 or len(
in_nodes2[0].outputs) != 1:
if len(in_nodes2[1].outputs) != 1 or len(in_nodes2[0]
.outputs) != 1:
is_prelu = False
continue
......@@ -803,8 +804,8 @@ class TFOptimizer(object):
if exist_act(in_nodes3[1]):
is_prelu = False
continue
if len(in_nodes3[0].outputs) != 1 or len(
in_nodes3[1].outputs) != 1:
if len(in_nodes3[0].outputs) != 1 or len(in_nodes3[1]
.outputs) != 1:
is_prelu = False
continue
......@@ -856,12 +857,12 @@ class TFOptimizer(object):
mode = "element"
elif len(in_nodes3[0].value.shape) == 0:
mode = "all"
elif len(in_nodes3[0].value.shape
) == 1 and in_nodes3[0].value.shape[0] == 1:
elif len(in_nodes3[0].value.shape) == 1 and in_nodes3[
0].value.shape[0] == 1:
mode = "all"
elif len(in_shape) == 4 and len(
in_nodes3[0].value.shape
) == 1 and in_nodes3[0].value.shape[0] == in_shape[-1]:
elif len(in_shape) == 4 and len(in_nodes3[
0].value.shape) == 1 and in_nodes3[0].value.shape[
0] == in_shape[-1]:
mode = "channel"
weight = self.op_mapper.weights[in_nodes3[0].layer_name]
weight = numpy.expand_dims(weight, 0)
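The branch chain above derives the prelu `mode` from the shape of the alpha tensor. A compact restatement as a sketch; the guard for the leading `"element"` branch is collapsed out of this hunk, so it becomes the fallback here:
```
def prelu_mode(alpha_shape, in_shape):
    # scalar alpha, or alpha of shape [1]: one slope for the whole tensor
    if len(alpha_shape) == 0 or (
            len(alpha_shape) == 1 and alpha_shape[0] == 1):
        return "all"
    # alpha matching the channel (last NHWC) dim: per-channel slopes
    if len(in_shape) == 4 and len(alpha_shape) == 1 \
            and alpha_shape[0] == in_shape[-1]:
        return "channel"
    # otherwise assumed to be a full per-element slope tensor
    return "element"
```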
......@@ -916,14 +917,15 @@ class TFOptimizer(object):
self.graph.get_node(in_name) for in_name in node.inputs
]
if in_nodes0[0].layer_type != "Mul" or in_nodes0[
1].layer_type != "Const" or in_nodes0[1].value.size != 1:
1].layer_type != "Const" or in_nodes0[
1].value.size != 1:
is_scale = False
continue
if exist_act(in_nodes0[0]):
is_scale = False
continue
if len(in_nodes0[0].outputs) != 1 or len(
in_nodes0[1].outputs) != 1:
if len(in_nodes0[0].outputs) != 1 or len(in_nodes0[1]
.outputs) != 1:
is_scale = False
continue
......@@ -939,8 +941,8 @@ class TFOptimizer(object):
if exist_act(in_nodes1[1]):
is_scale = False
continue
if len(in_nodes1[0].outputs) != 1 or len(
in_nodes1[1].outputs) != 1:
if len(in_nodes1[0].outputs) != 1 or len(in_nodes1[1]
.outputs) != 1:
is_scale = False
continue
......@@ -962,8 +964,8 @@ class TFOptimizer(object):
scale = 1.0 / in_nodes2[1].value * in_nodes1[0].value
act = None
if node.fluid_code.layers[0].param_attr is not None:
act = node.fluid_code.layers[0].param_attr.get(
"act", None)
act = node.fluid_code.layers[0].param_attr.get("act",
None)
node.fluid_code.clear()
attr = {
......@@ -972,10 +974,8 @@ class TFOptimizer(object):
"bias_after_scale": True,
"act": act
}
node.fluid_code.add_layer("scale",
inputs=in_node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"scale", inputs=in_node, output=node, param_attr=attr)
del self.graph.node_map[in_nodes0[0].layer_name]
del self.graph.node_map[in_nodes0[1].layer_name]
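Once matched, the whole Mul/Const subgraph collapses to a single `scale` layer. The opening entries of `attr` fall inside the collapsed region, so the sketch below assumes they hold the computed `scale` and a `bias`; only `bias_after_scale` and `act` are visible above:
```
import paddle.fluid as fluid

out = fluid.layers.scale(
    in_node,
    scale=scale,           # 1.0 / in_nodes2[1].value * in_nodes1[0].value
    bias=bias,             # assumed key from the collapsed part of attr
    bias_after_scale=True,
    act=act)
```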
......@@ -1004,17 +1004,17 @@ class TFOptimizer(object):
if exist_act(in_nodes0[0]):
is_affine_channel = False
continue
if len(in_nodes0[0].outputs) != 1 or len(
in_nodes0[1].outputs) != 1:
if len(in_nodes0[0].outputs) != 1 or len(in_nodes0[1]
.outputs) != 1:
is_affine_channel = False
continue
in_nodes1 = [
self.graph.get_node(in_name)
for in_name in in_nodes0[0].inputs
]
if len(in_nodes1[0].out_shapes[0]
) != 4 or in_nodes1[1].layer_type != "Const" or len(
in_nodes1[1].value.shape) != 3:
if len(in_nodes1[0].out_shapes[0]) != 4 or in_nodes1[
1].layer_type != "Const" or len(in_nodes1[1]
.value.shape) != 3:
is_affine_channel = False
continue
if len(in_nodes1[1].outputs) != 1:
......@@ -1037,8 +1037,8 @@ class TFOptimizer(object):
node.layer_type = "AffineChannel"
node.inputs = [in_node.layer_name]
scale = 1.0 / in_nodes0[1].value.flatten()
bias = in_nodes1[1].value.flatten(
) / in_nodes0[1].value.flatten()
bias = in_nodes1[1].value.flatten() / in_nodes0[
1].value.flatten()
if not bias_add:
bias *= -1.0
self.op_mapper.weights[node.layer_name + "_scale"] = scale
......@@ -1046,8 +1046,8 @@ class TFOptimizer(object):
act = None
if node.fluid_code.layers[0].param_attr is not None:
act = node.fluid_code.layers[0].param_attr.get(
"act", None)
act = node.fluid_code.layers[0].param_attr.get("act",
None)
node.fluid_code.clear()
attr = {
......@@ -1055,7 +1055,8 @@ class TFOptimizer(object):
"shape": [channel],
"name": string(node.layer_name + "_scale")
}
node.fluid_code.add_layer("create_parameter",
node.fluid_code.add_layer(
"create_parameter",
inputs=None,
output=node.layer_name + "_scale",
param_attr=attr)
......@@ -1064,7 +1065,8 @@ class TFOptimizer(object):
"shape": [channel],
"name": string(node.layer_name + "_bias")
}
node.fluid_code.add_layer("create_parameter",
node.fluid_code.add_layer(
"create_parameter",
inputs=None,
output=node.layer_name + "_bias",
param_attr=attr)
......@@ -1074,7 +1076,8 @@ class TFOptimizer(object):
"bias": node.layer_name + "_bias"
}
attr = {"act": act}
node.fluid_code.add_layer("affine_channel",
node.fluid_code.add_layer(
"affine_channel",
inputs=inputs,
output=node,
param_attr=attr)
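The fused affine-channel pattern becomes two `create_parameter` calls plus one `affine_channel`, mirroring the three `add_layer` calls above. A sketch with an assumed `'float32'` dtype (the dtype entry sits in the collapsed part of each attr dict; `node_name` stands in for `node.layer_name`):
```
import paddle.fluid as fluid

scale = fluid.layers.create_parameter(
    dtype='float32', shape=[channel], name=node_name + "_scale")
bias = fluid.layers.create_parameter(
    dtype='float32', shape=[channel], name=node_name + "_bias")
out = fluid.layers.affine_channel(x, scale=scale, bias=bias, act=act)
```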
......
......@@ -13,6 +13,7 @@
| ShuffleNet | [code](https://github.com/TropComplique/shufflenet-v2-tensorflow) |-|
| mNASNet | [code](https://github.com/tensorflow/tpu/tree/master/models/official/mnasnet) |-|
| EfficientNet | [code](https://github.com/tensorflow/tpu/tree/master/models/official/efficientnet) |-|
| Inception_V3 | [code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v3.py) |-|
| Inception_V4 | [code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_v4.py) |-|
| Inception_ResNet_V2 | [code](https://github.com/tensorflow/models/blob/master/research/slim/nets/inception_resnet_v2.py) |-|
| VGG16 | [code](https://github.com/tensorflow/models/tree/master/research/slim/nets) |-|
......