Skip to content
体验新版
项目
组织
正在加载...
登录
切换导航
打开侧边栏
PaddlePaddle
X2Paddle
提交
b670c23e
X
X2Paddle
项目概览
PaddlePaddle
/
X2Paddle
大约 1 年 前同步成功
通知
328
Star
698
Fork
167
代码
文件
提交
分支
Tags
贡献者
分支图
Diff
Issue
26
列表
看板
标记
里程碑
合并请求
4
Wiki
0
Wiki
分析
仓库
DevOps
项目成员
Pages
X
X2Paddle
项目概览
项目概览
详情
发布
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
Issue
26
Issue
26
列表
看板
标记
里程碑
合并请求
4
合并请求
4
Pages
分析
分析
仓库分析
DevOps
Wiki
0
Wiki
成员
成员
收起侧边栏
关闭侧边栏
动态
分支图
创建新Issue
提交
Issue看板
提交
b670c23e
编写于
8月 03, 2020
作者:
J
jiangjiajun
浏览文件
操作
浏览文件
下载
电子邮件补丁
差异文件
modify program
上级
842108b5
变更
5
显示空白变更内容
内联
并排
Showing
5 changed file
with
771 addition
and
800 deletion
+771
-800
x2paddle/__init__.py
x2paddle/__init__.py
+2
-2
x2paddle/convert.py
x2paddle/convert.py
+4
-22
x2paddle/core/program.py
x2paddle/core/program.py
+131
-27
x2paddle/decoder/tf_decoder.py
x2paddle/decoder/tf_decoder.py
+10
-0
x2paddle/op_mapper/tf_op_mapper_nhwc.py
x2paddle/op_mapper/tf_op_mapper_nhwc.py
+624
-749
未找到文件。
x2paddle/__init__.py
浏览文件 @
b670c23e
...
...
@@ -8,10 +8,10 @@ name_counter = dict()
def
gen_name
(
op_name
,
var_name
):
name
=
"{}
.
{}"
.
format
(
op_name
,
var_name
)
name
=
"{}
_
{}"
.
format
(
op_name
,
var_name
)
if
name
not
in
name_counter
:
name_counter
[
name
]
=
0
else
:
name_counter
[
name
]
+=
1
name
=
name
+
"
.
"
+
str
(
name_counter
[
name
])
name
=
name
+
"
_
"
+
str
(
name_counter
[
name
])
return
name
x2paddle/convert.py
浏览文件 @
b670c23e
...
...
@@ -13,6 +13,7 @@
# limitations under the License.
from
six
import
text_type
as
_text_type
from
x2paddle
import
program
import
argparse
import
sys
...
...
@@ -120,29 +121,10 @@ def tf2paddle(model_path,
print
(
"Now translating model from tensorflow to paddle."
)
model
=
TFDecoder
(
model_path
,
define_input_shape
=
define_input_shape
)
if
not
without_data_format_optimization
:
mapper
=
TFOpMapper
(
model
)
optimizer
=
TFOptimizer
(
mapper
)
# neccesary optimization
optimizer
.
delete_redundance_code
()
# optimizer below is experimental
optimizer
.
optimize_elementwise_op
()
optimizer
.
merge_activation
()
optimizer
.
merge_bias
()
optimizer
.
optimize_sub_graph
()
# optimizer.merge_batch_norm()
# optimizer.merge_prelu()
else
:
mapper
=
TFOpMapperNHWC
(
model
)
optimizer
=
TFOptimizer
(
mapper
)
optimizer
.
delete_redundance_code
()
optimizer
.
strip_graph
()
optimizer
.
merge_activation
()
optimizer
.
merge_bias
()
optimizer
.
make_nchw_input_output
()
optimizer
.
remove_transpose
()
mapper
.
save_inference_model
(
save_dir
,
params_merge
)
program
.
build
()
program
.
gen_model
(
save_dir
)
def
caffe2paddle
(
proto
,
weight
,
save_dir
,
caffe_proto
,
params_merge
=
False
):
...
...
x2paddle/core/program.py
浏览文件 @
b670c23e
...
...
@@ -14,8 +14,13 @@
from
__future__
import
print_function
from
__future__
import
division
import
paddle.fluid
as
fluid
from
paddle.fluid.proto
import
framework_pb2
import
numpy
import
collections
import
sys
import
os
import
six
class
PaddleLayer
(
object
):
...
...
@@ -25,7 +30,21 @@ class PaddleLayer(object):
dict
),
"parameter 'inputs' for PaddleLayer should be type of dict"
assert
isinstance
(
outputs
,
list
),
"parameter, 'outputs' for PaddleLayer should be type of list"
list
),
"parameter 'outputs' for PaddleLayer should be type of list"
for
k
,
v
in
inputs
.
items
():
if
isinstance
(
v
,
list
):
for
i
in
v
:
assert
isinstance
(
i
,
six
.
string_types
),
"value in inputs should be type of string or list of string"
else
:
assert
isinstance
(
v
,
six
.
string_types
)
or
isinstance
(
v
,
list
),
"value in inputs should be type of string or list of string"
for
v
in
outputs
:
assert
isinstance
(
v
,
six
.
string_types
),
"elements in outputs should be type of string"
self
.
kernel
=
kernel
self
.
inputs
=
inputs
self
.
outputs
=
outputs
...
...
@@ -41,22 +60,31 @@ class PaddleProgram(object):
self
.
outputs
=
list
()
self
.
parameters
=
dict
()
def
clear
(
self
):
self
.
layers
=
list
()
self
.
edges_out
=
dict
()
self
.
edges_in
=
dict
()
self
.
inputs
=
list
()
self
.
outputs
=
list
()
self
.
parameters
=
dict
()
def
add_layer
(
self
,
kernel
,
inputs
,
outputs
,
**
kwargs
):
layer
=
PaddleLayer
(
kernel
,
inputs
,
outputs
,
**
kwargs
)
index
=
len
(
self
.
layers
)
self
.
layers
.
append
(
layer
)
return
index
def
build
(
self
):
outputs
=
dict
()
for
i
in
range
(
len
(
self
.
layers
)
):
layer
=
self
.
layers
[
i
]
for
output
in
layer
.
outputs
:
outputs
[
output
]
=
i
for
k
,
v
in
layer
.
inputs
.
items
()
:
assert
v
in
output
s
,
"Couldn't find {} in previous layers, the layers should be make by topological sort"
.
format
(
outputs
_from_nodes
=
dict
()
for
i
,
layer
in
enumerate
(
self
.
layers
):
for
input_key
,
input_var
in
layer
.
inputs
.
items
():
vs
=
input_var
if
not
isinstance
(
vs
,
list
):
vs
=
[
vs
]
for
v
in
vs
:
assert
v
in
outputs_from_node
s
,
"Couldn't find {} in previous layers, the layers should be make by topological sort"
.
format
(
v
)
in_layer_index
=
outputs
[
v
]
in_layer_index
=
outputs_from_nodes
[
v
]
if
in_layer_index
not
in
self
.
edges_out
:
self
.
edges_out
[
in_layer_index
]
=
list
()
self
.
edges_out
[
in_layer_index
].
append
(
i
)
...
...
@@ -64,6 +92,8 @@ class PaddleProgram(object):
if
i
not
in
self
.
edges_in
:
self
.
edges_in
[
i
]
=
list
()
self
.
edges_in
[
i
].
append
(
in_layer_index
)
for
output
in
layer
.
outputs
:
outputs_from_nodes
[
output
]
=
i
def
get_layer_outputs
(
self
,
i
):
return
self
.
edges_out
[
i
]
...
...
@@ -80,7 +110,9 @@ class PaddleProgram(object):
else
:
f
.
write
(
indent_blank
+
code_line
+
'
\n
'
)
f
=
open
(
os
.
path
.
join
(
code_dir
,
'model.py'
),
'w'
)
if
not
os
.
path
.
exists
(
code_dir
):
os
.
makedirs
(
code_dir
)
f
=
open
(
os
.
path
.
join
(
code_dir
,
'x2paddle_model.py'
),
'w'
)
write_code
(
f
,
[
...
...
@@ -90,9 +122,10 @@ class PaddleProgram(object):
""
,
"def x2paddle_net():"
],
indent
=
0
)
for
i
,
layer
in
enumerate
(
self
.
layers
):
if
self
.
edges_in
.
get
(
i
,
0
)
==
0
and
self
.
edges_out
.
get
(
i
,
0
)
==
0
:
edges_in
=
self
.
edges_in
.
get
(
i
,
[])
edges_out
=
self
.
edges_out
.
get
(
i
,
[])
if
len
(
edges_in
)
==
0
and
len
(
edges_out
)
==
0
:
continue
line
=
""
...
...
@@ -106,16 +139,87 @@ class PaddleProgram(object):
line
+=
" = {}("
.
format
(
layer
.
kernel
)
for
k
,
v
in
layer
.
inputs
.
items
():
if
isinstance
(
v
,
list
):
line
+=
"{}=[{}], "
.
format
(
k
,
", "
.
join
(
v
))
else
:
line
+=
"{}={}, "
.
format
(
k
,
v
)
for
k
,
v
in
layer
.
attrs
.
items
():
line
+=
"{}={}, "
.
format
(
k
,
v
)
line
=
line
.
strip
(
", "
)
line
+=
")"
write_code
(
f
,
[
line
],
indent
=
1
)
f
.
close
()
def
gen_parameters
(
self
,
code_dir
):
pass
write_code
(
f
,
[
"return [{}], [{}]"
.
format
(
", "
.
join
(
self
.
inputs
),
", "
.
join
(
self
.
outputs
))
],
indent
=
1
)
f
.
close
()
def
gen_inference_model
(
self
,
model_dir
):
pass
def
gen_model
(
self
,
save_dir
):
code_dir
=
os
.
path
.
join
(
save_dir
,
'model_with_code'
)
infer_dir
=
os
.
path
.
join
(
save_dir
,
'inference_model'
)
self
.
gen_code
(
code_dir
)
sys
.
path
.
append
(
code_dir
)
import
x2paddle_model
scope
=
fluid
.
Scope
()
startup_program
=
fluid
.
Program
()
main_program
=
fluid
.
Program
()
with
fluid
.
scope_guard
(
scope
):
with
fluid
.
program_guard
(
main_program
,
startup_program
):
inputs
,
outputs
=
x2paddle_model
.
x2paddle_net
()
exe
=
fluid
.
Executor
(
fluid
.
CPUPlace
())
exe
.
run
(
startup_program
)
param_dir
=
os
.
path
.
join
(
code_dir
,
'weights'
)
for
k
,
v
in
self
.
parameters
.
items
():
if
scope
.
find_var
(
k
):
self
.
dump_parameter
(
k
,
v
,
param_dir
)
def
if_exist
(
var
):
b
=
os
.
path
.
exists
(
os
.
path
.
join
(
os
.
path
.
join
(
param_dir
,
var
.
name
)))
return
b
fluid
.
io
.
load_vars
(
exe
,
param_dir
,
main_program
,
predicate
=
if_exist
)
fluid
.
io
.
save_inference_model
(
dirname
=
infer_dir
,
feeded_var_names
=
[
i
.
name
for
i
in
inputs
],
target_vars
=
outputs
,
executor
=
exe
)
def
dump_parameter
(
self
,
param_name
,
param
,
save_dir
):
if
not
os
.
path
.
exists
(
save_dir
):
os
.
makedirs
(
save_dir
)
dtype_map
=
{
"int16"
:
[
framework_pb2
.
VarType
.
INT16
,
'h'
],
"int32"
:
[
framework_pb2
.
VarType
.
INT32
,
'i'
],
"int64"
:
[
framework_pb2
.
VarType
.
INT64
,
'q'
],
"float16"
:
[
framework_pb2
.
VarType
.
FP16
,
'e'
],
"float32"
:
[
framework_pb2
.
VarType
.
FP32
,
'f'
],
"float64"
:
[
framework_pb2
.
VarType
.
FP64
,
'd'
],
"bool"
:
[
framework_pb2
.
VarType
.
BOOL
,
None
]
}
shape
=
param
.
shape
if
str
(
param
.
dtype
)
in
[
'uint8'
,
'uint_8'
,
'bool'
]:
param
=
param
.
astype
(
'int64'
)
if
len
(
shape
)
==
0
:
assert
param
.
size
==
1
,
"Unexpected situation happend!"
shape
=
[
1
]
assert
str
(
param
.
dtype
)
in
dtype_map
,
"Unknown dtype {} of params: {}."
.
format
(
str
(
param
.
dtype
),
param_name
)
fp
=
open
(
os
.
path
.
join
(
save_dir
,
param_name
),
'wb'
)
numpy
.
array
([
0
],
dtype
=
'int32'
).
tofile
(
fp
)
numpy
.
array
([
0
],
dtype
=
'int64'
).
tofile
(
fp
)
numpy
.
array
([
0
],
dtype
=
'int32'
).
tofile
(
fp
)
tensor_desc
=
framework_pb2
.
VarType
.
TensorDesc
()
tensor_desc
.
data_type
=
dtype_map
[
str
(
param
.
dtype
)][
0
]
tensor_desc
.
dims
.
extend
(
shape
)
desc_size
=
tensor_desc
.
ByteSize
()
numpy
.
array
([
desc_size
],
dtype
=
'int32'
).
tofile
(
fp
)
fp
.
write
(
tensor_desc
.
SerializeToString
())
param
.
tofile
(
fp
)
fp
.
close
()
x2paddle/decoder/tf_decoder.py
浏览文件 @
b670c23e
...
...
@@ -89,6 +89,16 @@ class TFGraphNode(GraphNode):
field
=
getattr
(
attr
,
attr
.
WhichOneof
(
'value'
))
return
tensor_util
.
MakeNdarray
(
field
)
@
property
def
name
(
self
):
multi_out_ops
=
[
'Split'
,
'SplitV'
,
'IteratorV2'
]
if
self
.
layer_type
in
multi_out_ops
:
if
self
.
layer_name
.
count
(
':'
)
>
0
:
return
self
.
layer_name
.
replace
(
':'
,
'_p'
)
else
:
return
"{}_p0"
.
format
(
self
.
layer_name
)
return
self
.
layer_name
def
get_attr
(
self
,
name
):
if
name
not
in
self
.
layer
.
attr
:
return
None
...
...
x2paddle/op_mapper/tf_op_mapper_nhwc.py
浏览文件 @
b670c23e
...
...
@@ -15,6 +15,9 @@
from
x2paddle.decoder.tf_decoder
import
TFGraph
from
x2paddle.core.op_mapper
import
OpMapper
from
x2paddle.core.util
import
*
from
x2paddle
import
program
from
x2paddle
import
gen_name
import
traceback
import
math
import
inspect
import
numpy
...
...
@@ -36,7 +39,6 @@ class TFOpMapperNHWC(OpMapper):
directly_map_ops
=
{
'Relu'
:
[
'relu'
],
'Relu6'
:
[
'relu6'
],
'Shape'
:
[
'shape'
],
'Abs'
:
[
'abs'
],
'Sigmoid'
:
[
'sigmoid'
],
'Exp'
:
[
'exp'
],
...
...
@@ -59,6 +61,7 @@ class TFOpMapperNHWC(OpMapper):
'Maximum'
:
'elementwise_max'
,
'Minimum'
:
'elementwise_min'
,
'LessEqual'
:
'less_equal'
,
'GreaterEqual'
:
'greater_equal'
,
'Mul'
:
'elementwise_mul'
,
'FloorDiv'
:
'elementwise_floordiv'
}
...
...
@@ -68,9 +71,9 @@ class TFOpMapperNHWC(OpMapper):
self
.
decoder
=
decoder
self
.
graph
=
decoder
.
tf_graph
self
.
weights
=
dict
()
self
.
batch_node
=
None
self
.
omit_nodes
=
list
()
self
.
used_custom_layers
=
dict
()
program
.
clear
()
not_placeholder
=
list
()
for
name
in
self
.
graph
.
input_nodes
:
...
...
@@ -84,6 +87,9 @@ class TFOpMapperNHWC(OpMapper):
idx
=
self
.
graph
.
input_nodes
.
index
(
name
)
del
self
.
graph
.
input_nodes
[
idx
]
program
.
inputs
=
self
.
graph
.
input_nodes
program
.
outputs
=
self
.
graph
.
output_nodes
unsupported_ops
=
set
()
sys
.
stderr
.
write
(
"Total nodes: {}
\n
"
.
format
(
len
(
self
.
graph
.
topo_sort
)))
for
i
,
node_name
in
enumerate
(
self
.
graph
.
topo_sort
):
...
...
@@ -106,30 +112,21 @@ class TFOpMapperNHWC(OpMapper):
func
(
node
)
except
Exception
as
e
:
unsupported_ops
.
add
(
op
)
print
(
e
)
print
(
"
\n
{}
\n
"
.
format
(
traceback
.
format_exc
())
)
else
:
unsupported_ops
.
add
(
op
)
if
len
(
unsupported_ops
)
>
0
:
print
(
"========= {} OPs are not supported yet ==========="
.
format
(
print
(
"
\n
========= {} OPs are not supported yet ==========="
.
format
(
len
(
unsupported_ops
)))
for
op
in
unsupported_ops
:
print
(
"========== {} ============"
.
format
(
op
))
sys
.
exit
(
-
1
)
sys
.
stderr
.
write
(
"
\n
Done!
\n
"
)
def
add_omit_nodes
(
self
,
in_node_name
,
out_node_name
):
in_node
=
self
.
graph
.
get_node
(
in_node_name
)
out_node
=
self
.
graph
.
get_node
(
out_node_name
)
index
=
in_node
.
outputs
.
index
(
out_node_name
)
del
in_node
.
outputs
[
index
]
index
=
out_node
.
inputs
.
index
(
in_node_name
)
del
out_node
.
inputs
[
index
]
self
.
omit_nodes
.
append
(
in_node
.
layer_name
)
def
directly_map
(
self
,
node
):
assert
node
.
layer_type
in
self
.
directly_map_ops
op_info
=
self
.
directly_map_ops
[
node
.
layer_type
]
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
]
,
copy
=
True
)
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
attr
=
dict
()
for
param
in
op_info
[
1
:]:
tf_param_name
=
list
(
param
.
keys
())[
0
]
...
...
@@ -137,46 +134,35 @@ class TFOpMapperNHWC(OpMapper):
tf_param
=
node
.
get_attr
(
tf_param_name
)
attr
[
pd_param_name
]
=
tf_param
if
len
(
input
.
out_shapes
[
0
])
==
4
and
op_info
[
0
]
!=
'shape'
:
attr1
=
{
"perm"
:
[
0
,
3
,
1
,
2
]}
node
.
fluid_code
.
add_layer
(
'transpose'
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr1
)
input
=
node
node
.
fluid_code
.
add_layer
(
op_info
[
0
],
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
input
=
node
attr2
=
{
"perm"
:
[
0
,
2
,
3
,
1
]}
node
.
fluid_code
.
add_layer
(
'transpose'
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr2
)
else
:
node
.
fluid_code
.
add_layer
(
op_info
[
0
],
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
program
.
add_layer
(
kernel
=
"fluid.layers.{}"
.
format
(
op_info
[
0
]),
inputs
=
{
"x"
:
input
.
name
},
outputs
=
[
node
.
name
],
**
attr
)
def
elementwise_map
(
self
,
node
):
assert
node
.
layer_type
in
self
.
elementwise_ops
op_type
=
self
.
elementwise_ops
[
node
.
layer_type
]
x
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
y
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
inputs
=
{
"x"
:
x
,
"y"
:
y
}
node
.
fluid_code
.
add_layer
(
op_type
,
inputs
=
inputs
,
output
=
node
,
param_attr
=
None
)
x
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
y
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
program
.
add_layer
(
kernel
=
"fluid.layers.{}"
.
format
(
op_type
),
inputs
=
{
"x"
:
x
.
name
,
"y"
:
y
.
name
},
outputs
=
[
node
.
name
])
def
Placeholder
(
self
,
node
):
shape
=
node
.
out_shapes
[
0
]
assert
len
(
shape
)
!=
0
,
"Unknown shape of input nodes[{}]."
.
format
(
node
.
layer_name
)
dtype
=
node
.
dtype
if
shape
[
0
]
<
0
:
self
.
batch_node
=
node
attr
=
{
'dtype'
:
string
(
dtype
),
'shape'
:
shape
,
'name'
:
string
(
node
.
layer_name
),
'append_batch_size'
:
False
}
node
.
fluid_code
.
add_layer
(
"data"
,
inputs
=
None
,
output
=
node
,
param_attr
=
attr
)
program
.
add_layer
(
kernel
=
"fluid.data"
,
inputs
=
{},
outputs
=
[
node
.
name
],
dtype
=
string
(
dtype
),
shape
=
shape
,
name
=
string
(
node
.
name
))
def
Const
(
self
,
node
):
shape
=
node
.
out_shapes
[
0
]
...
...
@@ -188,74 +174,94 @@ class TFOpMapperNHWC(OpMapper):
shape
=
[
1
]
initializer
=
"Constant({})"
.
format
(
value
)
self
.
weights
[
node
.
layer_name
]
=
node
.
value
attr
=
{
'dtype'
:
string
(
dtype
),
'shape'
:
shape
,
'name'
:
string
(
node
.
layer_name
),
'default_initializer'
:
initializer
}
node
.
fluid_code
.
add_layer
(
"create_parameter"
,
inputs
=
None
,
output
=
node
,
param_attr
=
attr
)
program
.
parameters
[
node
.
name
]
=
node
.
value
program
.
add_layer
(
kernel
=
"fluid.layers.create_parameter"
,
inputs
=
{},
outputs
=
[
node
.
name
],
dtype
=
string
(
dtype
),
shape
=
shape
,
name
=
string
(
node
.
name
),
default_initializer
=
initializer
)
def
Transpose
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
]
,
copy
=
True
)
perm
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
]
,
copy
=
True
)
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
perm
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
assert
perm
.
layer_type
==
"Const"
,
"Perm of transpose OP should be Const"
del
self
.
weights
[
perm
.
layer_name
.
replace
(
'/'
,
'_'
)]
perm
.
fluid_code
.
clear
()
perm
=
perm
.
value
.
tolist
()
attr
=
{
'perm'
:
perm
}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
input
.
name
},
outputs
=
[
node
.
name
],
perm
=
perm
)
def
Fill
(
self
,
node
):
dims
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
input_value
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
assert
input_value
.
layer_type
==
"Const"
,
"Value of fill OP should be Const"
self
.
add_omit_nodes
(
input_value
.
layer_name
,
node
.
layer_name
)
input_value
=
input_value
.
value
input_dtype
=
string
(
input_value
.
dtype
)
attr
=
{
'value'
:
input_value
,
'dtype'
:
input_dtype
}
node
.
fluid_code
.
add_layer
(
"fill_constant"
,
inputs
=
dims
,
output
=
node
,
param_attr
=
attr
)
program
.
add_layer
(
"fluid.layers.fill_constant"
,
inputs
=
{},
outputs
=
[
node
.
name
],
shape
=
dims
,
dtype
=
string
(
input_dtype
),
value
=
input_value
)
def
DepthToSpace
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
block_size
=
node
.
get_attr
(
"block_size"
)
data_format
=
node
.
get_attr
(
"data_format"
).
decode
()
if
data_format
==
"NHWC"
:
attr
=
{
"perm"
:
[
0
,
3
,
1
,
2
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
input
,
output
=
input
,
param_attr
=
attr
)
n
,
h
,
w
,
c
=
input
.
out_shapes
[
0
]
attr
=
{
'shape'
:
[
0
,
block_size
*
block_size
,
-
1
,
h
,
w
]}
node
.
fluid_code
.
add_layer
(
"reshape"
,
inputs
=
input
,
output
=
input
,
param_attr
=
attr
)
attr
=
{
'perm'
:
[
0
,
2
,
1
,
3
,
4
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
input
,
output
=
input
,
param_attr
=
attr
)
attr
=
{
'shape'
:
[
0
,
c
,
h
,
w
]}
node
.
fluid_code
.
add_layer
(
"reshape"
,
inputs
=
input
,
output
=
input
,
param_attr
=
attr
)
attr
=
{
'upscale_factor'
:
block_size
}
node
.
fluid_code
.
add_layer
(
"pixel_shuffle"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
input_name
=
input
.
name
if
data_format
==
"NHWC"
:
transpose_name
=
gen_name
(
"depth_to_space"
,
"transpose"
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
input
.
name
},
outputs
=
[
transpose_name
],
perm
=
[
0
,
3
,
1
,
2
])
input_name
=
transpose_name
shape
=
[
0
,
block_size
*
block_size
,
-
1
,
h
,
w
]
reshape_name
=
gen_name
(
"depth_to_space"
,
"reshape"
)
program
.
add_layer
(
kernel
=
"fluid.layers.reshape"
,
inputs
=
{
"x"
:
input_name
},
outputs
=
[
reshape_name
],
shape
=
shape
)
transpose_name
=
gen_name
(
"depth_to_space"
,
"transpose"
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
reshape_name
},
outputs
=
[
transpose_name
],
perm
=
[
0
,
2
,
1
,
3
,
4
])
reshape_name
=
gen_name
(
"depth_to_space"
,
"reshape"
)
program
.
add_layer
(
kernel
=
"fluid.layers.reshape"
,
inputs
=
{
"x"
:
transpose_name
},
outputs
=
[
reshape_name
],
shape
=
[
0
,
c
,
h
,
w
])
program
.
add_layer
(
kernel
=
"fluid.layers.pixed_shuffle"
,
inputs
=
{
"input"
:
reshape_name
},
outputs
=
[
node
.
name
],
upscale_factor
=
block_size
)
if
data_format
==
"NHWC"
:
attr
=
{
"perm"
:
[
0
,
2
,
3
,
1
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
node
,
output
=
node
,
param_attr
=
attr
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
node
.
name
},
outputs
=
[
node
.
name
],
perm
=
[
0
,
2
,
3
,
1
])
def
MaxPool
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
...
...
@@ -264,41 +270,44 @@ class TFOpMapperNHWC(OpMapper):
strides
=
node
.
get_attr
(
"strides"
)
data_format
=
node
.
get_attr
(
"data_format"
).
decode
()
pad_mode
=
node
.
get_attr
(
"padding"
).
decode
()
channel_first
=
data_format
==
"NCHW"
if
not
channel_first
:
attr
=
{
"perm"
:
[
0
,
3
,
1
,
2
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
input_name
=
input
.
name
if
data_format
==
"NHWC"
:
transpose_name
=
gen_name
(
"max_pool"
,
"transpose"
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
input
.
name
},
outputs
=
[
transpose_name
],
perm
=
[
0
,
3
,
1
,
2
])
strides
=
[
strides
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
k_size
=
[
k_size
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
input
=
nod
e
input
_name
=
transpose_nam
e
attr
=
{
"pool_size"
:
k_size
[
2
:
4
]
,
"pool_type"
:
string
(
"max"
)
,
"pool_stride"
:
strides
[
2
:
4
],
"pool_padding"
:
string
(
pad_mode
)
}
node
.
fluid_code
.
add_layer
(
"pool2d"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
program
.
add_layer
(
kernel
=
"fluid.layers.pool2d"
,
inputs
=
{
"input"
:
input_name
}
,
outputs
=
[
node
.
name
],
pool_size
=
k_size
[
2
:
4
],
pool_type
=
string
(
"max"
),
pool_stride
=
strides
[
2
:
4
],
pool_padding
=
string
(
pad_mode
)
)
if
not
channel_first
:
attr
=
{
"perm"
:
[
0
,
2
,
3
,
1
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
node
,
output
=
node
,
param_attr
=
attr
)
if
data_format
==
"NHWC"
:
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
node
.
name
},
outputs
=
[
node
.
name
],
perm
=
[
0
,
2
,
3
,
1
])
def
Conv2D
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
kernel
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
self
.
add_omit_nodes
(
kernel
.
layer_name
,
node
.
layer_name
)
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
kernel
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
k_size
=
kernel
.
out_shapes
[
0
]
strides
=
node
.
get_attr
(
"strides"
)
dilations
=
node
.
get_attr
(
"dilations"
)
data_format
=
node
.
get_attr
(
"data_format"
).
decode
()
pad_mode
=
node
.
get_attr
(
"padding"
).
decode
()
channel_first
=
data_format
==
"NCHW"
if
kernel
.
layer_type
==
'Const'
:
kernel_value
=
kernel
.
value
...
...
@@ -310,369 +319,330 @@ class TFOpMapperNHWC(OpMapper):
kernel
.
layer_name
)
else
:
kernel_weight_name
=
kernel
.
layer_name
.
replace
(
'/'
,
'_'
)
self
.
weight
s
[
kernel_weight_name
]
=
numpy
.
transpose
(
kernel_value
,
program
.
parameter
s
[
kernel_weight_name
]
=
numpy
.
transpose
(
kernel_value
,
(
3
,
2
,
0
,
1
))
if
not
channel_first
:
input_name
=
input
.
name
if
data_format
==
"NHWC"
:
strides
=
[
strides
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
dilations
=
[
dilations
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
attr
=
{
"perm"
:
[
0
,
3
,
1
,
2
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
input
=
node
attr
=
{
"bias_attr"
:
False
,
"param_attr"
:
string
(
kernel_weight_name
),
"num_filters"
:
k_size
[
3
],
"filter_size"
:
k_size
[
0
:
2
],
"stride"
:
strides
[
2
:
4
],
"dilation"
:
dilations
[
2
:
4
],
"padding"
:
string
(
pad_mode
)
}
transpose_name
=
gen_name
(
"conv2d"
,
"transpose"
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
input
.
name
},
outputs
=
[
transpose_name
],
perm
=
[
0
,
3
,
1
,
2
])
input_name
=
transpose_name
program
.
add_layer
(
kernel
=
"fluid.layers.conv2d"
,
inputs
=
{
"input"
:
input_name
},
outputs
=
[
node
.
name
],
bias_attr
=
False
,
param_attr
=
string
(
kernel_weight_name
),
num_filters
=
k_size
[
3
],
filter_size
=
k_size
[
0
:
2
],
stride
=
strides
[
2
:
4
],
dilation
=
dilations
[
2
:
4
],
padding
=
string
(
pad_mode
))
if
hasattr
(
node
,
'dilation'
)
and
attr
[
'dilation'
]
==
[
1
,
1
]:
if
len
(
node
.
dilation
)
==
1
:
attr
[
'dilation'
]
=
[
1
,
node
.
dilation
[
0
]]
node
.
fluid_code
.
add_layer
(
"conv2d"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
if
not
channel_first
:
attr
=
{
"perm"
:
[
0
,
2
,
3
,
1
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
node
,
output
=
node
,
param_attr
=
attr
)
if
data_format
==
"NHWC"
:
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
node
.
name
},
outputs
=
[
node
.
name
],
perm
=
[
0
,
2
,
3
,
1
])
def
BiasAdd
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
bias
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
inputs
=
{
"x"
:
input
,
"y"
:
bias
}
node
.
fluid_code
.
add_layer
(
"elementwise_add"
,
inputs
=
inputs
,
output
=
node
,
param_attr
=
None
)
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
bias
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
program
.
add_layer
(
kernel
=
"fluid.layers.elementwise_add"
,
inputs
=
{
"x"
:
input
.
name
,
"y"
:
bias
.
name
},
outputs
=
[
node
.
name
])
def
FusedBatchNorm
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
]
,
copy
=
True
)
gamma
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
]
,
copy
=
True
)
beta
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
2
]
,
copy
=
True
)
moving_mean
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
3
]
,
copy
=
True
)
moving_var
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
4
]
,
copy
=
True
)
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
gamma
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
beta
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
2
])
moving_mean
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
3
])
moving_var
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
4
])
data_format
=
node
.
get_attr
(
"data_format"
).
decode
()
channel_first
=
data_format
==
"NCHW"
assert
gamma
.
layer_type
==
"Const"
assert
beta
.
layer_type
==
"Const"
assert
moving_mean
.
layer_type
==
"Const"
assert
moving_var
.
layer_type
==
"Const"
self
.
add_omit_nodes
(
gamma
.
layer_name
,
node
.
layer_name
)
self
.
add_omit_nodes
(
beta
.
layer_name
,
node
.
layer_name
)
self
.
add_omit_nodes
(
moving_mean
.
layer_name
,
node
.
layer_name
)
self
.
add_omit_nodes
(
moving_var
.
layer_name
,
node
.
layer_name
)
if
not
channel_first
:
attr
=
{
"perm"
:
[
0
,
3
,
1
,
2
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
input
=
node
attr
=
{
"epsilon"
:
node
.
get_attr
(
"epsilon"
),
"param_attr"
:
string
(
gamma
.
layer_name
),
"bias_attr"
:
string
(
beta
.
layer_name
),
"moving_mean_name"
:
string
(
moving_mean
.
layer_name
),
"moving_variance_name"
:
string
(
moving_var
.
layer_name
),
"is_test"
:
True
}
node
.
fluid_code
.
add_layer
(
"batch_norm"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
if
not
channel_first
:
attr
=
{
"perm"
:
[
0
,
2
,
3
,
1
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
node
,
output
=
node
,
param_attr
=
attr
)
def
DepthwiseConv2dNative
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
kernel
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
assert
kernel
.
layer_type
==
"Const"
,
"Kernel of DepthwiseConv2DNative should be Const"
self
.
add_omit_nodes
(
kernel
.
layer_name
,
node
.
layer_name
)
in_shape
=
input
.
out_shapes
[
0
]
k_size
=
kernel
.
out_shapes
[
0
]
strides
=
node
.
get_attr
(
"strides"
)
dilations
=
node
.
get_attr
(
"dilations"
)
data_format
=
node
.
get_attr
(
"data_format"
).
decode
()
pad_mode
=
node
.
get_attr
(
"padding"
).
decode
()
channel_first
=
data_format
==
"NCHW"
input_name
=
input
.
name
if
data_format
==
"NHWC"
:
transpose_name
=
gen_name
(
"batch_norm"
,
"transpose"
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
input
.
name
},
outputs
=
[
transpose_name
],
perm
=
[
0
,
3
,
1
,
2
])
input_name
=
transpose_name
program
.
add_layer
(
kernel
=
"fluid.layers.batch_norm"
,
inputs
=
{
"input"
:
input_name
},
outputs
=
[
node
.
name
],
epsilon
=
node
.
get_attr
(
"epsilon"
),
param_attr
=
string
(
gamma
.
name
),
bias_attr
=
string
(
beta
.
name
),
moving_mean_name
=
string
(
moving_mean
.
name
),
moving_variance_name
=
string
(
moving_var
.
name
),
is_test
=
True
)
self
.
weights
[
kernel
.
layer_name
.
replace
(
'/'
,
'_'
)]
=
numpy
.
transpose
(
kernel
.
value
,
(
2
,
3
,
0
,
1
))
if
data_format
==
"NHWC"
:
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
node
.
name
},
outputs
=
[
node
.
name
],
perm
=
[
0
,
2
,
3
,
1
])
if
not
channel_first
:
in_shape
=
[
in_shape
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
strides
=
[
strides
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
dilations
=
[
dilations
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
attr
=
{
"perm"
:
[
0
,
3
,
1
,
2
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
input
=
node
attr
=
{
"bias_attr"
:
False
,
"param_attr"
:
string
(
kernel
.
layer_name
),
"num_filters"
:
in_shape
[
1
],
"filter_size"
:
k_size
[
0
:
2
],
"stride"
:
strides
[
2
:
4
],
"dilation"
:
dilations
[
2
:
4
],
"groups"
:
k_size
[
3
]
*
in_shape
[
1
],
"use_cudnn"
:
False
,
"padding"
:
string
(
pad_mode
)
}
node
.
fluid_code
.
add_layer
(
"conv2d"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
def
Mean
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
reduce_idx
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
assert
reduce_idx
.
layer_type
==
"Const"
,
"Only support Const parameter[reduce_idx]"
dims
=
reduce_idx
.
value
.
tolist
()
keep_dims
=
node
.
get_attr
(
"keep_dims"
)
if
not
channel_first
:
attr
=
{
"perm"
:
[
0
,
2
,
3
,
1
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
node
,
output
=
node
,
param_attr
=
attr
)
program
.
add_layer
(
kernel
=
"fluid.layers.reduce_mean"
,
inputs
=
{
"input"
:
input
.
name
},
outputs
=
[
node
.
name
],
dim
=
dims
,
keep_dim
=
keep_dims
)
def
Reshape
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
]
,
copy
=
True
)
param
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
]
,
copy
=
True
)
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
param
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
if
param
.
layer_type
==
"Const"
:
self
.
add_omit_nodes
(
param
.
layer_name
,
node
.
layer_name
)
shape
=
param
.
value
.
tolist
()
program
.
add_layer
(
kernel
=
"fluid.layers.reshape"
,
inputs
=
{
"x"
:
input
.
name
},
outputs
=
[
node
.
name
],
shape
=
shape
)
else
:
shape
=
param
inputs
=
{
"x"
:
input
,
"shape"
:
shape
}
node
.
fluid_code
.
add_layer
(
"reshape"
,
inputs
=
inputs
,
output
=
node
,
param_attr
=
None
)
program
.
add_layer
(
kernel
=
"fluid.layers.reshape"
,
inputs
=
{
"x"
:
input
.
name
,
"shape"
:
param
.
name
},
outputs
=
[
node
.
name
])
if
param
.
layer_type
!=
"Const"
:
out_shape
=
numpy
.
array
(
node
.
out_shapes
[
0
])
if
(
out_shape
>
0
).
any
():
out_shape
[
out_shape
<
0
]
=
0
attr
=
{
'shape'
:
out_shape
.
tolist
()}
node
.
fluid_code
.
add_layer
(
"reshape"
,
inputs
=
node
,
output
=
node
,
param_attr
=
attr
)
def
AvgPool
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
k_size
=
node
.
get_attr
(
"ksize"
)
strides
=
node
.
get_attr
(
"strides"
)
data_format
=
node
.
get_attr
(
"data_format"
).
decode
()
pad_mode
=
node
.
get_attr
(
"padding"
).
decode
()
channel_first
=
data_format
==
"NCHW"
if
not
channel_first
:
strides
=
[
strides
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
k_size
=
[
k_size
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
attr
=
{
"perm"
:
[
0
,
3
,
1
,
2
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
input
=
node
attr
=
{
"pool_size"
:
k_size
[
2
:
4
],
"pool_type"
:
string
(
"avg"
),
"pool_stride"
:
strides
[
2
:
4
],
"pool_padding"
:
string
(
pad_mode
)
}
node
.
fluid_code
.
add_layer
(
"pool2d"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
if
not
channel_first
:
attr
=
{
"perm"
:
[
0
,
2
,
3
,
1
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
node
,
output
=
node
,
param_attr
=
attr
)
def SplitV(self, node):
    """Map TF SplitV onto a fluid split layer.

    Both the section sizes and the split dimension must be Const nodes;
    their values are folded into layer attributes and the Const nodes are
    marked as omitted from the converted graph.
    """
    input = self.graph.get_node(node.layer.input[0], copy=True)
    num_sections = self.graph.get_node(node.layer.input[1], copy=True)
    dim = self.graph.get_node(node.layer.input[2], copy=True)
    assert num_sections.layer_type == "Const"
    assert dim.layer_type == "Const"
    self.add_omit_nodes(num_sections.layer_name, node.layer_name)
    self.add_omit_nodes(dim.layer_name, node.layer_name)
    dim = dim.value
    attr = {
        "num_or_sections": num_sections.value.tolist(),
        # BUG FIX: `dim` already holds the Const's extracted value after
        # the assignment above; the original read `dim.value` a second
        # time, which raises AttributeError on the extracted value.
        "dim": dim
    }
    node.fluid_code.add_layer(
        "split", inputs=input, output=node, param_attr=attr)
def ConcatV2(self, node):
    """Map TF ConcatV2 onto a fluid concat layer.

    The trailing input is the (Const) axis; a negative axis is normalized
    against the rank of the first concatenated input.
    """
    tensors = [
        self.graph.get_node(name, copy=True)
        for name in node.layer.input[:-1]
    ]
    axis = self.graph.get_node(node.layer.input[-1], copy=True)
    assert axis.layer_type == "Const"
    self.add_omit_nodes(axis.layer_name, node.layer_name)
    axis = axis.value
    if axis < 0:
        axis += len(tensors[0].out_shapes[0])
    node.fluid_code.add_layer(
        "concat",
        inputs=tensors,
        output=node,
        param_attr={"axis": axis})
def Tile(self, node):
    """Map TF Tile onto a fluid expand layer.

    A Const repeats tensor is folded into a plain Python list (and the
    Const node omitted); otherwise the node itself is wired in as the
    expand_times input.
    """
    in_node = self.graph.get_node(node.layer.input[0], copy=True)
    expand_times = self.graph.get_node(node.layer.input[1], copy=True)
    if expand_times.layer_type == "Const":
        self.add_omit_nodes(expand_times.layer_name, node.layer_name)
        expand_times = expand_times.value.tolist()
    node.fluid_code.add_layer(
        "expand",
        inputs={"x": in_node, "expand_times": expand_times},
        output=node,
        param_attr=None)
def Pack(self, node):
    """Map TF Pack (stack) onto a fluid stack layer.

    If any input has a known (non -1) last dimension, every input is first
    reshaped so that all stack operands agree on that last dimension.
    """
    inputs = [
        self.graph.get_node(name, copy=True) for name in node.layer.input
    ]
    reshape_shape = list()
    # Find the first input with a statically known last dimension and
    # build a reshape spec of [0, ..., 0, last_dim]; a 0 entry keeps the
    # corresponding original extent unchanged.
    for input_node in inputs:
        k_size = input_node.out_shapes[0]
        if len(k_size) and k_size[-1] != -1:
            reshape_shape = [0] * len(k_size)
            reshape_shape[-1] = k_size[-1]
            break
    if len(reshape_shape):
        # Reshape every input into a temporary tmp_{i} variable.
        for i, input_node in enumerate(inputs):
            node.fluid_code.add_layer(
                "reshape",
                inputs=input_node,
                output='tmp_{}'.format(i),
                param_attr={"shape": reshape_shape})
    axis = node.get_attr("axis")
    attr = {"axis": axis}
    if len(reshape_shape):
        # Stack the reshaped temporaries instead of the raw inputs.
        inputs = ['tmp_{}'.format(i) for i in range(len(inputs))]
    node.fluid_code.add_layer(
        "stack", inputs=inputs, output=node, param_attr=attr)
program
.
add_layer
(
kernel
=
"fluid.layers.reshape"
,
inputs
=
{
"x"
:
node
.
name
},
outputs
=
[
node
.
name
],
shape
=
out_shape
.
tolist
())
def
Pad
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
]
,
copy
=
True
)
paddings
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
]
,
copy
=
True
)
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
paddings
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
assert
paddings
.
layer_type
==
"Const"
,
"Padding should be Const"
self
.
add_omit_nodes
(
paddings
.
layer_name
,
node
.
layer_name
)
paddings
=
paddings
.
value
.
flatten
().
tolist
()
data_format
=
input
.
tf_data_format
if
len
(
input
.
out_shapes
[
0
])
==
4
:
new_padding
=
None
if
input
.
tf_data_format
==
"NHWC"
:
if
paddings
[
0
]
+
paddings
[
1
]
+
paddings
[
6
]
+
paddings
[
7
]
==
0
:
new_padding
=
paddings
[
2
:
6
]
else
:
if
paddings
[
0
]
+
paddings
[
1
]
+
paddings
[
2
]
+
paddings
[
3
]
==
0
:
new_padding
=
paddings
[
4
:]
if
new_padding
is
not
None
:
if
input
.
tf_data_format
==
"NHWC"
:
attr
=
{
"perm"
:
[
0
,
3
,
1
,
2
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
input
=
node
attr
=
{
"paddings"
:
new_padding
}
node
.
fluid_code
.
add_layer
(
"pad2d"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
if
input
.
tf_data_format
==
"NHWC"
:
attr
=
{
"perm"
:
[
0
,
2
,
3
,
1
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
node
,
output
=
node
,
param_attr
=
attr
)
transpose_name
=
gen_name
(
"pad"
,
"transpose"
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
input
.
name
},
outputs
=
[
transpose_name
],
perm
=
[
0
,
3
,
1
,
2
])
program
.
add_layer
(
kernel
=
"fluid.layers.pad2d"
,
inputs
=
{
"input"
:
transpose_name
},
outputs
=
[
node
.
name
],
paddings
=
new_padding
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
node
.
name
},
outputs
=
[
node
.
name
],
perm
=
[
0
,
2
,
3
,
1
])
return
attr
=
{
"paddings"
:
paddings
}
node
.
fluid_code
.
add_layer
(
"pad"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
def
Range
(
self
,
node
):
start
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
limit
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
delta
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
2
],
copy
=
True
)
if
start
.
layer_type
==
"Const"
:
self
.
add_omit_nodes
(
start
.
layer_name
,
node
.
layer_name
)
start
=
start
.
value
if
limit
.
layer_type
==
"Const"
:
self
.
add_omit_nodes
(
limit
.
layer_name
,
node
.
layer_name
)
limit
=
limit
.
value
if
delta
.
layer_type
==
"Const"
:
self
.
add_omit_nodes
(
delta
.
layer_name
,
node
.
layer_name
)
delta
=
delta
.
value
program
.
add_layer
(
kernel
=
"fluid.layers.pad"
,
inputs
=
{
"input"
:
input
.
name
},
outputs
=
[
node
.
name
],
paddings
=
paddings
)
dtype
=
node
.
dtype
inputs
=
{
"start"
:
start
,
"end"
:
limit
,
"step"
:
delta
,
}
attr
=
{
"dtype"
:
string
(
node
.
dtype
)}
node
.
fluid_code
.
add_layer
(
"range"
,
inputs
=
inputs
,
output
=
node
,
param_attr
=
attr
)
def Squeeze(self, node):
    """Map TF Squeeze onto fluid.layers.squeeze using the op's squeeze_dims."""
    in_node = self.graph.get_node(node.layer.input[0])
    program.add_layer(
        kernel="fluid.layers.squeeze",
        inputs={"input": in_node.name},
        outputs=[node.name],
        axes=node.get_attr('squeeze_dims'))
def
Mean
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
reduce_idx
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
assert
reduce_idx
.
layer_type
==
"Const"
,
"Only support Const parameter[reduce_idx]"
dims
=
reduce_idx
.
value
.
tolist
()
keep_dims
=
node
.
get_attr
(
"keep_dims"
)
def Softmax(self, node):
    """Map TF Softmax onto fluid.layers.softmax along the op's axis attribute."""
    in_node = self.graph.get_node(node.layer.input[0])
    softmax_axis = node.get_attr("axis")
    program.add_layer(
        kernel="fluid.layers.softmax",
        inputs={"input": in_node.name},
        outputs=[node.name],
        axis=softmax_axis)
def Shape(self, node):
    """Map TF Shape onto fluid.layers.shape over the single input tensor."""
    in_node = self.graph.get_node(node.layer.input[0])
    program.add_layer(
        kernel="fluid.layers.shape",
        inputs={"input": in_node.name},
        outputs=[node.name])
attr
=
{
"dim"
:
dims
,
"keep_dim"
:
keep_dims
}
node
.
fluid_code
.
add_layer
(
"reduce_mean"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
def ArgMax(self, node):
    """Map TF ArgMax onto fluid.layers.argmax.

    The axis input must be a Const node; its value becomes the layer's
    axis attribute.
    """
    in_node = self.graph.get_node(node.layer.input[0])
    axis_node = self.graph.get_node(node.layer.input[1])
    assert axis_node.layer_type == "Const", "ArgMax only support Const parameter"
    program.add_layer(
        kernel="fluid.layers.argmax",
        inputs={"x": in_node.name},
        outputs=[node.name],
        axis=axis_node.value)
def
MatMul
(
self
,
node
):
x
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
]
,
copy
=
True
)
y
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
]
,
copy
=
True
)
x
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
y
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
transpose_a
=
node
.
get_attr
(
'transpose_a'
)
transpose_b
=
node
.
get_attr
(
'transpose_b'
)
inputs
=
{
"x"
:
x
,
"y"
:
y
}
# fix paddle shape infer problem
# should be removed after paddle 1.6
if
x
.
out_shapes
[
0
][
-
1
]
<
0
and
y
.
out_shapes
[
0
][
0
]
>
0
:
shape
=
x
.
out_shapes
[
0
]
shape
[
-
1
]
=
y
.
out_shapes
[
0
][
0
]
attr
=
{
"shape"
:
shape
}
node
.
fluid_code
.
add_layer
(
"reshape"
,
inputs
=
x
,
output
=
x
,
param_attr
=
attr
)
if
transpose_a
is
None
:
transpose_a
=
node
.
get_attr
(
'adj_x'
)
if
transpose_b
is
None
:
transpose_b
=
node
.
get_attr
(
'adj_y'
)
attr
=
{
"transpose_x"
:
transpose_a
,
"transpose_y"
:
transpose_b
}
node
.
fluid_code
.
add_layer
(
"matmul"
,
inputs
=
inputs
,
output
=
node
,
param_attr
=
attr
)
program
.
add_layer
(
kernel
=
"fluid.layers.matmul"
,
inputs
=
{
"x"
:
x
.
name
,
"y"
:
y
.
name
},
outputs
=
[
node
.
name
],
transpose_x
=
transpose_a
,
transpose_y
=
transpose_b
)
def
DepthwiseConv2dNative
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
kernel
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
assert
kernel
.
layer_type
==
"Const"
,
"Kernel of DepthwiseConv2DNative should be Const"
def BatchMatMul(self, node):
    # Delegates entirely to the MatMul mapping (which also reads the
    # adj_x/adj_y attributes used by batched matmul ops).
    return self.MatMul(node)
in_shape
=
input
.
out_shapes
[
0
]
k_size
=
kernel
.
out_shapes
[
0
]
strides
=
node
.
get_attr
(
"strides"
)
dilations
=
node
.
get_attr
(
"dilations"
)
data_format
=
node
.
get_attr
(
"data_format"
).
decode
()
pad_mode
=
node
.
get_attr
(
"padding"
).
decode
()
def BatchMatMulV2(self, node):
    # Delegates entirely to the MatMul mapping, same as BatchMatMul.
    return self.MatMul(node)
program
.
parameters
[
kernel
.
layer_name
.
replace
(
'/'
,
'_'
)]
=
numpy
.
transpose
(
kernel
.
value
,
(
2
,
3
,
0
,
1
)
)
def
ArgMax
(
self
,
node
):
input_name
=
input
.
name
if
data_format
==
"NHWC"
:
in_shape
=
[
in_shape
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
strides
=
[
strides
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
dilations
=
[
dilations
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
transpose_name
=
gen_name
(
'depthwise_conv2d'
,
'transpose'
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
input
.
name
},
outputs
=
[
transpose_name
],
perm
=
[
0
,
3
,
1
,
2
])
input_name
=
transpose_name
program
.
add_layer
(
kernel
=
"fluid.layers.conv2d"
,
inputs
=
{
"input"
:
input_name
},
outputs
=
[
node
.
name
],
num_filters
=
in_shape
[
1
],
filter_size
=
k_size
[
0
:
2
],
stride
=
strides
[
2
:
4
],
dilation
=
dilations
[
2
:
4
],
groups
=
k_size
[
3
]
*
in_shape
[
1
],
padding
=
string
(
pad_mode
),
param_attr
=
string
(
kernel
.
layer_name
),
bias_attr
=
False
)
if
data_format
==
"NHWC"
:
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
node
.
name
},
outputs
=
[
node
.
name
],
perm
=
[
0
,
2
,
3
,
1
])
def
AvgPool
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
axis
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
assert
axis
.
layer_type
==
"Const"
,
"ArgMax only support Const parameter"
self
.
add_omit_nodes
(
axis
.
layer_name
,
node
.
layer_name
)
k_size
=
node
.
get_attr
(
"ksize"
)
strides
=
node
.
get_attr
(
"strides"
)
data_format
=
node
.
get_attr
(
"data_format"
).
decode
()
pad_mode
=
node
.
get_attr
(
"padding"
).
decode
()
input_name
=
input
.
name
if
data_format
==
"NHWC"
:
transpose_name
=
gen_name
(
"avg_pool"
,
"transpose"
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
input
.
name
},
outputs
=
[
transpose_name
],
perm
=
[
0
,
3
,
1
,
2
])
strides
=
[
strides
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
k_size
=
[
k_size
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
input_name
=
transpose_name
program
.
add_layer
(
kernel
=
"fluid.layers.pool2d"
,
inputs
=
{
"input"
:
input_name
},
outputs
=
[
node
.
name
],
pool_size
=
k_size
[
2
:
4
],
pool_type
=
string
(
"avg"
),
pool_stride
=
strides
[
2
:
4
],
pool_padding
=
string
(
pad_mode
))
if
data_format
==
"NHWC"
:
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
node
.
name
},
outputs
=
[
node
.
name
],
perm
=
[
0
,
2
,
3
,
1
])
def Pack(self, node):
    """Map TF Pack onto fluid.layers.stack over all inputs along `axis`."""
    stacked = [self.graph.get_node(name) for name in node.layer.input]
    stack_axis = node.get_attr("axis")
    program.add_layer(
        kernel="fluid.layers.stack",
        inputs={"x": [t.name for t in stacked]},
        outputs=[node.name],
        axis=stack_axis)
def
ConcatV2
(
self
,
node
):
inputs
=
[
self
.
graph
.
get_node
(
name
)
for
name
in
node
.
layer
.
input
[:
-
1
]]
axis
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
-
1
])
assert
axis
.
layer_type
==
"Const"
,
"axis for ConcatV2 must be type Const"
axis
=
axis
.
value
attr
=
{
"axis"
:
axis
}
node
.
fluid_code
.
add_layer
(
"argmax"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
if
axis
<
0
:
axis
+=
len
(
inputs
[
0
].
out_shapes
[
0
])
program
.
add_layer
(
kernel
=
"fluid.layers.concat"
,
inputs
=
{
"input"
:
[
i
.
name
for
i
in
inputs
]},
outputs
=
[
node
.
name
],
axis
=
axis
)
def
StridedSlice
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
]
,
copy
=
True
)
begin
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
]
,
copy
=
True
)
end
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
2
]
,
copy
=
True
)
strides
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
3
]
,
copy
=
True
)
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
begin
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
end
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
2
])
strides
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
3
])
assert
begin
.
layer_type
==
"Const"
assert
end
.
layer_type
==
"Const"
assert
strides
.
layer_type
==
"Const"
self
.
add_omit_nodes
(
begin
.
layer_name
,
node
.
layer_name
)
self
.
add_omit_nodes
(
end
.
layer_name
,
node
.
layer_name
)
self
.
add_omit_nodes
(
strides
.
layer_name
,
node
.
layer_name
)
strides
=
strides
.
value
.
tolist
()
assert
len
(
set
(
strides
))
==
1
and
strides
[
0
]
==
1
,
"Only support strides be 1 in StridedSlice OP"
...
...
@@ -721,373 +691,278 @@ class TFOpMapperNHWC(OpMapper):
else
:
new_end
.
append
(
end
[
i
])
attr
=
{
"axes"
:
[
i
for
i
in
range
(
len
(
new_begin
))]
,
"starts"
:
new_begin
,
"ends"
:
new_end
}
node
.
fluid_code
.
add_layer
(
"slice"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
program
.
add_layer
(
kernel
=
"fluid.layers.slice"
,
inputs
=
{
"input"
:
input
.
name
}
,
outputs
=
[
node
.
name
],
axes
=
[
i
for
i
in
range
(
len
(
new_begin
))],
starts
=
new_begin
,
ends
=
new_end
)
if
len
(
new_axes
)
>
0
:
attr
=
{
"axes"
:
new_axes
}
node
.
fluid_code
.
add_layer
(
"unsqueeze"
,
inputs
=
node
,
output
=
node
,
param_attr
=
attr
)
program
.
add_layer
(
kernel
=
"fluid.layers.unsqueeze"
,
inputs
=
{
"x"
:
node
.
name
},
outputs
=
[
node
.
name
],
axes
=
new_axes
)
if len(shrink_axes) > 0:
    # A rank this small cannot lose another dimension; leave as-is.
    if len(input.out_shapes[0]) + len(new_axes) <= 1:
        pass
    else:
        # BUG FIX: this branch implements shrink_axis_mask, i.e. it must
        # REMOVE the dimensions listed in shrink_axes. The converted code
        # emitted fluid.layers.unsqueeze with new_axes here (a copy/paste
        # of the new_axis_mask branch above), which would add dimensions
        # instead of removing them.
        program.add_layer(
            kernel="fluid.layers.squeeze",
            inputs={"x": node.name},
            outputs=[node.name],
            axes=shrink_axes)
def Split(self, node):
    """Map TF Split onto fluid.layers.split.

    Input 0 is the (Const) split dimension, input 1 the tensor to split;
    one output per part is named "{layer_name}_p{i}".
    """
    dim_node = self.graph.get_node(node.layer.input[0])
    in_node = self.graph.get_node(node.layer.input[1])
    assert dim_node.layer_type == "Const"
    num_split = node.get_attr('num_split')
    part_names = [
        "{}_p{}".format(node.layer_name, i) for i in range(num_split)
    ]
    program.add_layer(
        kernel="fluid.layers.split",
        inputs={"input": in_node.name},
        outputs=part_names,
        num_or_sections=num_split,
        dim=dim_node.value)
def
Slice
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
begin
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
size
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
2
],
copy
=
True
)
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
begin
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
size
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
2
])
inputs
=
{
"x"
:
input
.
name
}
attrs
=
{}
if
begin
.
layer_type
==
"Const"
:
self
.
add_omit_nodes
(
begin
.
layer_name
,
node
.
layer_name
)
begin
=
begin
.
value
.
tolist
()
attrs
[
'offsets'
]
=
begin
else
:
begin
=
begin
shape
=
begin
.
out_shapes
[
0
]
attr
=
{
"shape"
:
shape
}
node
.
fluid_code
.
add_layer
(
"reshape"
,
inputs
=
begin
,
output
=
begin
,
param_attr
=
attr
)
reshape_name
=
gen_name
(
"slice"
,
"reshape"
)
program
.
add_layer
(
kernel
=
"fluid.layers.reshape"
,
inputs
=
{
"x"
:
begin
.
name
},
outputs
=
[
reshape_name
],
shape
=
shape
)
inputs
[
'offsets'
]
=
reshape_name
if
size
.
layer_type
==
"Const"
:
self
.
add_omit_nodes
(
size
.
layer_name
,
node
.
layer_name
)
size
=
size
.
value
.
tolist
()
attrs
[
'shape'
]
=
size
else
:
size
=
size
shape
=
size
.
out_shapes
[
0
]
attr
=
{
"shape"
:
shape
}
node
.
fluid_code
.
add_layer
(
"reshape"
,
inputs
=
size
,
output
=
size
,
param_attr
=
attr
)
inputs
=
{
"x"
:
input
,
"offsets"
:
begin
,
"shape"
:
size
}
node
.
fluid_code
.
add_layer
(
"crop_tensor"
,
inputs
=
inputs
,
output
=
node
,
param_attr
=
None
)
def
Conv2DBackpropInput
(
self
,
node
):
out_shape
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
kernel
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
2
],
copy
=
True
)
reshape_name
=
gen_name
(
"slice"
,
"reshape"
)
program
.
add_layer
(
kernel
=
"fluid.layers.reshape"
,
inputs
=
{
"x"
:
size
.
name
},
outputs
=
[
reshape_name
],
shape
=
shape
)
inputs
[
'shape'
]
=
reshape_name
program
.
add_layer
(
kernel
=
"fluid.layers.crop_tensor"
,
inputs
=
inputs
,
outputs
=
[
node
.
name
],
**
attrs
)
assert
kernel
.
layer_type
==
"Const"
,
"Kernel of Conv2DBackpropInput should be Const"
self
.
add_omit_nodes
(
kernel
.
layer_name
,
node
.
layer_name
)
self
.
add_omit_nodes
(
out_shape
.
layer_name
,
node
.
layer_name
)
def
ResizeNearestNeighbor
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
resize_shape
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
data_format
=
"NHWC"
inputs
=
{
"input"
:
input
.
name
}
attrs
=
{
"align_corners"
:
node
.
get_attr
(
"align_corners"
)}
if
out_shape
.
layer_type
==
"Const"
:
out_shape
=
out_shape
.
value
.
tolist
()
if
resize_shape
.
layer_type
==
"Const"
:
resize_shape
=
resize_shape
.
value
.
tolist
()
attrs
[
"out_shape"
]
=
resize_shape
else
:
out_shape
=
self
.
decoder
.
infer_shape_tensor
(
out_shape
,
node
.
out_shapes
[
0
])
shape
=
resize_shape
.
out_shapes
[
0
]
reshape_name
=
gen_name
(
"resize_nearest"
,
"reshape"
)
program
.
add_layer
(
kernel
=
"fluid.layers.reshape"
,
inputs
=
{
"x"
:
resize_shape
.
name
},
outputs
=
[
reshape_name
],
shape
=
shape
)
inputs
[
"out_shape"
]
=
reshape_name
in_shape
=
input
.
out_shapes
[
0
]
if
in_shape
.
count
(
-
1
)
>
2
:
in_shape
=
self
.
decoder
.
infer_tensor
(
input
).
shape
k_size
=
kernel
.
out_shapes
[
0
]
if
k_size
.
count
(
-
1
)
>
2
:
k_size
=
self
.
decoder
.
infer_tensor
(
kernel
).
shape
if
data_format
==
"NHWC"
:
transpose_name
=
gen_name
(
"resize_nearest"
,
"reshape"
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
input
.
name
},
outputs
=
[
transpose_name
],
perm
=
[
0
,
3
,
1
,
2
])
inputs
[
"input"
]
=
transpose_name
program
.
add_layer
(
kernel
=
"fluid.layers.resize_nearest"
,
inputs
=
inputs
,
outputs
=
[
node
.
name
],
**
attrs
)
pad_mode
=
node
.
get_attr
(
"padding"
).
decode
()
strides
=
node
.
get_attr
(
"strides"
)
dilations
=
node
.
get_attr
(
"dilations"
)
data_format
=
node
.
get_attr
(
"data_format"
).
decode
()
channel_first
=
data_format
==
"NCHW"
if
data_format
==
"NHWC"
:
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
node
.
name
},
outputs
=
[
node
.
name
],
perm
=
[
0
,
2
,
3
,
1
])
self
.
weights
[
kernel
.
layer_name
.
replace
(
'/'
,
'_'
)]
=
numpy
.
transpose
(
kernel
.
value
,
(
3
,
2
,
0
,
1
)
)
if
not
channel_first
:
in_shape
=
[
in_shape
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
strides
=
[
strides
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
dilations
=
[
dilations
[
i
]
for
i
in
[
0
,
3
,
1
,
2
]]
attr
=
{
"perm"
:
[
0
,
3
,
1
,
2
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
input
=
nod
e
def
ResizeBilinear
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
]
)
resize_shape
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
data_format
=
"NHWC"
inputs
=
{
"input"
:
input
.
name
}
attrs
=
{
"align_corners"
:
node
.
get_attr
(
"align_corners"
)}
if
resize_shape
.
layer_type
==
"Const"
:
resize_shape
=
resize_shape
.
value
.
tolist
(
)
attrs
[
"out_shape"
]
=
resize_shap
e
else
:
self
.
graph
.
data_format_propagation
(
node
)
attr
=
{
"bias_attr"
:
False
,
"param_attr"
:
string
(
kernel
.
layer_name
),
"num_filters"
:
k_size
[
2
],
"filter_size"
:
k_size
[
0
:
2
],
"stride"
:
strides
[
2
:
4
],
"dilation"
:
dilations
[
2
:
4
],
"padding"
:
string
(
pad_mode
),
"output_size"
:
out_shape
[
1
:
3
]
}
node
.
fluid_code
.
add_layer
(
"conv2d_transpose"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
shape
=
resize_shape
.
out_shapes
[
0
]
reshape_name
=
gen_name
(
"resize_bilinear"
,
"reshape"
)
program
.
add_layer
(
kernel
=
"fluid.layers.reshape"
,
inputs
=
{
"x"
:
resize_shape
.
name
},
outputs
=
[
reshape_name
],
shape
=
shape
)
inputs
[
"out_shape"
]
=
reshape_name
if
not
channel_first
:
attr
=
{
"perm"
:
[
0
,
2
,
3
,
1
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
node
,
output
=
node
,
param_attr
=
attr
)
if
data_format
==
"NHWC"
:
transpose_name
=
gen_name
(
"resize_bilinear"
,
"reshape"
)
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
input
.
name
},
outputs
=
[
transpose_name
],
perm
=
[
0
,
3
,
1
,
2
])
inputs
[
"input"
]
=
transpose_name
program
.
add_layer
(
kernel
=
"fluid.layers.resize_bilinear"
,
inputs
=
inputs
,
outputs
=
[
node
.
name
],
**
attrs
)
def
Max
(
self
,
node
)
:
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
reduce_idx
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
assert
reduce_idx
.
layer_type
==
"Const"
,
"Only support Const parameter[reduce_idx]"
keep_dims
=
node
.
get_attr
(
"keep_dims"
)
dim
=
reduce_idx
.
value
.
tolist
(
)
if
data_format
==
"NHWC"
:
program
.
add_layer
(
kernel
=
"fluid.layers.transpose"
,
inputs
=
{
"x"
:
node
.
name
},
outputs
=
[
node
.
name
],
perm
=
[
0
,
2
,
3
,
1
]
)
attr
=
{
"dim"
:
dim
,
"keep_dim"
:
keep_dims
}
node
.
fluid_code
.
add_layer
(
"reduce_max"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
def Cast(self, node):
    """Map TF Cast onto fluid.layers.cast targeting the node's dtype."""
    in_node = self.graph.get_node(node.layer.input[0])
    target_dtype = node.dtype
    program.add_layer(
        kernel="fluid.layers.cast",
        inputs={"x": in_node.name},
        outputs=[node.name],
        dtype=string(target_dtype))
def
Sum
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
]
,
copy
=
True
)
reduce_idx
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
]
,
copy
=
True
)
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
reduce_idx
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
assert
reduce_idx
.
layer_type
==
"Const"
,
"Only support Const parameter[reduce_idx]"
keep_dims
=
node
.
get_attr
(
"keep_dims"
)
dim
=
reduce_idx
.
value
.
tolist
()
attr
=
{
"dim"
:
dim
,
"keep_dim"
:
keep_dims
}
node
.
fluid_code
.
add_layer
(
"reduce_sum"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
def
Cast
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
dtype
=
node
.
dtype_map
[
node
.
get_attr
(
'DstT'
)]
attr
=
{
"dtype"
:
string
(
dtype
)}
node
.
fluid_code
.
add_layer
(
"cast"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
def
Split
(
self
,
node
):
dim
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
assert
dim
.
layer_type
==
"Const"
self
.
add_omit_nodes
(
dim
.
layer_name
,
node
.
layer_name
)
num_split
=
node
.
get_attr
(
'num_split'
)
dim
=
dim
.
value
attr
=
{
"num_or_sections"
:
num_split
,
"dim"
:
dim
}
node
.
fluid_code
.
add_layer
(
"split"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
def
Squeeze
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
squeeze_dims
=
node
.
get_attr
(
'squeeze_dims'
)
attr
=
{
"axes"
:
squeeze_dims
}
node
.
fluid_code
.
add_layer
(
"squeeze"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
def
Softmax
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
axis
=
node
.
get_attr
(
"axis"
)
attr
=
{
"axis"
:
axis
}
node
.
fluid_code
.
add_layer
(
"softmax"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
def
ResizeNearestNeighbor
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
resize_shape
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
if
resize_shape
.
layer_type
==
"Const"
:
self
.
add_omit_nodes
(
resize_shape
.
layer_name
,
node
.
layer_name
)
resize_shape
=
resize_shape
.
value
.
tolist
()
else
:
resize_shape
=
resize_shape
shape
=
resize_shape
.
out_shapes
[
0
]
attr
=
{
"shape"
:
shape
}
node
.
fluid_code
.
add_layer
(
"reshape"
,
inputs
=
resize_shape
,
output
=
resize_shape
,
param_attr
=
attr
)
align_corners
=
node
.
get_attr
(
"align_corners"
)
attr
=
{
"perm"
:
[
0
,
3
,
1
,
2
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
inputs
=
{
"input"
:
node
,
"out_shape"
:
resize_shape
}
attr
=
{
"align_corners"
:
align_corners
}
node
.
fluid_code
.
add_layer
(
"resize_nearest"
,
inputs
=
inputs
,
output
=
node
,
param_attr
=
attr
)
attr
=
{
"perm"
:
[
0
,
2
,
3
,
1
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
node
,
output
=
node
,
param_attr
=
attr
)
program
.
add_layer
(
kernel
=
"fluid.layers.reduce_sum"
,
inputs
=
{
"input"
:
input
.
name
},
outputs
=
[
node
.
name
],
dim
=
dim
,
keep_dim
=
keep_dims
)
def
ResizeBilinear
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
resize_shape
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
if
resize_shape
.
layer_type
==
"Const"
:
self
.
add_omit_nodes
(
resize_shape
.
layer_name
,
node
.
layer_name
)
resize_shape
=
resize_shape
.
value
.
tolist
()
else
:
shape
=
resize_shape
.
out_shapes
[
0
]
attr
=
{
"shape"
:
shape
}
node
.
fluid_code
.
add_layer
(
"reshape"
,
inputs
=
resize_shape
,
output
=
resize_shape
,
param_attr
=
attr
)
align_corners
=
node
.
get_attr
(
"align_corners"
)
attr
=
{
"perm"
:
[
0
,
3
,
1
,
2
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
)
inputs
=
{
"input"
:
node
,
"out_shape"
:
resize_shape
}
attr
=
{
#"out_shape": resize_shape,
"align_corners"
:
align_corners
,
"align_mode"
:
1
}
node
.
fluid_code
.
add_layer
(
"resize_bilinear"
,
inputs
=
inputs
,
output
=
node
,
param_attr
=
attr
)
attr
=
{
"perm"
:
[
0
,
2
,
3
,
1
]}
node
.
fluid_code
.
add_layer
(
"transpose"
,
inputs
=
node
,
output
=
node
,
param_attr
=
attr
)
def GreaterEqual(self, node):
    """Map TF GreaterEqual onto an elementwise greater_equal layer."""
    lhs = self.graph.get_node(node.layer.input[0], copy=True)
    rhs = self.graph.get_node(node.layer.input[1], copy=True)
    node.fluid_code.add_layer(
        "greater_equal",
        inputs={"x": lhs, "y": rhs},
        output=node,
        param_attr=None)
def Max(self, node):
    """Map TF Max onto fluid.layers.reduce_max.

    The reduction indices must come from a Const node; keep_dims mirrors
    the TF attribute.
    """
    in_node = self.graph.get_node(node.layer.input[0])
    reduce_idx = self.graph.get_node(node.layer.input[1])
    assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
    keep_dims = node.get_attr("keep_dims")
    reduce_dims = reduce_idx.value.tolist()
    program.add_layer(
        kernel="fluid.layers.reduce_max",
        inputs={"input": in_node.name},
        outputs=[node.name],
        dim=reduce_dims,
        keep_dim=keep_dims)
def
RandomUniform
(
self
,
node
):
shape
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
if
shape
.
layer_type
==
"Const"
:
self
.
add_omit_nodes
(
shape
.
layer_name
,
node
.
layer_name
)
shape
=
shape
.
value
.
tolist
()
program
.
add_layer
(
kernel
=
"fluid.layers.uniform_random"
,
inputs
=
{},
outputs
=
[
node
.
name
],
shape
=
shape
,
min
=
0.0
,
max
=
0.9999
)
else
:
shape
=
shape
attr
=
{
"min"
:
0.0
,
"max"
:
0.9999
}
node
.
fluid_code
.
add_layer
(
"uniform_random"
,
inputs
=
shape
,
output
=
node
,
param_attr
=
attr
)
def SquaredDifference(self, node):
    """Map TF SquaredDifference as (x - y) followed by an in-place square.

    Emitted as elementwise_sub into the node, then elementwise_mul of the
    node with itself.
    """
    lhs = self.graph.get_node(node.layer.input[0], copy=True)
    rhs = self.graph.get_node(node.layer.input[1], copy=True)
    node.fluid_code.add_layer(
        "elementwise_sub",
        inputs={"x": lhs, "y": rhs},
        output=node,
        param_attr=None)
    node.fluid_code.add_layer(
        "elementwise_mul",
        inputs={"x": node, "y": node},
        output=node,
        param_attr=None)
def
ExpandDims
(
self
,
node
):
x
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
y
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
if
y
.
layer_type
==
'Const'
:
self
.
add_omit_nodes
(
y
.
layer_name
,
node
.
layer_name
)
dim
=
y
.
value
.
tolist
()
if
not
isinstance
(
dim
,
list
):
dim
=
[
dim
]
attr
=
{
'axes'
:
dim
}
else
:
attr
=
{
'axes'
:
y
}
node
.
fluid_code
.
add_layer
(
"unsqueeze"
,
inputs
=
x
,
output
=
node
,
param_attr
=
attr
)
def BatchToSpaceND(self, node):
    """Map TF BatchToSpaceND.

    Only supported when a prior pass marked the node as skippable
    (node.skip), in which case it becomes an identity assignment;
    otherwise conversion fails.
    """
    x = self.graph.get_node(node.layer.input[0], copy=True)
    # Second input fetched for parity with the op's arity; unused here.
    y = self.graph.get_node(node.layer.input[1], copy=True)
    if not (hasattr(node, 'skip') and node.skip):
        raise Exception("BatchToSpaceND is not supported")
    node.fluid_code.add_layer("=", inputs=x, output=node, param_attr=None)
def SpaceToBatchND(self, node):
    """Map TF SpaceToBatchND.

    Only supported when a prior pass marked the node as skippable
    (node.skip), in which case it becomes an identity assignment;
    otherwise conversion fails.
    """
    x = self.graph.get_node(node.layer.input[0], copy=True)
    # Second input fetched for parity with the op's arity; unused here.
    y = self.graph.get_node(node.layer.input[1], copy=True)
    if not (hasattr(node, 'skip') and node.skip):
        raise Exception("SpaceToBatchND is not supported")
    node.fluid_code.add_layer("=", inputs=x, output=node, param_attr=None)
program
.
add_layer
(
kernel
=
"fluid.layers.uniform_random"
,
inputs
=
{
'shape'
:
shape
.
name
},
outputs
=
[
node
.
name
],
min
=
0.0
,
max
=
0.9999
)
def
OneHot
(
self
,
node
):
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
depth
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
on_value
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
2
],
copy
=
True
)
off_value
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
3
],
copy
=
True
)
assert
depth
.
layer_type
==
'Const'
,
'Parameter depth should be Const in OneHot'
assert
on_value
.
layer_type
==
'Const'
,
'Parameter on_value should be Const in OneHot'
assert
off_value
.
layer_type
==
'Const'
,
'Parameter off_value should be Const in OneHot'
self
.
add_omit_nodes
(
depth
.
layer_name
,
node
.
layer_name
)
self
.
add_omit_nodes
(
on_value
.
layer_name
,
node
.
layer_name
)
self
.
add_omit_nodes
(
off_value
.
layer_name
,
node
.
layer_name
)
depth
=
depth
.
value
on_value
=
on_value
.
value
off_value
=
off_value
.
value
assert
math
.
fabs
(
on_value
-
1.0
)
<
1e-06
,
"on_value should be 1 in OneHot"
assert
math
.
fabs
(
off_value
-
0.0
)
<
1e-06
,
"off_value should be 0 in OneHot"
attr
=
{
'depth'
:
depth
}
node
.
fluid_code
.
add_layer
(
"one_hot"
,
inputs
=
input
,
output
=
node
,
param_attr
=
attr
,
use_fluid
=
True
)
def
Pow
(
self
,
node
):
x
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
],
copy
=
True
)
factor
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
],
copy
=
True
)
self
.
add_omit_nodes
(
factor
.
layer_name
,
node
.
layer_name
)
if
factor
.
layer_type
==
'Const'
:
factor
=
factor
.
value
.
tolist
()
def
Conv2DBackpropInput
(
self
,
node
):
out_shape
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
0
])
kernel
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
1
])
input
=
self
.
graph
.
get_node
(
node
.
layer
.
input
[
2
])
assert
kernel
.
layer_type
==
"Const"
,
"Kernel of Conv2DBackpropInput should be Const"
if
out_shape
.
layer_type
==
"Const"
:
out_shape
=
out_shape
.
value
.
tolist
()
else
:
factor
=
self
.
decoder
.
infer_tensor
(
factor
)
attr
=
{
'factor'
:
factor
}
node
.
fluid_code
.
add_layer
(
"pow"
,
inputs
=
x
,
output
=
node
,
param_attr
=
attr
)
def All(self, node):
    """Map a TF All op (logical-AND reduction) onto fluid's `reduce_all`.

    The reduction axes (second input) must be a Const node; its value is
    folded into the layer attributes.

    NOTE(review): reassembled from diff-scattered fragments; unrelated
    Conv2DBackpropInput shape probes were interleaved into this span by
    the page's diff rendering and belong to that method, not here.
    """
    input = self.graph.get_node(node.layer.input[0], copy=True)
    reduce_idx = self.graph.get_node(node.layer.input[1], copy=True)
    # The axes tensor is consumed here, not emitted as its own layer.
    self.add_omit_nodes(reduce_idx.layer_name, node.layer_name)
    assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
    dims = reduce_idx.value.tolist()
    keep_dims = node.get_attr("keep_dims")
    attr = {"dim": dims, "keep_dim": keep_dims}
    node.fluid_code.add_layer(
        "reduce_all", inputs=input, output=node, param_attr=attr)
def GatherV2(self, node):
    """Map a TF GatherV2 op onto fluid's `gather` layer.

    Only axis == 0 is supported; the axis input must be Const. A
    multi-dimensional index tensor is flattened to 1-D first, since the
    emitted gather layer indexes along the leading dimension.
    """
    table = self.graph.get_node(node.layer.input[0], copy=True)
    idx = self.graph.get_node(node.layer.input[1], copy=True)
    axis = self.graph.get_node(node.layer.input[2], copy=True)
    # The axis node is folded away rather than emitted.
    self.add_omit_nodes(axis.layer_name, node.layer_name)
    assert axis.layer_type == 'Const', "Only support Const parameter[axis]"
    axis = axis.value.tolist()
    assert axis == 0, "Only support axis=0 in GatherV2 OP"
    # Flatten a non-1-D index tensor before gathering.
    if len(idx.out_shapes[0]) != 1:
        node.fluid_code.add_layer(
            "reshape", inputs=idx, output=idx, param_attr={"shape": [-1]})
    node.fluid_code.add_layer(
        "gather",
        inputs={'input': table, 'index': idx},
        output=node,
        param_attr={'overwrite': False})
def OneShotIterator(self, node):
    """Treat a TF OneShotIterator as a plain input: delegate to Placeholder."""
    return self.Placeholder(node)
def IteratorV2(self, node):
    """Expose each output of a TF dataset iterator as a fluid `data` layer.

    Emits a note that pre-declares a Python list named after the node, then
    one `data` layer per output, stored into that list by index so later
    ops can reference `<node>[i]`.
    """
    # TF enum dtype code -> paddle dtype string.
    dtype_map = {1: "float32", 3: "int32", 4: "uint8", 9: "int64", 10: "bool"}
    shapes = node.out_shapes
    dtypes = node.layer.attr['output_types'].list.type
    # Pre-size the holder list so each data layer can be assigned by index.
    node.fluid_code.add_note("{} = [0] * {}".format(node.layer_name,
                                                    len(shapes)))
    for i, shape in enumerate(shapes):
        layer_attr = {
            'dtype': string(dtype_map[dtypes[i]]),
            'shape': shape,
            'name': string("{}_{}".format(node.layer_name, i)),
            'append_batch_size': False
        }
        node.fluid_code.add_layer(
            "data",
            inputs=None,
            output="{}[{}]".format(node.layer_name, i),
            param_attr=layer_attr)
def Conv2DBackpropInput(self, node):
    """Map TF Conv2DBackpropInput (transposed convolution) onto
    fluid's `conv2d_transpose`.

    Inputs: [0] output shape, [1] kernel (must be Const), [2] feature map.
    NHWC graphs are bridged by transposing the input to NCHW, running the
    paddle op, and transposing the result back.

    NOTE(review): reassembled — this span held only the method's tail; the
    `def` line, node lookups and shape probes were scattered earlier in the
    rendered diff.
    """
    out_shape = self.graph.get_node(node.layer.input[0])
    kernel = self.graph.get_node(node.layer.input[1])
    input = self.graph.get_node(node.layer.input[2])
    assert kernel.layer_type == "Const", "Kernel of Conv2DBackpropInput should be Const"
    if out_shape.layer_type == "Const":
        out_shape = out_shape.value.tolist()
    else:
        out_shape = self.decoder.infer_shape_tensor(out_shape,
                                                    node.out_shapes[0])
    in_shape = input.out_shapes[0]
    if in_shape.count(-1) > 2:
        # Too many unknown dims in the static shape; ask the decoder.
        in_shape = self.decoder.infer_tensor(input).shape
    k_size = kernel.out_shapes[0]
    if k_size.count(-1) > 2:
        k_size = self.decoder.infer_tensor(kernel).shape
    pad_mode = node.get_attr("padding").decode()
    strides = node.get_attr("strides")
    dilations = node.get_attr("dilations")
    data_format = node.get_attr("data_format").decode()
    # Reorder kernel dims (3, 2, 0, 1) into the layout conv2d_transpose
    # expects; '/' is not legal in a paddle parameter name.
    program.parameters[kernel.layer_name.replace('/', '_')] = numpy.transpose(
        kernel.value, (3, 2, 0, 1))
    input_name = input.name
    if data_format == "NHWC":
        # Reorder every per-axis attribute from NHWC to NCHW.
        in_shape = [in_shape[i] for i in [0, 3, 1, 2]]
        strides = [strides[i] for i in [0, 3, 1, 2]]
        dilations = [dilations[i] for i in [0, 3, 1, 2]]
        transpose_name = gen_name("conv2dbackpropinput", "transpose")
        program.add_layer(
            kernel="fluid.layers.transpose",
            inputs={"x": input.name},
            outputs=[transpose_name],
            perm=[0, 3, 1, 2])
        input_name = transpose_name
    program.add_layer(
        kernel="fluid.layers.conv2d_transpose",
        inputs={"input": input_name},
        outputs=[node.name],
        bias_attr=False,
        param_attr=string(kernel.layer_name),
        num_filters=k_size[2],
        filter_size=k_size[0:2],
        stride=strides[2:4],
        dilation=dilations[2:4],
        padding=string(pad_mode),
        output_size=out_shape[1:3])
    if data_format == "NHWC":
        # Restore the graph's NHWC layout on the result.
        program.add_layer(
            kernel="fluid.layers.transpose",
            inputs={"x": node.name},
            outputs=[node.name],
            perm=[0, 2, 3, 1])
编辑
预览
Markdown
is supported
0%
请重试
或
添加新附件
.
添加附件
取消
You are about to add
0
people
to the discussion. Proceed with caution.
先完成此消息的编辑!
取消
想要评论请
注册
或
登录