Unverified commit 992d1ab9 authored by J Jason, committed by GitHub

Merge pull request #496 from SunAhong1993/develop

modify command usage
@@ -63,7 +63,6 @@ x2paddle --framework=onnx --model=onnx_model.onnx --save_dir=pd_model --paddle_t
|--model | When framework is tensorflow/onnx, this parameter specifies the TensorFlow pb model file or the ONNX model path |
|--caffe_proto | **[optional]** Path of the caffe_pb2.py file compiled from caffe.proto; used when custom Layers are present, defaults to None |
|--define_input_shape | **[optional]** For TensorFlow. When specified, the user is required to enter the shape of each Placeholder; see [FAQ Q2](./docs/user_guides/FAQ.md) |
-|--params_merge | **[optional]** When specified, all model parameters in the inference_model are merged into a single file __params__ after conversion |
|--paddle_type | **[optional]** Specifies whether to convert to dygraph code (dygraph) or static-graph code (static); defaults to dygraph |
......
# X2Paddle supported OP list
-> X2Paddle currently supports 70+ TensorFlow OPs and 30+ Caffe Layers, covering most operations commonly used in CV classification models. The list below gives all OPs currently supported by X2Paddle.
+> X2Paddle currently supports 80+ TensorFlow OPs, 30+ Caffe Layers, 60+ ONNX OPs, 110+ PyTorch Aten operators and 10+ PyTorch Prim operators, covering most operations commonly used in CV classification models. The list below gives all OPs currently supported by X2Paddle.
**Note:** Some OPs are not yet supported. If you encounter an unsupported OP during conversion, you can add it yourself or report it to us. Feel free to notify us via an [issue](https://github.com/PaddlePaddle/X2Paddle/issues/new) (with the model name and its code implementation or a way to obtain the model), and we will follow up promptly :)
@@ -28,7 +28,7 @@
| 73 | Greater | 74 | FloorMod | 75 | LogicalAnd | 76 | Prod |
| 77 | Equal | 78 | Conv3D | 79 | Ceil | 80 | AddN |
| 81 | DivNoNan | 82 | Where | 83 | MirrorPad | 84 | Size |
-| 85 | TopKv2 | | | | | | |
+| 85 | TopKv2 | 86 | SplitV | | | | |
## Caffe
@@ -42,7 +42,7 @@
| 21 | Axpy | 22 | ROIPooling | 23 | Permute | 24 | DetectionOutput |
| 25 | Normalize | 26 | Select | 27 | ShuffleChannel | 28 | ConvolutionDepthwise |
| 29 | ReLU | 30 | AbsVal | 31 | Sigmoid | 32 | TanH |
-| 33 | ReLU6 | 34 | Upsample |
+| 33 | ReLU6 | 34 | Upsample | | | | |
## ONNX
@@ -61,7 +61,11 @@
| 41 | MatMul | 42 | Sum | 43 | Transpose | 44 | BatchNormalization |
| 45 | Squeeze | 46 | Equal | 47 | Identity | 48 | GlobalAveragePool |
| 49 | MaxPool | 50 | Conv | 51 | Gemm | 52 | NonZero |
-| 53 | Abs | 54 | Floor | 52 | ArgMax |
+| 53 | Abs | 54 | Floor | 55 | ArgMax | 56 | Sign |
+| 57 | Reciprocal | 58 | Size | 59 | OneHot | 60 | ReduceProd |
+| 61 | LogSoftmax | 62 | LSTM | | | | |
## PyTorch
Aten:
@@ -95,6 +99,7 @@ Aten:
| 101 | aten::upsample\_bilinear2d | 102 | aten::values | 103 | aten::view | 104 | aten::warn |
| 105 | aten::where | 106 | aten::zeros | 107 | aten::zeros\_like | 108 | aten::bmm |
| 109 | aten::sub\_ | 110 | aten::erf | 111 | aten::lstm | 112 | aten::gather |
+| 113 | aten::upsample\_nearest2d | | | | | | |
Prim:
| No. | OP | No. | OP | No. | OP | No. | OP |
......
@@ -70,12 +70,6 @@ def arg_parser():
        action="store_true",
        default=False,
        help="define input shape for tf model")
-    parser.add_argument(
-        "--params_merge",
-        "-pm",
-        action="store_true",
-        default=False,
-        help="define whether merge the params")
    parser.add_argument(
        "--paddle_type",
        "-pt",
@@ -83,22 +77,14 @@ def arg_parser():
        default="dygraph",
        help="define the paddle model type after converting(dygraph/static)"
    )
-    parser.add_argument(
-        "--without_data_format_optimization",
-        "-wo",
-        type=_text_type,
-        default="True",
-        help="tf model conversion without data format optimization")
    return parser
def tf2paddle(model_path,
              save_dir,
-              without_data_format_optimization=False,
              define_input_shape=False,
-              paddle_type="dygraph",
-              params_merge=False):
+              paddle_type="dygraph"):
    # check tensorflow installation and version
    try:
        import os
@@ -139,8 +125,7 @@ def tf2paddle(model_path,
-def caffe2paddle(proto, weight, save_dir, caffe_proto,
-                 paddle_type, params_merge=False):
+def caffe2paddle(proto, weight, save_dir, caffe_proto, paddle_type):
    from x2paddle.decoder.caffe_decoder import CaffeDecoder
    if paddle_type == "dygraph":
        from x2paddle.op_mapper.dygraph.caffe2paddle.caffe_op_mapper import CaffeOpMapper
@@ -165,7 +150,7 @@ def caffe2paddle(proto, weight, save_dir, caffe_proto,
    mapper.paddle_graph.gen_model(save_dir)

-def onnx2paddle(model_path, save_dir, paddle_type, params_merge=False):
+def onnx2paddle(model_path, save_dir, paddle_type):
    # check onnx installation and version
    try:
        import onnx
@@ -259,33 +244,19 @@ def main():
    if args.framework == "tensorflow":
        assert args.model is not None, "--model should be defined while translating tensorflow model"
-        assert args.without_data_format_optimization in [
-            "True", "False"
-        ], "--the param without_data_format_optimization should be defined True or False"
        define_input_shape = False
-        params_merge = False
-        without_data_format_optimization = True if args.without_data_format_optimization == "True" else False
        if args.define_input_shape:
            define_input_shape = True
-        if args.params_merge:
-            params_merge = True
-        tf2paddle(args.model, args.save_dir, without_data_format_optimization,
-                  define_input_shape, args.paddle_type, params_merge)
+        tf2paddle(args.model, args.save_dir,
+                  define_input_shape, args.paddle_type)
    elif args.framework == "caffe":
        assert args.prototxt is not None and args.weight is not None, "--prototxt and --weight should be defined while translating caffe model"
-        params_merge = False
-        if args.params_merge:
-            params_merge = True
        caffe2paddle(args.prototxt, args.weight, args.save_dir,
-                     args.caffe_proto, args.paddle_type, params_merge)
+                     args.caffe_proto, args.paddle_type)
    elif args.framework == "onnx":
        assert args.model is not None, "--model should be defined while translating onnx model"
-        params_merge = False
-        if args.params_merge:
-            params_merge = True
-        onnx2paddle(args.model, args.save_dir, args.paddle_type, params_merge)
+        onnx2paddle(args.model, args.save_dir, args.paddle_type)
    elif args.framework == "paddle2onnx":
        print("Paddle to ONNX tool has been migrated to the new github: https://github.com/PaddlePaddle/paddle2onnx")
......
@@ -15,7 +15,6 @@
from __future__ import print_function
from __future__ import division
-import paddle.fluid as fluid
import paddle
from paddle.fluid.proto import framework_pb2
import collections
@@ -258,22 +257,17 @@ class PaddleGraph(object):
        with paddle.static.scope_guard(scope):
            with paddle.static.program_guard(main_program, startup_program):
                inputs, outputs = x2paddle_model.x2paddle_net()
-                exe = fluid.Executor(fluid.CPUPlace())
+                exe = paddle.static.Executor(paddle.CPUPlace())
                exe.run(startup_program)
                param_dir = osp.join(code_dir, 'weights')
                for k, v in self.parameters.items():
                    if scope.find_var(k):
                        self.dump_parameter(k, v, param_dir)
-                def if_exist(var):
-                    b = osp.exists(
-                        osp.join(osp.join(param_dir, var.name)))
-                    return b
-                fluid.io.load_vars(
-                    exe, param_dir, main_program, predicate=if_exist)
-                fluid.io.save_inference_model(
-                    dirname=infer_dir,
-                    feeded_var_names=[i.name for i in inputs],
-                    target_vars=outputs,
+                paddle.static.load(main_program, param_dir, exe)
+                paddle.static.save_inference_model(
+                    path_prefix=osp.join(infer_dir, "model"),
+                    feed_vars=[i for i in inputs],
+                    fetch_vars=outputs,
                    executor=exe)

    def gen_dygraph_model(self, save_dir, jit_type=None):
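The fluid.io save/load pair above is replaced by the Paddle 2.x static-graph APIs. A minimal, self-contained sketch of the new flow, assuming Paddle 2.x — the toy network and output path are illustrative only, standing in for x2paddle_net():

```python
import paddle

paddle.enable_static()

# Toy static-graph network standing in for x2paddle_net(); illustrative only.
main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="x", shape=[-1, 4], dtype="float32")
    y = paddle.static.nn.fc(x, size=2)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)

# Paddle 2.x replacement for fluid.io.save_inference_model: path_prefix
# produces model.pdmodel / model.pdiparams under the given prefix.
paddle.static.save_inference_model(
    path_prefix="pd_model/inference_model/model",
    feed_vars=[x],
    fetch_vars=[y],
    executor=exe)
```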
@@ -562,8 +556,7 @@ class PaddleGraph(object):
            remove_default_attrs(layer.kernel, layer.attrs)
            if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel
                ) or layer.kernel == "paddle.to_tensor" or \
-                    layer.kernel.startswith("custom_layer") or \
-                    layer.kernel.startswith("paddle.fluid.dygraph"):
+                    layer.kernel.startswith("custom_layer"):
                line = "{}".format(
                    layer.outputs[0]
                ) if layer.kernel == "paddle.to_tensor" and not layer.attrs[
@@ -660,7 +653,6 @@ class PaddleGraph(object):
        paddle.save(self.parameters, save_path)

    def dygraph2static(self, save_dir, input_shapes=[], input_types=[]):
-        from paddle.fluid.dygraph.jit import declarative
        sepc_list = list()
        for i, name in enumerate(self.inputs):
            sepc_list.append(
......
@@ -119,19 +119,19 @@ class OpSet9():
        # reduce function
        'ReduceMean': ['paddle.mean',
                       dict(axes='axis', keepdims='keepdim'),
-                       dict(keepdims=1)],
+                       dict(axes=None, keepdims=1)],
        'ReduceSum': ['paddle.sum',
                      dict(axes='axis', keepdims='keepdim'),
-                      dict(keepdims=1)],
+                      dict(axes=None, keepdims=1)],
        'ReduceMin': ['paddle.min',
                      dict(axes='axis', keepdims='keepdim'),
-                      dict(keepdim=1)],
+                      dict(axes=None, keepdim=1)],
        'ReduceMax': ['paddle.max',
                      dict(axes='axis', keepdims='keepdim'),
-                      dict(keepdim=1)],
+                      dict(axes=None, keepdim=1)],
        'ReduceProd': ['paddle.prod',
                       dict(axes='axis', keepdims='keepdim'),
-                       dict(keepdim=1)],
+                       dict(axes=None, keepdim=1)],
        # active function
        'Relu': ['paddle.nn.ReLU'],
        'LeakyRelu': ['paddle.nn.LeakyReLU',
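Why the added `axes=None` default: in ONNX, a Reduce* op with no `axes` attribute reduces over all dimensions, and `axis=None` gives the paddle reductions the same reduce-all semantics. A quick sketch of the check:

```python
import paddle

x = paddle.arange(6, dtype="float32").reshape([2, 3])

# ONNX ReduceMean with no `axes` attribute reduces over every dimension;
# axis=None makes paddle.mean do the same, and keepdim=True keeps the rank.
y = paddle.mean(x, axis=None, keepdim=True)
print(y.shape)  # [1, 1]
```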
@@ -150,6 +150,7 @@ class OpSet9():
                     dict(threshold='threshold'),
                     dict(threshold=float(sys.maxsize))],
        'Exp': ['paddle.exp'],
+        'Log': ['paddle.log'],
        'LogSoftmax': ['paddle.nn.functional.log_softmax',
                       dict(axis='axis'),
                       dict(axis=1)],
...@@ -320,8 +321,15 @@ class OpSet9(): ...@@ -320,8 +321,15 @@ class OpSet9():
return return
elif node.layer_type == 'Upsample': elif node.layer_type == 'Upsample':
val_scales = self.graph.get_input_node(node, idx=1, copy=True) val_scales = self.graph.get_input_node(node, idx=1, copy=True)
inputs['scale_factor'] = val_scales self.paddle_graph.add_layer(
"paddle.slice",
inputs={"input": val_scales.name},
outputs=[val_scales.name],
axes=[0],
starts=[2],
ends=[4])
inputs['scale_factor'] = val_scales.name
mode = node.get_attr('mode', 'nearest') mode = node.get_attr('mode', 'nearest')
attrs.update({"align_corners": False, attrs.update({"align_corners": False,
"mode": string(mode), "mode": string(mode),
@@ -1013,13 +1021,12 @@ class OpSet9():
        if len(value) == 1:
            value = value[0]
            layer_attrs = {
-                'shape': val_shape.name,
                'dtype': string(dtype),
                'fill_value': value
            }
            self.paddle_graph.add_layer(
                "paddle.full",
-                inputs={},
+                inputs={'shape': val_shape.name},
                outputs=[node.name],
                **layer_attrs)
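The fix above moves `shape` from a literal attribute (where only the tensor's name string would have been baked into the generated code) to a real input, since paddle.full can take the target shape as a runtime tensor. A quick sketch:

```python
import paddle

# paddle.full accepts a 1-D integer tensor as the shape argument,
# which is exactly what an ONNX ConstantOfShape-style input provides.
shape = paddle.to_tensor([2, 3], dtype="int64")
out = paddle.full(shape=shape, fill_value=0.0, dtype="float32")
print(out.shape)  # [2, 3]
```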
@@ -1072,8 +1079,11 @@ class OpSet9():
        }
        outputs_list = list()
        if isinstance(split, list) or isinstance(split, tuple):
-            for i in range(len(split)):
-                outputs_list.append("{}_p{}".format(node.layer_name, i))
+            if len(split) == 1:
+                outputs_list.append(node.name)
+            else:
+                for i in range(len(split)):
+                    outputs_list.append("{}_p{}".format(node.layer_name, i))
        else:
            outputs_list.append(node.name)
        self.paddle_graph.add_layer(
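The new single-section branch matters because paddle.split always returns a list; with one section there is exactly one tensor, which the converter can now bind to the node's own name instead of a `_p0`-suffixed name. A sketch of the underlying behavior:

```python
import paddle

x = paddle.rand([4, 6])

# A one-element section list still yields a list, but with a single tensor.
outs = paddle.split(x, num_or_sections=[6], axis=1)
print(len(outs), outs[0].shape)  # 1 [4, 6]
```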
@@ -1415,6 +1425,18 @@ class OpSet9():
        else:
            if mode == 'channel':
                slope_data = _const_weight_or_none(val_slope)
+                if slope_data is None:
+                    self.paddle_graph.add_layer(
+                        "paddle.reshape",
+                        inputs={"x": val_slope.name},
+                        outputs=[val_slope.name],
+                        shape=[shape_slope[0]])
+                    self.paddle_graph.add_layer(
+                        "paddle.nn.functional.prelu",
+                        inputs={"x": val_x.name,
+                                "weight": val_slope.name},
+                        outputs=[node.name])
+                    return
                _rename_or_remove_weight(self.weights, val_slope.name)
                if len(shape_slope) > 1:
                    self.weights[op_name+'._weight'] = np.reshape(slope_data, shape_slope[0])
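The added branch handles a slope that is not a constant initializer (`slope_data is None`): rather than registering a stored weight, it reshapes the runtime slope tensor and routes it through the functional API. A minimal sketch of what the emitted code boils down to, with made-up values:

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([1, 3, 4, 4])
# Per-channel slopes arriving as a runtime tensor, not a stored parameter.
weight = paddle.to_tensor([0.1, 0.2, 0.3])

y = F.prelu(x, weight)
print(y.shape)  # [1, 3, 4, 4]
```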
@@ -1464,7 +1486,7 @@ class OpSet9():
            "paddle.greater_than",
            inputs={'x': val_x.name,
                    'y': val_y.name},
-            outputs=node,
+            outputs=[node.name],
            param_attr=None)

    @print_mapping_info
@@ -1521,7 +1543,7 @@ class OpSet9():
        self.paddle_graph.add_layer(
            "paddle.transpose",
            inputs={"x": val_x.name},
-            outputs=[node.layer_naem],
+            outputs=[node.layer_name],
            perm=[1, 0])
        if val_x_dim > 1:
            self.paddle_graph.add_layer(
@@ -1977,3 +1999,18 @@ class OpSet9():
            outputs=[y_out],
            perm=[0,2,1,3]
        )
+
+    @print_mapping_info
+    def TopK(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_k = self.graph.get_input_node(node, idx=1, copy=True)
+        layer_attrs = dict()
+        layer_attrs["axis"] = node.get_attr('axis', -1)
+        layer_attrs["largest"] = True if node.get_attr('largest', 1) == 1 else False
+        layer_attrs["sorted"] = True if node.get_attr('sorted', 1) == 1 else False
+        self.paddle_graph.add_layer(
+            "paddle.topk",
+            inputs={"x": val_x.name,
+                    "k": val_k.name},
+            outputs=["{}_p{}".format(node.layer_name, 0), "{}_p{}".format(node.layer_name, 1)],
+            **layer_attrs)
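The new TopK mapping binds ONNX TopK's two outputs to paddle.topk's (values, indices) pair; `axis`, `largest`, and `sorted` carry over one-to-one. A quick check of the semantics:

```python
import paddle

x = paddle.to_tensor([[1.0, 3.0, 2.0],
                      [6.0, 4.0, 5.0]])

# paddle.topk returns (values, indices), matching ONNX TopK's two outputs.
values, indices = paddle.topk(x, k=2, axis=-1, largest=True, sorted=True)
print(values.numpy())   # [[3. 2.] [6. 5.]]
print(indices.numpy())  # [[1 2] [0 2]]
```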
@@ -96,19 +96,19 @@ class OpSet9():
        # reduce function
        'ReduceMean': ['paddle.mean',
                       dict(axes='axis', keepdims='keepdim'),
-                       dict(keepdims=1)],
+                       dict(axes=None, keepdims=1)],
        'ReduceSum': ['paddle.sum',
                      dict(axes='axis', keepdims='keepdim'),
-                      dict(keepdims=1)],
+                      dict(axes=None, keepdims=1)],
        'ReduceMin': ['paddle.min',
                      dict(axes='axis', keepdims='keepdim'),
-                      dict(keepdim=1)],
+                      dict(axes=None, keepdim=1)],
        'ReduceMax': ['paddle.max',
                      dict(axes='axis', keepdims='keepdim'),
-                      dict(keepdim=1)],
+                      dict(axes=None, keepdim=1)],
        'ReduceProd': ['paddle.prod',
                       dict(axes='axis', keepdims='keepdim'),
-                       dict(keepdim=1)],
+                       dict(axes=None, keepdim=1)],
        # active function
        'Relu': ['paddle.nn.functional.relu'],
        'LeakyRelu': ['paddle.nn.functional.leaky_relu',
@@ -127,6 +127,7 @@ class OpSet9():
                  dict(threshold='threshold'),
                  dict(threshold=float(sys.maxsize))],
        'Exp': ['paddle.exp'],
+        'Log': ['paddle.log'],
        'Softmax': ['paddle.nn.functional.softmax',
                    dict(axis='axis'),
                    dict(axis=1)],
@@ -283,7 +284,14 @@ class OpSet9():
            return
        elif node.layer_type == 'Upsample':
            val_scales = self.graph.get_input_node(node, idx=1, copy=True)
-            inputs['scale'] = val_scales
+            self.paddle_graph.add_layer(
+                "paddle.slice",
+                inputs={"input": val_scales.name},
+                outputs=[val_scales.name],
+                axes=[0],
+                starts=[2],
+                ends=[4])
+            inputs['scale_factor'] = val_scales.name
            mode = node.get_attr('mode', 'nearest')
            attrs.update({"align_corners": False,
@@ -977,13 +985,12 @@ class OpSet9():
        if len(value) == 1:
            value = value[0]
            layer_attrs = {
-                'shape': val_shape.name,
                'dtype': string(dtype),
                'fill_value': value
            }
            self.paddle_graph.add_layer(
                "paddle.full",
-                inputs={},
+                inputs={'shape': val_shape.name},
                outputs=[node.name],
                **layer_attrs)
@@ -1035,8 +1042,11 @@ class OpSet9():
        }
        outputs_list = list()
        if isinstance(split, list) or isinstance(split, tuple):
-            for i in range(len(split)):
-                outputs_list.append("{}_p{}".format(node.layer_name, i))
+            if len(split) == 1:
+                outputs_list.append(node.name)
+            else:
+                for i in range(len(split)):
+                    outputs_list.append("{}_p{}".format(node.layer_name, i))
        else:
            outputs_list.append(node.name)
        self.paddle_graph.add_layer(
@@ -1391,7 +1401,7 @@ class OpSet9():
            "paddle.greater_than",
            inputs={'x': val_x.name,
                    'y': val_y.name},
-            outputs=node,
+            outputs=[node.name],
            param_attr=None)

    @print_mapping_info
@@ -1448,7 +1458,7 @@ class OpSet9():
        self.paddle_graph.add_layer(
            "paddle.transpose",
            inputs={"x": val_x.name},
-            outputs=[node.layer_naem],
+            outputs=[node.layer_name],
            perm=[1, 0])
        if val_x_dim > 1:
            self.paddle_graph.add_layer(
@@ -1758,3 +1768,18 @@ class OpSet9():
            "paddle.reciprocal",
            inputs={"x": val_x.name},
            outputs=[node.name])
+
+    @print_mapping_info
+    def TopK(self, node):
+        val_x = self.graph.get_input_node(node, idx=0, copy=True)
+        val_k = self.graph.get_input_node(node, idx=1, copy=True)
+        layer_attrs = dict()
+        layer_attrs["axis"] = node.get_attr('axis', -1)
+        layer_attrs["largest"] = True if node.get_attr('largest', 1) == 1 else False
+        layer_attrs["sorted"] = True if node.get_attr('sorted', 1) == 1 else False
+        self.paddle_graph.add_layer(
+            "paddle.topk",
+            inputs={"x": val_x.name,
+                    "k": val_k.name},
+            outputs=["{}_p{}".format(node.layer_name, 0), "{}_p{}".format(node.layer_name, 1)],
+            **layer_attrs)
\ No newline at end of file
@@ -32,7 +32,7 @@ class DygraphPReLUFuser(FuseBase):
            conv2_mul_1_y = paddle.full(dtype='float32', shape=[1], fill_value=0.5)
            conv2_Relu = self.relu1(conv2_Conv2D)
            conv2_Abs = paddle.abs(x=conv2_Conv2D)
-            conv2_sub = fluid.layers.elementwise_sub(x=conv2_Conv2D, y=conv2_Abs)
+            conv2_sub = paddle.subtract(x=conv2_Conv2D, y=conv2_Abs)
            conv2_mul = paddle.multiply(x=conv2_alphas, y=conv2_sub, axis=1)
            conv2_mul_1 = paddle.multiply(x=conv2_mul, y=conv2_mul_1_y, axis=1)
            conv2_add = paddle.add(x=conv2_Relu, y=conv2_mul_1)
@@ -60,7 +60,7 @@ class DygraphPReLUFuser(FuseBase):
            inputs={"x": "prelu-input-0"},
            outputs=[gen_name(3)])
        self.pattern.add_layer(
-            "fluid.layers.elementwise_sub",
+            "paddle.subtract",
            inputs={"x": "prelu-input-0",
                    "y": gen_name(3)},
            outputs=[gen_name(4)])
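The pattern these fusers match relies on the identity prelu(x) = relu(x) + alpha * (x - |x|) * 0.5, since x - |x| is 0 for non-negative x and 2x otherwise; swapping in paddle.subtract leaves that identity intact. A quick numerical check with made-up values:

```python
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-2.0, -0.5, 0.0, 1.5])
alpha = paddle.to_tensor(0.25)

# x - |x| is 0 where x >= 0 and 2x where x < 0, so this reproduces PReLU.
fused = F.relu(x) + alpha * paddle.subtract(x, paddle.abs(x)) * 0.5
expected = paddle.where(x >= 0, x, alpha * x)
print(paddle.allclose(fused, expected))  # True
```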
......
@@ -31,7 +31,7 @@ class StaticPReLUFuser(FuseBase):
            conv4_mul_1_y = paddle.full(dtype='float32', shape=[1], fill_value=0.5)
            conv4_Relu = paddle.nn.functional.relu(x=conv4_BiasAdd)
            conv4_Abs = paddle.abs(x=conv4_BiasAdd)
-            conv4_sub = fluid.layers.elementwise_sub(x=conv4_BiasAdd, y=conv4_Abs)
+            conv4_sub = paddle.subtract(x=conv4_BiasAdd, y=conv4_Abs)
            conv4_mul = paddle.multiply(x=conv4_alphas, y=conv4_sub)
            conv4_mul_1 = paddle.multiply(x=conv4_mul, y=conv4_mul_1_y)
            conv4_add = paddle.add(x=conv4_Relu, y=conv4_mul_1)
@@ -59,7 +59,7 @@ class StaticPReLUFuser(FuseBase):
            inputs={"x": "prelu-input-0"},
            outputs=[gen_name(3)])
        self.pattern.add_layer(
-            "fluid.layers.elementwise_sub",
+            "paddle.subtract",
            inputs={"x": "prelu-input-0",
                    "y": gen_name(3)},
            outputs=[gen_name(4)])
......