Commit 075fef0a authored by SunAhong1993

update caffe2paddle

Parent 4f337753
from x2paddle.decoder.caffe_decoder import CaffeDecoder
Decoder = CaffeDecoder
from x2paddle.op_mapper.dygraph.caffe2paddle.caffe_op_mapper import CaffeOpMapper
DygraphOpMapper = CaffeOpMapper
from x2paddle.op_mapper.static.caffe2paddle.caffe_op_mapper import CaffeOpMapper
StaticOpMapper = CaffeOpMapper
from x2paddle.optimizer.caffe_optimizer import CaffeOptimizer
StaticOptimizer = CaffeOptimizer
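
# A minimal usage sketch (illustrative, not part of this module): the aliases
# above let callers pick the dygraph or static Caffe converter without
# importing the mapper packages directly. "alexnet.prototxt" and
# "alexnet.caffemodel" are hypothetical paths.
#
#     from x2paddle.caffe_convert import Decoder, DygraphOpMapper
#     model = Decoder("alexnet.prototxt", "alexnet.caffemodel", None)
#     mapper = DygraphOpMapper(model)
#     mapper.pd_graph.build()
#     mapper.pd_graph.gen_model("output_dir")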
@@ -13,6 +13,7 @@
# limitations under the License.
from six import text_type as _text_type
from x2paddle import program
import argparse
import sys
@@ -66,8 +67,8 @@ def arg_parser():
    parser.add_argument(
        "--without_data_format_optimization",
        "-wo",
        action="store_true",
        default=False,
        help="tf model conversion without data format optimization")
    parser.add_argument(
        "--define_input_shape",
@@ -88,11 +89,25 @@
        default=False,
        help="define whether merge the params")
    parser.add_argument(
        "--jit_type",
        "-jt",
type=_text_type,
default="script",
help="define the jit type of pytorch Module.")
parser.add_argument(
"--input_files",
"-if",
        action='append',
        default=None,
        help="define the inputs' file path")
parser.add_argument(
"--paddle_type",
"-pt",
type=_text_type,
default="dygraph",
help="define the paddle model type after converting(dygraph/static)"
)
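    # Hypothetical example invocations using the new options (all paths and
    # filenames below are placeholders):
    #   x2paddle --framework=pytorch --model=model.pt --save_dir=out \
    #            --jit_type=trace --input_files input0.npy
    #   x2paddle --framework=caffe --prototxt=deploy.prototxt \
    #            --weight=model.caffemodel --save_dir=out --paddle_type=static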
    return parser
@@ -117,30 +132,27 @@ def tf2paddle(model_path,
            "[ERROR] Tensorflow is not installed, use \"pip install tensorflow\"."
        )
        return
    from x2paddle.decoder.tf_decoder import TFDecoder
    from x2paddle.op_mapper.tf_op_mapper_nhwc import TFOpMapperNHWC
    from x2paddle.optimizer.tf_optimizer import TFOptimizer

    print("Now translating model from tensorflow to paddle.")
    model = TFDecoder(model_path, define_input_shape=define_input_shape)
    mapper = TFOpMapperNHWC(model)
    program.build()
    program.gen_model(save_dir)


def caffe2paddle(proto, weight, save_dir, caffe_proto,
                 paddle_type, params_merge=False):
    from x2paddle.caffe_convert import Decoder
    if paddle_type == "dygraph":
        from x2paddle.caffe_convert import DygraphOpMapper as OpMapper
    else:
        from x2paddle.caffe_convert import StaticOpMapper as OpMapper
    import google.protobuf as gpb
    ver_part = gpb.__version__.split('.')
    version_satisfy = False
@@ -149,12 +161,10 @@ def caffe2paddle(proto, weight, save_dir, caffe_proto,
        version_satisfy = True
    assert version_satisfy, '[ERROR] google.protobuf >= 3.6.0 is required'
    print("Now translating model from caffe to paddle.")
    model = Decoder(proto, weight, caffe_proto)
    mapper = OpMapper(model)
    mapper.pd_graph.build()
    mapper.pd_graph.gen_model(save_dir)


def onnx2paddle(model_path, save_dir, params_merge=False):
@@ -162,8 +172,8 @@ def onnx2paddle(model_path, save_dir, params_merge=False):
    try:
        import onnx
        version = onnx.version.version
        if version != '1.6.0':
            print("[ERROR] onnx==1.6.0 is required")
            return
    except:
        print("[ERROR] onnx is not installed, use \"pip install onnx==1.6.0\".")
@@ -185,7 +195,7 @@ def onnx2paddle(model_path, save_dir, params_merge=False):
    print("Paddle model and code generated.")


def pytorch2paddle(model_path, save_dir, jit_type, input_files):
    # check pytorch installation and version
    try:
        import torch
@@ -202,9 +212,12 @@
        return
    print("Now translating model from pytorch to paddle.")
    from x2paddle.decoder.pytorch_decoder import ScriptDecoder, TraceDecoder
    from x2paddle.op_mapper.pytorch2paddle import pytorch_op_mapper
    if jit_type == "trace":
        model = TraceDecoder(model_path, input_files)
    else:
        model = ScriptDecoder(model_path)
    mapper = pytorch_op_mapper.PyTorchOpMapper(model)
    mapper.graph.build()
    print("Model optimizing ...")
@@ -212,34 +225,15 @@
    graph_opt = GraphOptimizer()
    graph_opt.optimize(mapper.graph)
    print("Model optimized.")
    mapper.graph.gen_model(save_dir, jit_type, input_files)


def paddle2onnx(model_path, save_dir, opset_version=10):
    from x2paddle.decoder.paddle_decoder import PaddleDecoder
    from x2paddle.op_mapper.paddle2onnx.paddle_op_mapper import PaddleOpMapper
    model = PaddleDecoder(model_path, '__model__', '__params__')
    mapper = PaddleOpMapper()
    mapper.convert(model.program, save_dir, opset_number=opset_version)


def main():
@@ -260,6 +254,7 @@ def main():
    assert args.framework is not None, "--framework is not defined (support tensorflow/caffe/onnx/pytorch/paddle2onnx)"
    assert args.save_dir is not None, "--save_dir is not defined"
assert args.paddle_type in ["dygraph", "static"], "--paddle_type must be 'dygraph' or 'static'"
    try:
        import paddle
@@ -267,8 +262,8 @@ def main():
        print("paddle.__version__ = {}".format(paddle.__version__))
        if v0 == '0' and v1 == '0' and v2 == '0':
            print("[WARNING] You are using the develop version of paddlepaddle")
        elif int(v0) != 2 or int(v1) < 0:
            print("[ERROR] paddlepaddle>=2.0.0 is required")
            return
    except:
        print(
@@ -277,12 +272,11 @@ def main():
if args.framework == "tensorflow": if args.framework == "tensorflow":
assert args.model is not None, "--model should be defined while translating tensorflow model" assert args.model is not None, "--model should be defined while translating tensorflow model"
assert args.without_data_format_optimization in [ without_data_format_optimization = False
"True", "False"
], "--the param without_data_format_optimization should be defined True or False"
define_input_shape = False define_input_shape = False
params_merge = False params_merge = False
without_data_format_optimization = True if args.without_data_format_optimization == "True" else False if args.without_data_format_optimization:
without_data_format_optimization = True
if args.define_input_shape: if args.define_input_shape:
define_input_shape = True define_input_shape = True
if args.params_merge: if args.params_merge:
@@ -296,7 +290,7 @@ def main():
        if args.params_merge:
            params_merge = True
        caffe2paddle(args.prototxt, args.weight, args.save_dir,
                     args.caffe_proto, args.paddle_type, params_merge)
    elif args.framework == "onnx":
        assert args.model is not None, "--model should be defined while translating onnx model"
        params_merge = False
@@ -304,10 +298,13 @@ def main():
        if args.params_merge:
            params_merge = True
        onnx2paddle(args.model, args.save_dir, params_merge)
elif args.framework == "pytorch":
assert args.model is not None, "--model should be defined while translating pytorch model"
pytorch2paddle(args.model, args.save_dir, args.jit_type, args.input_files)
elif args.framework == "paddle2onnx": elif args.framework == "paddle2onnx":
assert args.model is not None, "--model should be defined while translating paddle model to onnx" assert args.model is not None, "--model should be defined while translating paddle model to onnx"
paddle2onnx(args.model, args.save_dir, opset_version=args.onnx_opset) paddle2onnx(args.model, args.save_dir, args.onnx_opset)
else: else:
raise Exception( raise Exception(
...
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
@@ -21,15 +20,15 @@ import paddle
from paddle.fluid.proto import framework_pb2
from collections import OrderedDict
import numpy
import sys
import os
import six
import pickle
import numpy as np
class PaddleLayer(object):
    def __init__(self, id, kernel, inputs, outputs, scope_name="", **kwargs):
        assert isinstance(
            inputs,
            dict), "parameter 'inputs' for PaddleLayer should be type of dict"
@@ -53,9 +52,11 @@ class PaddleLayer(object):
        self.kernel = kernel
        self.inputs = inputs
        self.outputs = outputs
self.scope_name = scope_name
        self.attrs = kwargs
        self.id = id
        self.blocks = list()
    def add_block(self, block):
        self.blocks.append(block)
@@ -71,12 +72,23 @@ class PaddleGraph(object):
        self.parameters = dict()
        self.parent_layer = parent_layer
        self.graph_type = graph_type
self.custom_func = None
self.inputs_info = None
    def set_name(self, name):
        self.name = name.replace("-", "_").replace("/", "_")

    def set_parameters(self, parameters):
        self.parameters = parameters
def set_custom_func(self, custom_func):
self.custom_func = custom_func
def set_inputs_info(self, inputs_info):
self.inputs_info = inputs_info
def set_script(self, script):
self.script = script
    def clear(self):
        self.layers = OrderedDict()
@@ -90,13 +102,13 @@ class PaddleGraph(object):
        self.edges_out = dict()
        self.edges_in = dict()
    def add_layer(self, kernel, inputs, outputs, scope_name="", **kwargs):
        layer_id = str(len(self.layers))
        if self.parent_layer is not None:
            layer_id = "{}.{}.{}".format(self.parent_layer.id,
                                         len(self.parent_layer.blocks),
                                         layer_id)
        layer = PaddleLayer(layer_id, kernel, inputs, outputs, scope_name=scope_name, **kwargs)
        self.layers[layer_id] = layer
        return layer_id
@@ -156,6 +168,10 @@ class PaddleGraph(object):
            if not isinstance(vs, list):
                vs = [vs]
            for v in vs:
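                # Strip a trailing index suffix before the connectivity check,
                # e.g. an output recorded as "conv1[0]" is looked up as "conv1".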
if "[" in v:
remove_index = v.index("[")
v_part = v[remove_index:]
v = v.replace(v_part, "")
                assert v in outputs_from_nodes or (
                    inputs is not None and v in list(inputs.values())
                ) or (
@@ -215,8 +231,59 @@ class PaddleGraph(object):
                block_global_layers = update(block.layers)
                global_layers.update(block_global_layers)
            return global_layers

        return update(self.layers)
def gen_model(self, save_dir, jit_type=None):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if self.graph_type == "static":
code_dir = os.path.join(save_dir, 'model_with_code')
infer_dir = os.path.join(save_dir, 'inference_model')
self.gen_code(code_dir)
sys.path.append(code_dir)
import x2paddle_model
paddle.enable_static()
scope = paddle.static.Scope()
startup_program = paddle.static.Program()
main_program = paddle.static.Program()
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_program, startup_program):
inputs, outputs = x2paddle_model.x2paddle_net()
exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_program)
param_dir = os.path.join(code_dir, 'weights')
for k, v in self.parameters.items():
if scope.find_var(k):
self.dump_parameter(k, v, param_dir)
def if_exist(var):
b = os.path.exists(
os.path.join(os.path.join(param_dir, var.name)))
return b
fluid.io.load_vars(
exe, param_dir, main_program, predicate=if_exist)
fluid.io.save_inference_model(
dirname=infer_dir,
feeded_var_names=[i.name for i in inputs],
target_vars=outputs,
executor=exe)
else:
if jit_type == "trace":
from x2paddle.optimizer.code_optimizer import HierarchicalTree
hierarchical_tree = HierarchicalTree(self)
for layer_id, layer in self.layers.items():
hierarchical_tree.insert(layer)
hierarchical_tree.save_source_files(save_dir)
self.dump_dygraph_parameter(save_dir)
else:
self.gen_dygraph_code(save_dir)
self.dump_dygraph_parameter(save_dir)
input_shapes = list()
input_types = list()
for input_name in self.inputs:
input_shapes.append(self.inputs_info[input_name][0])
input_types.append(self.inputs_info[input_name][1])
                # If input_files is non-empty, export the inference model; its value is like [[None, 3, 224, 224]]
self.dygraph2static(save_dir, input_shapes, input_types)
    def gen_code(self, code_dir):
        def write_code(f, code_list, indent=0):
@@ -235,10 +302,24 @@ class PaddleGraph(object):
        write_code(
            f, [
                "from paddle.fluid.initializer import Constant",
                "from paddle.fluid.param_attr import ParamAttr",
                "import paddle.fluid as fluid",
                "import paddle", "import math", "",
            ],
            indent=0)
if self.custom_func is not None:
write_code(
f,
list(self.custom_func.values()),
indent=0)
write_code(f,
["", "def x2paddle_net():"],
indent=0)
write_code(
f, [
"paddle.enable_static()"
],
indent=1)
        for layer_id, layer in self.layers.items():
            edges_in = self.edges_in.get(layer_id, [])
            edges_out = self.edges_out.get(layer_id, [])
@@ -253,8 +334,10 @@ class PaddleGraph(object):
                for output in layer.outputs:
                    line += "{}, ".format(output)
                line = line.strip(", ")
                if layer.kernel.startswith("combination_layer"):
                    line += " = {}(".format(layer.kernel.split(":")[-1].lower() + "_layer")
                else:
                    line += " = {}(".format(layer.kernel)
                for k, v in layer.inputs.items():
                    if isinstance(v, list):
                        line += "{}=[{}], ".format(k, ", ".join(v))
@@ -274,47 +357,6 @@ class PaddleGraph(object):
            indent=1)
        f.close()
    def dump_parameter(self, param_name, param, save_dir):
        if not os.path.exists(save_dir):
@@ -356,10 +398,10 @@ class PaddleGraph(object):
            if self.edges_in.get(layer_id, 0) == 0 and self.edges_out.get(
                    layer_id, 0) == 0:
                continue
            if layer.kernel == "paddle.to_tensor":
                data = layer.attrs["data"]
                if not data.startswith("params["):
                    self.inputs.append(data)
            if len(layer.blocks) > 0:
                for block in layer.blocks:
                    block.get_dygraph_inputs()
@@ -376,11 +418,15 @@ class PaddleGraph(object):
                    layer_id, 0) == 0:
                continue
            if self.edges_out.get(layer_id, 0) == 0:
                for i, output_name in enumerate(layer.outputs):
                    if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel) or \
                            (layer.kernel == "paddle.to_tensor" and layer.attrs["data"].startswith("params[")) or \
                            "paddle.fluid.dygraph" in layer.kernel:
                        if i == 0:
                            continue
                    if output_name not in self.outputs:
                        self.outputs.append(output_name)
    def gen_dygraph_code(self, code_dir=None, indent=2):
        def gen_codes(code_list, indent=0):
@@ -415,6 +461,23 @@ class PaddleGraph(object):
                gen_codes(
                    ["def forward(self, {}):".format(input_data_name)],
                    indent=1))
def gen_run_net_code(code_dir):
input_data_name = ', '.join(self.inputs)
self.run_func = gen_codes(
[
"",
"def run_net({}):".format(input_data_name),
],
indent=0)
self.run_func.extend(
gen_codes(["paddle.disable_static()",
"params, _ = fluid.load_dygraph('{}/model')".format(code_dir),
"model = {}(params)".format(self.name),
"model.set_dict(params)",
"model.eval()",
"out = model({})".format(input_data_name),
"return out"], indent=1))
        def write_code(code_dir):
            f = open(os.path.join(code_dir, 'x2paddle_code.py'), 'w')
@@ -431,6 +494,8 @@ class PaddleGraph(object):
            self.forward_func.extend(gen_codes([return_code], indent=2))
            for code_line in self.forward_func:
                f.write(code_line)
for code_line in self.run_func:
f.write(code_line)
            f.close()
        self.init_func = []
@@ -440,12 +505,12 @@ class PaddleGraph(object):
        for layer_id, layer in self.layers.items():
            if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel
                ) or layer.kernel == "paddle.to_tensor" or \
                    "paddle.fluid.dygraph" in layer.kernel:
                line = "{}".format(
                    layer.outputs[0]
                ) if layer.kernel == "paddle.to_tensor" and not layer.attrs[
                    "data"].startswith("params[") else "self.{}".format(
                        layer.outputs[0])
                line += " = {}(".format(layer.kernel)
                for k, v in layer.attrs.items():
@@ -453,8 +518,8 @@ class PaddleGraph(object):
                line = line.strip(", ")
                line += ")"
                if layer.kernel == "paddle.to_tensor" and not layer.attrs[
                        "data"].startswith("params["):
                    self.forward_func.extend(gen_codes([line], indent=indent))
                    continue
                else:
@@ -466,8 +531,8 @@ class PaddleGraph(object):
                    line = layer.outputs[1]
                else:
                    line = ','.join(layer.outputs[1:])
                if layer.kernel == "paddle.to_tensor" and layer.attrs[
                        "data"].startswith("params["):
                    line += " = self.{}".format(layer.outputs[0])
                else:
                    line += " = self.{}(".format(layer.outputs[0])
@@ -478,7 +543,7 @@ class PaddleGraph(object):
                self.forward_func.extend(gen_codes([line], indent=indent))
elif "prim" in layer.kernel: elif "prim" in layer.kernel:
func_name = layer.kernel.replace(".", "_") func_name = layer.kernel.replace(".", "_")
from x2paddle.op_mapper.pytorch2paddle import prim2code from x2paddle.op_mapper.dygraph import prim2code
if hasattr(prim2code, func_name): if hasattr(prim2code, func_name):
func = getattr(prim2code, func_name) func = getattr(prim2code, func_name)
func( func(
...@@ -504,6 +569,7 @@ class PaddleGraph(object): ...@@ -504,6 +569,7 @@ class PaddleGraph(object):
line += ")" line += ")"
self.forward_func.extend(gen_codes([line], indent=indent)) self.forward_func.extend(gen_codes([line], indent=indent))
if indent == 2: if indent == 2:
gen_run_net_code(code_dir)
write_code(code_dir) write_code(code_dir)
else: else:
return self.init_func, self.forward_func return self.init_func, self.forward_func
@@ -513,23 +579,22 @@ class PaddleGraph(object):
        pickle.dump(self.parameters, params_output)
        params_output.close()
    def dygraph2static(self, save_dir, input_shapes=[], input_types=[]):
        from paddle.fluid.dygraph.jit import declarative
        spec_list = list()
        for i, name in enumerate(self.inputs):
            spec_list.append(
                paddle.static.InputSpec(
                    shape=input_shapes[i], name=name, dtype=input_types[i]))
        import sys
        path = osp.abspath(save_dir)
        sys.path.insert(0, save_dir)
        import x2paddle_code
        paddle.disable_static()
        restore, _ = fluid.load_dygraph(osp.join(save_dir, "model"))
        model = getattr(x2paddle_code, self.name)(restore)
        model.set_dict(restore)
        model.eval()
        static_model = paddle.jit.to_static(model, input_spec=spec_list)
        paddle.jit.save(static_model, osp.join(save_dir, "inference_model/model"))
@@ -18,7 +18,6 @@ from google.protobuf import text_format
import numpy as np
from x2paddle.core.graph import GraphNode, Graph
from x2paddle.core.fluid_code import FluidCode


class CaffeResolver(object):
@@ -50,10 +49,10 @@ class CaffeGraphNode(GraphNode):
    def __init__(self, layer, type_str, layer_name=None):
        if layer_name is None:
            super(CaffeGraphNode, self).__init__(
                layer, layer.name.replace('/', '_').replace('-', '_').lower())
        else:
            super(CaffeGraphNode, self).__init__(
                layer, layer_name.replace('/', '_').replace('-', '_').lower())
        self.layer_type = type_str
        self.fluid_code = FluidCode()
        self.data = None
@@ -66,6 +65,13 @@ class CaffeGraph(Graph):
    def __init__(self, model, params, caffe_pb):
        self.params = params
        self.caffe_pb = caffe_pb
if hasattr(model, "name"):
if model.name == "":
self.graph_name = "CaffeModel"
else:
self.graph_name = model.name
else:
self.graph_name = "CaffeModel"
        super(CaffeGraph, self).__init__(model)

    def filter_layers(self, layers):
@@ -242,7 +248,7 @@ class CaffeDecoder(object):
        with open(proto_path, 'rb') as proto_file:
            proto_str = proto_file.read()
            text_format.Merge(proto_str, self.net)
        self.load_using_pb()
        self.caffe_graph = CaffeGraph(self.net, self.params,
...
@@ -12,15 +12,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import numpy as np


class Decoder(object):
    def _optimize_graph(self, graph):
        torch._C._jit_pass_constant_propagation(graph)
        torch._C._jit_pass_dce(graph)
        torch._C._jit_pass_lint(graph)
@@ -31,4 +30,37 @@ class Decoder(object):
        torch._C._jit_pass_canonicalize(graph)
        torch._C._jit_pass_lint(graph)
        torch._C._jit_pass_constant_propagation(graph)
        return graph
class ScriptDecoder(Decoder):
""" 当script_path非None,直接load ScriptModule;
当model_path非None,load PyTorchModule后使用script方式转换为ScriptModule。
Args:
script_path (str): ScriptModule保存路径。
model_path (str): PyTorchModule保存路径。
"""
def __init__(self, script_path=None):
self.script = torch.jit.load(script_path)
self.graph = self._optimize_graph(self.script.inlined_graph)
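
# Illustrative usage (hypothetical path): convert a ScriptModule saved with
# torch.jit.save.
#
#     decoder = ScriptDecoder("script_model.pt")
#     print(decoder.graph)  # optimized TorchScript IR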
class TraceDecoder(Decoder):
""" PyTorchModule后使用trace方式转换为ScriptModule。
Args:
model_path (str): PyTorchModule保存路径。
input_files (list): 输入网络的numpy,每个numpy保存成.npy文件,
文件路径存储在input_files中。
"""
def __init__(self, model_path, input_files=list()):
        # TODO(syf): pass in the PyTorch Module itself (i.e. import it); otherwise this errors out
model = torch.load(model_path)
model.eval()
input_list = list()
for npy_file in input_files:
input_list.append(torch.tensor(np.load(npy_file)))
self.script = torch.jit.trace(model, input_list, strict=False)
self.graph = self._optimize_graph(self.script.inlined_graph)
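
# Illustrative usage (hypothetical paths): tracing needs example inputs saved
# as .npy files.
#
#     import numpy as np
#     np.save("input0.npy", np.random.rand(1, 3, 224, 224).astype("float32"))
#     decoder = TraceDecoder("model.pt", input_files=["input0.npy"])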
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
import numpy as np
from x2paddle.core.op_mapper import OpMapper
from x2paddle.core.util import *
from x2paddle.op_mapper.dygraph.caffe2paddle import caffe_shape
from x2paddle.core.program import PaddleGraph
class CaffeOpMapper(OpMapper):
directly_map_ops = {
'Sigmoid': 'paddle.nn.layer.Sigmoid',
'TanH': 'paddle.nn.Tanh',
}
def __init__(self, decoder):
super(CaffeOpMapper, self).__init__()
self.graph = decoder.caffe_graph
self.params = dict()
self.pd_graph = PaddleGraph(parent_layer=None, graph_type="dygraph")
self.pd_graph.outputs = self.graph.output_nodes
self.input_index = 0
self.inputs_info = {}
self.nn_name2id = {}
print("Total nodes: {}".format(len(self.graph.topo_sort)))
for node_name in self.graph.topo_sort:
node = self.graph.get_node(node_name)
if node.layer_type == 'DepthwiseConvolution':
node.layer_type = 'ConvolutionDepthwise'
op = node.layer_type
if hasattr(self, op):
self.set_node_shape(node)
func = getattr(self, op)
func(node)
elif op in self.directly_map_ops:
self.set_node_shape(node)
self.directly_map(node)
else:
raise Exception(
"The op {} in model is not supported yet.".format(op))
self.pd_graph.set_name(self.graph.graph_name)
self.pd_graph.set_parameters(self.params)
self.pd_graph.set_inputs_info(self.inputs_info)
def op_checker(self):
unsupported_ops = set()
for node_name in self.graph.topo_sort:
node = self.graph.get_node(node_name)
op = node.layer_type
            if not hasattr(self, op) and op not in self.directly_map_ops:
unsupported_ops.add(op)
if len(unsupported_ops) == 0:
return True
else:
print("There are {} ops not supported yet, list as below".format(
len(unsupported_ops)))
for op in unsupported_ops:
print(op)
return False
def set_node_shape(self, node):
inputs = node.inputs
input_shape = []
for i, nm in enumerate(inputs):
last_node = self.graph.get_node(nm)
tmp = node.layer.bottom[i]
idx = list(last_node.layer.top).index(tmp)
input_shape.append(last_node.output_shape[idx])
node.input_shape = input_shape
func_name = 'shape_' + node.layer_type.lower()
if node.layer_type.lower() == "permute":
node.output_shape = getattr(caffe_shape, func_name)(node.layer,
input_shape,
node.layer.permute_param.order)
elif node.layer_type.lower() == "priorbox":
node.output_shape = getattr(caffe_shape, func_name)(node.layer,
input_shape,
node.layer.prior_box_param.max_size,
node.layer.prior_box_param.aspect_ratio)
elif node.layer_type.lower() =="roipooling":
node.output_shape = getattr(caffe_shape, func_name)(node.layer,
input_shape,
node.layer.roi_pooling_param.pooled_w,
node.layer.roi_pooling_param.pooled_h)
elif node.layer_type.lower() =="upsample":
node.output_shape = getattr(caffe_shape, func_name)(node.layer,
input_shape,
node.layer.upsample_param.scale)
elif node.layer_type.lower() =="select":
node.output_shape = getattr(caffe_shape, func_name)(node.layer,
input_shape,
node.layer.select_param.slice_point,
node.layer.select_param.axis)
else:
node.output_shape = getattr(caffe_shape, func_name)(node.layer,
input_shape)
def adjust_parameters(self, node):
data = node.data
# When using the protobuf-backend, each parameter initially has four dimensions.
# In certain cases (like FC layers), we want to eliminate the singleton dimensions.
# This implementation takes care of the common cases. However, it does leave the
# potential for future issues.
# The Caffe-backend does not suffer from this problem.
data = list(data)
squeeze_indices = [1] # Squeeze biases.
if node.layer_type == 'InnerProduct':
squeeze_indices.append(0) # Squeeze FC.
for idx in squeeze_indices:
if idx >= len(data):
continue
d = data[idx]
assert len(
d.shape
) == 4, 'invalid shape[%s] from caffe when adjust_parameters' % (
str(d.shape))
shape_old = d.shape
sq_axis = None
if idx == 0:
sq_axis = (0, 1)
elif idx == 1:
sq_axis = (0, 1, 2)
else:
continue
data[idx] = np.squeeze(d, axis=sq_axis)
shape_new = data[idx].shape
return data
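    # A worked example of the squeeze above (illustrative): an InnerProduct
    # weight stored by the protobuf backend as (1, 1, 4096, 25088) squeezes
    # over axes (0, 1) to (4096, 25088); a bias stored as (1, 1, 1, 4096)
    # squeezes over axes (0, 1, 2) to (4096,).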
def get_kernel_parameters(self, kind, params):
assert kind in ["Convolution", "Pooling", "Deconvolution", "ConvolutionDepthwise"]
[k_h, k_w] = [1, 1]
if isinstance(params.kernel_size, numbers.Number):
[k_h, k_w] = [params.kernel_size] * 2
elif len(params.kernel_size) > 0:
k_h = params.kernel_h if params.kernel_h > 0 else params.kernel_size[
0]
k_w = params.kernel_w if params.kernel_w > 0 else params.kernel_size[
len(params.kernel_size) - 1]
elif params.kernel_h > 0 or params.kernel_w > 0:
k_h = params.kernel_h
k_w = params.kernel_w
[s_h, s_w] = [1, 1]
if isinstance(params.stride, numbers.Number):
[s_h, s_w] = [params.stride] * 2
elif len(params.stride) > 0:
s_h = params.stride_h if params.stride_h > 0 else params.stride[0]
s_w = params.stride_w if params.stride_w > 0 else params.stride[len(
params.stride) - 1]
elif params.stride_h > 0 or params.stride_w > 0:
s_h = params.stride_h
s_w = params.stride_w
[p_h, p_w] = [0, 0]
if isinstance(params.pad, numbers.Number):
[p_h, p_w] = [params.pad] * 2
elif len(params.pad) > 0:
p_h = params.pad_h if params.pad_h > 0 else params.pad[0]
p_w = params.pad_w if params.pad_w > 0 else params.pad[len(
params.pad) - 1]
elif params.pad_h > 0 or params.pad_w > 0:
p_h = params.pad_h
p_w = params.pad_w
dila_h = dila_w = 1
group = 1
c_o = 1
if kind in ["Convolution", "Deconvolution", "ConvolutionDepthwise"]:
if kind in ["Convolution", "Deconvolution"]:
c_o = params.num_output
dila_len = len(params.dilation)
if dila_len == 2:
dila_h = params.dilation[0]
dila_w = params.dilation[1]
elif dila_len == 1:
dila_h = dila_w = params.dilation[0]
else:
assert dila_len == 0, "invalid length[%s] of dilation in convolution" % (
dila_len)
if kind in ['Convolution', 'Deconvolution']:
group = params.group
kernel = [k_h, k_w]
stride = [s_h, s_w]
pad = [p_h, p_w]
dilation = [dila_h, dila_w]
return c_o, kernel, stride, pad, dilation, group
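    # A worked example (illustrative): a Caffe Convolution with kernel_size=3,
    # stride=2, pad=1, num_output=64, group=1 and an empty dilation list
    # returns c_o=64, kernel=[3, 3], stride=[2, 2], pad=[1, 1],
    # dilation=[1, 1], group=1.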
def get_input_name(self, node):
if hasattr(node, "index"):
return node.layer_name + "[{}]".format(node.index)
else:
return node.layer_name
def Input(self, node):
self.pd_graph.add_layer(
"paddle.to_tensor",
inputs={},
outputs=[node.layer_name],
data="x{}".format(self.input_index))
shape = list(node.layer.input_param.shape[0].dim)[1:]
self.inputs_info["x{}".format(self.input_index)] = [[-1] + shape, "float32"]
self.input_index += 1
def Convolution(self, node):
if "conv" in self.nn_name2id:
self.nn_name2id["conv"] += 1
else:
self.nn_name2id["conv"] = 0
conv2d_name = "conv" + str(self.nn_name2id["conv"])
output_name = node.layer_name
layer_outputs = [conv2d_name, output_name]
data = node.data
params = node.layer.convolution_param
out_channel, kernel, stride, pad, dilation, group = self.get_kernel_parameters(
node.layer_type, params)
if data is None:
data = []
print(
"The parameter of {} (type is {}) is not set. So we set the parameters as 0"
.format(node.layer_name, node.layer_type))
data.append(
np.zeros([out_channel, node.input_shape[0][1], kernel[0], kernel[1]]).astype(
'float32'))
data.append(np.zeros([out_channel, ]).astype('float32'))
else:
data = self.adjust_parameters(node)
self.params[conv2d_name + ".weight"] = data[0]
if len(data) == 2:
self.params[conv2d_name + ".bias"] = data[1]
assert len(node.inputs
) == 1, "The count of Convolution node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
layer_attrs = {
"in_channels": node.input_shape[0][1],
"out_channels": out_channel,
"kernel_size": kernel,
"stride": stride,
"padding": pad,
"dilation": dilation,
"groups": group
}
if len(data) == 1:
layer_attrs["bias_attr"] = False
self.pd_graph.add_layer(
"paddle.nn.Conv2D",
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs,
**layer_attrs)
def Deconvolution(self, node):
if "conv" in self.nn_name2id:
self.nn_name2id["conv"] += 1
else:
self.nn_name2id["conv"] = 0
conv2d_name = "conv" + str(self.nn_name2id["conv"])
output_name = node.layer_name
layer_outputs = [conv2d_name, output_name]
data = node.data
params = node.layer.convolution_param
out_channel, kernel, stride, pad, dilation, group = self.get_kernel_parameters(
node.layer_type, params)
if data is None:
data = []
print(
"The parameter of {} (type is {}) is not set. So we set the parameters as 0"
.format(node.layer_name, node.layer_type))
data.append(
np.zeros([out_channel, node.input_shape[0][1], kernel[0], kernel[1]]).astype(
'float32'))
data.append(np.zeros([out_channel, ]).astype('float32'))
else:
data = self.adjust_parameters(node)
self.params[conv2d_name + ".weight"] = data[0]
if len(data) == 2:
self.params[conv2d_name + ".bias"] = data[1]
assert len(node.inputs
) == 1, "The count of Deconvolution node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
layer_attrs = {
"in_channels": node.input_shape[0][1],
"out_channels": out_channel,
"kernel_size": kernel,
"stride": stride,
"padding": pad,
"dilation": dilation,
"groups": group
}
if len(data) == 1:
layer_attrs["bias_attr"] = False
self.pd_graph.add_layer(
"paddle.nn.Conv2DTranspose",
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs,
**layer_attrs)
def ConvolutionDepthwise(self, node):
if "conv" in self.nn_name2id:
self.nn_name2id["conv"] += 1
else:
self.nn_name2id["conv"] = 0
conv2d_name = "conv" + str(self.nn_name2id["conv"])
output_name = node.layer_name
layer_outputs = [conv2d_name, output_name]
data = node.data
params = node.layer.convolution_param
out_channel, kernel, stride, pad, dilation, group = self.get_kernel_parameters(
node.layer_type, params)
out_channel = params.num_output if params.num_output is not None else node.input_shape[0][1]
in_channel = node.input_shape[0][1]
group = int(in_channel / (in_channel / out_channel)) if in_channel > out_channel else int(in_channel /
(out_channel / in_channel))
if data is None:
data = []
print(
"The parameter of {} (type is {}) is not set. So we set the parameters as 0"
.format(node.layer_name, node.layer_type))
data.append(
np.zeros([out_channel, node.input_shape[0][1], kernel[0], kernel[1]]).astype(
'float32'))
data.append(np.zeros([out_channel, ]).astype('float32'))
else:
data = self.adjust_parameters(node)
self.params[conv2d_name + ".weight"] = data[0]
if len(data) == 2:
self.params[conv2d_name + ".bias"] = data[1]
        assert len(node.inputs
                   ) == 1, "The count of ConvolutionDepthwise node's input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
layer_attrs = {
"in_channels": in_channel,
"out_channels": out_channel,
"kernel_size": kernel,
"stride": stride,
"padding": pad,
"dilation": dilation,
"groups": group
}
if len(data) == 1:
layer_attrs["bias_attr"] = False
self.pd_graph.add_layer(
"paddle.nn.Conv2D",
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs,
**layer_attrs)
def Pooling(self, node):
if "pool" in self.nn_name2id:
self.nn_name2id["pool"] += 1
else:
self.nn_name2id["pool"] = 0
pool2d_name = "pool" + str(self.nn_name2id["pool"])
output_name = node.layer_name
layer_outputs = [pool2d_name, output_name]
params = node.layer.pooling_param
        ceil_mode = getattr(params, "ceil_mode", True)
global_pool = getattr(params, "global_pooling", False)
assert not global_pool, "The global_pool must be False!"
kernel_default = [1, 1]
channel, kernel, stride, pad, dilation, group = self.get_kernel_parameters(
node.layer_type, params)
if params.pool == 0:
pool_type = "max"
else:
pool_type = "avg"
assert len(
node.inputs) == 1, "The count of Pooling node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
layer_attrs = {
'kernel_size': kernel,
'stride': stride,
'padding': pad,
'ceil_mode': ceil_mode,
}
if params.pool == 0:
self.pd_graph.add_layer(
"paddle.nn.MaxPool2D",
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs,
**layer_attrs)
else:
layer_attrs["count_include_pad"] = True
self.pd_graph.add_layer(
"paddle.nn.AvgPool2D",
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs,
**layer_attrs)
def LRN(self, node):
assert len(node.inputs) == 1, "The count of LRN node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.lrn_param
assert params.local_size % 2 == 1
alpha = params.alpha / float(params.local_size)
layer_attrs = {
"n": params.local_size,
"k": params.k,
"alpha": alpha,
"beta": params.beta,
}
self.pd_graph.add_layer(
"fluid.layers.lrn",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
**layer_attrs)
def InnerProduct(self, node):
if "linear" in self.nn_name2id:
self.nn_name2id["linear"] += 1
else:
self.nn_name2id["linear"] = 0
linear_name = "linear" + str(self.nn_name2id["linear"])
output_name = node.layer_name
layer_outputs = [linear_name, output_name]
data = node.data
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.inner_product_param
if data is None:
print(
"The parameter of {} (type is {}) is not set. So we set the parameters as 0."
.format(node.layer_name, node.layer_type))
data = []
            data.append(
                np.zeros([node.input_shape[0][1], params.num_output]).astype(
                    "float32"))
            data.append(
                np.zeros([params.num_output]).astype("float32"))
else:
data = self.adjust_parameters(node)
# Reshape the parameters to Paddle's ordering
transpose_order = (1, 0)
w = data[0]
fc_shape = w.shape
output_channels = fc_shape[0]
w = w.reshape((output_channels, -1))
w = w.transpose(transpose_order)
data[0] = w
self.params[linear_name + ".weight"] = data[0]
if len(data) == 2:
self.params[linear_name + ".bias"] = data[1]
assert len(node.inputs
) == 1, "The count of InnerProduct node\'s input is not 1."
assert params.axis == 1
assert params.bias_term == True
layer_attrs = {
"in_features": data[0].shape[0],
"out_features": params.num_output
}
if len(data) == 1:
layer_attrs["bias"] = False
if node.input_shape[0][-1] != data[0].shape[0]:
self.pd_graph.add_layer(
"paddle.reshape",
inputs={"x": self.get_input_name(input)},
outputs=[output_name],
shape=[-1, data[0].shape[0]])
self.pd_graph.add_layer(
"paddle.nn.Linear",
inputs={"input": output_name},
outputs=layer_outputs,
**layer_attrs)
else:
self.pd_graph.add_layer(
"paddle.nn.Linear",
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs,
**layer_attrs)
def AbsVal(self, node):
        assert len(
            node.inputs
        ) >= 1, "The count of AbsVal node's input is less than 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
self.pd_graph.add_layer(
"paddle.abs",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name])
def Softmax(self, node):
if "softmax" in self.nn_name2id:
self.nn_name2id["softmax"] += 1
else:
self.nn_name2id["softmax"] = 0
softmax_name = "softmax" + str(self.nn_name2id["softmax"])
output_name = node.layer_name
layer_outputs = [softmax_name, output_name]
assert len(
node.inputs) == 1, "The count of Softmax node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.softmax_param
axis = params.axis
shape = node.input_shape[0]
dims = len(shape)
axis = axis + dims if axis < 0 else axis
layer_attrs = {'axis': axis}
self.pd_graph.add_layer(
"paddle.nn.Softmax",
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs,
**layer_attrs)
def Slice(self, node):
assert len(
node.inputs) == 1, "The count of Slice node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
top_len = len(node.layer.top)
params = node.layer.slice_param
axis = params.axis
slice_dim = params.slice_dim
if slice_dim != 1 and axis == 1:
axis = slice_dim
output_shape = node.output_shape
sections_list = []
for s in output_shape:
sections_list.append(s[axis])
layer_attrs = {
'num_or_sections': sections_list,
'dim': axis,
}
self.pd_graph.add_layer(
"paddle.split",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
**layer_attrs)
def Concat(self, node):
assert len(
node.inputs
) >= 1, "The count of Concat node\'s input is not more than 1."
inputs_dict = dict()
for i in range(len(node.inputs)):
input = self.graph.get_bottom_node(node, idx=i, copy=True)
inputs_dict["input{}".format(i)] = self.get_input_name(input)
params = node.layer.concat_param
axis = params.axis
layer_attrs = {'axis': axis}
self.pd_graph.add_layer(
"prim.list",
inputs=inputs_dict,
outputs=[node.layer_name + "_list"])
self.pd_graph.add_layer(
"paddle.concat",
inputs={"x": node.layer_name + "_list"},
outputs=[node.layer_name],
**layer_attrs)
def ReLU(self, node):
if "relu" in self.nn_name2id:
self.nn_name2id["relu"] += 1
else:
self.nn_name2id["relu"] = 0
relu_name = "relu" + str(self.nn_name2id["relu"])
output_name = node.layer_name
layer_outputs = [relu_name, output_name]
        assert len(
            node.inputs) == 1, "The count of ReLU node's input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.relu_param
if params.HasField('negative_slope') and params.negative_slope != 0:
negative_slope = float(params.negative_slope)
layer_attrs = {'alpha': negative_slope}
self.pd_graph.add_layer(
"paddle.nn.LeakyReLU",
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs,
**layer_attrs)
else:
self.pd_graph.add_layer(
"paddle.nn.ReLU",
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs)
def PReLU(self, node):
if "prelu" in self.nn_name2id:
self.nn_name2id["prelu"] += 1
else:
self.nn_name2id["prelu"] = 0
prelu_name = "prelu" + str(self.nn_name2id["prelu"])
output_name = node.layer_name
layer_outputs = [prelu_name, output_name]
assert len(
node.inputs) == 1, "The count of PReLU node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.prelu_param
mode_bool = params.channel_shared
output_shape = node.output_shape[0]
if mode_bool:
num_parameters = 1
else:
num_parameters = output_shape[1]
        data = node.data
        assert data is not None, "The parameter of {} (type is {}) is not set. You need to use python package of caffe to set the default value.".format(
            node.layer_name, node.layer_type)
        self.params[prelu_name + '._weight'] = np.squeeze(data[0])
self.pd_graph.add_layer(
"paddle.nn.PReLU",
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs,
num_parameters=num_parameters)
def Accuracy(self, node):
assert len(
node.inputs) == 2, "The count of Accuracy node\'s input is not 2."
inputs_dict = dict()
        for i, shape in enumerate(node.input_shape):
            if shape[1] == 1:
                input = self.graph.get_bottom_node(node, idx=i, copy=True)
                inputs_dict['y'] = self.get_input_name(input)
            else:
                input = self.graph.get_bottom_node(node, idx=i, copy=True)
                inputs_dict['x'] = self.get_input_name(input)
params = node.layer.accuracy_param
top_k = params.top_k
axis = params.axis
ignore_label = params.ignore_label
assert axis == 1, "PaddlePaddle can not support the situation when the axis is not 1."
assert not ignore_label >= 0, "PaddlePaddle can not support the situation when the model has ignore label."
self.pd_graph.add_layer(
"prim.accuracy",
inputs=inputs_dict,
outputs=[node.layer_name],
topk=top_k)
def Eltwise(self, node):
assert len(
node.inputs) == 2, "The count of Eltwise node\'s input is not 2."
params = node.layer.eltwise_param
mode = params.operation
inputs = []
input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
input1 = self.graph.get_bottom_node(node, idx=1, copy=True)
input0_name = self.get_input_name(input0)
input1_name = self.get_input_name(input1)
if mode == 0:
inputs_dict = {}
inputs_dict['x'] = input0_name
inputs_dict['y'] = input1_name
self.pd_graph.add_layer(
"paddle.multiply",
inputs=inputs_dict,
outputs=[node.layer_name])
elif mode == 1:
if hasattr(params, 'coeff') and len(params.coeff) == 2:
coeff = params.coeff
self.pd_graph.add_layer(
"prim.mul",
inputs={"x": input0_name},
outputs=[node.layer_name + '_mul0'],
y=coeff[0])
self.pd_graph.add_layer(
"prim.mul",
inputs={"x": input1_name},
outputs=[node.layer_name + '_mul1'],
                    y=coeff[1])
inputs_dict = {}
inputs_dict['x'] = node.layer_name + '_mul0'
inputs_dict['y'] = node.layer_name + '_mul1'
self.pd_graph.add_layer(
"paddle.add",
inputs=inputs_dict,
outputs=[node.layer_name])
else:
inputs_dict = {}
inputs_dict['x'] = input0_name
inputs_dict['y'] = input1_name
self.pd_graph.add_layer(
"paddle.add",
inputs=inputs_dict,
outputs=[node.layer_name])
else:
inputs_dict = {}
inputs_dict['x'] = input0_name
inputs_dict['y'] = input1_name
self.pd_graph.add_layer(
"paddle.max",
inputs=inputs_dict,
outputs=[node.layer_name])
def BatchNorm(self, node):
if "batchnorm" in self.nn_name2id:
self.nn_name2id["batchnorm"] += 1
else:
self.nn_name2id["batchnorm"] = 0
batchnorm_name = "batchnorm" + str(self.nn_name2id["batchnorm"])
output_name = node.layer_name
layer_outputs = [batchnorm_name, output_name]
assert len(
node.inputs) == 1, "The count of BatchNorm node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.batch_norm_param
if hasattr(params, "eps"):
eps = params.eps
else:
eps = 1e-5
if node.data is None or len(node.data) != 3:
print(
"The parameter of {} (type is {}) is not set. So we set the parameters as 0"
.format(node.layer_name, node.layer_type))
mean = np.zeros([node.input_shape[0][1], ]).astype("float32")
variance = np.zeros([node.input_shape[0][1], ]).astype("float32")
scale = 0
else:
node.data = [np.squeeze(i).astype("float32") for i in node.data]
mean, variance, scale = node.data
# Prescale the stats
scaling_factor = 1.0 / scale if scale != 0 else 0
mean *= scaling_factor
variance *= scaling_factor
self.params[batchnorm_name + "._mean"] = mean
self.params[batchnorm_name + '._variance'] = variance
layer_attrs = {
"num_features": node.input_shape[0][1],
"epsilon": eps,
"weight_attr": False,
"bias_attr": False,
}
self.pd_graph.add_layer(
"paddle.nn.BatchNorm2D",
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs,
**layer_attrs)
def Scale(self, node):
if node.data is None:
print(
"The parameter of {} (type is {}) is not set. So we set the parameters as 0"
.format(node.layer_name, node.layer_type))
self.params[node.layer_name + ".weight"] = np.zeros([
node.input_shape[0][1],
]).astype("float32")
self.params[node.layer_name + ".bias"] = np.zeros([
node.input_shape[0][1],
]).astype("float32")
else:
self.params[node.layer_name + ".weight"] = np.squeeze(node.data[
0]).astype("float32")
self.params[node.layer_name + ".bias"] = np.squeeze(node.data[
1]).astype("float32")
params = node.layer.scale_param
axis = params.axis
inputs = []
if len(node.inputs) == 2:
input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
input1 = self.graph.get_bottom_node(node, idx=1, copy=True)
input0_name = self.get_input_name(input0)
input1_name = self.get_input_name(input1)
inputs_dict = {}
inputs_dict['x'] = input0_name
inputs_dict['y'] = input1_name
self.pd_graph.add_layer(
"paddle.multiply",
inputs=inputs_dict,
outputs=[node.layer_name + "_mul"],
axis=1)
else:
self.pd_graph.add_layer(
"paddle.to_tensor",
inputs={},
outputs=[node.layer_name + "_cparam1"],
data="params[{}]".format(string(node.layer_name + ".weight")))
input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
input0_name = self.get_input_name(input0)
inputs_dict = {}
inputs_dict['x'] = input0_name
inputs_dict['y'] = node.layer_name + "_cparam1"
self.pd_graph.add_layer(
"paddle.multiply",
inputs=inputs_dict,
outputs=[node.layer_name + "_mul"],
axis=axis)
self.pd_graph.add_layer(
"paddle.to_tensor",
inputs={},
outputs=[node.layer_name + "_cparam2"],
data="params[{}]".format(string(node.layer_name + ".bias")))
inputs_dict = {}
inputs_dict['x'] = node.layer_name + "_mul"
inputs_dict['y'] = node.layer_name + "_cparam2"
self.pd_graph.add_layer(
"paddle.add",
inputs=inputs_dict,
outputs=[node.layer_name],
axis=axis)
def Reshape(self, node):
input = self.graph.get_bottom_node(node, idx=0, copy=True)
top_count = len(input.layer.top)
is_inplace = False if top_count == 1 else True
output_shape = node.output_shape[0]
layer_attrs = {
'shape': output_shape,
'inplace': is_inplace,
}
self.pd_graph.add_layer(
"paddle.reshape",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
**layer_attrs)
def ArgMax(self, node):
assert len(node.inputs) == 1 and len(
node.outputs
) == 1, "The count of ArgMax node\'s input and output is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
input_shape = node.input_shape[0]
params = node.layer.argmax_param
        out_max_val = params.out_max_val if hasattr(params,
                                                    "out_max_val") else False
        top_k = params.top_k if hasattr(params, "top_k") else 1
        axis = params.axis if hasattr(params, "axis") else -1
if axis < 0:
axis += len(input_shape)
if out_max_val is True:
self.pd_graph.add_layer(
"paddle.topk",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name + "_topk_var", node.layer_name + "_index_var"],
k=top_k)
self.pd_graph.add_layer(
"paddle.cast",
inputs={"x": node.layer_name + "_index_var"},
outputs=[node.layer_name + "_index_var"],
dtype="{}_topk_var.dtype".format(node.layer_name))
self.pd_graph.add_layer(
"prim.list",
inputs={"input0": node.layer_name + "_topk_var",
"input1": node.layer_name + "_index_var"},
outputs=[node.layer_name + "_list"])
self.pd_graph.add_layer(
"paddle.concat",
inputs={"x": node.layer_name + "_list"},
outputs=[node.layer_name],
axis=axis)
else:
self.pd_graph.add_layer(
"paddle.topk",
inputs={"x": self.get_input_name(input)},
outputs=["_", node.layer_name],
k=top_k)
def Axpy(self, node):
        assert len(
            node.inputs) == 3, "The count of Axpy node\'s input is not 3."
params = node.layer.axpy_param
input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
input1 = self.graph.get_bottom_node(node, idx=1, copy=True)
input2 = self.graph.get_bottom_node(node, idx=2, copy=True)
input0_name = self.get_input_name(input0)
input1_name = self.get_input_name(input1)
input2_name = self.get_input_name(input2)
inputs_dict = {}
inputs_dict['x'] = input1_name
inputs_dict['y'] = input0_name
self.pd_graph.add_layer(
"paddle.multiply",
inputs=inputs_dict,
outputs=[node.layer_name + "_mul"],
axis=0)
inputs_dict = {}
inputs_dict['x'] = node.layer_name + "_mul"
inputs_dict['y'] = input2_name
        self.pd_graph.add_layer(
            "paddle.add",
            inputs=inputs_dict,
            outputs=[node.layer_name])
def Crop(self, node):
assert len(
node.inputs) == 2, "The count of Crop node\'s input is not 2."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
example = self.graph.get_bottom_node(node, idx=1, copy=True)
params = node.layer.crop_param
axis = params.axis
input_shape = node.input_shape[0]
if axis < 0:
axis += len(input_shape)
offset_real = [0] * len(input_shape)
if hasattr(params, "offset") and len(params.offset) > 0:
offset = list(params.offset)
assert (len(input_shape) - axis
) == len(offset), "invalid offset[%s] in crop layer" % (
str(offset))
offset_real = [0] * axis + offset
self.pd_graph.add_layer(
"paddle.crop",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
shape=node.input_shape[1],
offsets=list(offset_real))
def Flatten(self, node):
        assert len(
            node.
            inputs) == 1, "The count of Flatten node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
self.pd_graph.add_layer(
"paddle.reshape",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
shape=node.output_shape[0])
def Power(self, node):
        assert len(
            node.inputs) == 1, "The count of Power node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.power_param
layer_attrs = {
'scale': params.scale,
'bias': params.shift,
'bias_after_scale': True
}
self.pd_graph.add_layer(
"paddle.scale",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
**layer_attrs)
self.pd_graph.add_layer(
"paddle.pow",
inputs={"x": node.layer_name},
outputs=[node.layer_name],
exponent=params.power)
def Reduction(self, node):
assert len(
node.inputs) == 1, "The count of Reduction node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.reduction_param
operation = params.operation
axis = params.axis
coeff = params.coeff
        assert operation >= 1 and operation <= 4, "invalid reduction operation [%s]" % (
            operation)
input_len = len(node.input_shape[0])
if axis < 0:
axis += input_len + 1
dim = list(range(input_len))
# operation = SUM
if operation == 1:
layer_attrs = {
"dim": dim[axis:],
"keep_dim": False,
}
self.pd_graph.add_layer(
"paddle.sum",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
**layer_attrs)
# operation = ASUM
elif operation == 2:
self.pd_graph.add_layer(
"paddle.abs",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name])
layer_attrs = {
"dim": dim[axis:],
"keep_dim": False,
}
self.pd_graph.add_layer(
"paddle.sum",
inputs={"input": node.layer_name},
outputs=[node.layer_name],
**layer_attrs)
# operation = SUMSQ
elif operation == 3:
self.pd_graph.add_layer(
"paddle.pow",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
exponent=2.0)
layer_attrs = {
"dim": dim[axis:],
"keep_dim": False,
}
self.pd_graph.add_layer(
"paddle.sum",
inputs={"input": node.layer_name},
outputs=[node.layer_name],
**layer_attrs)
# operation = MEAN
else:
layer_attrs = {
"dim": dim[axis:],
"keep_dim": False,
}
self.pd_graph.add_layer(
"paddle.mean",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
**layer_attrs)
self.pd_graph.add_layer(
"paddle.scale",
inputs={"x": node.layer_name},
outputs=[node.layer_name],
scale=coeff)
def DetectionOutput(self, node):
assert len(
node.inputs) == 3, "The count of DetectionOutput node\'s input is not 3."
inputs_list = list()
for i in range(len(node.inputs)):
input = self.graph.get_bottom_node(node, idx=i, copy=True)
if i == 1:
input = self.graph.get_bottom_node(node, idx=i, copy=True)
while input is not None \
and input.layer_type != 'Softmax' \
and input.layer_type != 'Sigmoid':
input = self.graph.get_bottom_node(input, idx=0, copy=True)
assert input is not None, 'This kind of DetectionOutput is not supported!'
input = self.graph.get_bottom_node(input, idx=0, copy=True)
inputs_list.append(self.get_input_name(input))
        params = node.layer.detection_output_param
        nms_param = params.nms_param
        if nms_param is None:
            nms_param_dict = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
        else:
            nms_param_dict = dict()
            nms_param_dict["nms_threshold"] = nms_param.nms_threshold
            nms_param_dict["top_k"] = nms_param.top_k
            nms_param_dict["eta"] = nms_param.eta
self.pd_graph.add_layer(
"paddle.split",
inputs={"input": inputs_list[2]},
outputs=[node.layer_name + "_priorbox_list"],
num_or_sections=2,
dim=1)
self.pd_graph.add_layer(
"prim.getitem",
inputs={"list": node.layer_name + "_priorbox_list"},
outputs=[node.layer_name + "_pb"],
index=0)
self.pd_graph.add_layer(
"prim.getitem",
inputs={"list": node.layer_name + "_priorbox_list"},
outputs=[node.layer_name + "_pbv"],
index=1)
self.pd_graph.add_layer(
"paddle.reshape",
inputs={"x": node.layer_name + "_pb"},
outputs=[node.layer_name + "_pb"],
shape=[-1, 4])
self.pd_graph.add_layer(
"paddle.reshape",
inputs={"x": node.layer_name + "_pbv"},
outputs=[node.layer_name + "_pbv"],
shape=[-1, 4])
self.pd_graph.add_layer(
"prim.shape_dim",
inputs={"input": node.layer_name + "_pb"},
outputs=[node.layer_name + "_pb_dim"],
dim=0)
self.pd_graph.add_layer(
"paddle.reshape",
inputs={"x": inputs_list[0]},
outputs=[node.layer_name + "_loc"],
shape="[-1, {}_pb_dim, 4]".format(node.layer_name))
self.pd_graph.add_layer(
"paddle.reshape",
inputs={"x": inputs_list[1]},
outputs=[node.layer_name + "_conf_flatten"],
shape="[0, {}_pb_dim, -1]".format(node.layer_name))
        default = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
        for f in default.keys():
            if f not in nms_param_dict:
                nms_param_dict[f] = default[f]
inputs_dict = {}
inputs_dict["loc"] = node.layer_name + "_loc"
inputs_dict["scores"] = node.layer_name + "_conf_flatten"
inputs_dict["prior_box"] = node.layer_name + "_pb"
inputs_dict["prior_box_var"] = node.layer_name + "_pbv"
layer_attrs = {
"background_label": params.background_label_id,
"nms_threshold": nms_param_dict["nms_threshold"],
"nms_top_k": nms_param_dict["top_k"],
"keep_top_k": params.keep_top_k,
"score_threshold": params.confidence_threshold,
"nms_eta": nms_param_dict["eta"]}
self.pd_graph.add_layer(
"fluid.layers.detection_output",
inputs=inputs_dict,
outputs=[node.layer_name],
**layer_attrs)
def Normalize(self, node):
assert len(
node.inputs) == 1, "The count of Normalize node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.norm_param
if node.data is None or len(node.data) != 1:
print(
"The parameter of {} (type is {}) is not set. So we set the parameters as 0"
.format(node.layer_name, node.layer_type))
            self.params[node.layer_name + ".scale"] = \
                np.zeros([1] if params.channel_shared else [1, 1, 1, node.input_shape[0][1]]).astype("float32")
        else:
            self.params[node.layer_name + ".scale"] = self.adjust_parameters(node)[0]
self.pd_graph.add_layer(
"paddle.nn.functional.normalize",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name + "_l2"],
p=2,
axis=1)
        self.pd_graph.add_layer(
"paddle.to_tensor",
inputs={},
outputs=[node.layer_name + "_param"],
data="params[{}]".format(string(node.layer_name + ".scale")))
inputs_dict = {}
inputs_dict["x"] = node.layer_name + "_l2"
inputs_dict["y"] = node.layer_name + "_param"
self.pd_graph.add_layer(
"paddle.multiply",
inputs=inputs_dict,
outputs=[node.layer_name],
axis=-1 if params.channel_shared else 1)
def Permute(self, node):
assert len(
node.inputs) == 1, "The count of Permute node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.permute_param
order = list(params.order)
self.pd_graph.add_layer(
"paddle.transpose",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
perm=order)
def PriorBox(self, node):
assert len(
node.inputs) == 2, "The count of PriorBox node\'s input is not 2."
input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
input1 = self.graph.get_bottom_node(node, idx=1, copy=True)
inputs_dict = {}
inputs_dict["input"] = self.get_input_name(input0)
inputs_dict["image"] = self.get_input_name(input1)
params = node.layer.prior_box_param
steps = tuple(params.step) if type(params.step) \
is list or type(params.step) is tuple \
else (params.step, params.step)
layer_attrs = {
"min_sizes": params.min_size,
"max_sizes": params.max_size,
"aspect_ratios": params.aspect_ratio,
"variance": params.variance,
"flip": params.flip,
"clip": params.clip,
"steps": steps,
"offset": params.offset,
"min_max_aspect_ratios_order": True}
self.pd_graph.add_layer(
"fluid.layers.prior_box",
inputs=inputs_dict,
outputs=[node.layer_name + "_box", node.layer_name + "_var"],
**layer_attrs)
self.pd_graph.add_layer(
"paddle.reshape",
inputs={"x": node.layer_name + "_box"},
outputs=[node.layer_name + "_box"],
shape=[1, 1, -1])
self.pd_graph.add_layer(
"paddle.reshape",
inputs={"x": node.layer_name + "_var"},
outputs=[node.layer_name + "_var"],
shape=[1, 1, -1])
self.pd_graph.add_layer(
"prim.list",
inputs={"input0": node.layer_name + "_box",
"input1": node.layer_name + "_var"},
outputs=[node.layer_name + "_list"])
self.pd_graph.add_layer(
"paddle.concat",
inputs={"x": node.layer_name + "_list"},
outputs=[node.layer_name],
axis=1)
def ReLU6(self, node):
if "relu6" in self.nn_name2id:
self.nn_name2id["relu6"] += 1
else:
self.nn_name2id["relu6"] = 0
relu6_name = "relu6" + str(self.nn_name2id["relu6"])
output_name = node.layer_name
layer_outputs = [relu6_name, output_name]
assert len(
node.inputs) == 1, "The count of RelU6 node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
self.pd_graph.add_layer(
"paddle.nn.ReLU6",
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs)
def ROIPooling(self, node):
assert len(
node.inputs) == 2, "The count of ROIPooling node\'s input is not 2."
input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
input1 = self.graph.get_bottom_node(node, idx=1, copy=True)
inputs_dict = {}
inputs_dict["input"] = self.get_input_name(input0)
inputs_dict["roi"] = self.get_input_name(input1)
params = node.layer.roi_pooling_param
self.pd_graph.add_layer(
"paddle.slice",
inputs={"input": self.get_input_name(input1)},
outputs=[self.get_input_name(input1)],
axes=[1],
starts=[1],
ends=[5])
layer_attrs = {
"pooled_height": params.pooled_h,
"pooled_width": params.pooled_w,
"spatial_scale": params.spatial_scale}
self.pd_graph.add_layer(
"fluid.layers.roi_pool",
inputs=inputs_dict,
outputs=[node.layer_name],
**layer_attrs)
def ShuffleChannel(self, node):
assert len(
node.inputs) == 1, "The count of ShuffleChannel node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.shuffle_channel_param
self.pd_graph.add_layer(
"fluid.layers.shuffle_channel",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
group=params.group)
def Upsample(self, node):
assert len(
node.inputs) == 1, "The count of Upsample node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.upsample_param
layer_attrs = {
"align_corners": False,
"scale_factor": params.scale,
"mode": "nearest"}
self.pd_graph.add_layer(
"paddle.nn.functioanl.interpolate",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
**layer_attrs)
def Select(self, node):
assert len(
node.inputs) == 1, "The count of Select node\'s input is not 1."
input = self.graph.get_bottom_node(node, idx=0, copy=True)
input_shape = node.input_shape[0]
params = node.layer.select_param
layer_attrs = {
"input_shape": input_shape,
"point": params.slice_point,
"axis": params.axis}
self.pd_graph.add_layer(
"prim.update_end",
inputs={},
outputs=[node.layer_name + "_end"],
**layer_attrs)
layer_attrs = {
"axes": [params.axis],
"starts": [params.slice_point[0]]}
self.pd_graph.add_layer(
"paddle.split",
inputs={"input": self.get_input_name(input),
"end": node.layer_name + "_end"},
outputs=[node.layer_name],
**layer_attrs)
def directly_map(self, node):
assert node.layer_type in self.directly_map_ops
op_info = self.directly_map_ops[node.layer_type]
input = self.graph.get_bottom_node(node, idx=0, copy=True)
prefix_name = node.layer_type.lower()
if prefix_name in self.nn_name2id:
self.nn_name2id[prefix_name] += 1
else:
self.nn_name2id[prefix_name] = 0
first_output_name = prefix_name + str(self.nn_name2id[prefix_name])
output_name = node.layer_name
        layer_outputs = [first_output_name, output_name]
        assert len(
            node.inputs) == 1, "The count of Activate node\'s input is not 1."
self.pd_graph.add_layer(
op_info,
inputs={"input": self.get_input_name(input)},
outputs=layer_outputs)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numbers
from functools import reduce
def get_kernel_parameters(params):
[k_h, k_w] = [1, 1]
if isinstance(params.kernel_size, numbers.Number):
[k_h, k_w] = [params.kernel_size] * 2
elif len(params.kernel_size) > 0:
k_h = params.kernel_h if params.kernel_h > 0 else params.kernel_size[0]
k_w = params.kernel_w if params.kernel_w > 0 else params.kernel_size[
len(params.kernel_size) - 1]
elif params.kernel_h > 0 or params.kernel_w > 0:
k_h = params.kernel_h
k_w = params.kernel_w
[s_h, s_w] = [1, 1]
if isinstance(params.stride, numbers.Number):
[s_h, s_w] = [params.stride] * 2
elif len(params.stride) > 0:
s_h = params.stride_h if params.stride_h > 0 else params.stride[0]
s_w = params.stride_w if params.stride_w > 0 else params.stride[len(
params.stride) - 1]
elif params.stride_h > 0 or params.stride_w > 0:
s_h = params.stride_h
s_w = params.stride_w
[p_h, p_w] = [0, 0]
if isinstance(params.pad, numbers.Number):
[p_h, p_w] = [params.pad] * 2
elif len(params.pad) > 0:
p_h = params.pad_h if params.pad_h > 0 else params.pad[0]
p_w = params.pad_w if params.pad_w > 0 else params.pad[len(params.pad) -
1]
elif params.pad_h > 0 or params.pad_w > 0:
p_h = params.pad_h
p_w = params.pad_w
dila_h = dila_w = 1
if hasattr(params, 'dilation'):
dila_len = len(params.dilation)
if dila_len == 2:
dila_h = params.dilation[0]
dila_w = params.dilation[1]
elif dila_len == 1:
dila_h = dila_w = params.dilation[0]
else:
assert dila_len == 0, "invalid length[%s] of dilation in convolution" % (
dila_len)
return dila_h, dila_w, p_h, p_w, k_h, k_w, s_h, s_w
def get_strided_kernel_output_shape(params, input_shape, round_func):
i_h = input_shape[2]
i_w = input_shape[3]
dila_h, dila_w, pad_h, pad_w, kernel_h, kernel_w, stride_h, stride_w = get_kernel_parameters(
params)
o_h = (i_h + 2 * pad_h - (dila_h *
(kernel_h - 1) + 1)) / float(stride_h) + 1
o_w = (i_w + 2 * pad_w - (dila_w *
(kernel_w - 1) + 1)) / float(stride_w) + 1
o_h = int(round_func(o_h))
o_w = int(round_func(o_w))
has_c_o = hasattr(params, 'num_output')
c = params.num_output if has_c_o else input_shape[1]
return [[input_shape[0], c, o_h, o_w]]
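# A worked example of the arithmetic above (hypothetical values, not taken
# from any model): for an input of shape [1, 64, 224, 224] with
# kernel_size=3, stride=2, pad=1 and dilation=1, the height works out to
#   (224 + 2*1 - (1*(3-1) + 1)) / 2 + 1 = 112.5,
# which math.floor (the Convolution case) rounds down to 112.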
def shape_convolution(layer, input_shape):
params = layer.convolution_param
return get_strided_kernel_output_shape(params, input_shape[0], math.floor)
def shape_deconvolution(layer, input_shape):
h_i = input_shape[0][2]
w_i = input_shape[0][3]
params = layer.convolution_param
dila_h, dila_w, pad_h, pad_w, kernel_h, kernel_w, stride_h, stride_w = get_kernel_parameters(
params)
h_o = (h_i - 1) * stride_h - 2 * pad_h + dila_h * (kernel_h - 1) + 1
w_o = (w_i - 1) * stride_w - 2 * pad_w + dila_w * (kernel_w - 1) + 1
has_c_o = hasattr(params, 'num_output')
c = params.num_output if has_c_o else input_shape.channels
return [[input_shape[0][0], c, h_o, w_o]]
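# Sketch of the transposed-convolution arithmetic above (assumed values):
# with h_i=112, stride=2, pad=1, dilation=1 and kernel=4, the output height is
#   (112 - 1) * 2 - 2*1 + 1*(4 - 1) + 1 = 224,
# i.e. the common "double the spatial size" deconvolution configuration.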
def shape_pooling(layer, input_shape):
params = layer.pooling_param
global_pool = getattr(params, 'global_pooling', False)
if global_pool:
return [[input_shape[0][0], input_shape[0][1], 1, 1]]
ceil_mode = getattr(params, 'ceil_mode', True)
if ceil_mode is True:
method = math.ceil
else:
method = math.floor
return get_strided_kernel_output_shape(params, input_shape[0], method)
def shape_convolutiondepthwise(layer, input_shape):
params = layer.convolution_param
return get_strided_kernel_output_shape(params, input_shape[0], math.floor)
def shape_innerproduct(layer, input_shape):
params = layer.inner_product_param
return [[input_shape[0][0], params.num_output]]
def shape_lrn(layer, input_shape):
return input_shape
def shape_relu(layer, input_shape):
return input_shape
def shape_softmax(layer, input_shape):
return input_shape
def shape_input(layer, input_shape):
return [list(layer.input_param.shape[0].dim)]
def shape_memorydata(layer, input_shape):
params = layer.memory_data_param
shape = []
shape.append(int(params.batch_size))
shape.append(int(params.channels))
shape.append(int(params.height))
shape.append(int(params.width))
return [shape]
def shape_concat(layer, input_shape):
params = layer.concat_param
axis = params.axis
output_shape = None
for shape in input_shape:
if output_shape is None:
output_shape = []
for i in range(len(shape)):
output_shape.append(shape[i])
else:
output_shape[axis] += shape[axis]
return [output_shape]
def shape_slice(layer, input_shape):
inshape = input_shape[0]
top_len = len(layer.top)
params = layer.slice_param
axis = params.axis
slice_dim = params.slice_dim
if slice_dim != 1 and axis == 1:
axis = slice_dim
points = list(params.slice_point)
count = inshape[axis]
if len(points) == 0:
assert count % top_len == 0, "the parameter of Slice is wrong"
        part = count // top_len
t = part
while t < count:
points.append(int(t))
t += part
points = [0] + points + [count]
output_shape = []
for i in range(len(points)):
shape = []
for ii in range(len(inshape)):
shape.append(inshape[ii])
size = points[i + 1] - points[i]
shape[axis] = size
output_shape.append(shape)
if i == len(points) - 2:
break
return output_shape
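# Example of the slice-point logic above (hypothetical layer): for an input
# of shape [1, 6, 4, 4] with axis=1, three tops and an empty slice_point,
# the points list becomes [0, 2, 4, 6], yielding three outputs of shape
# [1, 2, 4, 4] each.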
def shape_prelu(layer, input_shape):
return input_shape
def shape_sigmoid(layer, input_shape):
return input_shape
def shape_absval(layer, input_shape):
return input_shape
def shape_accuracy(layer, input_shape):
return [[1]]
def shape_tanh(layer, input_shape):
return input_shape
def shape_eltwise(layer, input_shape):
return [input_shape[0]]
def shape_batchnorm(layer, input_shape):
return input_shape
def shape_scale(layer, input_shape):
return input_shape
def shape_reshape(layer, input_shape):
def count(num_list):
return reduce(lambda a, b: a * b, num_list)
inshape = input_shape[0]
params = layer.reshape_param
axis = params.axis if hasattr(params, 'axis') else 0
num_axes = params.num_axes if hasattr(params, 'num_axes') else -1
if inshape[0] == -1:
inshape[0] = 1
input_count = count(inshape)
input_num_axes = len(inshape)
input_start_axis = axis
start_axis = input_start_axis if input_start_axis >= 0 \
else input_num_axes + input_start_axis + 1
assert start_axis >= 0, "[Reshape]axis %d out of range" % (input_start_axis)
assert start_axis <= input_num_axes, "[Reshape]axis %d out of range for %d-D input data"\
% (input_start_axis, input_num_axes)
assert num_axes >= -1, "[Reshape]num_axes must be >= 0, or -1 for all"
end_axis = input_num_axes if num_axes == -1 else start_axis + num_axes
assert end_axis <= input_num_axes, "end_axis[%d] = axis[%d] + num_axes[%d] is out of range"\
% (end_axis, start_axis, num_axes)
num_axes_replaced = end_axis - start_axis
num_axes_retained = input_num_axes - num_axes_replaced
num_new_axes = len(list(params.shape.dim))
output_shape = []
for i in range(start_axis):
output_shape.append(inshape[i])
for i in range(num_new_axes):
output_shape.append(params.shape.dim[i])
for i in range(end_axis, input_num_axes):
output_shape.append(inshape[i])
assert len(output_shape) == num_axes_retained + num_new_axes,\
"[Reshape]invalid dims of output shape[%s]" % (str(output_shape))
inferred_axis = -1
copy_axes = []
constant_count = 1
for i in range(num_new_axes):
top_dim = params.shape.dim[i]
if top_dim == 0:
copy_axes.append(i)
copy_axis_index = start_axis + i
output_shape[copy_axis_index] = inshape[copy_axis_index]
elif top_dim == -1:
assert inferred_axis == -1, "[Reshape]new shape contains multiple -1 dims"
inferred_axis = i
else:
constant_count *= top_dim
if inferred_axis >= 0:
explicit_count = constant_count
l = inshape[0:start_axis]
if len(l) > 0:
explicit_count *= count(l)
l = inshape[end_axis:]
if len(l) > 0:
explicit_count *= count(l)
for i in range(len(copy_axes)):
explicit_count *= output_shape[start_axis + copy_axes[i]]
        assert input_count % explicit_count == 0, "[Reshape]bottom count[%d] "\
            "must be divisible by product of the specified dimensions[%d] "\
            % (input_count, explicit_count)
output_shape[start_axis + inferred_axis] = int(input_count / explicit_count)
output_count = count(output_shape)
assert output_count == input_count, "[Reshape]output count[%d] must match input count[%d]" % (
output_count, input_count)
output_shape[0] = -1
return [output_shape]
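# Example of the Caffe Reshape conventions handled above (assumed dims):
# reshaping an input of shape [-1, 8, 3, 3] with shape.dim = [0, -1, 9]
# copies dim 0, infers dim 1 as 72 / 9 = 8 and keeps the constant 9,
# giving [-1, 8, 9] once the batch dimension is reset to -1.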
def shape_argmax(layer, input_shape):
inshape = input_shape[0]
params = layer.argmax_param
    out_max_val = params.out_max_val if hasattr(params, "out_max_val") else False
    top_k = params.top_k if hasattr(params, "top_k") else 1
    axis = params.axis if hasattr(params, "axis") else -1
if axis < 0:
axis += len(inshape)
assert (axis + 1 == len(inshape)
), 'only can be applied on the last dimension[axis:%d, %s] now,'\
'make sure you have set axis param in xxx.prototxt file' \
% (axis, str(inshape))
output_shape = inshape
output_shape[-1] = top_k
if out_max_val is True:
output_shape[-1] *= 2
return [output_shape]
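# Example (hypothetical): for inshape [1, 10] with top_k=5 and
# out_max_val=True, the last dimension becomes top_k and is then doubled
# to hold both values and indices, giving [[1, 10]].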
def shape_crop(layer, input_shape):
assert len(input_shape) == 2, "the number of crop's inputs must be 2"
return [input_shape[1]]
def shape_flatten(layer, input_shape):
assert len(input_shape) == 1, "the number of flatten's inputs must be 1"
inshape = input_shape[0]
params = layer.flatten_param
start_axis = params.axis
end_axis = params.end_axis
if start_axis < 0:
start_axis += len(inshape)
if end_axis < 0:
end_axis += len(inshape) + 1
assert start_axis <= end_axis, 'invalid axis[%d] or end_axis[%d] params'\
% (start_axis, end_axis)
output_shape = inshape[0:start_axis]
if len(inshape[start_axis:end_axis]) != 0:
flat_sz = reduce(lambda a, b: a * b, inshape[start_axis:end_axis])
output_shape += [flat_sz]
output_shape += inshape[end_axis:len(inshape)]
output_shape[0] = -1
return [output_shape]
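# Example (assumed shape): flattening [-1, 2, 3, 4] with axis=1 and
# end_axis=-1 collapses dims 1..3 into 2*3*4 = 24, producing [[-1, 24]].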
def shape_power(layer, input_shape):
return input_shape
def shape_reduction(layer, input_shape):
params = layer.reduction_param
axis = params.axis
if axis < 0:
axis += len(input_shape[0]) + 1
assert axis <= len(input_shape[0]), 'invalid axis[%d] error' % (axis)
return [input_shape[0:axis]]
def shape_axpy(layer, input_shape):
    assert len(input_shape) == 3, "not valid input shape for axpy layer"
    assert len(input_shape[0]) == len(input_shape[1]), 'should have same dims'
    output_shape = input_shape[1]
    assert (input_shape[2] == output_shape),\
        "shape not consistent for axpy[%s <--> %s]" \
        % (str(output_shape), str(input_shape[2]))
return [output_shape]
def shape_detectionoutput(layer, input_shape):
return [[-1, 6]]
def shape_normalize(layer, input_shape):
return input_shape
def shape_permute(layer, input_shape, order=None):
inshape = input_shape[0]
output_shape = []
order = list(order)
for ii in order:
        assert ii < len(inshape), "invalid order for permute[%s]" % (str(order))
output_shape.append(inshape[ii])
return [output_shape]
def shape_priorbox(layer, input_shape, max_size=None, aspect_ratio=None):
fc_shape = input_shape[0]
N = 1
    if max_size is not None:
        N += 1
    if aspect_ratio is not None:
        N += 2 * len(aspect_ratio)
N_bbx = fc_shape[2] * fc_shape[3] * N
output_shape = [1, 2, 4 * N_bbx]
return [output_shape]
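# Example (hypothetical SSD head): for a feature map of shape [1, C, 19, 19]
# with one max_size and one extra aspect_ratio, N = 1 + 1 + 2 = 4,
# N_bbx = 19 * 19 * 4 = 1444 and the output shape is [[1, 2, 5776]].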
def shape_relu6(layer, input_shape):
return input_shape
def shape_roipooling(layer, input_shape, pooled_w=None, pooled_h=None):
    base_fea_shape = input_shape[0]
    rois_shape = input_shape[1]
output_shape = base_fea_shape
output_shape[0] = rois_shape[0]
output_shape[2] = pooled_h
output_shape[3] = pooled_w
return [output_shape]
def shape_shufflechannel(layer, input_shape):
return input_shape
def shape_upsample(layer, input_shape, scale):
    assert len(input_shape) == 1, "not valid input shape for upsample layer"
    assert type(scale) is int
    inshape = input_shape[0]
    new_h = scale * inshape[2]
    new_w = scale * inshape[3]
    output_shape = [inshape[0], inshape[1], new_h, new_w]
return [output_shape]
def shape_select(layer, input_shape, slice_point, axis):
    inshape = input_shape[0]
    start = slice_point[0]
    if len(slice_point) == 2:
        end = slice_point[1]
    else:
        end = inshape[axis]
    assert end > start, "invalid slice_point with [start:%d, end:%d]"\
        % (start, end)
    output_shape = inshape
    output_shape[axis] = end - start
return [output_shape]
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
NO_OUTPUT_COUNT = 0
def gen_codes(code_list, indent=0):
indent_blank = " " * indent
codes = []
for code_line in code_list:
if code_line.strip() == "":
codes.append('\n')
else:
codes.append(indent_blank + code_line + '\n')
return codes
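# Example: gen_codes(["x = 1", "", "y = 2"], indent=1) returns
# ["    x = 1\n", "\n", "    y = 2\n"] -- blank source lines stay
# unindented so the generated file carries no trailing whitespace.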
def get_value(layer, key, layer_id=None, different_attrs=None):
""" 进行optimizer后可能把inputs的value直接用数值代替(ConstantFuser),
会把input换成attr,所以需要此处的操作。
"""
if key in layer.inputs:
return layer.inputs[key]
else:
if different_attrs is None:
return str(layer.attrs[key])
else:
key_name = "{}_{}".format(layer.outputs[0], key)
if key_name in different_attrs:
return key_name
else:
if layer_id is None:
return str(layer.attrs[key])
key_name = "{}_{}".format("layer_id/{}".format(layer_id), key)
if key_name in different_attrs:
new_key_name = "attr_{}".format(NO_OUTPUT_COUNT)
NO_OUTPUT_COUNT += 1
diff_index = different_attrs.index(key_name)
different_attrs[diff_index] = new_key_name
return new_key_name
else:
return str(layer.attrs[key])
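# Example of the lookup order above (hypothetical layer): with
# layer.inputs == {"x": "conv1_out"}, get_value(layer, "x") returns
# "conv1_out"; if ConstantFuser has turned "x" into an attr instead, the
# literal str(layer.attrs["x"]) is returned, unless the key is registered
# in different_attrs, in which case the generated attribute name is used.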
def prim_add(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} + {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs), get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_add_(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} + {} * {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
layer.attrs["alpha"],
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_and(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} and {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs), get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_append(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{}.append({})".format(
get_value(layer, "list", layer_id, different_attrs),
get_value(layer, "element", layer_id, different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_assert(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
if layer.attrs["type"] == "eq":
values = get_value(layer, "key")
if "value" in layer.attrs:
values = layer.attrs["value"]
if isinstance(values, list):
s = ""
for v in values:
s += "{} == {} or ".format(get_value(layer, "key"), v)
if len(s) > 0:
s = s[:-4]
line = "assert {}, \'The {} must be {}!\'".format(
s, get_value(layer, "key"), get_value(layer, "value"))
else:
line = "assert {} == {}, \'The {} must be {}!\'".format(
get_value(layer, "key"),
get_value(layer, "value"),
get_value(layer, "key"), get_value(layer, "value"))
else:
raise Exception("Not implement yet!")
forward_func.extend(gen_codes([line], indent=indent))
def prim_check_dim(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
lines = []
dim = get_value(layer, "dim", different_attrs)
lines.append("if {} < 0:".format(dim))
lines.append(" {} = {} + {}".format(layer.outputs[
0], dim, get_value(layer, "len", different_attrs)))
lines.append("else:")
lines.append(" {} = {}".format(layer.outputs[0], dim))
forward_func.extend(gen_codes(lines, indent=indent))
def prim_constant(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {}".format(layer.outputs[0], layer.attrs["value"])
forward_func.extend(gen_codes([line], indent=indent))
def prim_contain(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} in {}".format(layer.outputs[0],
get_value(layer, "element", different_attrs),
get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_dict(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = dict()".format(layer.outputs[0])
forward_func.extend(gen_codes([line], indent=indent))
def prim_dict_construct(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
lines = list()
line = "{} = dict()".format(layer.outputs[0])
lines.append(line)
for i in range(len(layer.inputs)):
line = "{}[{}] = {}".format(layer.outputs[0],
get_value(layer, "key{}".format(i), different_attrs),
get_value(layer, "value{}".format(i), different_attrs))
lines.append(line)
forward_func.extend(gen_codes(lines, indent=indent))
def prim_div(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} / {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_eq(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} == {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_equal(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {}".format(layer.outputs[0], get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_exception(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "raise RaiseException({})".format(get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_float(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = float({})".format(layer.outputs[0], get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_floor(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = math.floor({})".format(layer.outputs[0],
get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_floordiv(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} // {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_getitem(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {}[{}]".format(layer.outputs[0],
get_value(layer, "list", different_attrs),
get_value(layer, "index", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_gt(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} > {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_if(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "if {} :".format(get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
block = layer.blocks[0]
b_init_lines, b_forward_lines = block.gen_dygraph_code(indent=indent + 1)
init_func.extend(b_init_lines)
forward_func.extend(b_forward_lines)
block = layer.blocks[1]
if len(block.layers) > 0:
b_init_lines, b_forward_lines = block.gen_dygraph_code(
indent=indent + 1)
if len(b_forward_lines) != 0:
line = "else:"
forward_func.extend(gen_codes([line], indent=indent))
init_func.extend(b_init_lines)
forward_func.extend(b_forward_lines)
def prim_int(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = int({})".format(layer.outputs[0], get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_is(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} is {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_isinstance(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = isinstance({}, {})".format(layer.outputs[0],
get_value(layer, "input", different_attrs),
layer.attrs["cls"])
forward_func.extend(gen_codes([line], indent=indent))
def prim_isnot(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} is not {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_le(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} <= {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_len(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = len({})".format(layer.outputs[0], get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_len2list(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
lines = []
lines.append("{} = []".format(layer.outputs[0]))
lines.append("for i in range({}):".format(get_value(layer, "len", different_attrs)))
lines.append(" {}.append(i)".format(layer.outputs[0]))
forward_func.extend(gen_codes(lines, indent=indent))
def prim_lt(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} < {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_list(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
input_len = len(layer.inputs) + len(layer.attrs)
inputs_list = list()
for i in range(input_len):
inputs_list.append(get_value(layer, "input{}".format(i), different_attrs))
inputs_str = ', '.join(inputs_list)
line = "{} = [{}]".format(layer.outputs[0], inputs_str)
forward_func.extend(gen_codes([line], indent=indent))
def prim_list_unpack(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {}".format(", ".join(layer.outputs), get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_loop(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
loop_range = get_value(layer, "input", different_attrs)
line = "for {} in range({}):".format(layer.outputs[1], loop_range)
forward_func.extend(gen_codes([line], indent=indent))
block = layer.blocks[0]
b_init_lines, b_forward_lines = block.gen_dygraph_code(indent=indent + 1)
init_func.extend(b_init_lines)
forward_func.extend(b_forward_lines)
def prim_min(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = min({})".format(layer.outputs[0], get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_mul(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} * {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_ne(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} != {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_neg(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = -{}".format(layer.outputs[0], get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_not(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = not {}".format(layer.outputs[0], get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_or(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} or {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_replaceitem(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{}[{}] = {}".format(
get_value(layer, "list", layer_id, different_attrs),
get_value(layer, "index", layer_id, different_attrs),
get_value(layer, "item", layer_id, different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_requires_grad(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = not {}.stop_gradient".format(layer.outputs[0],
get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_rsub(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} - {} * {}".format(layer.outputs[0],
get_value(layer, "y", different_attrs),
get_value(layer, "x", different_attrs),
get_value(layer, "alpha", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_select(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {}[".format(layer.outputs[0], get_value(layer, "input", different_attrs))
for dim in range(layer.attrs["dim"]):
line += ":, "
line += (get_value(layer, "index", different_attrs) + "]")
forward_func.extend(gen_codes([line], indent=indent))
def prim_set_attr(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {}".format(layer.outputs[0], get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_set_item(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{}[{}] = {}".format(
get_value(layer, "dict", different_attrs),
get_value(layer, "key", different_attrs), get_value(layer, "value", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_shape_dim(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = fluid.layers.shape({})[{}]".format(layer.outputs[0],
get_value(layer, "input", different_attrs),
get_value(layer, "dim", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_slice(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {}[{}: {}: {}]".format(layer.outputs[0],
get_value(layer, "input", different_attrs),
get_value(layer, "start", different_attrs),
get_value(layer, "end", different_attrs),
get_value(layer, "step", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_str(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = str({})".format(layer.outputs[0], get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_sub(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {} - {}".format(layer.outputs[0],
get_value(layer, "x", different_attrs),
get_value(layer, "y", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_tuple(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
input_len = len(layer.inputs) + len(layer.attrs)
inputs_list = list()
for i in range(input_len):
inputs_list.append(get_value(layer, "input{}".format(i), different_attrs))
inputs_str = ', '.join(inputs_list)
line = "{} = ({})".format(layer.outputs[0], inputs_str)
forward_func.extend(gen_codes([line], indent=indent))
def prim_tuple_unpack(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
outputs_str = ', '.join(layer.outputs)
line = "{} = {}".format(outputs_str, get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_type(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {}.dtype".format(layer.outputs[0], get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_update_end(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
lines = []
input_shape = get_value(layer, "input_shape", different_attrs)
point = get_value(layer, "point", different_attrs)
axis = get_value(layer, "axis", different_attrs)
lines.append("if len{} == 2:".format(point))
lines.append(" {} = {}[1]".format(layer.outputs[0], point))
lines.append("else:")
lines.append(" {} = {}[]".format(layer.outputs[0], dim))
forward_func.extend(gen_codes(lines, indent=indent))
def prim_var2list(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
line = "{} = {}.numpy().tolist()".format(layer.outputs[0],
get_value(layer, "input", different_attrs))
forward_func.extend(gen_codes([line], indent=indent))
def prim_warnings(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
lines = ["import warnings"]
line = "warnings.warn({}, stacklevel={})".format(
get_value(layer, "input", different_attrs), layer.attrs["stacklevel"])
lines.append(line)
forward_func.extend(gen_codes(lines, indent=indent))
...@@ -16,8 +16,8 @@ def axpy_layer(inputs, input_shape=None, name=None): ...@@ -16,8 +16,8 @@ def axpy_layer(inputs, input_shape=None, name=None):
alpha = inputs[0] alpha = inputs[0]
x = inputs[1] x = inputs[1]
y = inputs[2] y = inputs[2]
out = fluid.layers.elementwise_mul(x, alpha, axis=0) out = paddle.multiply(x, alpha, axis=0)
out = fluid.layers.elementwise_add(out, y, name=name) out = paddle.add(out, y, name=name)
return out return out
......
...@@ -15,15 +15,6 @@ def convolutiondepthwise_shape(input_shape, ...@@ -15,15 +15,6 @@ def convolutiondepthwise_shape(input_shape,
kernel_w=None, kernel_w=None,
stride_h=None, stride_h=None,
stride_w=None): stride_w=None):
[k_h, k_w] = [1, 1]
if isinstance(kernel_size, numbers.Number):
[k_h, k_w] = [kernel_size] * 2
elif len(kernel_size) > 0:
k_h = kernel_h if kernel_h > 0 else kernel_size[0]
k_w = kernel_w if kernel_w > 0 else kernel_size[len(kernel_size) - 1]
elif kernel_h > 0 or kernel_w > 0:
k_h = kernel_h
k_w = kernel_w
[s_h, s_w] = [1, 1] [s_h, s_w] = [1, 1]
if isinstance(stride, numbers.Number): if isinstance(stride, numbers.Number):
[s_h, s_w] = [stride] * 2 [s_h, s_w] = [stride] * 2
...@@ -79,15 +70,6 @@ def convolutiondepthwise_layer(inputs, ...@@ -79,15 +70,6 @@ def convolutiondepthwise_layer(inputs,
input_shape=None, input_shape=None,
name=None): name=None):
import numbers import numbers
[k_h, k_w] = [1, 1]
if isinstance(kernel_size, numbers.Number):
[k_h, k_w] = [kernel_size] * 2
elif len(kernel_size) > 0:
k_h = kernel_h if kernel_h > 0 else kernel_size[0]
k_w = kernel_w if kernel_w > 0 else kernel_size[len(kernel_size) - 1]
elif kernel_h > 0 or kernel_w > 0:
k_h = kernel_h
k_w = kernel_w
[s_h, s_w] = [1, 1] [s_h, s_w] = [1, 1]
if isinstance(stride, numbers.Number): if isinstance(stride, numbers.Number):
[s_h, s_w] = [stride] * 2 [s_h, s_w] = [stride] * 2
...@@ -122,16 +104,14 @@ def convolutiondepthwise_layer(inputs, ...@@ -122,16 +104,14 @@ def convolutiondepthwise_layer(inputs,
c_out = num_output if num_output is not None else input_shape[0][1] c_out = num_output if num_output is not None else input_shape[0][1]
group = int(c_in / (c_in / c_out)) if c_in > c_out else int(c_in / group = int(c_in / (c_in / c_out)) if c_in > c_out else int(c_in /
(c_out / c_in)) (c_out / c_in))
out = fluid.layers.conv2d( out = paddle.nn.functional.conv2d(
input, input,
dilation=[dila_h, dila_w], dilation=[dila_h, dila_w],
filter_size=[k_h, k_w],
stride=[s_h, s_w], stride=[s_h, s_w],
padding=[p_h, p_w], padding=[p_h, p_w],
groups=group, groups=group,
num_filters=c_out, weight=name + '_weights',
param_attr=name + '_weights', bias=name + '_bias',
bias_attr=name + '_bias',
name=name) name=name)
return out return out
......
...@@ -14,30 +14,18 @@ def detectionoutput_layer(inputs, ...@@ -14,30 +14,18 @@ def detectionoutput_layer(inputs,
confidence_threshold=0.1, confidence_threshold=0.1,
input_shape=None, input_shape=None,
name=None): name=None):
nms_param_str = nms_param
nms_param = {}
part = nms_param_str.split(',')
for s in part:
if s == '':
break
else:
name, obj = s.split(': ')
if name == 'top_k':
nms_param[name] = int(obj)
else:
nms_param[name] = float(obj)
if nms_param is None: if nms_param is None:
nms_param = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0} nms_param = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
mbox_conf_flatten = inputs[1] mbox_conf_flatten = inputs[1]
mbox_priorbox = inputs[2] mbox_priorbox = inputs[2]
mbox_priorbox_list = fluid.layers.split(mbox_priorbox, 2, dim=1) mbox_priorbox_list = paddle.split(mbox_priorbox, 2, dim=1)
pb = mbox_priorbox_list[0] pb = mbox_priorbox_list[0]
pbv = mbox_priorbox_list[1] pbv = mbox_priorbox_list[1]
pb = fluid.layers.reshape(x=pb, shape=[-1, 4]) pb = paddle.reshape(x=pb, shape=[-1, 4])
pbv = fluid.layers.reshape(x=pbv, shape=[-1, 4]) pbv = paddle.reshape(x=pbv, shape=[-1, 4])
mbox_loc = inputs[0] mbox_loc = inputs[0]
mbox_loc = fluid.layers.reshape(x=mbox_loc, shape=[-1, pb.shape[0], 4]) mbox_loc = paddle.reshape(x=mbox_loc, shape=[-1, pb.shape[0], 4])
mbox_conf_flatten = fluid.layers.reshape( mbox_conf_flatten = paddle.reshape(
x=mbox_conf_flatten, shape=[0, pb.shape[0], -1]) x=mbox_conf_flatten, shape=[0, pb.shape[0], -1])
default = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0} default = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
......
...@@ -13,14 +13,14 @@ def normalize_layer(inputs, ...@@ -13,14 +13,14 @@ def normalize_layer(inputs,
name=None): name=None):
assert across_spatial == False, "Only support across_spatial == False for Normalize" assert across_spatial == False, "Only support across_spatial == False for Normalize"
input = inputs[0] input = inputs[0]
l2_norm = fluid.layers.l2_normalize(input, axis=1, name=name + '_l2') l2_norm = paddle.nn.functional.normalize(input, axis=1, p=2, name=name + '_l2')
scale_param = fluid.layers.create_parameter( scale_param = paddle.static.create_parameter(
shape=[1] if channel_shared else [1, 1, 1, input_shape[0][1]], shape=[1] if channel_shared else [1, 1, 1, input_shape[0][1]],
dtype=input.dtype, dtype=input.dtype,
attr=fluid.ParamAttr(name=name + '_scale')) attr=paddle.ParamAttr(name=name + '_scale'))
scale_param = fluid.layers.reshape(x=scale_param, \ scale_param = paddle.reshape(x=scale_param, \
shape=[1] if channel_shared else [input_shape[0][1]]) shape=[1] if channel_shared else [input_shape[0][1]])
out = fluid.layers.elementwise_mul( out = paddle.multiply(
x=l2_norm, y=scale_param, axis=-1 if channel_shared else 1) x=l2_norm, y=scale_param, axis=-1 if channel_shared else 1)
return out return out
......
...@@ -14,7 +14,7 @@ def permute_shape(input_shape, order=None): ...@@ -14,7 +14,7 @@ def permute_shape(input_shape, order=None):
def permute_layer(inputs, order=None, input_shape=None, name=None): def permute_layer(inputs, order=None, input_shape=None, name=None):
input = inputs[0] input = inputs[0]
order = list(order) order = list(order)
out = fluid.layers.transpose(input, perm=order, name=name) out = paddle.transpose(input, perm=order, name=name)
return out return out
......
...@@ -8,7 +8,7 @@ def relu6_shape(input_shape): ...@@ -8,7 +8,7 @@ def relu6_shape(input_shape):
def relu6_layer(inputs, input_shape=None, name=None): def relu6_layer(inputs, input_shape=None, name=None):
input = inputs[0] input = inputs[0]
out = fluid.layers.relu6(x=input) out = paddle.nn.functional.relu6(x=input)
return out return out
......
...@@ -20,7 +20,7 @@ def roipooling_layer(inputs, ...@@ -20,7 +20,7 @@ def roipooling_layer(inputs,
name=None): name=None):
input = inputs[0] input = inputs[0]
roi = inputs[1] roi = inputs[1]
roi = fluid.layers.slice(roi, axes=[1], starts=[1], ends=[5]) roi = paddle.slice(roi, axes=[1], starts=[1], ends=[5])
out = fluid.layers.roi_pool( out = fluid.layers.roi_pool(
input, input,
roi, roi,
......
...@@ -30,7 +30,7 @@ def select_layer(inputs, ...@@ -30,7 +30,7 @@ def select_layer(inputs,
out = [] out = []
for i in range(len(slice_point)): for i in range(len(slice_point)):
out.append( out.append(
fluid.layers.slice( paddle.slice(
input, input,
axes=[axis], axes=[axis],
starts=[slice_point[i]], starts=[slice_point[i]],
......
...@@ -42,8 +42,8 @@ def upsample_layer(inputs, scale, input_shape=None, name=None): ...@@ -42,8 +42,8 @@ def upsample_layer(inputs, scale, input_shape=None, name=None):
:return: :return:
""" """
x = inputs[0] x = inputs[0]
out = fluid.layers.resize_nearest( out = paddle.nn.functional.interpolate(
x, align_corners=False, scale=scale, name=name) x, align_corners=False, scale_factor=scale, name=name)
return out return out
......
...@@ -13,19 +13,21 @@ ...@@ -13,19 +13,21 @@
# limitations under the License. # limitations under the License.
import numbers import numbers
import copy
import numpy as np import numpy as np
from x2paddle.decoder.caffe_decoder import CaffeGraph from x2paddle.decoder.caffe_decoder import CaffeGraph
from x2paddle.core.op_mapper import OpMapper from x2paddle.core.op_mapper import OpMapper
from x2paddle.core.util import * from x2paddle.core.util import *
from x2paddle.op_mapper import caffe_shape from x2paddle.op_mapper.static.caffe2paddle import caffe_shape
from x2paddle.op_mapper.caffe_custom_layer import * from x2paddle.op_mapper.static.caffe2paddle.caffe_custom_layer import *
from x2paddle.core.program import PaddleGraph
class CaffeOpMapper(OpMapper): class CaffeOpMapper(OpMapper):
directly_map_ops = { directly_map_ops = {
'AbsVal': 'abs', 'AbsVal': 'paddle.abs',
'Sigmoid': 'sigmoid', 'Sigmoid': 'paddle.nn.functional.sigmoid',
'TanH': 'tanh', 'TanH': 'paddle.tanh',
} }
def __init__(self, decoder): def __init__(self, decoder):
...@@ -34,6 +36,9 @@ class CaffeOpMapper(OpMapper): ...@@ -34,6 +36,9 @@ class CaffeOpMapper(OpMapper):
self.weights = dict() self.weights = dict()
resolver = decoder.resolver resolver = decoder.resolver
self.used_custom_layers = {} self.used_custom_layers = {}
self.pd_graph = PaddleGraph(parent_layer=None, graph_type="static")
self.pd_graph.inputs = self.graph.input_nodes
self.pd_graph.outputs = self.graph.output_nodes
print("Total nodes: {}".format(len(self.graph.topo_sort))) print("Total nodes: {}".format(len(self.graph.topo_sort)))
for node_name in self.graph.topo_sort: for node_name in self.graph.topo_sort:
...@@ -46,7 +51,7 @@ class CaffeOpMapper(OpMapper): ...@@ -46,7 +51,7 @@ class CaffeOpMapper(OpMapper):
func = getattr(self, op) func = getattr(self, op)
func(node) func(node)
elif op in custom_layers: elif op in custom_layers:
self.set_node_shape(node, is_fluid_op=False) self.set_node_shape(node, is_custom_op=True)
self.deal_custom_layer(node) self.deal_custom_layer(node)
elif op in self.directly_map_ops: elif op in self.directly_map_ops:
self.set_node_shape(node) self.set_node_shape(node)
...@@ -54,6 +59,9 @@ class CaffeOpMapper(OpMapper): ...@@ -54,6 +59,9 @@ class CaffeOpMapper(OpMapper):
else: else:
raise Exception( raise Exception(
"The op {} in model is not supported yet.".format(op)) "The op {} in model is not supported yet.".format(op))
self.pd_graph.set_parameters(self.weights)
self.pd_graph.set_custom_func(self.used_custom_layers)
def op_checker(self): def op_checker(self):
unsupported_ops = set() unsupported_ops = set()
...@@ -71,7 +79,7 @@ class CaffeOpMapper(OpMapper): ...@@ -71,7 +79,7 @@ class CaffeOpMapper(OpMapper):
print(op) print(op)
return False return False
def set_node_shape(self, node, is_fluid_op=True): def set_node_shape(self, node, is_custom_op=False):
inputs = node.inputs inputs = node.inputs
input_shape = [] input_shape = []
for i, nm in enumerate(inputs): for i, nm in enumerate(inputs):
@@ -83,11 +91,11 @@ class CaffeOpMapper(OpMapper):
         node.input_shape = input_shape
         func_name = 'shape_' + node.layer_type.lower()
-        if is_fluid_op:
+        if is_custom_op:
+            node.output_shape = compute_output_shape(node)
+        else:
             node.output_shape = getattr(caffe_shape, func_name)(node.layer,
                                                                 input_shape)
-        else:
-            node.output_shape = compute_output_shape(node)

     def adjust_parameters(self, node):
         data = node.data
@@ -189,27 +197,16 @@ class CaffeOpMapper(OpMapper):
     def Input(self, node):
         shape = list(node.layer.input_param.shape[0].dim)[1:]
         dtype = 'float32'
-        attr = {
-            'dtype': string(dtype),
-            'shape': shape,
-            'name': string(node.layer_name)
-        }
-        node.fluid_code.add_layer(
-            "data", inputs=None, output=node, param_attr=attr)
-
-    def MemoryData(self, node):
-        # TODO(syf): Paddlepaddle can't fully support
-        shape = node.output_shape[0][1:]
-        dtype = 'float32'
-        attr = {
-            'dtype': string(dtype),
-            'shape': shape,
-            'name': string(node.layer_name)
-        }
-        node.fluid_code.add_layer(
-            "data", inputs=None, output=node.layer_name + '0', param_attr=attr)
-        node.fluid_code.add_note('{} = [{}]'.format(node.layer_name,
-                                                    node.layer_name + '0'))
+        layer_attrs = {
+            "dtype": string(dtype),
+            "shape": [-1] + shape,
+            "name": string(node.layer_name)
+        }
+        self.pd_graph.add_layer(
+            kernel="paddle.static.data",
+            inputs={},
+            outputs=[node.layer_name],
+            **layer_attrs)
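A minimal sketch of the line the mapper now generates for a Caffe Input layer, assuming Paddle 2.0's static-graph API and an illustrative input of shape (3, 224, 224); the leading -1 that the handler prepends keeps the batch dimension flexible:

```python
# Hedged sketch of the generated input declaration; names are illustrative.
import paddle

paddle.enable_static()
image = paddle.static.data(name='data', shape=[-1, 3, 224, 224],
                           dtype='float32')
```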

     def Convolution(self, node):
         data = node.data
@@ -219,7 +216,7 @@ class CaffeOpMapper(OpMapper):
         if data is None:
             data = []
             print(
-                'The parameter of {} (type is {}) is not set. So we set the parameters as 0'
+                "The parameter of {} (type is {}) is not set. So we set the parameters as 0"
                 .format(node.layer_name, node.layer_type))
             input_c = node.input_shape[0][1]
             output_c = channel
@@ -229,28 +226,53 @@ class CaffeOpMapper(OpMapper):
             data.append(np.zeros([output_c, ]).astype('float32'))
         else:
             data = self.adjust_parameters(node)
-        self.weights[node.layer_name + '_weights'] = data[0]
+        self.weights[node.layer_name + '_weight_attr'] = data[0]
         if len(data) == 2:
-            self.weights[node.layer_name + '_bias'] = data[1]
+            self.weights[node.layer_name + '_bias_attr'] = data[1]
         assert len(node.inputs
                    ) == 1, 'The count of Convolution node\'s input is not 1.'
         input = self.graph.get_bottom_node(node, idx=0, copy=True)
-        attr = {
-            'filter_size': kernel,
-            'num_filters': channel,
+        layer_attrs = {
+            "dtype": string("float32"),
+            "shape": data[0].shape,
+            "name": string("{}_weight".format(node.layer_name))
+        }
+        self.pd_graph.add_layer(
+            kernel="paddle.static.data",
+            inputs={},
+            outputs=["{}_weight".format(node.layer_name)],
+            **layer_attrs)
+        self.pd_graph.add_layer(
+            kernel="paddle.ParamAttr",
+            inputs={},
+            outputs=["{}_weight_attr".format(node.layer_name)],
+            name=string("{}_weight_attr".format(node.layer_name)))
+        layer_attrs = {
             'stride': stride,
             'padding': pad,
             'dilation': dilation,
             'groups': group,
             'name': string(node.layer_name),
-            'param_attr': string(node.layer_name + '_weights'),
-            'bias_attr': False
-            if len(data) == 1 else string(node.layer_name + '_bias'),
         }
-        node.fluid_code.add_layer(
-            "conv2d", inputs=input, output=node, param_attr=attr)
+        inputs_dict = {"x": self.get_input_name(input),
+                       "weight": "{}_weight".format(node.layer_name),
+                       "weight_attr": "{}_weight_attr".format(node.layer_name)}
+        if len(data) == 1:
+            layer_attrs["bias_attr"] = False
+        else:
+            self.pd_graph.add_layer(
+                kernel="paddle.ParamAttr",
+                inputs={},
+                outputs=["{}_bias_attr".format(node.layer_name)],
+                name=string("{}_bias_attr".format(node.layer_name)))
+            inputs_dict["bias_attr"] = "{}_bias_attr".format(node.layer_name)
+        self.pd_graph.add_layer(
+            kernel="paddle.nn.functional.conv2d",
+            inputs=inputs_dict,
+            outputs=[node.layer_name],
+            **layer_attrs)
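The handler above wires the converted Caffe blob in as an explicit weight tensor. A minimal sketch of the target call, assuming Paddle 2.0 and illustrative OIHW weights (this is not the mapper's generated code, which is routed through pd_graph):

```python
# Hedged sketch: Caffe Convolution -> paddle.nn.functional.conv2d.
import numpy as np
import paddle

x = paddle.randn([1, 3, 32, 32])  # NCHW input
w = paddle.to_tensor(np.random.rand(8, 3, 3, 3).astype('float32'))  # OIHW
y = paddle.nn.functional.conv2d(x, w, bias=None, stride=1, padding=1,
                                dilation=1, groups=1)
print(y.shape)  # [1, 8, 32, 32]
```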

     def Deconvolution(self, node):
         data = node.data
         params = node.layer.convolution_param
@@ -275,48 +297,52 @@ class CaffeOpMapper(OpMapper):
         assert len(node.inputs
                    ) == 1, 'The count of Deconvolution node\'s input is not 1.'
         input = self.graph.get_bottom_node(node, idx=0, copy=True)
-        attr = {
+        layer_attrs = {
             'output_size': None,
-            'filter_size': kernel,
-            'num_filters': channel,
             'stride': stride,
             'padding': pad,
             'dilation': dilation,
             'groups': group,
             'name': string(node.layer_name),
-            'param_attr': string(node.layer_name + '_weights'),
-            'bias_attr': False
+            'weight': string(node.layer_name + '_weights'),
+            'bias': False
             if len(data) == 1 else string(node.layer_name + '_bias')
         }
-        node.fluid_code.add_layer(
-            "conv2d_transpose", inputs=input, output=node, param_attr=attr)
+        self.pd_graph.add_layer(
+            kernel="paddle.nn.functional.conv_transpose2d",
+            inputs={"x": self.get_input_name(input)},
+            outputs=[node.layer_name],
+            **layer_attrs)

     def Pooling(self, node):
         params = node.layer.pooling_param
         ceil_mode = getattr(params, 'ceil_mode', True)
         global_pool = getattr(params, 'global_pooling', False)
+        assert not global_pool, "The global_pool must be False!"
         kernel_default = [1, 1]
         channel, kernel, stride, pad, dilation, group = self.get_kernel_parameters(
             node.layer_type, params)
-        if params.pool == 0:
-            pool_type = 'max'
-        else:
-            pool_type = 'avg'
         assert len(
             node.inputs) == 1, 'The count of Pooling node\'s input is not 1.'
         input = self.graph.get_bottom_node(node, idx=0, copy=True)
-        attr = {
-            'pool_size': kernel,
-            'pool_stride': stride,
-            'pool_padding': pad,
+        layer_attrs = {
+            'kernel_size': kernel,
+            'stride': stride,
+            'padding': pad,
             'ceil_mode': ceil_mode,
-            'pool_type': string(pool_type),
-            'exclusive': False,
-            'global_pooling': global_pool,
-            'name': string(node.layer_name)
         }
-        node.fluid_code.add_layer(
-            "pool2d", inputs=input, output=node, param_attr=attr)
+        if params.pool == 0:
+            self.pd_graph.add_layer(
+                kernel="paddle.nn.functional.max_pool2d",
+                inputs={"x": self.get_input_name(input)},
+                outputs=[node.layer_name],
+                **layer_attrs)
+        else:
+            self.pd_graph.add_layer(
+                kernel="paddle.nn.functional.avg_pool2d",
+                inputs={"x": self.get_input_name(input)},
+                outputs=[node.layer_name],
+                **layer_attrs)
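Sketch of the Pooling mapping above, assuming Paddle 2.0: `pool == 0` (Caffe MAX) goes to `max_pool2d`, everything else to `avg_pool2d`; the kernel, stride, and padding values below are illustrative stand-ins for what `get_kernel_parameters` extracts:

```python
# Hedged sketch of the two pooling targets; values are illustrative.
import paddle
import paddle.nn.functional as F

x = paddle.rand([1, 16, 14, 14])
y_max = F.max_pool2d(x, kernel_size=2, stride=2, padding=0, ceil_mode=True)
y_avg = F.avg_pool2d(x, kernel_size=2, stride=2, padding=0, ceil_mode=True)
print(y_max.shape, y_avg.shape)  # [1, 16, 7, 7] [1, 16, 7, 7]
```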

     def LRN(self, node):
         assert len(node.inputs) == 1, 'The count of LRN node\'s input is not 1.'
@@ -329,15 +355,18 @@ class CaffeOpMapper(OpMapper):
         # We'll account for that here.
         alpha = params.alpha / float(params.local_size)
         input = self.graph.get_bottom_node(node, idx=0, copy=True)
-        attr = {
+        layer_attrs = {
             'n': params.local_size,
             'k': params.k,
             'alpha': alpha,
             'beta': params.beta,
             'name': string(node.layer_name)
         }
-        node.fluid_code.add_layer(
-            "lrn", inputs=input, output=node, param_attr=attr)
+        self.pd_graph.add_layer(
+            kernel="fluid.layers.lrn",
+            inputs={"input": self.get_input_name(input)},
+            outputs=[node.layer_name],
+            **layer_attrs)

     def InnerProduct(self, node):
         data = node.data
@@ -374,7 +403,7 @@ class CaffeOpMapper(OpMapper):
         assert params.axis == 1
         assert params.bias_term == True
         input = self.graph.get_bottom_node(node, idx=0, copy=True)
-        attr = {
+        layer_attrs = {
             'size': params.num_output,
             'name': string(node.layer_name),
             'act': None,
@@ -382,8 +411,11 @@ class CaffeOpMapper(OpMapper):
             'bias_attr': False
             if len(data) == 1 else string(node.layer_name + '_bias')
         }
-        node.fluid_code.add_layer(
-            "fc", inputs=input, output=node, param_attr=attr)
+        self.pd_graph.add_layer(
+            kernel="paddle.static.nn.fc",
+            inputs={"input": self.get_input_name(input)},
+            outputs=[node.layer_name],
+            **layer_attrs)

     def Softmax(self, node):
         assert len(
@@ -394,9 +426,12 @@ class CaffeOpMapper(OpMapper):
         shape = node.input_shape[0]
         dims = len(shape)
         axis = axis + dims if axis < 0 else axis
-        attr = {'axis': axis, 'name': string(node.layer_name + '_softmax')}
-        node.fluid_code.add_layer(
-            "softmax", inputs=input, output=node, param_attr=attr)
+        layer_attrs = {'axis': axis, 'name': string(node.layer_name + '_softmax')}
+        self.pd_graph.add_layer(
+            kernel="paddle.nn.functional.softmax",
+            inputs={"x": self.get_input_name(input)},
+            outputs=[node.layer_name],
+            **layer_attrs)

     def Slice(self, node):
         assert len(
@@ -412,27 +447,33 @@ class CaffeOpMapper(OpMapper):
         sections_list = []
         for s in output_shape:
             sections_list.append(s[axis])
-        attr = {
+        layer_attrs = {
             'num_or_sections': sections_list,
             'dim': axis,
             'name': string(node.layer_name)
         }
-        node.fluid_code.add_layer(
-            "split", inputs=input, output=node.layer_name, param_attr=attr)
+        self.pd_graph.add_layer(
+            kernel="paddle.split",
+            inputs={"input": self.get_input_name(input)},
+            outputs=[node.layer_name],
+            **layer_attrs)

     def Concat(self, node):
         assert len(
             node.inputs
         ) >= 1, 'The count of Concat node\'s input is not more than 1.'
-        inputs = []
+        inputs_list = []
         for i in range(len(node.inputs)):
             input = self.graph.get_bottom_node(node, idx=i, copy=True)
-            inputs.append(input)
+            inputs_list.append(self.get_input_name(input))
         params = node.layer.concat_param
         axis = params.axis
-        attr = {'axis': axis, 'name': string(node.layer_name)}
-        node.fluid_code.add_layer(
-            "concat", inputs=inputs, output=node, param_attr=attr)
+        layer_attrs = {'axis': axis, 'name': string(node.layer_name)}
+        self.pd_graph.add_layer(
+            kernel="paddle.concat",
+            inputs={"x": inputs_list},
+            outputs=[node.layer_name],
+            **layer_attrs)
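A sketch of the Slice and Concat targets above, assuming Paddle 2.0; note that the 2.0 signature of `paddle.split` takes `axis`, so the `dim` keyword the Slice handler still emits presumably relies on a transitional alias. The section sizes below are illustrative stand-ins for Caffe's slice_point:

```python
# Hedged sketch: Caffe Slice -> paddle.split, Caffe Concat -> paddle.concat.
import paddle

x = paddle.rand([1, 6, 4, 4])
parts = paddle.split(x, num_or_sections=[2, 4], axis=1)  # slice_point -> sections
y = paddle.concat(parts, axis=1)                         # inverse operation
print([p.shape for p in parts], y.shape)
```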

     def ReLU(self, node):
         """
@@ -447,12 +488,16 @@ class CaffeOpMapper(OpMapper):
         params = node.layer.relu_param
         if params.HasField('negative_slope') and params.negative_slope != 0:
             negative_slope = float(params.negative_slope)
-            attr = {'alpha': negative_slope}
-            node.fluid_code.add_layer(
-                'leaky_relu', inputs=input, output=node, param_attr=attr)
+            self.pd_graph.add_layer(
+                kernel="paddle.nn.functional.leaky_relu",
+                inputs={"x": self.get_input_name(input)},
+                outputs=[node.layer_name],
+                negative_slope=negative_slope)
         else:
-            node.fluid_code.add_layer('relu', inputs=input, output=node)
+            self.pd_graph.add_layer(
+                kernel="paddle.nn.functional.relu",
+                inputs={"x": self.get_input_name(input)},
+                outputs=[node.layer_name])

     def PReLU(self, node):
         assert len(
@@ -467,44 +512,40 @@ class CaffeOpMapper(OpMapper):
         data = node.data
         assert data is not None, 'The parameter of {} (type is {}) is not set. You need to use python package of caffe to set the default value.'.format(
             node.layer_name, node.layer_type)
-        import paddle
-        pd_version = paddle.__version__
-        if pd_version.startswith("1.8.4") or pd_version.startswith("1.8.3"):
-            self.weights[node.layer_name + '_weights'] = data[0].reshape(1, -1)
-        else:
-            self.weights[node.layer_name + '_weights'] = data[0]
-        attr = {
+        self.weights[node.layer_name + '_weights'] = data[0]
+        layer_attrs = {
             'mode': string(mode),
             'param_attr': string(node.layer_name + '_weights'),
             'name': string(node.layer_name)
         }
-        node.fluid_code.add_layer(
-            "prelu", inputs=input, output=node, param_attr=attr)
+        self.pd_graph.add_layer(
+            kernel="paddle.nn.functional.prelu",
+            inputs={"x": self.get_input_name(input)},
+            outputs=[node.layer_name],
+            **layer_attrs)

     def Accuracy(self, node):
         assert len(
             node.inputs) == 2, 'The count of Accuracy node\'s input is not 2.'
-        inputs = []
-        inputs[0] = None
-        inputs[1] = None
-        i = 0
-        for shape in node.input_shape:
+        inputs_dict = dict()
+        for i, shape in enumerate(node.input_shape):
             if shape[1] == 1:
                 input = self.graph.get_bottom_node(node, idx=i, copy=True)
-                inputs[1] = input
+                inputs_dict["label"] = self.get_input_name(input)
             else:
                 input = self.graph.get_bottom_node(node, idx=i, copy=True)
-                inputs[0] = input
-            i += 1
+                inputs_dict["input"] = self.get_input_name(input)
         params = node.layer.accuracy_param
         top_k = params.top_k
         axis = params.axis
         ignore_label = params.ignore_label
         assert axis == 1, 'PaddlePaddle can not support the situation when the axis is not 1.'
         assert not ignore_label >= 0, 'PaddlePaddle can not support the situation when the model has ignore label.'
-        attr = {'k': top_k}
-        node.fluid_code.add_layer(
-            "accuracy", inputs=inputs, output=node, param_attr=attr)
+        self.pd_graph.add_layer(
+            kernel="paddle.metric.accuracy",
+            inputs=inputs_dict,
+            outputs=[node.layer_name],
+            k=top_k)

     def Eltwise(self, node):
         assert len(
@@ -518,79 +559,68 @@ class CaffeOpMapper(OpMapper):
         inputs.append(input1)
         if mode == 0:
             inputs_dict = {}
-            inputs_dict['x'] = inputs[0]
-            inputs_dict['y'] = inputs[1]
-            attr = {'act': None, 'name': string(node.layer_name)}
-            node.fluid_code.add_layer(
-                "elementwise_mul",
-                inputs=inputs_dict,
-                output=node,
-                param_attr=attr)
+            inputs_dict['x'] = self.get_input_name(inputs[0])
+            inputs_dict['y'] = self.get_input_name(inputs[1])
+            self.pd_graph.add_layer(
+                kernel="paddle.multiply",
+                inputs=inputs_dict,
+                outputs=[node.layer_name])
         elif mode == 1:
             if hasattr(params, 'coeff') and len(params.coeff) == 2:
                 coeff = params.coeff
                 input1_name = self.get_input_name(inputs[0])
-                attr = {
+                layer_attrs = {
                     'shape': [1],
-                    'value': coeff[0],
+                    'fill_value': coeff[0],
                     'dtype': '{}.dtype'.format(input1_name)
                 }
-                node.fluid_code.add_layer(
-                    "fill_constant",
-                    inputs=None,
-                    output=node.layer_name + '_const1',
-                    param_attr=attr)
-                attr = {'act': None, 'name': string(node.layer_name + '_mul1')}
-                node.fluid_code.add_layer(
-                    "elementwise_mul",
-                    inputs=input1_name + ', ' + node.layer_name + '_const1',
-                    output=node.layer_name + '_mul1',
-                    param_attr=attr)
+                self.pd_graph.add_layer(
+                    kernel="paddle.full",
+                    inputs={},
+                    outputs=["{}_const1".format(node.layer_name)],
+                    **layer_attrs)
+                self.pd_graph.add_layer(
+                    kernel="paddle.multiply",
+                    inputs={"x": input1_name,
+                            "y": "{}_const1".format(node.layer_name)},
+                    outputs=["{}_mul1".format(node.layer_name)])
                 input2_name = self.get_input_name(inputs[1])
-                attr = {
+                layer_attrs = {
                     'shape': [1],
-                    'value': coeff[1],
+                    'fill_value': coeff[1],
                     'dtype': '{}.dtype'.format(input2_name)
                 }
-                node.fluid_code.add_layer(
-                    "fill_constant",
-                    inputs=None,
-                    output=node.layer_name + '_const2',
-                    param_attr=attr)
-                attr = {'act': None, 'name': string(node.layer_name + '_mul2')}
-                node.fluid_code.add_layer(
-                    "elementwise_mul",
-                    inputs=input2_name + ', ' + node.layer_name + '_const2',
-                    output=node.layer_name + '_mul2',
-                    param_attr=attr)
-                attr = {'act': None, 'name': string(node.layer_name)}
-                node.fluid_code.add_layer(
-                    "elementwise_add",
-                    inputs='{}_mul1, {}_mul2'.format(node.layer_name,
-                                                     node.layer_name),
-                    output=node,
-                    param_attr=attr)
+                self.pd_graph.add_layer(
+                    kernel="paddle.full",
+                    inputs={},
+                    outputs=["{}_const2".format(node.layer_name)],
+                    **layer_attrs)
+                self.pd_graph.add_layer(
+                    kernel="paddle.multiply",
+                    inputs={"x": input2_name,
+                            "y": "{}_const2".format(node.layer_name)},
+                    outputs=["{}_mul2".format(node.layer_name)])
+                self.pd_graph.add_layer(
+                    kernel="paddle.add",
+                    inputs={"x": "{}_mul1".format(node.layer_name),
+                            "y": "{}_mul2".format(node.layer_name)},
+                    outputs=[node.layer_name])
             else:
                 inputs_dict = {}
-                inputs_dict['x'] = inputs[0]
-                inputs_dict['y'] = inputs[1]
-                attr = {'act': None, 'name': string(node.layer_name)}
-                node.fluid_code.add_layer(
-                    "elementwise_add",
-                    inputs=inputs_dict,
-                    output=node,
-                    param_attr=attr)
+                inputs_dict['x'] = self.get_input_name(inputs[0])
+                inputs_dict['y'] = self.get_input_name(inputs[1])
+                self.pd_graph.add_layer(
+                    kernel="paddle.add",
+                    inputs=inputs_dict,
+                    outputs=[node.layer_name])
         else:
             inputs_dict = {}
-            inputs_dict['x'] = inputs[0]
-            inputs_dict['y'] = inputs[1]
-            attr = {'act': None, 'name': string(node.layer_name)}
-            node.fluid_code.add_layer(
-                "elementwise_max",
-                inputs=inputs_dict,
-                output=node,
-                param_attr=attr)
+            self.pd_graph.add_layer(
+                kernel="paddle.maximum",
+                inputs=inputs_dict,
+                outputs=[node.layer_name])
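Note that the final branch handles Caffe's Eltwise MAX, which corresponds to `paddle.maximum` rather than `paddle.add` (the elementwise maximum replaces the old `elementwise_max`). A sketch of the coefficient-weighted SUM lowering above, assuming Paddle 2.0 and illustrative coefficients:

```python
# Hedged sketch: Eltwise SUM with coeff -> full / multiply / add.
import paddle

a = paddle.rand([2, 3])
b = paddle.rand([2, 3])
c0 = paddle.full(shape=[1], fill_value=0.5, dtype='float32')
c1 = paddle.full(shape=[1], fill_value=2.0, dtype='float32')
out = paddle.add(paddle.multiply(a, c0), paddle.multiply(b, c1))
```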

     def BatchNorm(self, node):
         assert len(
@@ -619,7 +649,7 @@ class CaffeOpMapper(OpMapper):
         variance *= scaling_factor
         self.weights[node.layer_name + '_mean'] = mean
         self.weights[node.layer_name + '_variance'] = variance
-        attr = {
+        layer_attrs = {
             'is_test': True,
             'param_attr': None,
             'bias_attr': None,
@@ -628,8 +658,11 @@ class CaffeOpMapper(OpMapper):
             'epsilon': eps,
             'name': string(node.layer_name)
         }
-        node.fluid_code.add_layer(
-            "batch_norm", inputs=input, output=node, param_attr=attr)
+        self.pd_graph.add_layer(
+            kernel="paddle.nn.functional.batch_norm",
+            inputs={"input": self.get_input_name(input)},
+            outputs=[node.layer_name],
+            **layer_attrs)

     def Scale(self, node):
         if node.data is None:
@@ -659,73 +692,87 @@ class CaffeOpMapper(OpMapper):
             input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
             input1 = self.graph.get_bottom_node(node, idx=1, copy=True)
             inputs_dict = {}
-            inputs_dict['x'] = input0
-            inputs_dict['y'] = input1
-            attr = {'axis': axis, 'name': string(node.layer_name + '_mul')}
-            node.fluid_code.add_layer(
-                "elementwise_mul",
-                inputs=inputs_dict,
-                output=node.layer_name + '_mul',
-                param_attr=attr)
+            inputs_dict['x'] = self.get_input_name(input0)
+            inputs_dict['y'] = self.get_input_name(input1)
+            self.pd_graph.add_layer(
+                kernel="paddle.multiply",
+                inputs=inputs_dict,
+                outputs=["{}_mul".format(node.layer_name)],
+                axis=axis)
         else:
             bias_shape = node.input_shape[0][axis:axis + num_axes]
             input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
             input0_name = self.get_input_name(input0)
-            attr = {
+            self.pd_graph.add_layer(
+                kernel="paddle.ParamAttr",
+                inputs={},
+                outputs=["{}_scale".format(node.layer_name)],
+                name=string("{}_scale".format(node.layer_name)))
+            layer_attrs = {
                 'dtype': '{}.dtype'.format(input0_name),
                 'shape': bias_shape,
                 'name': string(node.layer_name + '_cparam1'),
-                'attr': string(node.layer_name + '_scale'),
                 'is_bias': True,
                 'default_initializer': 'Constant(value=1.0)'
             }
-            node.fluid_code.add_layer(
-                "create_parameter", inputs=None, output=node, param_attr=attr)
+            self.pd_graph.add_layer(
+                kernel="paddle.static.create_parameter",
+                inputs={"attr": node.layer_name + '_scale'},
+                outputs=["{}_cparam1".format(node.layer_name)],
+                **layer_attrs)
             inputs_dict = {}
-            inputs_dict['x'] = input0
-            inputs_dict['y'] = node
-            attr = {'axis': axis, 'name': string(node.layer_name + '_mul')}
-            node.fluid_code.add_layer(
-                "elementwise_mul",
-                inputs=inputs_dict,
-                output=node.layer_name + '_mul',
-                param_attr=attr)
+            inputs_dict['x'] = self.get_input_name(input0)
+            inputs_dict['y'] = "{}_cparam1".format(node.layer_name)
+            self.pd_graph.add_layer(
+                kernel="paddle.multiply",
+                inputs=inputs_dict,
+                outputs=["{}_mul".format(node.layer_name)],
+                axis=axis)
             scale_shape = bias_shape
             input0_name = self.get_input_name(input0)
-            attr = {
+            self.pd_graph.add_layer(
+                kernel="paddle.ParamAttr",
+                inputs={},
+                outputs=["{}_offset".format(node.layer_name)],
+                name=string("{}_offset".format(node.layer_name)))
+            layer_attrs = {
                 'dtype': '{}.dtype'.format(input0_name),
                 'shape': scale_shape,
                 'name': string(node.layer_name + '_cparam2'),
-                'attr': string(node.layer_name + '_offset'),
                 'is_bias': True,
                 'default_initializer': 'Constant(value=1.0)'
             }
-            node.fluid_code.add_layer(
-                "create_parameter",
-                inputs=None,
-                output=node.layer_name + '_offset_param',
-                param_attr=attr)
-            attr = {'axis': axis, 'name': string(node.layer_name + '_add')}
-            node.fluid_code.add_layer(
-                "elementwise_add",
-                inputs='{}_mul, {}_offset_param'.format(node.layer_name,
                                                         node.layer_name),
-                output=node,
-                param_attr=attr)
+            self.pd_graph.add_layer(
+                kernel="paddle.static.create_parameter",
+                inputs={"attr": node.layer_name + '_offset'},
+                outputs=["{}_cparam2".format(node.layer_name)],
+                **layer_attrs)
+            inputs_dict = {}
+            inputs_dict['x'] = "{}_mul".format(node.layer_name)
+            inputs_dict['y'] = "{}_cparam2".format(node.layer_name)
+            self.pd_graph.add_layer(
+                kernel="paddle.add",
+                inputs=inputs_dict,
+                outputs=[node.layer_name],
+                axis=axis)

     def Reshape(self, node):
         input = self.graph.get_bottom_node(node, idx=0, copy=True)
         top_count = len(input.layer.top)
         is_inplace = False if top_count == 1 else True
         output_shape = node.output_shape[0]
-        attr = {
+        layer_attrs = {
             'shape': output_shape,
             'inplace': is_inplace,
             'act': None,
             'name': string(node.layer_name)
         }
-        node.fluid_code.add_layer(
-            "reshape", inputs=input, output=node, param_attr=attr)
+        self.pd_graph.add_layer(
+            kernel="paddle.reshape",
+            inputs={"x": self.get_input_name(input)},
+            outputs=[node.layer_name],
+            **layer_attrs)

     def ArgMax(self, node):
         assert len(node.inputs) == 1 and len(
@@ -741,33 +788,29 @@ class CaffeOpMapper(OpMapper):
         if axis < 0:
             axis += len(input_shape)
         if out_max_val is True:
-            attr = {'k': top_k, 'name': string(node.layer_name + '_topk')}
-            node.fluid_code.add_layer(
-                "topk",
-                inputs=input,
-                output='{}_topk_var, {}_index_var'.format(node.layer_name,
                                                           node.layer_name),
-                param_attr=attr)
-            attr = {'dtype': '{}_topk_var.dtype'.format(node.layer_name)}
-            node.fluid_code.add_layer(
-                "cast",
-                inputs='{}_index_var'.format(node.layer_name),
-                output='{}_index_var'.format(node.layer_name),
-                param_attr=attr)
-            attr = {'axis': axis, 'name': string(node.layer_name)}
-            node.fluid_code.add_layer(
-                "concat",
-                inputs='{}_topk_var, {}_index_var'.format(node.layer_name,
                                                           node.layer_name),
-                output=node,
-                param_attr=attr)
+            self.pd_graph.add_layer(
+                kernel="paddle.topk",
+                inputs={"input": self.get_input_name(input)},
+                outputs=["{}_topk_var".format(node.layer_name),
                         "{}_index_var".format(node.layer_name)],
+                k=top_k)
+            self.pd_graph.add_layer(
+                kernel="paddle.cast",
+                inputs={"x": "{}_index_var".format(node.layer_name)},
+                outputs=["{}_index_var".format(node.layer_name)],
+                dtype="{}_topk_var.dtype".format(node.layer_name))
+            self.pd_graph.add_layer(
+                kernel="paddle.concat",
+                inputs={"x": "[{}_topk_var, {}_index_var]".format(node.layer_name,
                                                                   node.layer_name)},
+                outputs=[node.layer_name],
+                axis=axis)
         else:
-            attr = {'k': top_k, 'name': string(node.layer_name)}
-            node.fluid_code.add_layer(
-                "topk",
-                inputs=input,
-                output='_, {}'.format(node.layer_name),
-                param_attr=attr)
+            self.pd_graph.add_layer(
+                kernel="paddle.topk",
+                inputs={"input": self.get_input_name(input)},
+                outputs=["_", node.layer_name],
+                k=top_k)
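Note that in the `out_max_val` branch the cast must apply to the index tensor so that values and indices share a dtype before being concatenated (as in the pre-change code). A sketch of that branch, assuming Paddle 2.0:

```python
# Hedged sketch: ArgMax with out_max_val -> topk / cast / concat.
import paddle

x = paddle.rand([2, 5])
values, indices = paddle.topk(x, k=1)
indices = paddle.cast(indices, 'float32')        # match the value dtype
out = paddle.concat([values, indices], axis=1)   # shape [2, 2]
```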

     def Crop(self, node):
         assert len(
@@ -786,24 +829,25 @@ class CaffeOpMapper(OpMapper):
         ) == len(offset), "invalid offset[%s] in crop layer" % (
             str(offset))
         offset_real = [0] * axis + offset
-        attr = {'offsets': list(offset_real), 'name': string(node.layer_name)}
-        node.fluid_code.add_layer(
-            "crop",
-            inputs={'x': input,
                    'shape': node.input_shape[1]},
-            output=node,
-            param_attr=attr)
+        layer_attrs = {"offsets": list(offset_real),
                       "shape": node.input_shape[1]}
+        self.pd_graph.add_layer(
+            kernel="paddle.crop",
+            inputs={"x": self.get_input_name(input)},
+            outputs=[node.layer_name],
+            **layer_attrs)

     def Flatten(self, node):
         assert len(
             node.
             inputs) == 1, 'The count of DetectionOutput node\'s input is not 1.'
         input = self.graph.get_bottom_node(node, idx=0, copy=True)
-        shape = node.output_shape[0]
-        attr = {'shape': shape, 'name': string(node.layer_name)}
-        node.fluid_code.add_layer(
-            "reshape", inputs=input, output=node, param_attr=attr)
+        self.pd_graph.add_layer(
+            kernel="paddle.reshape",
+            inputs={"x": self.get_input_name(input)},
+            outputs=[node.layer_name],
+            shape=node.output_shape[0])

     def Power(self, node):
         assert len(
             node.inputs) == 1, 'The count of Permute node\'s input is not 1.'
@@ -812,17 +856,22 @@ class CaffeOpMapper(OpMapper):
         power = params.power
         scale = params.scale
         shift = params.shift
-        attr = {
+        layer_attrs = {
             'scale': scale,
             'bias': shift,
             'bias_after_scale': True,
             'name': string(node.layer_name + '_scale')
         }
-        node.fluid_code.add_layer(
-            "scale", inputs=input, output=node, param_attr=attr)
-        attr = {'factor': power, 'name': string(node.layer_name)}
-        node.fluid_code.add_layer(
-            "pow", inputs=node, output=node, param_attr=attr)
+        self.pd_graph.add_layer(
+            kernel="paddle.scale",
+            inputs={"x": self.get_input_name(input)},
+            outputs=[node.layer_name],
+            **layer_attrs)
+        self.pd_graph.add_layer(
+            kernel="paddle.pow",
+            inputs={"x": node.layer_name},
+            outputs=[node.layer_name],
+            factor=power)
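Caffe's Power layer computes y = (scale * x + shift) ** power, which the handler above lowers to `paddle.scale` followed by `paddle.pow`. A sketch assuming Paddle 2.0; note that `paddle.pow` in 2.0 takes the exponent as its second argument, so the `factor` keyword emitted above presumably relies on a 1.x-style alias:

```python
# Hedged sketch of the Power lowering; parameter values are illustrative.
import paddle

x = paddle.rand([2, 3])
y = paddle.scale(x, scale=2.0, bias=0.5, bias_after_scale=True)
y = paddle.pow(y, 2.0)
```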

     def Reduction(self, node):
         assert len(
@@ -839,46 +888,63 @@ class CaffeOpMapper(OpMapper):
             axis += input_len + 1
         dim = list(range(input_len))
         if operation == 1:  ## operation = SUM
-            attr = {
+            layer_attrs = {
                 'dim': dim[axis:],
                 'keep_dim': False,
                 'name': string(node.layer_name)
             }
-            node.fluid_code.add_layer(
-                "reduce_sum", inputs=input, output=node, param_attr=attr)
+            self.pd_graph.add_layer(
+                kernel="paddle.sum",
+                inputs={"input": self.get_input_name(input)},
+                outputs=[node.layer_name],
+                **layer_attrs)
         elif operation == 2:  ## operation = ASUM
-            attr = {'name': string(node.layer_name + '_abs')}
-            node.fluid_code.add_layer(
-                "abs", inputs=input, output=node, param_attr=attr)
-            attr = {
+            self.pd_graph.add_layer(
+                kernel="paddle.abs",
+                inputs={"x": self.get_input_name(input)},
+                outputs=[node.layer_name])
+            layer_attrs = {
                 'dim': dim[axis:],
                 'keep_dim': False,
                 'name': string(node.layer_name)
             }
-            node.fluid_code.add_layer(
-                "reduce_sum", inputs=node, output=node, param_attr=attr)
+            self.pd_graph.add_layer(
+                kernel="paddle.sum",
+                inputs={"input": node.layer_name},
+                outputs=[node.layer_name],
+                **layer_attrs)
         elif operation == 3:  ## operation = SUMSQ
-            attr = {'factor': 2.0, 'name': string(node.layer_name + '_pow')}
-            node.fluid_code.add_layer(
-                "pow", inputs=input, output=node, param_attr=attr)
-            attr = {
+            self.pd_graph.add_layer(
+                kernel="paddle.pow",
+                inputs={"x": self.get_input_name(input)},
+                outputs=[node.layer_name],
+                factor=2.0)
+            layer_attrs = {
                 'dim': dim[axis:],
                 'keep_dim': False,
                 'name': string(node.layer_name)
            }
-            node.fluid_code.add_layer(
-                "reduce_sum", inputs=node, output=node, param_attr=attr)
+            self.pd_graph.add_layer(
+                kernel="paddle.sum",
+                inputs={"input": node.layer_name},
+                outputs=[node.layer_name],
+                **layer_attrs)
         else:  ## operation = MEAN
-            attr = {
+            layer_attrs = {
                 'dim': dim[axis:],
                 'keep_dim': False,
                 'name': string(node.layer_name)
             }
-            node.fluid_code.add_layer(
-                "reduce_mean", inputs=node, output=node, param_attr=attr)
-        attr = {'scale': coeff}
-        node.fluid_code.add_layer(
-            "scale", inputs=node, output=node, param_attr=attr)
+            self.pd_graph.add_layer(
+                kernel="paddle.mean",
+                inputs={"input": node.layer_name},
+                outputs=[node.layer_name],
+                **layer_attrs)
+        self.pd_graph.add_layer(
+            kernel="paddle.scale",
+            inputs={"x": node.layer_name},
+            outputs=[node.layer_name],
+            scale=coeff)
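A sketch of the Reduction SUM branch followed by the coeff scale, assuming Paddle 2.0; note the handler emits the 1.x keywords `dim`/`keep_dim`, while the 2.0 signature of `paddle.sum` uses `axis`/`keepdim` (axis values and coeff below are illustrative):

```python
# Hedged sketch: Caffe Reduction (SUM) -> paddle.sum, then coeff -> paddle.scale.
import paddle

x = paddle.rand([2, 3, 4])
out = paddle.sum(x, axis=[1, 2], keepdim=False)  # reduce trailing dims
out = paddle.scale(out, scale=0.1)               # Caffe's coeff
```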

     def deal_custom_layer(self, node):
         op = node.layer_type
@@ -893,7 +959,7 @@ class CaffeOpMapper(OpMapper):
         weights_name = deal_weights(node)
         for i in range(len(data)):
             self.weights[weights_name[i]] = data[i]
-        inputs_node = []
+        inputs_list = []
         for i in range(len(node.inputs)):
             input = self.graph.get_bottom_node(node, idx=i, copy=True)
             if i == 1 and op == 'DetectionOutput':
@@ -904,13 +970,19 @@ class CaffeOpMapper(OpMapper):
                 input = self.graph.get_bottom_node(input, idx=0, copy=True)
                 assert input is not None, 'This kind of DetectionOutput is not supported!'
                 input = self.graph.get_bottom_node(input, idx=0, copy=True)
-            inputs_node.append(input)
-        node.fluid_code.add_layer(
-            func.__code__.co_name,
-            inputs=inputs_node,
-            output=node,
-            param_attr=kwargs,
-            is_custom_layer=True)
+            inputs_list.append(self.get_input_name(input))
+        kwargs_tmp = copy.deepcopy(kwargs)
+        for k, v in kwargs_tmp.items():
+            if str(type(v)) == "<class 'caffe_pb2.NonMaximumSuppressionParameter'>":
+                kwargs[k] = dict()
+                kwargs[k]["nms_threshold"] = v.nms_threshold
+                kwargs[k]["top_k"] = v.top_k
+                kwargs[k]["eta"] = v.eta
+        self.pd_graph.add_layer(
+            kernel="combination_layer:{}".format(op),
+            inputs={"inputs": inputs_list},
+            outputs=[node.layer_name],
+            **kwargs)
         if op not in self.used_custom_layers:
             self.used_custom_layers[op] = custom_code
@@ -918,6 +990,8 @@ class CaffeOpMapper(OpMapper):
         assert node.layer_type in self.directly_map_ops
         op_info = self.directly_map_ops[node.layer_type]
         input = self.graph.get_bottom_node(node, idx=0, copy=True)
-        attr = {'name': string(node.layer_name)}
-        node.fluid_code.add_layer(
-            op_info, inputs=input, output=node, param_attr=attr)
+        self.pd_graph.add_layer(
+            kernel=op_info,
+            inputs={"x": self.get_input_name(input)},
+            outputs=[node.layer_name])
\ No newline at end of file