提交 44a7e5ab 编写于 作者: S SunAhong1993

Modified according to review feedback

from x2paddle.decoder.caffe_decoder import CaffeDecoder
Decoder = CaffeDecoder
from x2paddle.op_mapper.dygraph.caffe2paddle.caffe_op_mapper import CaffeOpMapper
DygraphOpMapper = CaffeOpMapper
from x2paddle.op_mapper.static.caffe2paddle.caffe_op_mapper import CaffeOpMapper
StaticOpMapper = CaffeOpMapper
from x2paddle.optimizer.caffe_optimizer import CaffeOptimizer
StaticOptimizer = CaffeOptimizer
\ No newline at end of file
......@@ -88,18 +88,6 @@ def arg_parser():
action="store_true",
default=False,
help="define whether merge the params")
parser.add_argument(
"--jit_type",
"-jt",
type=_text_type,
default="script",
help="define the jit type of pytorch Module.")
parser.add_argument(
"--input_files",
"-if",
action='append',
default=None,
help="define the inputs' file path")
parser.add_argument(
"--paddle_type",
"-pt",
......@@ -155,11 +143,11 @@ def tf2paddle(model_path,
def caffe2paddle(proto, weight, save_dir, caffe_proto,
paddle_type, params_merge=False):
from x2paddle.caffe_convert import Decoder
from x2paddle.decoder.caffe_decoder import CaffeDecoder
if paddle_type == "dygraph":
from x2paddle.caffe_convert import DygraphOpMapper as OpMapper
from x2paddle.op_mapper.dygraph.caffe2paddle.caffe_op_mapper import CaffeOpMapper
else:
from x2paddle.caffe_convert import StaticOpMapper as OpMapper
from x2paddle.op_mapper.static.caffe2paddle.caffe_op_mapper import CaffeOpMapper
import google.protobuf as gpb
ver_part = gpb.__version__.split('.')
version_satisfy = False
......@@ -168,10 +156,15 @@ def caffe2paddle(proto, weight, save_dir, caffe_proto,
version_satisfy = True
assert version_satisfy, '[ERROR] google.protobuf >= 3.6.0 is required'
print("Now translating model from caffe to paddle.")
model = Decoder(proto, weight, caffe_proto)
mapper = OpMapper(model)
mapper.pd_graph.build()
mapper.pd_graph.gen_model(save_dir)
model = CaffeDecoder(proto, weight, caffe_proto)
mapper = CaffeOpMapper(model)
mapper.paddle_graph.build()
print("Model optimizing ...")
from x2paddle.optimizer.optimizer import GraphOptimizer
graph_opt = GraphOptimizer(source_frame="caffe", paddle_type=paddle_type)
graph_opt.optimize(mapper.paddle_graph)
print("Model optimized.")
mapper.paddle_graph.gen_model(save_dir)
def onnx2paddle(model_path, save_dir, params_merge=False):
......
......@@ -63,7 +63,7 @@ class PaddleLayer(object):
class PaddleGraph(object):
def __init__(self, parent_layer=None, graph_type="static"):
def __init__(self, source_type=None, parent_layer=None, graph_type="static"):
self.layers = OrderedDict()
self.edges_out = dict()
self.edges_in = dict()
......@@ -72,7 +72,8 @@ class PaddleGraph(object):
self.parameters = dict()
self.parent_layer = parent_layer
self.graph_type = graph_type
self.custom_func = None
self.source_type = source_type
self.custom_code = None
self.inputs_info = None
def set_name(self, name):
......@@ -81,8 +82,8 @@ class PaddleGraph(object):
def set_parameters(self, parameters):
self.parameters = parameters
def set_custom_func(self, custom_func):
self.custom_func = custom_func
def set_custom(self, custom_code):
self.custom_code = custom_code
def set_inputs_info(self, inputs_info):
self.inputs_info = inputs_info
......@@ -168,10 +169,6 @@ class PaddleGraph(object):
if not isinstance(vs, list):
vs = [vs]
for v in vs:
if "[" in v:
remove_index = v.index("[")
v_part = v[remove_index:]
v = v.replace(v_part, "")
assert v in outputs_from_nodes or (
inputs is not None and v in list(inputs.values())
) or (
......@@ -237,55 +234,61 @@ class PaddleGraph(object):
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if self.graph_type == "static":
code_dir = os.path.join(save_dir, 'model_with_code')
infer_dir = os.path.join(save_dir, 'inference_model')
self.gen_code(code_dir)
sys.path.append(code_dir)
import x2paddle_model
paddle.enable_static()
scope = paddle.static.Scope()
startup_program = paddle.static.Program()
main_program = paddle.static.Program()
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_program, startup_program):
inputs, outputs = x2paddle_model.x2paddle_net()
exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_program)
param_dir = os.path.join(code_dir, 'weights')
for k, v in self.parameters.items():
if scope.find_var(k):
self.dump_parameter(k, v, param_dir)
def if_exist(var):
b = os.path.exists(
os.path.join(os.path.join(param_dir, var.name)))
return b
fluid.io.load_vars(
exe, param_dir, main_program, predicate=if_exist)
fluid.io.save_inference_model(
dirname=infer_dir,
feeded_var_names=[i.name for i in inputs],
target_vars=outputs,
executor=exe)
self.gen_static_model(save_dir)
else:
if jit_type == "trace":
from x2paddle.optimizer.code_optimizer import HierarchicalTree
hierarchical_tree = HierarchicalTree(self)
for layer_id, layer in self.layers.items():
hierarchical_tree.insert(layer)
hierarchical_tree.save_source_files(save_dir)
self.dump_dygraph_parameter(save_dir)
else:
self.gen_dygraph_code(save_dir)
self.dump_dygraph_parameter(save_dir)
input_shapes = list()
input_types = list()
for input_name in self.inputs:
input_shapes.append(self.inputs_info[input_name][0])
input_types.append(self.inputs_info[input_name][1])
# 如果input_files非空,则导出推理模型;其值类似[[None, 3, 224, 224]]
self.dygraph2static(save_dir, input_shapes, input_types)
def gen_code(self, code_dir):
self.gen_dygraph_model(save_dir, jit_type)
def gen_static_model(self, save_dir):
code_dir = os.path.join(save_dir, 'model_with_code')
infer_dir = os.path.join(save_dir, 'inference_model')
self.gen_static_code(code_dir)
sys.path.append(code_dir)
import x2paddle_model
paddle.enable_static()
scope = paddle.static.Scope()
startup_program = paddle.static.Program()
main_program = paddle.static.Program()
with paddle.static.scope_guard(scope):
with paddle.static.program_guard(main_program, startup_program):
inputs, outputs = x2paddle_model.x2paddle_net()
exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_program)
param_dir = os.path.join(code_dir, 'weights')
for k, v in self.parameters.items():
if scope.find_var(k):
self.dump_parameter(k, v, param_dir)
def if_exist(var):
b = os.path.exists(
os.path.join(os.path.join(param_dir, var.name)))
return b
fluid.io.load_vars(
exe, param_dir, main_program, predicate=if_exist)
fluid.io.save_inference_model(
dirname=infer_dir,
feeded_var_names=[i.name for i in inputs],
target_vars=outputs,
executor=exe)
def gen_dygraph_model(self, save_dir, jit_type=None):
if jit_type == "trace":
from x2paddle.optimizer.code_optimizer import HierarchicalTree
hierarchical_tree = HierarchicalTree(self)
for layer_id, layer in self.layers.items():
hierarchical_tree.insert(layer)
hierarchical_tree.save_source_files(save_dir)
self.dump_dygraph_parameter(save_dir)
else:
self.gen_dygraph_code(save_dir)
self.dump_dygraph_parameter(save_dir)
input_shapes = list()
input_types = list()
for input_name in self.inputs:
input_shapes.append(self.inputs_info[input_name][0])
input_types.append(self.inputs_info[input_name][1])
# 如果input_files非空,则导出推理模型;其值类似[[None, 3, 224, 224]]
self.dygraph2static(save_dir, input_shapes, input_types)
def gen_static_code(self, code_dir):
def write_code(f, code_list, indent=0):
indent_blank = " " * indent
for code_line in code_list:
......@@ -307,10 +310,10 @@ class PaddleGraph(object):
],
indent=0)
if self.custom_func is not None:
if self.custom_code is not None:
write_code(
f,
list(self.custom_func.values()),
list(self.custom_code.values()),
indent=0)
write_code(f,
["", "def x2paddle_net():"],
......@@ -334,7 +337,7 @@ class PaddleGraph(object):
for output in layer.outputs:
line += "{}, ".format(output)
line = line.strip(", ")
if layer.kernel.startswith("combination_layer"):
if layer.kernel.startswith("custom_layer"):
line += " = {}(".format(layer.kernel.split(":")[-1].lower() + "_layer")
else:
line += " = {}(".format(layer.kernel)
......@@ -440,20 +443,24 @@ class PaddleGraph(object):
return codes
def gen_head():
if self.source_type == "caffe":
custom_import = "from x2paddle.op_mapper.dygraph.caffe2paddle " + \
"import caffe_custom_layer as x2paddle_nn"
self.head = gen_codes(
[
"from paddle.fluid.initializer import Constant",
"from paddle.fluid.param_attr import ParamAttr",
"import paddle",
"import paddle.fluid as fluid",
custom_import,
"",
"class {}(fluid.dygraph.Layer):".format(self.name),
"class {}(paddle.nn.Layer):".format(self.name),
],
indent=0)
input_data_name = ', '.join(self.inputs)
self.init_func.extend(
gen_codes(
["def __init__(self, params):"], indent=1))
["def __init__(self):"], indent=1))
self.init_func.extend(
gen_codes(
["super({}, self).__init__()".format(self.name)], indent=2))
......@@ -462,18 +469,26 @@ class PaddleGraph(object):
["def forward(self, {}):".format(input_data_name)],
indent=1))
def gen_run_net_code(code_dir):
def gen_main_code(code_dir):
input_data_name = ', '.join(self.inputs)
self.run_func = gen_codes(
[
"",
"def run_net({}):".format(input_data_name),
"def main({}):".format(input_data_name),
],
indent=0)
comment_list = list()
comment_list.append("# 共{}个输入".format(len(self.inputs_info)))
for k, v in self.inputs_info.items():
comment_list.append("# {}: 形状为{},类型为{}。".format(k, v[0], v[1]))
self.run_func.extend(
gen_codes(
comment_list,
indent=1))
self.run_func.extend(
gen_codes(["paddle.disable_static()",
"params, _ = fluid.load_dygraph('{}/model')".format(code_dir),
"model = {}(params)".format(self.name),
"model = {}()".format(self.name),
"model.set_dict(params)",
"model.eval()",
"out = model({})".format(input_data_name),
......@@ -506,13 +521,17 @@ class PaddleGraph(object):
for layer_id, layer in self.layers.items():
if ("paddle.nn" in layer.kernel and "functional" not in layer.kernel
) or layer.kernel == "paddle.to_tensor" or \
"paddle.fluid.dygraph" in layer.kernel:
"paddle.fluid.dygraph" in layer.kernel or \
layer.kernel.startswith("custom_layer"):
line = "{}".format(
layer.outputs[0]
) if layer.kernel == "paddle.to_tensor" and not layer.attrs[
"data"].startswith("params[") else "self.{}".format(
layer.outputs[0])
line += " = {}(".format(layer.kernel)
if layer.kernel.startswith("custom_layer"):
line += "= x2paddle_nn.{}(".format(layer.kernel.split(":")[-1])
else:
line += " = {}(".format(layer.kernel)
for k, v in layer.attrs.items():
line += "{}={}, ".format(k, v)
line = line.strip(", ")
......@@ -540,7 +559,7 @@ class PaddleGraph(object):
line += "{}, ".format(v)
line = line.strip(", ")
line += ")"
self.forward_func.extend(gen_codes([line], indent=indent))
self.forward_func.extend(gen_codes([line], indent=indent))
elif "prim" in layer.kernel:
func_name = layer.kernel.replace(".", "_")
from x2paddle.op_mapper.dygraph import prim2code
......@@ -567,9 +586,14 @@ class PaddleGraph(object):
line += "{}={}, ".format(k, v)
line = line.strip(", ")
line += ")"
self.forward_func.extend(gen_codes([line], indent=indent))
if layer.kernel == "self.create_parameter":
self.init_func.extend(gen_codes(["self." + line], indent=indent))
self.forward_func.extend(gen_codes(["{} = self.{}".format(layer.outputs[0],
layer.outputs[0])], indent=indent))
else:
self.forward_func.extend(gen_codes([line], indent=indent))
if indent == 2:
gen_run_net_code(code_dir)
gen_main_code(code_dir)
write_code(code_dir)
else:
return self.init_func, self.forward_func
......@@ -593,7 +617,7 @@ class PaddleGraph(object):
import x2paddle_code
paddle.disable_static()
restore, _ = fluid.load_dygraph(osp.join(save_dir, "model"))
model = getattr(x2paddle_code, self.name)(restore)
model = getattr(x2paddle_code, self.name)()
model.set_dict(restore)
model.eval()
static_model = paddle.jit.to_static(model, input_spec=sepc_list)
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
......@@ -18,3 +18,11 @@ import os
def string(param):
    """Wrap *param* in single quotes for emission into generated source code."""
    quoted = "'{}'".format(param)
    return quoted
def name_generator(nn_name, nn_name2id):
    """Return a unique layer name by appending a per-name counter.

    A new name gets suffix 0; each subsequent request for the same name
    increments the counter stored in *nn_name2id* (mutated in place).
    """
    current = nn_name2id.get(nn_name)
    next_id = 0 if current is None else current + 1
    nn_name2id[nn_name] = next_id
    return nn_name + str(next_id)
\ No newline at end of file
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .detectionoutput import DetectionOutput
from .normalize import Normalize
from .priorbox import PriorBox
from .roipooling import ROIPooling
from .select import Select
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
class DetectionOutput(object):
    """Caffe DetectionOutput layer as a callable wrapper around
    ``fluid.layers.detection_output``.

    The constructor only records the NMS configuration; all keyword
    arguments are forwarded verbatim on every call.
    """

    def __init__(self, nms_threshold, nms_top_k, keep_top_k, nms_eta,
                 score_threshold, background_label):
        self.detection_output_layer_attrs = {
            "background_label": background_label,
            "nms_threshold": nms_threshold,
            "nms_top_k": nms_top_k,
            "keep_top_k": keep_top_k,
            "score_threshold": score_threshold,
            "nms_eta": nms_eta}

    def __call__(self, x0, x1, x2):
        # x2 stacks the prior boxes and their variances along axis 1;
        # split them apart and flatten each to (num_priors, 4).
        boxes, variances = paddle.split(x2, num_or_sections=2, axis=1)
        boxes = paddle.reshape(x=boxes, shape=[-1, 4])
        variances = paddle.reshape(x=variances, shape=[-1, 4])
        prior_num = fluid.layers.shape(boxes)[0]
        # Reshape localization and confidence inputs to per-prior layout.
        loc = paddle.reshape(x0, shape=[-1, prior_num, 4])
        conf_flatten = paddle.reshape(x1, shape=[0, prior_num, -1])
        out = fluid.layers.detection_output(
            loc=loc,
            scores=conf_flatten,
            prior_box=boxes,
            prior_box_var=variances,
            **self.detection_output_layer_attrs)
        return out
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
class Normalize(object):
    """Caffe Normalize layer: channel-wise L2 normalization followed by a
    learned per-channel scale.

    Args:
        axis (int): axis along which the scale parameter is broadcast.
        param_name (str): variable name of the scale parameter.
        param_shape (list): shape of the scale parameter.
    """

    def __init__(self, axis, param_name, param_shape):
        self.axis = axis
        self.param_name = param_name
        self.param_shape = param_shape

    def __call__(self, x):
        # Bug fix: the original called fluid.layers.prior_box(x=x, p=2, axis=1),
        # which is the SSD prior-box op (and those kwargs are invalid for it);
        # Caffe's Normalize semantics are an L2 normalization over channels.
        l2 = fluid.layers.l2_normalize(x=x, axis=1)
        attr = fluid.ParamAttr(name=self.param_name, trainable=False)
        # Bug fixes: the keyword was misspelled ``atr`` (NameError), and
        # paddle.nn.Layer.create_parameter was called unbound on the class;
        # use the functional create_parameter instead.
        # NOTE(review): dtype assumed float32 — confirm against the weights.
        param = fluid.layers.create_parameter(
            shape=self.param_shape, dtype="float32", attr=attr)
        out = paddle.multiply(x=l2, y=param, axis=self.axis)
        return out
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
class PriorBox(object):
    """Caffe PriorBox layer: generate SSD prior boxes and their variances,
    flatten both, and concatenate them into one tensor of shape (1, 2, -1)."""

    def __init__(self, min_sizes, max_sizes,
                 aspect_ratios, variance, flip,
                 clip, steps, offset,
                 min_max_aspect_ratios_order):
        # Keyword arguments forwarded verbatim to fluid.layers.prior_box.
        self.priorbox_layer_attrs = {
            "min_sizes": min_sizes,
            "max_sizes": max_sizes,
            "aspect_ratios": aspect_ratios,
            "variance": variance,
            "flip": flip,
            "clip": clip,
            "steps": steps,
            "offset": offset,
            "min_max_aspect_ratios_order": min_max_aspect_ratios_order}

    def __call__(self, x0, x1):
        # x0: feature map, x1: input image.
        boxes, variances = fluid.layers.prior_box(
            input=x0, image=x1, **self.priorbox_layer_attrs)
        flat_boxes = paddle.reshape(x=boxes, shape=[1, 1, -1])
        flat_vars = paddle.reshape(x=variances, shape=[1, 1, -1])
        return paddle.concat(x=[flat_boxes, flat_vars], axis=1)
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
class ROIPooling(object):
    """Caffe ROIPooling layer wrapping ``fluid.layers.roi_pool``."""

    def __init__(self, pooled_height, pooled_width, spatial_scale):
        # Keyword arguments forwarded verbatim to fluid.layers.roi_pool.
        self.roipooling_layer_attrs = {
            "pooled_height": pooled_height,
            "pooled_width": pooled_width,
            "spatial_scale": spatial_scale}

    def __call__(self, x0, x1):
        # x1 rows look like [batch_idx, x1, y1, x2, y2] — drop the batch
        # index column before pooling.  TODO confirm row layout with caller.
        rois = paddle.slice(input=x1, axes=[1], starts=[1], ends=[5])
        return fluid.layers.roi_pool(input=x0, rois=rois,
                                     **self.roipooling_layer_attrs)
\ No newline at end of file
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import paddle.fluid as fluid
class Select(object):
    """Caffe Select (slice) layer: return ``x[start:end]`` along ``axis``.

    Args:
        input_shape (list): static shape of the input tensor.
        point (list): one or two slice points; with one point the slice
            runs from it to the end of the axis.
        axis (int): axis to slice along.
    """

    def __init__(self, input_shape, point, axis):
        self.point = point
        self.input_shape = input_shape
        self.axis = axis

    def __call__(self, x):
        start = self.point[0]
        if len(self.point) == 2:
            end = self.point[1]
        else:
            # Open-ended slice: run to the full extent of the sliced axis.
            end = self.input_shape[self.axis]
        # Bug fix: paddle.slice takes ``input``/``axes``/``starts``/``ends``
        # (with list-valued starts/ends), not ``x``/``start``/``end`` scalars —
        # the original call raised a TypeError.  This matches the working
        # usage in ROIPooling in this same module.
        out = paddle.slice(input=x,
                           axes=[self.axis],
                           starts=[start],
                           ends=[end])
        return out
\ No newline at end of file
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
......@@ -10,7 +10,7 @@
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# limitations under the License.
import math
import numbers
......@@ -383,7 +383,8 @@ def shape_detectionoutput(layer, input_shape):
def shape_normalize(layer, input_shape):
    """Normalize is element-wise, so the output shape equals the input shape."""
    output_shape = input_shape
    return output_shape
def shape_permute(layer, input_shape, order=None):
def shape_permute(layer, input_shape):
order = layer.permute_param.order
inshape = input_shape[0]
output_shape = []
order = list(order)
......@@ -392,7 +393,9 @@ def shape_permute(layer, input_shape, order=None):
output_shape.append(inshape[ii])
return [output_shape]
def shape_priorbox(layer, input_shape, max_size=None, aspect_ratio=None):
def shape_priorbox(layer, input_shape):
max_size = layer.prior_box_param.max_size
aspect_ratio = layer.prior_box_param.aspect_ratio
fc_shape = input_shape[0]
N = 1
if not max_size == None:
......@@ -406,7 +409,9 @@ def shape_priorbox(layer, input_shape, max_size=None, aspect_ratio=None):
def shape_relu6(layer, input_shape):
    """ReLU6 is element-wise, so the output shape equals the input shape."""
    output_shape = input_shape
    return output_shape
def shape_roipooling(layer, input_shape, pooled_w=None, pooled_h=None):
def shape_roipooling(layer, input_shape):
pooled_w = layer.roi_pooling_param.pooled_w
pooled_h = layer.roi_pooling_param.pooled_h
base_fea_shape = input_shapes[0]
rois_shape = input_shapes[1]
output_shape = base_fea_shape
......@@ -418,7 +423,8 @@ def shape_roipooling(layer, input_shape, pooled_w=None, pooled_h=None):
def shape_shufflechannel(layer, input_shape):
    """ShuffleChannel only permutes channels, so the shape is unchanged."""
    output_shape = input_shape
    return output_shape
def shape_upsample(layer, input_shape, scale):
def shape_upsample(layer, input_shape):
scale = layer.upsample_param.scale
assert len(input_shapes) == 1, "not valid input shape for upsample layer"
assert type(scale) is int
input_shape = input_shapes[0]
......@@ -428,7 +434,9 @@ def shape_upsample(layer, input_shape, scale):
output_shape = [input_shape[0], input_shape[1], new_h, new_w]
return [output_shape]
def shape_select(layer, input_shape, slice_point, axis):
def shape_select(layer, input_shape):
slice_point = layer.select_param.slice_point
axis = layer.select_param.axis
input_shape = input_shapes[0]
start = slice_point[0]
if len(slice_point) == 2:
......
......@@ -419,18 +419,6 @@ def prim_tuple_unpack(layer, indent=1, init_func=[], forward_func=[], layer_id=N
def prim_type(layer, indent=1, init_func=None, forward_func=None, layer_id=None, different_attrs=None):
    """Emit ``<out> = <input>.dtype`` into the generated forward body.

    Bug fix: the original used mutable default arguments (``init_func=[]``,
    ``forward_func=[]``), which are shared across calls and silently
    accumulate emitted lines; fresh lists are created per call instead.
    Callers that pass their own lists are unaffected.
    """
    if init_func is None:
        init_func = []
    if forward_func is None:
        forward_func = []
    line = "{} = {}.dtype".format(layer.outputs[0],
                                  get_value(layer, "input", different_attrs))
    forward_func.extend(gen_codes([line], indent=indent))
def prim_update_end(layer, indent=1, init_func=None, forward_func=None, layer_id=None, different_attrs=None):
    """Emit code computing the exclusive end index of a Select slice:
    the second slice point when two are given, otherwise the extent of the
    sliced axis (mirrors ``shape_select``).

    Bug fixes: the original emitted ``if len{} == 2:`` without parentheses
    (producing e.g. ``if lenpoint == 2:``), referenced an undefined local
    ``dim`` in the else-branch, and emitted ``{}[]`` with no index.  Mutable
    default arguments are also replaced with per-call fresh lists.
    """
    if init_func is None:
        init_func = []
    if forward_func is None:
        forward_func = []
    input_shape = get_value(layer, "input_shape", different_attrs)
    point = get_value(layer, "point", different_attrs)
    axis = get_value(layer, "axis", different_attrs)
    lines = []
    lines.append("if len({}) == 2:".format(point))
    lines.append("    {} = {}[1]".format(layer.outputs[0], point))
    lines.append("else:")
    lines.append("    {} = {}[{}]".format(layer.outputs[0], input_shape, axis))
    forward_func.extend(gen_codes(lines, indent=indent))
def prim_var2list(layer, indent=1, init_func=[], forward_func=[], layer_id=None, different_attrs=None):
......
文件已添加
文件已添加
文件已添加
文件已添加
......@@ -36,9 +36,9 @@ class CaffeOpMapper(OpMapper):
self.weights = dict()
resolver = decoder.resolver
self.used_custom_layers = {}
self.pd_graph = PaddleGraph(parent_layer=None, graph_type="static")
self.pd_graph.inputs = self.graph.input_nodes
self.pd_graph.outputs = self.graph.output_nodes
self.paddle_graph = PaddleGraph(parent_layer=None, graph_type="static", source_type="caffe")
self.paddle_graph.inputs = self.graph.input_nodes
self.paddle_graph.outputs = self.graph.output_nodes
print("Total nodes: {}".format(len(self.graph.topo_sort)))
for node_name in self.graph.topo_sort:
......@@ -59,8 +59,8 @@ class CaffeOpMapper(OpMapper):
else:
raise Exception(
"The op {} in model is not supported yet.".format(op))
self.pd_graph.set_parameters(self.weights)
self.pd_graph.set_custom_func(self.used_custom_layers)
self.paddle_graph.set_parameters(self.weights)
self.paddle_graph.set_custom(self.used_custom_layers)
def op_checker(self):
......@@ -202,7 +202,7 @@ class CaffeOpMapper(OpMapper):
"shape": [-1] + shape,
"name": string(node.layer_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.data",
inputs={},
outputs=[node.layer_name],
......@@ -244,7 +244,7 @@ class CaffeOpMapper(OpMapper):
'bias_attr': False
if len(data) == 1 else string(node.layer_name + '_bias'),
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.conv2d",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -287,7 +287,7 @@ class CaffeOpMapper(OpMapper):
'bias_attr': False
if len(data) == 1 else string(node.layer_name + '_bias')
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.conv2d_transpose",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -317,7 +317,7 @@ class CaffeOpMapper(OpMapper):
'global_pooling': global_pool,
'name': string(node.layer_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.pool2d",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -341,7 +341,7 @@ class CaffeOpMapper(OpMapper):
'beta': params.beta,
'name': string(node.layer_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.lrn",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -390,7 +390,7 @@ class CaffeOpMapper(OpMapper):
'bias_attr': False
if len(data) == 1 else string(node.layer_name + '_bias')
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.fc",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -406,7 +406,7 @@ class CaffeOpMapper(OpMapper):
dims = len(shape)
axis = axis + dims if axis < 0 else axis
layer_attrs = {'axis': axis, 'name': string(node.layer_name + '_softmax')}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.softmax",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -431,7 +431,7 @@ class CaffeOpMapper(OpMapper):
'dim': axis,
'name': string(node.layer_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.split",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -448,7 +448,7 @@ class CaffeOpMapper(OpMapper):
params = node.layer.concat_param
axis = params.axis
layer_attrs = {'axis': axis, 'name': string(node.layer_name)}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="paddle.concat",
inputs={"x": inputs_list},
outputs=[node.layer_name],
......@@ -467,13 +467,13 @@ class CaffeOpMapper(OpMapper):
params = node.layer.relu_param
if params.HasField('negative_slope') and params.negative_slope != 0:
negative_slope = float(params.negative_slope)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.leaky_relu",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
alpha=negative_slope)
else:
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.relu",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name])
......@@ -497,7 +497,7 @@ class CaffeOpMapper(OpMapper):
'param_attr': string(node.layer_name + '_weights'),
'name': string(node.layer_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.prelu",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -520,7 +520,7 @@ class CaffeOpMapper(OpMapper):
ignore_label = params.ignore_label
assert axis == 1, 'PaddlePaddle can not support the situation when the axis is not 1.'
assert not ignore_label >= 0, 'PaddlePaddle can not support the situation when the model has ignore label.'
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.accuracy",
inputs=inputs_dict,
outputs=[node.layer_name],
......@@ -540,7 +540,7 @@ class CaffeOpMapper(OpMapper):
inputs_dict = {}
inputs_dict['x'] = self.get_input_name(inputs[0])
inputs_dict['y'] = self.get_input_name(inputs[1])
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.elementwise_mul",
inputs=inputs_dict,
outputs=[node.layer_name])
......@@ -553,12 +553,12 @@ class CaffeOpMapper(OpMapper):
'value': coeff[0],
'dtype': '{}.dtype'.format(input1_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.fill_constant",
inputs={},
outputs=["{}_const1".format(node.layer_name)],
**layer_attrs)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.elementwise_mul",
inputs={"x": input1_name,
"y": "{}_const1".format(node.layer_name)},
......@@ -569,17 +569,17 @@ class CaffeOpMapper(OpMapper):
'value': coeff[1],
'dtype': '{}.dtype'.format(input2_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.fill_constant",
inputs={},
outputs=["{}_const2".format(node.layer_name)],
**layer_attrs)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.elementwise_mul",
inputs={"x": input2_name,
"y": "{}_const2".format(node.layer_name)},
outputs=["{}_mul2".format(node.layer_name)])
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.elementwise_add",
inputs={"x": "{}_mul1".format(node.layer_name),
"y": "{}_mul2".format(node.layer_name)},
......@@ -588,7 +588,7 @@ class CaffeOpMapper(OpMapper):
inputs_dict = {}
inputs_dict['x'] = self.get_input_name(inputs[0])
inputs_dict['y'] = self.get_input_name(inputs[1])
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.elementwise_add",
inputs=inputs_dict,
outputs=[node.layer_name])
......@@ -596,7 +596,7 @@ class CaffeOpMapper(OpMapper):
inputs_dict = {}
inputs_dict['x'] = self.get_input_name(inputs[0])
inputs_dict['y'] = self.get_input_name(inputs[1])
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.elementwise_max",
inputs=inputs_dict,
outputs=[node.layer_name])
......@@ -637,7 +637,7 @@ class CaffeOpMapper(OpMapper):
'epsilon': eps,
'name': string(node.layer_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.batch_norm",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -673,7 +673,7 @@ class CaffeOpMapper(OpMapper):
inputs_dict = {}
inputs_dict['x'] = self.get_input_name(input0)
inputs_dict['y'] = self.get_input_name(input1)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.elementwise_mul",
inputs=inputs_dict,
outputs=["{}_mul".format(node.layer_name)],
......@@ -682,7 +682,7 @@ class CaffeOpMapper(OpMapper):
bias_shape = node.input_shape[0][axis:axis + num_axes]
input0 = self.graph.get_bottom_node(node, idx=0, copy=True)
input0_name = self.get_input_name(input0)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.ParamAttr",
inputs={},
outputs=["{}_scale".format(node.layer_name)],
......@@ -694,7 +694,7 @@ class CaffeOpMapper(OpMapper):
'is_bias': True,
'default_initializer': 'Constant(value=1.0)'
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.create_parameter",
inputs={"attr": node.layer_name + '_scale',},
outputs=["{}_cparam1".format(node.layer_name)],
......@@ -702,14 +702,14 @@ class CaffeOpMapper(OpMapper):
inputs_dict = {}
inputs_dict['x'] = self.get_input_name(input0)
inputs_dict['y'] = "{}_cparam1".format(node.layer_name)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.elementwise_mul",
inputs=inputs_dict,
outputs=["{}_mul".format(node.layer_name)],
axis=axis)
scale_shape = bias_shape
input0_name = self.get_input_name(input0)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.ParamAttr",
inputs={},
outputs=["{}_offset".format(node.layer_name)],
......@@ -721,7 +721,7 @@ class CaffeOpMapper(OpMapper):
'is_bias': True,
'default_initializer': 'Constant(value=1.0)'
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.create_parameter",
inputs={"attr": node.layer_name + '_offset'},
outputs=["{}_cparam2".format(node.layer_name)],
......@@ -729,7 +729,7 @@ class CaffeOpMapper(OpMapper):
inputs_dict = {}
inputs_dict['x'] = "{}_mul".format(node.layer_name)
inputs_dict['y'] = "{}_cparam2".format(node.layer_name)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.elementwise_add",
inputs=inputs_dict,
outputs=[node.layer_name],
......@@ -747,7 +747,7 @@ class CaffeOpMapper(OpMapper):
'act': None,
'name': string(node.layer_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.reshape",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -767,25 +767,25 @@ class CaffeOpMapper(OpMapper):
if axis < 0:
axis += len(input_shape)
if out_max_val is True:
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.topk",
inputs={"input": self.get_input_name(input)},
outputs=["{}_topk_var".format(node.layer_name),
"{}_index_var".format(node.layer_name)],
k=top_k)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="paddle.cast",
inputs={"x": "{}_topk_var".format(node.layer_name)},
outputs=["{}_topk_var".format(node.layer_name)],
dtype="{}_topk_var.dtype".format(node.layer_name))
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="paddle.concat",
inputs={"x": "[{}_topk_var, {}_index_var]".format(node.layer_name,
node.layer_name)},
outputs=[node.layer_name],
axis=axis)
else:
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.topk",
inputs={"input": self.get_input_name(input)},
outputs=["_", node.layer_name],
......@@ -810,7 +810,7 @@ class CaffeOpMapper(OpMapper):
offset_real = [0] * axis + offset
layer_attrs = {"offsets": list(offset_real),
"shape": node.input_shape[1]}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.crop_tensor",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -821,7 +821,7 @@ class CaffeOpMapper(OpMapper):
node.
inputs) == 1, 'The count of DetectionOutput node\'s input is not 1.'
input = self.graph.get_bottom_node(node, idx=0, copy=True)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.reshape",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -841,12 +841,12 @@ class CaffeOpMapper(OpMapper):
'bias_after_scale': True,
'name': string(node.layer_name + '_scale')
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="paddle.scale",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
**layer_attrs)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="paddle.pow",
inputs={"x": node.layer_name},
outputs=[node.layer_name],
......@@ -872,13 +872,13 @@ class CaffeOpMapper(OpMapper):
'keep_dim': False,
'name': string(node.layer_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.reduce_sum",
inputs={"input": self.get_input_name(input)},
outputs=[node.layer_name],
**layer_attrs)
elif operation == 2: ## operation = ASUM
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="paddle.abs",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name])
......@@ -887,13 +887,13 @@ class CaffeOpMapper(OpMapper):
'keep_dim': False,
'name': string(node.layer_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.reduce_sum",
inputs={"input": node.layer_name},
outputs=[node.layer_name],
**layer_attrs)
elif operation == 3: ## operation = SUMSQ
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="paddle.pow",
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name],
......@@ -903,7 +903,7 @@ class CaffeOpMapper(OpMapper):
'keep_dim': False,
'name': string(node.layer_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.reduce_sum",
inputs={"input": node.layer_name},
outputs=[node.layer_name],
......@@ -914,12 +914,12 @@ class CaffeOpMapper(OpMapper):
'keep_dim': False,
'name': string(node.layer_name)
}
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="fluid.layers.reduce_mean",
inputs={"input": node.layer_name},
outputs=[node.layer_name],
**layer_attrs)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="paddle.scale",
inputs={"x": node.layer_name},
outputs=[node.layer_name],
......@@ -957,7 +957,7 @@ class CaffeOpMapper(OpMapper):
kwargs[k]["nms_threshold"] = v.nms_threshold
kwargs[k]["top_k"] = v.top_k
kwargs[k]["eta"] = v.eta
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel="combination_layer:{}".format(op),
inputs={"inputs": inputs_list},
outputs=[node.layer_name],
......@@ -969,7 +969,7 @@ class CaffeOpMapper(OpMapper):
assert node.layer_type in self.directly_map_ops
op_info = self.directly_map_ops[node.layer_type]
input = self.graph.get_bottom_node(node, idx=0, copy=True)
self.pd_graph.add_layer(
self.paddle_graph.add_layer(
kernel=op_info,
inputs={"x": self.get_input_name(input)},
outputs=[node.layer_name])
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.decoder.caffe_decoder import CaffeGraph
from x2paddle.core.util import *
class CaffeOptimizer(object):
    """Graph-level fusion passes run over a Caffe graph's generated fluid code."""

    # Ops whose fluid kernels accept a fused activation via 'act'.
    layers_with_act = ['Convolution', 'Deconvolution', 'InnerProduct']
    activation_ops = ['ReLU', 'Sigmoid']

    def __init__(self, mapper):
        # Operate directly on the mapper's Caffe graph.
        self.graph = mapper.graph

    def merge_bn_scale(self):
        """Fold each Scale layer into its preceding BatchNorm layer."""
        for name in self.graph.topo_sort:
            node = self.graph.get_node(name)
            if node.layer_type != 'Scale':
                continue
            parent = self.graph.get_bottom_node(node, idx=0)
            if parent.layer_type != 'BatchNorm':
                continue
            # Drop the parent's emitted code only when this Scale node is
            # its sole consumer.
            removable = len(parent.outputs) == 1
            bn_layer = parent.fluid_code.layers[0]
            bn_inputs = bn_layer.inputs
            attrs = bn_layer.param_attr
            attrs['param_attr'] = string(node.layer_name + '_scale')
            attrs['bias_attr'] = string(node.layer_name + '_offset')
            if removable:
                parent.fluid_code.clear()
            node.fluid_code.clear()
            node.fluid_code.add_layer(
                "batch_norm",
                inputs=bn_inputs,
                output=node,
                param_attr=attrs)

    def merge_op_activation(self):
        """Fold ReLU/Sigmoid activations into the preceding conv/fc layer."""
        for name in self.graph.topo_sort:
            node = self.graph.get_node(name)
            if node.layer_type not in self.activation_ops:
                continue
            parent = self.graph.get_bottom_node(node, idx=0)
            if parent.layer_type not in self.layers_with_act:
                continue
            removable = len(parent.outputs) == 1
            op_layer = parent.fluid_code.layers[0]
            op_inputs = op_layer.inputs
            attrs = op_layer.param_attr
            # Attach the activation by lower-cased op name (e.g. 'relu').
            attrs['act'] = string(node.layer_type.lower())
            kernel = op_layer.op
            if removable:
                parent.fluid_code.clear()
            node.fluid_code.clear()
            node.fluid_code.add_layer(
                kernel,
                inputs=op_inputs,
                output=node,
                param_attr=attrs)
文件已添加
......@@ -12,17 +12,19 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from .adaptive_pool2d_fuser import AdaptivePool2dFuser
from .adaptive_pool2d_fuse_pass import AdaptivePool2dFusePass
from .batchnorm2d_fuser import BatchNorm2dFuser
from .batchnorm2d_fuse_pass import BatchNorm2dFusePass
from .constant_fuser import ConstantFuser
from .constant_fuse_pass import ConstantFusePass
from .dropout_fuser import DropoutFuser
from .dropout_fuse_pass import DropoutFusePass
from .fc_fuser import FcFuser
from .fc_fuse_pass import FcFusePass
from .interpolate_bilinear_fuser import InterpolateBilinearFuser
from .interpolate_bilinear_fuse_pass import InterpolateBilinearFusePass
from .reshape_fuser import ReshapeFuser
from .reshape_fuse_pass import ReshapeFusePass
from .adaptive_pool2d_fuser import Dygraph_AdaptivePool2dFuser
from .adaptive_pool2d_fuse_pass import Dygraph_AdaptivePool2dFusePass
from .batchnorm2d_fuser import Dygraph_BatchNorm2dFuser
from .batchnorm2d_fuse_pass import Dygraph_BatchNorm2dFusePass
from .bn_scale_fuser import Dygraph_BNScaleFuser
from .bn_scale_fuse_pass import Dygraph_BNScaleFusePass
from .constant_fuser import Dygraph_ConstantFuser
from .constant_fuse_pass import Dygraph_ConstantFusePass
from .dropout_fuser import Dygraph_DropoutFuser
from .dropout_fuse_pass import Dygraph_DropoutFusePass
from .fc_fuser import Dygraph_FcFuser
from .fc_fuse_pass import Dygraph_FcFusePass
from .interpolate_bilinear_fuser import Dygraph_InterpolateBilinearFuser
from .interpolate_bilinear_fuse_pass import Dygraph_InterpolateBilinearFusePass
from .reshape_fuser import Dygraph_ReshapeFuser
from .reshape_fuse_pass import Dygraph_ReshapeFusePass
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
文件已添加
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_AdaptivePool2dFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class Dygraph_AdaptivePool2dFusePass(Pass):
    """Registers the dygraph adaptive-pool2d fusion as a named pass."""

    name = "dygraph_adaptive_pool2d_fuse_pass"

    def __init__(self):
        super(Dygraph_AdaptivePool2dFusePass, self).__init__()

    def apply(self, graph):
        # Topology-based matching; the fuser rewrites the graph in place.
        Dygraph_AdaptivePool2dFuser().operate(graph, match_kind="topo")


# Registration instance (looked up by name through the pass manager).
adaptive_pool2d_fuse_pass = Dygraph_AdaptivePool2dFusePass()
......@@ -13,14 +13,14 @@
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class AdaptivePool2dFuser(FuseBase):
class Dygraph_AdaptivePool2dFuser(FuseBase):
def __init__(self):
super(AdaptivePool2dFuser, self).__init__(graph_type="dygraph")
super(Dygraph_AdaptivePool2dFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的adaptive pool2d图结构。
......
......@@ -12,22 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass
from x2paddle.optimizer.pytorch_optimizer.fusion import BatchNorm2dFuser
from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_BatchNorm2dFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class BatchNorm2dFusePass(Pass):
name = "batchnorm2d_fuse_pass"
class Dygraph_BatchNorm2dFusePass(Pass):
name = "dygraph_batchnorm2d_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = BatchNorm2dFuser()
fuser = Dygraph_BatchNorm2dFuser()
fuser.operate(graph, match_kind="topo")
# 用于注册
batchnorm2d_fuse_pass = BatchNorm2dFusePass()
batchnorm2d_fuse_pass = Dygraph_BatchNorm2dFusePass()
......@@ -13,14 +13,14 @@
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class BatchNorm2dFuser(FuseBase):
class Dygraph_BatchNorm2dFuser(FuseBase):
def __init__(self):
super(BatchNorm2dFuser, self).__init__(graph_type="dygraph")
super(Dygraph_BatchNorm2dFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的batchnorm2d图结构。
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_BNScaleFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class Dygraph_BNScaleFusePass(Pass):
    """Registers the dygraph BatchNorm+Scale fusion as a named pass."""

    name = "dygraph_bn_scale_fuse_pass"

    def __init__(self):
        super(Dygraph_BNScaleFusePass, self).__init__()

    def apply(self, graph):
        # Topology-based matching; the fuser rewrites the graph in place.
        Dygraph_BNScaleFuser().operate(graph, match_kind="topo")


# Registration instance (looked up by name through the pass manager).
bn_scale_fuse_pass = Dygraph_BNScaleFusePass()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class Dygraph_BNScaleFuser(FuseBase):
    """Fuses a BatchNorm2D followed by a channel-wise multiply/add into a
    single BatchNorm2D whose weight/bias absorb the scale parameters."""

    def __init__(self):
        super(Dygraph_BNScaleFuser, self).__init__(graph_type="dygraph")

    def build_pattern(self):
        """Describe the BatchNorm + Scale subgraph to be replaced.

        Example of the generated dygraph code this pattern matches:
            bn_conv1 = self.batchnorm0(conv1)
            scale_conv1_cparam1 = self.scale_conv1_cparam1
            scale_conv1_mul = paddle.multiply(x=bn_conv1, y=scale_conv1_cparam1, axis=1)
            scale_conv1_cparam2 = self.scale_conv1_cparam2
            scale_conv1 = fluid.layers.elementwise_add(x=scale_conv1_mul, y=scale_conv1_cparam2, axis=1)
        """
        def var(idx):
            return "x" + str(idx)

        pattern = self.pattern
        pattern.add_layer(
            "paddle.nn.BatchNorm2D",
            inputs={"input": "bn-input-0"},
            outputs=[var(0)])
        pattern.add_layer(
            "self.create_parameter",
            inputs={},
            outputs=[var(1)])
        pattern.add_layer(
            "paddle.multiply",
            inputs={"x": var(0), "y": var(1)},
            outputs=[var(2)])
        pattern.add_layer(
            "self.create_parameter",
            inputs={},
            outputs=[var(3)])
        pattern.add_layer(
            "fluid.layers.elementwise_add",
            inputs={"x": var(2), "y": var(3)},
            outputs=[var(4)])
        pattern.build(inputs={"input-0": "bn-input-0"})

    def insert_new_layer(self, graph, parameters, matches):
        # Replace the first matched layer with the fused BatchNorm2D, then
        # drop it from the match set so the remaining layers are deleted.
        fused = self.gen_new_layer(parameters, matches)
        first_id = list(matches.keys())[0]
        graph.layers[first_id] = fused
        matches.pop(first_id)

    def gen_new_layer(self, parameters, matches):
        ids = list(matches.keys())
        bn_layer = matches[ids[0]]
        bn_inputs = bn_layer.inputs
        bn_name = bn_layer.outputs[0]
        attrs = bn_layer.attrs
        # The fused layer's weight/bias come from the scale parameters,
        # so the original BatchNorm attrs must not carry their own.
        attrs.pop("weight_attr")
        attrs.pop("bias_attr")
        add_layer = matches[ids[4]]
        outputs = [bn_name] + add_layer.outputs
        w_layer = matches[ids[1]]
        parameters["{}.weight".format(outputs[0])] = parameters.pop(
            w_layer.outputs[0])
        b_layer = matches[ids[3]]
        parameters["{}.bias".format(outputs[0])] = parameters.pop(
            b_layer.outputs[0])
        return PaddleLayer(
            ids[0],
            "paddle.nn.BatchNorm2D",
            inputs=bn_inputs,
            outputs=outputs,
            **attrs)
\ No newline at end of file
......@@ -12,22 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass
from x2paddle.optimizer.pytorch_optimizer.fusion import ConstantFuser
from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_ConstantFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class ConstantFusePass(Pass):
name = "constant_fuse_pass"
class Dygraph_ConstantFusePass(Pass):
name = "dygraph_constant_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = ConstantFuser()
fuser = Dygraph_ConstantFuser()
fuser.operate(graph, match_kind="topo")
# 用于注册
constant_fuse_pass = ConstantFuser()
constant_fuse_pass = Dygraph_ConstantFuser()
......@@ -13,14 +13,14 @@
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class ConstantFuser(FuseBase):
class Dygraph_ConstantFuser(FuseBase):
def __init__(self):
super(ConstantFuser, self).__init__(graph_type="dygraph")
super(Dygraph_ConstantFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的constant图结构。
......
......@@ -12,22 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass
from x2paddle.optimizer.pytorch_optimizer.fusion import DropoutFuser
from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_DropoutFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class DropoutFusePass(Pass):
name = "dropout_fuse_pass"
class Dygraph_DropoutFusePass(Pass):
name = "dygraph_dropout_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = DropoutFuser()
fuser = Dygraph_DropoutFuser()
fuser.operate(graph, match_kind="topo")
# 用于注册
dropout_fuse_pass = DropoutFuser()
dropout_fuse_pass = Dygraph_DropoutFuser()
......@@ -13,14 +13,14 @@
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class DropoutFuser(FuseBase):
class Dygraph_DropoutFuser(FuseBase):
def __init__(self):
super(DropoutFuser, self).__init__(graph_type="dygraph")
super(Dygraph_DropoutFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的constant图结构。
......
......@@ -12,22 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass
from x2paddle.optimizer.pytorch_optimizer.fusion import FcFuser
from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_FcFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class FcFusePass(Pass):
name = "fc_fuse_pass"
class Dygraph_FcFusePass(Pass):
name = "dygraph_fc_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = FcFuser()
fuser = Dygraph_FcFuser()
fuser.operate(graph, match_kind="topo")
# 用于注册
fc_fuse_pass = FcFusePass()
fc_fuse_pass = Dygraph_FcFusePass()
......@@ -13,15 +13,15 @@
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class FcFuser(FuseBase):
class Dygraph_FcFuser(FuseBase):
def __init__(self):
self.linear_index = 0
super(FcFuser, self).__init__(graph_type="dygraph")
super(Dygraph_FcFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的fc图结构。
......
......@@ -12,22 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass
from x2paddle.optimizer.pytorch_optimizer.fusion import AdaptivePool2dFuser
from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_InterpolateBilinearFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class AdaptivePool2dFusePass(Pass):
name = "adaptive_pool2d_fuse_pass"
class Dygraph_InterpolateBilinearFusePass(Pass):
name = "dygraph_interpolate_bilinear_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = AdaptivePool2dFuser()
fuser = Dygraph_InterpolateBilinearFuser()
fuser.operate(graph, match_kind="topo")
# 用于注册
adaptive_pool2d_fuse_pass = AdaptivePool2dFusePass()
interpolate_bilinear_fuse_pass = Dygraph_InterpolateBilinearFusePass()
......@@ -13,14 +13,14 @@
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class InterpolateBilinearFuser(FuseBase):
class Dygraph_InterpolateBilinearFuser(FuseBase):
def __init__(self):
super(InterpolateBilinearFuser, self).__init__(graph_type="dygraph")
super(Dygraph_InterpolateBilinearFuser, self).__init__(graph_type="dygraph")
import torch
torch_version = torch.__version__
torch_version_part = torch_version.split(".")
......
......@@ -12,22 +12,22 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass
from x2paddle.optimizer.pytorch_optimizer.fusion import ReshapeFuser
from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.dygraph import Dygraph_ReshapeFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class ReshapeFusePass(Pass):
name = "reshape_fuse_pass"
class Dygraph_ReshapeFusePass(Pass):
name = "dygraph_reshape_fuse_pass"
def __init__(self):
Pass.__init__(self)
def apply(self, graph):
fuser = ReshapeFuser()
fuser = Dygraph_ReshapeFuser()
fuser.operate(graph, match_kind="edge")
# 用于注册
reshape_fuse_pass = ReshapeFusePass()
reshape_fuse_pass = Dygraph_ReshapeFusePass()
......@@ -13,14 +13,14 @@
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pytorch_optimizer.pattern_matcher import FuseBase
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class ReshapeFuser(FuseBase):
class Dygraph_ReshapeFuser(FuseBase):
def __init__(self):
super(ReshapeFuser, self).__init__(graph_type="dygraph")
super(Dygraph_ReshapeFuser, self).__init__(graph_type="dygraph")
def build_pattern(self):
""" 描述需要替换的reshape图结构。
......
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .bn_scale_fuser import Static_BNScaleFuser
from .bn_scale_fuse_pass import Static_BNScaleFusePass
\ No newline at end of file
文件已添加
文件已添加
文件已添加
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pass_ import Pass
from x2paddle.optimizer.fusion.static import Static_BNScaleFuser
from x2paddle.optimizer.pass_manager import pass_register
@pass_register
class Static_BNScaleFusePass(Pass):
    """Registers the static-graph batch_norm+scale fusion as a named pass."""

    name = "static_bn_scale_fuse_pass"

    def __init__(self):
        super(Static_BNScaleFusePass, self).__init__()

    def apply(self, graph):
        # Topology-based matching; the fuser rewrites the graph in place.
        Static_BNScaleFuser().operate(graph, match_kind="topo")


# Registration instance (looked up by name through the pass manager).
bn_scale_fuse_pass = Static_BNScaleFusePass()
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from x2paddle.optimizer.pattern_matcher import FuseBase
from x2paddle.core.program import PaddleGraph, PaddleLayer
from x2paddle.core.util import *
class Static_BNScaleFuser(FuseBase):
    """Fuses a static-graph batch_norm followed by a channel-wise
    scale/shift (create_parameter + elementwise mul/add) into a single
    batch_norm whose param_attr/bias_attr absorb the scale weights."""

    def __init__(self):
        # NOTE(review): graph_type is "dygraph" even though this fuser
        # targets static programs -- confirm against FuseBase's semantics.
        super(Static_BNScaleFuser, self).__init__(graph_type="dygraph")

    def build_pattern(self):
        """Describe the batch_norm + scale subgraph to be replaced.

        Example of the generated static code this pattern matches:
            conv5_bn = fluid.layers.batch_norm(input=conv5, is_test=True, param_attr=None, bias_attr=None, moving_mean_name='conv5_bn_mean', moving_variance_name='conv5_bn_variance', epsilon=9.999999747378752e-06, name='conv5_bn')
            conv5_scale_scale = fluid.ParamAttr(name='conv5_scale_scale')
            conv5_scale_cparam1 = fluid.layers.create_parameter(attr=conv5_scale_scale, dtype=conv5_bn.dtype, shape=[256], name='conv5_scale_cparam1', is_bias=True, default_initializer=Constant(value=1.0))
            conv5_scale_mul = fluid.layers.elementwise_mul(x=conv5_bn, y=conv5_scale_cparam1, axis=1)
            conv5_scale_offset = fluid.ParamAttr(name='conv5_scale_offset')
            conv5_scale_cparam2 = fluid.layers.create_parameter(attr=conv5_scale_offset, dtype=conv5_bn.dtype, shape=[256], name='conv5_scale_cparam2', is_bias=True, default_initializer=Constant(value=1.0))
            conv5_scale = fluid.layers.elementwise_add(x=conv5_scale_mul, y=conv5_scale_cparam2, axis=1)
        """
        def var(idx):
            return "x" + str(idx)

        pattern = self.pattern
        pattern.add_layer(
            "fluid.layers.batch_norm",
            inputs={"input": "bn-input-0"},
            outputs=[var(0)])
        pattern.add_layer(
            "fluid.ParamAttr",
            inputs={},
            outputs=[var(1)])
        pattern.add_layer(
            "fluid.layers.create_parameter",
            inputs={"attr": var(1)},
            outputs=[var(2)])
        pattern.add_layer(
            "fluid.layers.elementwise_mul",
            inputs={"x": var(0), "y": var(2)},
            outputs=[var(3)])
        pattern.add_layer(
            "fluid.ParamAttr",
            inputs={},
            outputs=[var(4)])
        pattern.add_layer(
            "fluid.layers.create_parameter",
            inputs={"attr": var(4)},
            outputs=[var(5)])
        pattern.add_layer(
            "fluid.layers.elementwise_add",
            inputs={"x": var(3), "y": var(5)},
            outputs=[var(6)])
        pattern.build(inputs={"input-0": "bn-input-0"})

    def insert_new_layer(self, graph, parameters, matches):
        # Swap the first matched layer for the fused batch_norm, then drop
        # it from the match set so the remaining matched layers are removed.
        fused = self.gen_new_layer(parameters, matches)
        first_id = list(matches.keys())[0]
        graph.layers[first_id] = fused
        matches.pop(first_id)

    def gen_new_layer(self, parameters, matches):
        ids = list(matches.keys())
        bn = matches[ids[0]]
        bn_inputs = bn.inputs
        bn_name = bn.outputs[0]
        attrs = bn.attrs
        attrs["param_attr"] = string("{}_scale".format(bn_name))
        attrs["bias_attr"] = string("{}_offset".format(bn_name))
        # The fused layer keeps the final elementwise_add's output names.
        outputs = matches[ids[-1]].outputs
        w_param = matches[ids[1]]
        # string() wraps names in quotes; [1:-1] strips them for the
        # parameters-dict key.
        parameters[attrs["param_attr"][1:-1]] = parameters.pop(
            w_param.outputs[0])
        b_param = matches[ids[4]]
        parameters[attrs["bias_attr"][1:-1]] = parameters.pop(
            b_param.outputs[0])
        return PaddleLayer(
            ids[0],
            "fluid.layers.batch_norm",
            inputs=bn_inputs,
            outputs=outputs,
            **attrs)
\ No newline at end of file
......@@ -12,22 +12,36 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pytorch_optimizer.fusion import *
from x2paddle.optimizer.pytorch_optimizer.pass_manager import PassManager
from x2paddle.optimizer.pass_manager import PassManager
from x2paddle.optimizer.fusion.dygraph import *
from x2paddle.optimizer.fusion.static import *
class GraphOptimizer(object):
    """Runs registered fusion passes over a PaddleGraph until a fixpoint.

    Fixes in this revision: the span interleaved the stale pre-commit
    ``__init__``/``optimize`` bodies with the new ones (diff residue),
    leaving duplicate, conflicting definitions; and the unknown-framework
    branch left ``self.passes`` unset, so ``optimize`` would raise
    AttributeError. ``self.passes`` is now always initialized.
    """

    def __init__(self, source_frame, paddle_type="dygraph"):
        """Select the pass list for the source framework.

        Args:
            source_frame: source framework name ("pytorch" or "caffe").
            paddle_type: target program form, "dygraph" or "static"
                (only consulted for caffe).
        """
        if source_frame == "pytorch":
            self.passes = [
                "dygraph_constant_fuse_pass", "dygraph_batchnorm2d_fuse_pass",
                "dygraph_interpolate_bilinear_fuse_pass", "dygraph_fc_fuse_pass",
                "dygraph_adaptive_pool2d_fuse_pass", "dygraph_reshape_fuse_pass",
                "dygraph_dropout_fuse_pass"
            ]
        elif source_frame == "caffe":
            if paddle_type == "dygraph":
                self.passes = ["dygraph_bn_scale_fuse_pass"]
            else:
                self.passes = ["static_bn_scale_fuse_pass"]
        else:
            # TODO: passes for other source frameworks (tf, onnx).
            # Keep self.passes defined so optimize() is a harmless no-op.
            self.passes = []

    def optimize(self, graph):
        """Apply each pass repeatedly until the graph stops shrinking.

        A pass is re-applied while its application reduces the number of
        layers, so chained occurrences of a pattern are all fused.
        """
        for pass_name in self.passes:
            pass_ = PassManager.lookup(pass_name)()
            while True:
                before_len = len(graph.layers)
                pass_.apply(graph)
                after_len = len(graph.layers)
                if before_len == after_len:
                    break
            print("{} done!".format(pass_name))
        return graph
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.optimizer.pytorch_optimizer.pass_ import Pass
from x2paddle.optimizer.pytorch_optimizer.fusion import InterpolateBilinearFuser
from x2paddle.optimizer.pytorch_optimizer.pass_manager import pass_register
@pass_register
class InterpolateBilinearFusePass(Pass):
    """Registers the interpolate-bilinear fusion as a named pass."""

    name = "interpolate_bilinear_fuse_pass"

    def __init__(self):
        super(InterpolateBilinearFusePass, self).__init__()

    def apply(self, graph):
        # Topology-based matching; the fuser rewrites the graph in place.
        InterpolateBilinearFuser().operate(graph, match_kind="topo")


# Registration instance (looked up by name through the pass manager).
interpolate_bilinear_fuse_pass = InterpolateBilinearFusePass()
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册
反馈
建议
客服 返回
顶部