Unverified commit 58e1668e, authored by J Jason, committed by GitHub

Merge pull request #291 from PaddlePaddle/develop_code-format

code format
- repo: local
- repo: https://github.com/PaddlePaddle/mirrors-yapf.git
sha: 0d79c0c469bab64f7229c9aca2b1186ef47f0e37
hooks:
- id: yapf
name: yapf
entry: yapf
language: system
args: [-i, --style .style.yapf]
files: \.py$
- repo: https://github.com/pre-commit/pre-commit-hooks
sha: a11d9314b22d8f8c7556443875b731ef05965464
hooks:
......@@ -18,6 +14,7 @@
- id: check-symlinks
- id: check-added-large-files
- repo: local
hooks:
- id: copyright_checker
name: copyright_checker
......
language: python
python:
- '2.7'
- '3.5'
- '3.6'
script:
- if [[ $TRAVIS_PYTHON_VERSION != 2.7 ]]; then /bin/bash ./tools/check_code_style.sh; fi
......
......@@ -11,8 +11,7 @@ setuptools.setup(
version=x2paddle.__version__,
author="dltp-sz",
author_email="dltp-sz@baidu.com",
description=
"a toolkit for converting trained model to PaddlePaddle from other deep learning frameworks.",
description="a toolkit for converting trained model to PaddlePaddle from other deep learning frameworks.",
long_description=long_description,
long_description_content_type="text/plain",
url="https://github.com/PaddlePaddle/x2paddle",
......@@ -23,6 +22,4 @@ setuptools.setup(
"Operating System :: OS Independent",
],
license='Apache 2.0',
entry_points={'console_scripts': [
'x2paddle=x2paddle.convert:main',
]})
entry_points={'console_scripts': ['x2paddle=x2paddle.convert:main', ]})
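For context, the `console_scripts` entry point being reflowed here is what makes `pip install .` produce an `x2paddle` command; the generated launcher is roughly equivalent to this hypothetical wrapper:

```python
# Rough sketch of the launcher pip generates for
# 'x2paddle=x2paddle.convert:main'; illustrative, not project code.
from x2paddle.convert import main

if __name__ == '__main__':
    main()  # presumably parses flags such as -f/--framework via arg_parser()
```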
......@@ -5,12 +5,14 @@ model_dir = sys.argv[1]
new_model_dir = sys.argv[2]
exe = fluid.Executor(fluid.CPUPlace())
[inference_program, feed_target_names,
fetch_targets] = fluid.io.load_inference_model(dirname=model_dir, executor=exe)
fetch_targets] = fluid.io.load_inference_model(
dirname=model_dir, executor=exe)
print(feed_target_names)
fluid.io.save_inference_model(dirname=new_model_dir,
feeded_var_names=feed_target_names,
target_vars=fetch_targets,
executor=exe,
main_program=inference_program,
params_filename="__params__")
fluid.io.save_inference_model(
dirname=new_model_dir,
feeded_var_names=feed_target_names,
target_vars=fetch_targets,
executor=exe,
main_program=inference_program,
params_filename="__params__")
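Pieced together from the hunk above, a self-contained version of this re-save script would look as follows (assuming the Paddle 1.x `fluid` API and the imports the diff elides at the top of the file):

```python
import sys
import paddle.fluid as fluid

model_dir = sys.argv[1]      # directory holding the original inference model
new_model_dir = sys.argv[2]  # destination for the re-saved model

exe = fluid.Executor(fluid.CPUPlace())
[inference_program, feed_target_names, fetch_targets] = fluid.io.load_inference_model(
    dirname=model_dir, executor=exe)
print(feed_target_names)

# Re-save with all parameters merged into a single "__params__" file.
fluid.io.save_inference_model(
    dirname=new_model_dir,
    feeded_var_names=feed_target_names,
    target_vars=fetch_targets,
    executor=exe,
    main_program=inference_program,
    params_filename="__params__")
```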
......@@ -48,8 +48,7 @@ def arg_parser():
"-f",
type=_text_type,
default=None,
help=
"define which deeplearning framework(tensorflow/caffe/onnx/paddle2onnx)"
help="define which deeplearning framework(tensorflow/caffe/onnx/paddle2onnx)"
)
parser.add_argument(
"--caffe_proto",
......@@ -126,7 +125,6 @@ def tf2paddle(model_path,
optimizer.merge_bias()
optimizer.optimize_sub_graph()
# optimizer.merge_batch_norm()
# optimizer.merge_prelu()
else:
......
......@@ -46,8 +46,9 @@ class Layer(object):
for input in self.inputs:
if isinstance(input, GraphNode):
if hasattr(input, "index"):
in_list += (input.layer_name +
"[{}]".format(input.index) + ", ")
in_list += (
input.layer_name + "[{}]".format(input.index) + ", "
)
else:
in_list += (input.layer_name + ", ")
elif isinstance(input, six.string_types):
......@@ -71,8 +72,8 @@ class Layer(object):
layer_code = layer_code + key + "={}, ".format(input)
elif isinstance(self.inputs, GraphNode):
if hasattr(self.inputs, "index"):
layer_code += (self.inputs.layer_name +
"[{}]".format(self.inputs.index))
layer_code += (
self.inputs.layer_name + "[{}]".format(self.inputs.index))
else:
layer_code += (self.inputs.layer_name)
if self.op != "=":
......
......@@ -64,10 +64,8 @@ def run_net(param_dir="./"):
b = os.path.exists(os.path.join(param_dir, var.name))
return b
fluid.io.load_vars(exe,
param_dir,
fluid.default_main_program(),
predicate=if_exist)
fluid.io.load_vars(
exe, param_dir, fluid.default_main_program(), predicate=if_exist)
class OpMapper(object):
......@@ -98,8 +96,8 @@ class OpMapper(object):
def add_codes(self, codes, indent=0):
if isinstance(codes, list):
for code in codes:
self.paddle_codes += (self.tab * indent + code.strip('\n') +
'\n')
self.paddle_codes += (
self.tab * indent + code.strip('\n') + '\n')
elif isinstance(codes, str):
self.paddle_codes += (self.tab * indent + codes.strip('\n') + '\n')
else:
......@@ -135,24 +133,25 @@ class OpMapper(object):
os.path.join(os.path.join(py_code_dir, var.name)))
return b
fluid.io.load_vars(exe,
py_code_dir,
fluid.default_main_program(),
predicate=if_exist)
fluid.io.load_vars(
exe,
py_code_dir,
fluid.default_main_program(),
predicate=if_exist)
if params_merge:
fluid.io.save_inference_model(dirname=os.path.join(
save_dir, "inference_model"),
feeded_var_names=input_names,
target_vars=outputs,
executor=exe,
params_filename="__params__")
fluid.io.save_inference_model(
dirname=os.path.join(save_dir, "inference_model"),
feeded_var_names=input_names,
target_vars=outputs,
executor=exe,
params_filename="__params__")
else:
fluid.io.save_inference_model(dirname=os.path.join(
save_dir, "inference_model"),
feeded_var_names=input_names,
target_vars=outputs,
executor=exe,
params_filename=None)
fluid.io.save_inference_model(
dirname=os.path.join(save_dir, "inference_model"),
feeded_var_names=input_names,
target_vars=outputs,
executor=exe,
params_filename=None)
except:
raise Exception(
"Paddle code was saved in {}/model.py, but seems there's wrong exist, please check model.py manually."
......
......@@ -49,13 +49,11 @@ class CaffeResolver(object):
class CaffeGraphNode(GraphNode):
def __init__(self, layer, type_str, layer_name=None):
if layer_name is None:
super(CaffeGraphNode,
self).__init__(layer,
layer.name.replace('/', '_').replace('-', '_'))
super(CaffeGraphNode, self).__init__(
layer, layer.name.replace('/', '_').replace('-', '_'))
else:
super(CaffeGraphNode,
self).__init__(layer,
layer_name.replace('/', '_').replace('-', '_'))
super(CaffeGraphNode, self).__init__(
layer, layer_name.replace('/', '_').replace('-', '_'))
self.layer_type = type_str
self.fluid_code = FluidCode()
self.data = None
......@@ -268,8 +266,8 @@ class CaffeDecoder(object):
c_i = blob.channels
h = blob.height
w = blob.width
data = np.asarray(list(blob.data),
dtype=np.float32).reshape(c_o, c_i, h, w)
data = np.asarray(
list(blob.data), dtype=np.float32).reshape(c_o, c_i, h, w)
transformed.append(data)
return transformed
This diff is collapsed.
......@@ -71,9 +71,8 @@ class ONNXGraphNode(GraphNode):
if attr.type == onnx.AttributeProto.TENSOR:
dtype = np.dtype(TENSOR_TYPE_TO_NP_TYPE[attr.t.data_type])
data = attr.t.raw_data
value = np.frombuffer(data,
dtype=dtype,
count=(len(data) // dtype.itemsize))
value = np.frombuffer(
data, dtype=dtype, count=(len(data) // dtype.itemsize))
elif attr.type == onnx.AttributeProto.STRING:
value = attr.s
value = value.decode() if isinstance(value, bytes) else value
......@@ -205,9 +204,8 @@ class ONNXGraph(Graph):
self.node_map[name].weight = weight
self.node_map[name].embeded_as = []
else:
self.node_map[name] = ONNXGraphDataNode(initializer,
layer_name=name,
is_global_input=False)
self.node_map[name] = ONNXGraphDataNode(
initializer, layer_name=name, is_global_input=False)
self.node_map[name].weight = weight
self.node_map[name].embeded_as = []
......@@ -494,8 +492,8 @@ class ONNXDecoder(object):
sess = rt.InferenceSession(model_path)
for ipt in sess.get_inputs():
datatype = datatype_map[ipt.type]
input_dict[ipt.name] = np.random.random(
ipt.shape).astype(datatype)
input_dict[ipt.name] = np.random.random(ipt.shape).astype(
datatype)
res = sess.run(None, input_feed=input_dict)
except:
......
......@@ -120,13 +120,13 @@ class TFGraph(Graph):
def build(self):
for layer in self.model.node:
self.node_map[layer.name.replace('/', '_').replace(
'-', '_')] = TFGraphNode(layer, data_format=self.tf_data_format)
'-', '_')] = TFGraphNode(
layer, data_format=self.tf_data_format)
for layer_name, node in self.node_map.items():
for in_node in node.layer.input:
in_node = in_node.replace('/',
'_').replace('-',
'_').replace('^', '')
in_node = in_node.replace('/', '_').replace('-', '_').replace(
'^', '')
if in_node not in self.node_map:
if in_node.strip().split(':')[0] in self.node_map:
self.connect(in_node.strip().split(':')[0], layer_name)
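An aside on this hunk: the replace chain turns TF node names into valid Python identifiers, drops the `^` marking control-dependency inputs, and `name:0`-style tensor references are resolved by stripping the port suffix. A small illustration of that normalization (a sketch, not the decoder's code):

```python
def sanitize(tf_name):
    # Mirrors the replace chain in TFGraph.build(): '/', '-' -> '_', drop '^'.
    return tf_name.replace('/', '_').replace('-', '_').replace('^', '')

assert sanitize('scope/conv-1') == 'scope_conv_1'
assert sanitize('^scope/dep') == 'scope_dep'          # control dependency
assert sanitize('scope/conv-1:0').split(':')[0] == 'scope_conv_1'  # port suffix
```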
......@@ -390,10 +390,10 @@ class TFDecoder(object):
shape=shape,
name="x2paddle_{}".format(layer.name))
except:
x2paddle_input = tf.placeholder(dtype=dtype,
shape=shape,
name="x2paddle_{}".format(
layer.name))
x2paddle_input = tf.placeholder(
dtype=dtype,
shape=shape,
name="x2paddle_{}".format(layer.name))
input_map["{}:0".format(layer.name)] = x2paddle_input
if shape.count(None) > 0:
......
......@@ -122,16 +122,17 @@ def convolutiondepthwise_layer(inputs,
c_out = num_output if num_output is not None else input_shape[0][1]
group = int(c_in / (c_in / c_out)) if c_in > c_out else int(c_in /
(c_out / c_in))
out = fluid.layers.conv2d(input,
dilation=[dila_h, dila_w],
filter_size=[k_h, k_w],
stride=[s_h, s_w],
padding=[p_h, p_w],
groups=group,
num_filters=c_out,
param_attr=name + '_weights',
bias_attr=name + '_bias',
name=name)
out = fluid.layers.conv2d(
input,
dilation=[dila_h, dila_w],
filter_size=[k_h, k_w],
stride=[s_h, s_w],
padding=[p_h, p_w],
groups=group,
num_filters=c_out,
param_attr=name + '_weights',
bias_attr=name + '_bias',
name=name)
return out
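The `group` expression above is worth a gloss: assuming one channel count is a multiple of the other, it yields a group count that divides both `c_in` and `c_out`, which is how Caffe's ConvolutionDepthwise is emulated with a grouped `fluid.layers.conv2d`. A quick check of the arithmetic (illustrative only):

```python
# Worked check of the group formula used by convolutiondepthwise_layer.
def depthwise_group(c_in, c_out):
    return int(c_in / (c_in / c_out)) if c_in > c_out else int(c_in / (c_out / c_in))

assert depthwise_group(32, 32) == 32  # classic depthwise: one group per channel
assert depthwise_group(64, 32) == 32  # c_in > c_out: group == c_out
assert depthwise_group(32, 64) == 16  # c_in < c_out: group == c_in**2 // c_out
```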
......@@ -142,7 +143,8 @@ def convolutiondepthwise_weights(name, data=None):
return weights_name
register(kind='ConvolutionDepthwise',
shape=convolutiondepthwise_shape,
layer=convolutiondepthwise_layer,
weights=convolutiondepthwise_weights)
register(
kind='ConvolutionDepthwise',
shape=convolutiondepthwise_shape,
layer=convolutiondepthwise_layer,
weights=convolutiondepthwise_weights)
......@@ -37,8 +37,8 @@ def detectionoutput_layer(inputs,
pbv = fluid.layers.reshape(x=pbv, shape=[-1, 4])
mbox_loc = inputs[0]
mbox_loc = fluid.layers.reshape(x=mbox_loc, shape=[-1, pb.shape[0], 4])
mbox_conf_flatten = fluid.layers.reshape(x=mbox_conf_flatten,
shape=[0, pb.shape[0], -1])
mbox_conf_flatten = fluid.layers.reshape(
x=mbox_conf_flatten, shape=[0, pb.shape[0], -1])
default = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
fields = ['eta', 'top_k', 'nms_threshold']
......@@ -64,7 +64,8 @@ def detectionoutput_weights(name, data=None):
return weights_name
register(kind='DetectionOutput',
shape=detectionoutput_shape,
layer=detectionoutput_layer,
weights=detectionoutput_weights)
register(
kind='DetectionOutput',
shape=detectionoutput_shape,
layer=detectionoutput_layer,
weights=detectionoutput_weights)
......@@ -20,9 +20,8 @@ def normalize_layer(inputs,
attr=name + '_scale')
scale_param = fluid.layers.reshape(x=scale_param, \
shape=[1] if channel_shared else [input_shape[0][1]])
out = fluid.layers.elementwise_mul(x=l2_norm,
y=scale_param,
axis=-1 if channel_shared else 1)
out = fluid.layers.elementwise_mul(
x=l2_norm, y=scale_param, axis=-1 if channel_shared else 1)
return out
......@@ -31,7 +30,8 @@ def normalize_weights(name, data=None):
return weights_name
register(kind='Normalize',
shape=normalize_shape,
layer=normalize_layer,
weights=normalize_weights)
register(
kind='Normalize',
shape=normalize_shape,
layer=normalize_layer,
weights=normalize_weights)
......@@ -23,7 +23,8 @@ def permute_weights(name, data=None):
return weights_name
register(kind='Permute',
shape=permute_shape,
layer=permute_layer,
weights=permute_weights)
register(
kind='Permute',
shape=permute_shape,
layer=permute_layer,
weights=permute_weights)
......@@ -30,18 +30,19 @@ def priorbox_layer(inputs,
steps = tuple(step) if type(step) is list or type(step) is tuple else (step,
step)
box, variance_ = fluid.layers.prior_box(input,
image,
min_sizes=min_size,
max_sizes=max_size,
aspect_ratios=aspect_ratio,
variance=variance,
flip=flip,
clip=clip,
steps=steps,
offset=offset,
name=name,
min_max_aspect_ratios_order=True)
box, variance_ = fluid.layers.prior_box(
input,
image,
min_sizes=min_size,
max_sizes=max_size,
aspect_ratios=aspect_ratio,
variance=variance,
flip=flip,
clip=clip,
steps=steps,
offset=offset,
name=name,
min_max_aspect_ratios_order=True)
box = fluid.layers.reshape(box, [1, 1, -1])
variance_ = fluid.layers.reshape(variance_, [1, 1, -1])
out = fluid.layers.concat([box, variance_], axis=1)
......@@ -53,7 +54,8 @@ def priorbox_weights(name, data=None):
return weights_name
register(kind='PriorBox',
shape=priorbox_shape,
layer=priorbox_layer,
weights=priorbox_weights)
register(
kind='PriorBox',
shape=priorbox_shape,
layer=priorbox_layer,
weights=priorbox_weights)
......@@ -23,8 +23,7 @@ def register(kind, shape, layer, weights):
kind = [kind]
else:
assert type(
kind
) is list, 'invalid param "kind" for register, not a list or str'
kind) is list, 'invalid param "kind" for register, not a list or str'
for k in kind:
assert type(
......
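Both register() hunks in this commit reformat the same guard: a bare string `kind` is wrapped in a list before iteration. A minimal sketch of the pattern (the registry dict and stored fields here are assumptions, not the project's actual code):

```python
# Hedged sketch of the kind-normalization the asserts above guard.
g_custom_layers = {}  # assumed registry: kind -> callbacks

def register(kind, shape, layer, weights):
    if isinstance(kind, str):
        kind = [kind]
    assert isinstance(kind, list), 'invalid param "kind" for register, not a list or str'
    for k in kind:
        assert isinstance(k, str), 'invalid kind, must be str'
        g_custom_layers[k] = dict(shape=shape, layer=layer, weights=weights)
```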
......@@ -21,11 +21,12 @@ def roipooling_layer(inputs,
input = inputs[0]
roi = inputs[1]
roi = fluid.layers.slice(roi, axes=[1], starts=[1], ends=[5])
out = fluid.layers.roi_pool(input,
roi,
pooled_height=pooled_h,
pooled_width=pooled_w,
spatial_scale=spatial_scale)
out = fluid.layers.roi_pool(
input,
roi,
pooled_height=pooled_h,
pooled_width=pooled_w,
spatial_scale=spatial_scale)
return out
......@@ -34,7 +35,8 @@ def roipooling_weights(name, data=None):
return weights_name
register(kind='ROIPooling',
shape=roipooling_shape,
layer=roipooling_layer,
weights=roipooling_weights)
register(
kind='ROIPooling',
shape=roipooling_shape,
layer=roipooling_layer,
weights=roipooling_weights)
......@@ -30,11 +30,12 @@ def select_layer(inputs,
out = []
for i in range(len(slice_point)):
out.append(
fluid.layers.slice(input,
axes=[axis],
starts=[slice_point[i]],
ends=[slice_point[i + 1]],
name=name + '_' + str(i)))
fluid.layers.slice(
input,
axes=[axis],
starts=[slice_point[i]],
ends=[slice_point[i + 1]],
name=name + '_' + str(i)))
if i == len(slice_point) - 2:
break
return out
......@@ -45,7 +46,8 @@ def select_weights(name, data=None):
return weights_name
register(kind='Select',
shape=select_shape,
layer=select_layer,
weights=select_weights)
register(
kind='Select',
shape=select_shape,
layer=select_layer,
weights=select_weights)
......@@ -17,7 +17,8 @@ def shufflechannel_weights(name, data=None):
return weights_name
register(kind='ShuffleChannel',
shape=shufflechannel_shape,
layer=shufflechannel_layer,
weights=shufflechannel_weights)
register(
kind='ShuffleChannel',
shape=shufflechannel_shape,
layer=shufflechannel_layer,
weights=shufflechannel_weights)
......@@ -33,8 +33,8 @@ def get_kernel_parameters(params):
[s_h, s_w] = [params.stride] * 2
elif len(params.stride) > 0:
s_h = params.stride_h if params.stride_h > 0 else params.stride[0]
s_w = params.stride_w if params.stride_w > 0 else params.stride[
len(params.stride) - 1]
s_w = params.stride_w if params.stride_w > 0 else params.stride[len(
params.stride) - 1]
elif params.stride_h > 0 or params.stride_w > 0:
s_h = params.stride_h
s_w = params.stride_w
......
......@@ -24,21 +24,18 @@ def InstanceNormalization_layer(inputs, name=None):
epsilon = 1e-5
input_ = inputs[0]
mean = fluid.layers.reduce_mean(input_, dim=[2, 3], keep_dim=True)
var = fluid.layers.reduce_mean(fluid.layers.square(input_ - mean),
dim=[2, 3],
keep_dim=True)
var = fluid.layers.reduce_mean(
fluid.layers.square(input_ - mean), dim=[2, 3], keep_dim=True)
if name is not None:
scale_name = name + "_scale"
offset_name = name + "_offset"
scale_param = inputs[1]
offset_param = inputs[2]
scale = fluid.layers.create_parameter(name=scale_param.name,
shape=input_.shape[1:2],
dtype="float32")
offset = fluid.layers.create_parameter(name=offset_param.name,
shape=input_.shape[1:2],
dtype="float32")
scale = fluid.layers.create_parameter(
name=scale_param.name, shape=input_.shape[1:2], dtype="float32")
offset = fluid.layers.create_parameter(
name=offset_param.name, shape=input_.shape[1:2], dtype="float32")
tmp = fluid.layers.elementwise_mul(x=(input_ - mean), y=scale, axis=1)
tmp = tmp / fluid.layers.sqrt(var + epsilon)
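As a numeric cross-check of this layer, a NumPy sketch of the same instance-norm computation (the trailing `+ offset` is elided by the hunk but implied by the `_offset` parameter; the per-channel reshape mirrors the `axis=1` elementwise ops):

```python
import numpy as np

def instance_norm_ref(x, scale, offset, eps=1e-5):
    # Reference only: x is (N, C, H, W); each (H, W) plane is normalized
    # per sample and channel, then scaled and shifted per channel.
    mean = x.mean(axis=(2, 3), keepdims=True)
    var = ((x - mean) ** 2).mean(axis=(2, 3), keepdims=True)
    y = scale.reshape(1, -1, 1, 1) * (x - mean) / np.sqrt(var + eps)
    return y + offset.reshape(1, -1, 1, 1)
```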
......@@ -51,8 +48,9 @@ def InstanceNormalization_weights(name, data=None):
return weights_name
register(kind='InstanceNormalization',
shape=InstanceNormalization_shape,
layer=InstanceNormalization_layer,
child_func=None,
weights=InstanceNormalization_weights)
register(
kind='InstanceNormalization',
shape=InstanceNormalization_shape,
layer=InstanceNormalization_layer,
child_func=None,
weights=InstanceNormalization_weights)
......@@ -36,8 +36,7 @@ def register(kind, shape, layer, child_func, weights):
kind = [kind]
else:
assert type(
kind
) is list, 'invalid param "kind" for register, not a list or str'
kind) is list, 'invalid param "kind" for register, not a list or str'
for k in kind:
assert type(
......
......@@ -28,60 +28,49 @@ default_op_mapping_field_values['FILL_NAME_FIELD'] = True
default_op_mapping = {
'Shape': ['shape', ['X'], ['Out']],
'Clip': [
'clip', ['X'], ['Out'],
dict(),
dict(
min=(_np.asarray([255, 255, 127, 255],
dtype=_np.uint8).view(_np.float32)[0]),
max=(_np.asarray([255, 255, 127, 127],
dtype=_np.uint8).view(_np.float32)[0]),
)
'clip', ['X'], ['Out'], dict(), dict(
min=(_np.asarray(
[255, 255, 127, 255], dtype=_np.uint8).view(_np.float32)[0]),
max=(_np.asarray(
[255, 255, 127, 127], dtype=_np.uint8).view(_np.float32)[0]), )
],
'Erf': ['erf', ['X'], ['Out']],
'Ceil': ['ceil', ['X'], ['Out']],
'ReduceMean': [
'reduce_mean', ['X'], ['Out'],
dict(axes='dim', keepdims='keep_dim'),
dict(keep_dim=1)
'reduce_mean', ['X'], ['Out'], dict(
axes='dim', keepdims='keep_dim'), dict(keep_dim=1)
],
'ReduceSum': [
'reduce_sum', ['X'], ['Out'],
dict(axes='dim', keepdims='keep_dim'),
dict(keep_dim=1)
'reduce_sum', ['X'], ['Out'], dict(
axes='dim', keepdims='keep_dim'), dict(keep_dim=1)
],
'ReduceMin': [
'reduce_min', ['X'], ['Out'],
dict(axes='dim', keepdims='keep_dim'),
dict(keep_dim=1)
'reduce_min', ['X'], ['Out'], dict(
axes='dim', keepdims='keep_dim'), dict(keep_dim=1)
],
'ReduceMax': [
'reduce_max', ['X'], ['Out'],
dict(axes='dim', keepdims='keep_dim'),
dict(keep_dim=1)
'reduce_max', ['X'], ['Out'], dict(
axes='dim', keepdims='keep_dim'), dict(keep_dim=1)
],
#active function
'Relu': ['relu', ['X'], ['Out']],
'LeakyRelu': ['leaky_relu', ['X'], ['Out'],
dict(), dict(alpha=.01)],
'Elu': ['elu', ['X'], ['Out'],
dict(), dict(alpha=1.)],
'LeakyRelu': ['leaky_relu', ['X'], ['Out'], dict(), dict(alpha=.01)],
'Elu': ['elu', ['X'], ['Out'], dict(), dict(alpha=1.)],
'ThresholdedRelu': [
'thresholded_relu', ['X'], ['Out'],
dict(alpha='threshold'),
'thresholded_relu', ['X'], ['Out'], dict(alpha='threshold'),
dict(alpha=1.)
],
'Tanh': ['tanh', ['X'], ['Out']],
'Sigmoid': ['sigmoid', ['X'], ['Out']],
'HardSigmoid': [
'hard_sigmoid', ['X'], ['Out'],
dict(alpha='slope', beta='offset'),
dict(slope=.2, offset=.5)
'hard_sigmoid', ['X'], ['Out'], dict(
alpha='slope', beta='offset'), dict(
slope=.2, offset=.5)
],
'Softsign': ['softsign', ['X'], ['Out']],
'Softplus': ['softplus', ['X'], ['Out']],
'Exp': ['exp', ['X'], ['Out']],
'Softmax': ['softmax', ['X'], ['Out'],
dict(), dict(axis=1)],
'Softmax': ['softmax', ['X'], ['Out'], dict(), dict(axis=1)],
'Sqrt': ['sqrt', ['X'], ['Out']],
'Floor': ['floor', ['X'], ['Out']],
'Abs': ['abs', ['X'], ['Out']],
......
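Two notes on this table: the `Clip` defaults are bit-pattern constructions of ±FLT_MAX (the uint8 arrays reinterpreted as float32), and the visible entries suggest each value is a list of up to five fields: fluid op name, input keys, output keys, attribute renames, and default attribute values (the unpacking hunk further down adds `input_perm`/`output_perm`/`fill_name_field` slots). A hypothetical consumer of such an entry:

```python
def map_default(node, op_type, mapping):
    # Hypothetical sketch of consuming a default_op_mapping entry; `node`,
    # `attr_map` and add_layer are stand-ins for the mapper's real objects.
    info = list(mapping[op_type]) + [{}, {}]       # pad the optional attr fields
    fluid_op, input_keys, output_keys = info[0], info[1], info[2]
    attr_renames, attrs = info[3], dict(info[4])   # e.g. axes -> dim, keep_dim=1
    for onnx_name, fluid_name in attr_renames.items():
        if onnx_name in node.attr_map:
            attrs[fluid_name] = node.attr_map[onnx_name]
    node.fluid_code.add_layer(
        fluid_op, inputs=node.inputs, output=node, param_attr=attrs)
```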
......@@ -140,8 +140,8 @@ class ONNXOpMapper(OpMapper):
model.graph.ClearField('output')
model.graph.output.MergeFrom(model.graph.value_info)
onnx.save(model, os.path.join(self.tmp_data_dir,
'onnx_model_infer.onnx'))
onnx.save(model,
os.path.join(self.tmp_data_dir, 'onnx_model_infer.onnx'))
sess = rt.InferenceSession(
os.path.join(self.tmp_data_dir, 'onnx_model_infer.onnx'))
res = sess.run(None, input_feed=inputs_dict)
......@@ -217,8 +217,7 @@ class ONNXOpMapper(OpMapper):
default_attrs,
input_perm,
output_perm,
fill_name_field,
) = info
fill_name_field, ) = info
if fluid_op in default_ioa_constraint:
for predicate, message in default_ioa_constraint[fluid_op]:
......@@ -429,10 +428,8 @@ class ONNXOpMapper(OpMapper):
}
node.fluid_code.add_layer(
'roi_align',
inputs={
'input': val_x,
'rois': val_rois
},
inputs={'input': val_x,
'rois': val_rois},
output=node,
param_attr=attr)
......@@ -449,10 +446,8 @@ class ONNXOpMapper(OpMapper):
}
node.fluid_code.add_layer(
'roi_pool',
inputs={
'input': val_x,
'rois': val_rois
},
inputs={'input': val_x,
'rois': val_rois},
output=node,
param_attr=attr)
......@@ -527,10 +522,8 @@ class ONNXOpMapper(OpMapper):
val_y = self.graph.get_input_node(node, idx=1, copy=True)
node.fluid_code.add_layer(
'greater_than',
inputs={
'x': val_x,
'y': val_y
},
inputs={'x': val_x,
'y': val_y},
output=node,
param_attr=None)
......@@ -549,11 +542,10 @@ class ONNXOpMapper(OpMapper):
shape = val_output.out_shapes[0]
if shape is None:
shape = list(value.shape)
_logger.warning(
'in (Constant -> %s): '
'attribute "shape" of %s not inferred, '
'using value as 1-D tensor may lead to fails',
val_output.layer_name, val_output.layer_name)
_logger.warning('in (Constant -> %s): '
'attribute "shape" of %s not inferred, '
'using value as 1-D tensor may lead to fails',
val_output.layer_name, val_output.layer_name)
if len(value) == 1:
value = value.tolist()
......@@ -616,10 +608,8 @@ class ONNXOpMapper(OpMapper):
if axis == 0 and len(indices_shape) <= 1:
node.fluid_code.add_layer(
'gather',
inputs={
'input': val_x,
'index': indices
},
inputs={'input': val_x,
'index': indices},
output=node,
param_attr=None)
elif axis > 0 and len(indices_shape) <= 1:
......@@ -634,10 +624,8 @@ class ONNXOpMapper(OpMapper):
param_attr=attr_trans)
node.fluid_code.add_layer(
'gather',
inputs={
'input': name_trans,
'index': indices
},
inputs={'input': name_trans,
'index': indices},
output=node,
param_attr=None)
node.fluid_code.add_layer(
......@@ -649,9 +637,7 @@ class ONNXOpMapper(OpMapper):
'reshape',
inputs=indices,
output=indices,
param_attr={'shape': [
reshape_shape,
]})
param_attr={'shape': [reshape_shape, ]})
perm = list(range(len(val_x.out_shapes[0])))
perm = [axis] + perm[:axis] + perm[axis + 1:]
......@@ -664,10 +650,8 @@ class ONNXOpMapper(OpMapper):
param_attr=attr_trans)
node.fluid_code.add_layer(
'gather',
inputs={
'input': name_trans,
'index': indices
},
inputs={'input': name_trans,
'index': indices},
output=node,
param_attr=None)
node.fluid_code.add_layer(
......@@ -926,8 +910,10 @@ class ONNXOpMapper(OpMapper):
def Sum(self, node):
val_inps = node.layer.input
inputs = {
"x": self.graph.get_input_node(node, idx=0, copy=True),
"y": self.graph.get_input_node(node, idx=1, copy=True),
"x": self.graph.get_input_node(
node, idx=0, copy=True),
"y": self.graph.get_input_node(
node, idx=1, copy=True),
}
node.fluid_code.add_layer("elementwise_add", inputs=inputs, output=node)
......@@ -1022,10 +1008,8 @@ class ONNXOpMapper(OpMapper):
val_y = self.graph.get_input_node(node, idx=1, copy=True)
node.fluid_code.add_layer(
"equal",
inputs={
'x': val_x,
'y': val_y
},
inputs={'x': val_x,
'y': val_y},
output=node,
param_attr=None)
......@@ -1055,29 +1039,23 @@ class ONNXOpMapper(OpMapper):
mul_val_x = val_x.layer_name + '_mul'
node.fluid_code.add_layer(
"elementwise_mul",
inputs={
'x': val_x,
'y': cast_condition
},
inputs={'x': val_x,
'y': cast_condition},
output=mul_val_x,
param_attr=None)
mul_val_y = val_y.layer_name + '_mul'
node.fluid_code.add_layer(
"elementwise_mul",
inputs={
'x': val_y,
'y': cast_not_condition
},
inputs={'x': val_y,
'y': cast_not_condition},
output=mul_val_y,
param_attr=None)
node.fluid_code.add_layer(
"elementwise_add",
inputs={
'x': mul_val_x,
'y': mul_val_y
},
inputs={'x': mul_val_x,
'y': mul_val_y},
output=node,
param_attr=None)
......@@ -1106,7 +1084,8 @@ class ONNXOpMapper(OpMapper):
output=flatten_name,
param_attr={'axis': 0})
node.fluid_code.add_layer(
"concat", inputs=flatten_names, output=node, param_attr={'axis': 0})
"concat", inputs=flatten_names, output=node,
param_attr={'axis': 0})
def Identity(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
......@@ -1280,11 +1259,11 @@ class ONNXOpMapper(OpMapper):
output_size = [0, 0]
output_size[0] = (val_x.out_shapes[0][2] -
1) * strides[0] - 2 * paddings[0] + dilations[0] * (
output_size[0] = (val_x.out_shapes[0][2] - 1
) * strides[0] - 2 * paddings[0] + dilations[0] * (
kernel_shape[0] - 1) + 1 + out_padding[0]
output_size[1] = (val_x.out_shapes[0][3] -
1) * strides[1] - 2 * paddings[1] + dilations[1] * (
output_size[1] = (val_x.out_shapes[0][3] - 1
) * strides[1] - 2 * paddings[1] + dilations[1] * (
kernel_shape[1] - 1) + 1 + out_padding[1]
attr = {
'num_filters': num_out_channels,
......@@ -1367,29 +1346,23 @@ class ONNXOpMapper(OpMapper):
'squeeze',
inputs=val_x,
output=var_x0,
param_attr={
'axes': [1],
'name': string(var_x0)
})
param_attr={'axes': [1],
'name': string(var_x0)})
var_w0 = node.layer_name + '_w0'
node.fluid_code.add_layer(
'squeeze',
inputs=val_w,
output=var_w0,
param_attr={
'axes': [0],
'name': string(var_w0)
})
param_attr={'axes': [0],
'name': string(var_w0)})
var_fc = node.layer_name + '_fc'
var_mm = (node.layer_name + '_mm') if val_b else var_fc
node.fluid_code.add_layer(
'matmul',
inputs={
'x': var_x0,
'y': var_w0
},
inputs={'x': var_x0,
'y': var_w0},
output=var_mm,
param_attr={
'transpose_x': 0,
......@@ -1402,10 +1375,8 @@ class ONNXOpMapper(OpMapper):
'squeeze',
inputs=val_r,
output=var_r0,
param_attr={
'axes': [0],
'name': string(var_r0)
})
param_attr={'axes': [0],
'name': string(var_r0)})
var_r0t = node.layer_name + '_r0t'
......@@ -1413,10 +1384,8 @@ class ONNXOpMapper(OpMapper):
'transpose',
inputs=var_r0,
output=var_r0t,
param_attr={
'perm': [1, 0],
'name': string(var_r0t)
})
param_attr={'perm': [1, 0],
'name': string(var_r0t)})
if val_b:
var_bi = node.layer_name + '_bi'
var_bh = node.layer_name + '_bh'
......@@ -1434,10 +1403,8 @@ class ONNXOpMapper(OpMapper):
'squeeze',
inputs=var_bi,
output=var_bi0,
param_attr={
'axes': [0],
'name': string(var_bi0)
})
param_attr={'axes': [0],
'name': string(var_bi0)})
node.fluid_code.add_layer(
'elementwise_add',
......@@ -1454,10 +1421,8 @@ class ONNXOpMapper(OpMapper):
'squeeze',
inputs=val_xh,
output=var_xh0,
param_attr={
'axes': [1],
'name': string(var_xh0)
})
param_attr={'axes': [1],
'name': string(var_xh0)})
var_y00 = node.layer_name + '_y00'
attr = {
......
......@@ -30,8 +30,8 @@ def im2sequence(op, block):
slice_blocks = list()
for i in range(out_h):
for j in range(out_w):
starts_name = "im2sequence.starts.{}.{}.{}".format(
im2seq_counter, i, j)
starts_name = "im2sequence.starts.{}.{}.{}".format(im2seq_counter,
i, j)
starts_tensor = helper.make_tensor(
name=starts_name,
data_type=onnx_pb.TensorProto.INT64,
......
......@@ -44,8 +44,7 @@ def multiclass_nms(op, block):
if normalized == False:
warnings.warn(
'The parameter normalized of multiclass_nms OP of Paddle is False, which differs from ONNX. \
Please set normalized=True in multiclass_nms of Paddle'
)
Please set normalized=True in multiclass_nms of Paddle')
#convert the paddle attribute to onnx tensor
name_score_threshold = [outputs['Out'][0] + "@score_threshold"]
......@@ -353,7 +352,8 @@ def multiclass_nms(op, block):
outputs_gather_topk_class = [result_name + "@gather_topk_class"]
node_gather_topk_class = onnx.helper.make_node(
'Gather',
inputs=outputs_gather_1_nonzero + [outputs_topk_select_topk_indices[1]],
inputs=outputs_gather_1_nonzero +
[outputs_topk_select_topk_indices[1]],
outputs=outputs_gather_topk_class,
axis=1)
node_list.append(node_gather_topk_class)
......@@ -362,7 +362,8 @@ def multiclass_nms(op, block):
outputs_gather_topk_boxes_id = [result_name + "@gather_topk_boxes_id"]
node_gather_topk_boxes_id = onnx.helper.make_node(
'Gather',
inputs=outputs_gather_2_nonzero + [outputs_topk_select_topk_indices[1]],
inputs=outputs_gather_2_nonzero +
[outputs_topk_select_topk_indices[1]],
outputs=outputs_gather_topk_boxes_id,
axis=1)
node_list.append(node_gather_topk_boxes_id)
......
......@@ -4,8 +4,6 @@ from onnx import onnx_pb, helper
def get_old_name(arg, name_prefix=''):
"""Get the old rame for a possible renamed argument
"""
prefix_index = arg.find(name_prefix)
if prefix_index != -1:
......@@ -40,8 +38,8 @@ def yolo_box(op, block):
downsample_ratio = attrs['downsample_ratio']
input_size = input_height * downsample_ratio
conf_thresh = attrs['conf_thresh']
conf_thresh_mat = np.ones([num_anchors * input_height * input_width
]) * conf_thresh
conf_thresh_mat = np.ones([num_anchors * input_height *
input_width]) * conf_thresh
node_list = []
im_outputs = []
......
......@@ -250,8 +250,7 @@ class PaddleOpMapper(object):
node = helper.make_node(
pool_type[op.attr('pooling_type')][1],
inputs=op.input('X'),
outputs=op.output('Out'),
)
outputs=op.output('Out'), )
else:
input_shape = block.var(op.input('X')[0]).shape
k_size = op.attr('ksize')
......@@ -407,8 +406,7 @@ class PaddleOpMapper(object):
node = helper.make_node(
'Clip',
inputs=[op.input('X')[0], min_name, max_name],
outputs=op.output('Out'),
)
outputs=op.output('Out'), )
return [min_node, max_node, node]
def shape(self, op, block):
......@@ -450,8 +448,7 @@ class PaddleOpMapper(object):
node = helper.make_node(
"Slice",
inputs=[op.input('Input')[0], starts_name, ends_name, axes_name],
outputs=op.output('Out'),
)
outputs=op.output('Out'), )
return [starts_node, ends_node, axes_node, node]
def fill_constant(self, op, block):
......@@ -551,8 +548,8 @@ class PaddleOpMapper(object):
if op.attr('align_corners'):
coordinate_transformation_mode = 'align_corners'
if ('OutSize' in input_names and len(op.input('OutSize')) > 0) or (
'SizeTensor' in input_names
and len(op.input('SizeTensor')) > 0):
'SizeTensor' in input_names and
len(op.input('SizeTensor')) > 0):
node_list = list()
roi_node = self.make_constant_node(
self.get_name(op.type, 'roi'), onnx_pb.TensorProto.FLOAT,
......@@ -631,8 +628,7 @@ class PaddleOpMapper(object):
elif 'Scale' in input_names and len(op.input('Scale')) > 0:
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0],
op.input('Scale')[0]],
inputs=[op.input('X')[0], op.input('Scale')[0]],
outputs=op.output('Out'),
mode='linear',
coordinate_transformation_mode=coordinate_transformation_mode)
......@@ -641,8 +637,9 @@ class PaddleOpMapper(object):
scale = op.attr('scale')
if out_shape.count(-1) > 0:
scale_name = self.get_name(op.type, 'scale')
scale_node = self.make_constant_node(
scale_name, onnx_pb.TensorProto.FLOAT, [1, 1, scale, scale])
scale_node = self.make_constant_node(scale_name,
onnx_pb.TensorProto.FLOAT,
[1, 1, scale, scale])
roi_name = self.get_name(op.type, 'roi')
roi_node = self.make_constant_node(roi_name,
onnx_pb.TensorProto.FLOAT,
......@@ -667,16 +664,14 @@ class PaddleOpMapper(object):
if 'OutSize' in input_names and len(op.input('OutSize')) > 0:
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0], '',
op.input('OutSize')[0]],
inputs=[op.input('X')[0], '', op.input('OutSize')[0]],
outputs=op.output('Out'),
mode='nearest',
coordinate_transformation_mode=coordinate_transformation_mode)
elif 'Scale' in input_names and len(op.input('Scale')) > 0:
node = helper.make_node(
'Resize',
inputs=[op.input('X')[0],
op.input('Scale')[0]],
inputs=[op.input('X')[0], op.input('Scale')[0]],
outputs=op.output('Out'),
mode='nearest',
coordinate_transformation_mode=coordinate_transformation_mode)
......@@ -685,8 +680,9 @@ class PaddleOpMapper(object):
scale = op.attr('scale')
if out_shape.count(-1) > 0:
scale_name = self.get_name(op.type, 'scale')
scale_node = self.make_constant_node(
scale_name, onnx_pb.TensorProto.FLOAT, [1, 1, scale, scale])
scale_node = self.make_constant_node(scale_name,
onnx_pb.TensorProto.FLOAT,
[1, 1, scale, scale])
roi_name = self.get_name(op.type, 'roi')
roi_node = self.make_constant_node(roi_name,
onnx_pb.TensorProto.FLOAT,
......@@ -737,8 +733,7 @@ class PaddleOpMapper(object):
node1 = helper.make_node(
'Clip',
inputs=[name0, min_name, max_name],
outputs=[name1],
)
outputs=[name1], )
name2 = self.get_name(op.type, 'mul')
node2 = helper.make_node(
'Mul', inputs=[op.input('X')[0], name1], outputs=[name2])
......@@ -814,14 +809,6 @@ class PaddleOpMapper(object):
keepdims=0)
return node
def yolo_box(self, op, block):
from .paddle_custom_layer.yolo_box import yolo_box
return yolo_box(op, block)
def multiclass_nms(self, op, block):
from .paddle_custom_layer.multiclass_nms import multiclass_nms
return multiclass_nms(op, block)
def reciprocal(self, op, block):
inputs = op.input(op.input_names[0])
outputs = op.output(op.output_names[0])
......
This diff is collapsed.
......@@ -486,8 +486,8 @@ class TFOpMapperNHWC(OpMapper):
attr = {"shape": shape}
self.add_omit_nodes(param.layer_name, node.layer_name)
else:
assert len(param.out_shapes[0]
) == 1, "Unexpected situation of shape parameter"
assert len(param.out_shapes[
0]) == 1, "Unexpected situation of shape parameter"
attr = {"shape": [-1]}
node.fluid_code.add_layer(
"reshape",
......@@ -577,8 +577,8 @@ class TFOpMapperNHWC(OpMapper):
def ConcatV2(self, node):
inputs = [
self.graph.get_node(name, copy=True)
for name in node.layer.input[:-1]
self.graph.get_node(
name, copy=True) for name in node.layer.input[:-1]
]
axis = self.graph.get_node(node.layer.input[-1], copy=True)
assert axis.layer_type == "Const"
......@@ -608,7 +608,8 @@ class TFOpMapperNHWC(OpMapper):
def Pack(self, node):
inputs = [
self.graph.get_node(name, copy=True) for name in node.layer.input
self.graph.get_node(
name, copy=True) for name in node.layer.input
]
axis = node.get_attr("axis")
attr = {"axis": axis}
......@@ -949,8 +950,8 @@ class TFOpMapperNHWC(OpMapper):
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
else:
resize_shape = self.decoder.infer_shape_tensor(
resize_shape, node.out_shapes[0])
resize_shape = self.decoder.infer_shape_tensor(resize_shape,
node.out_shapes[0])
align_corners = node.get_attr("align_corners")
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer(
......@@ -969,8 +970,8 @@ class TFOpMapperNHWC(OpMapper):
if resize_shape.layer_type == "Const":
resize_shape = resize_shape.value.tolist()
else:
resize_shape = self.decoder.infer_shape_tensor(
resize_shape, node.out_shapes[0])
resize_shape = self.decoder.infer_shape_tensor(resize_shape,
node.out_shapes[0])
align_corners = node.get_attr("align_corners")
attr = {"perm": [0, 3, 1, 2]}
node.fluid_code.add_layer(
......
......@@ -41,10 +41,11 @@ class CaffeOptimizer(object):
if is_delete_node:
parent_node.fluid_code.clear()
node.fluid_code.clear()
node.fluid_code.add_layer("batch_norm",
inputs=input,
output=node,
param_attr=parent_param_attr)
node.fluid_code.add_layer(
"batch_norm",
inputs=input,
output=node,
param_attr=parent_param_attr)
def merge_op_activation(self):
for node_name in self.graph.topo_sort:
......@@ -62,7 +63,8 @@ class CaffeOptimizer(object):
if is_delete_node:
parent_node.fluid_code.clear()
node.fluid_code.clear()
node.fluid_code.add_layer(op,
inputs=input,
output=node,
param_attr=parent_param_attr)
node.fluid_code.add_layer(
op,
inputs=input,
output=node,
param_attr=parent_param_attr)
......@@ -554,10 +554,11 @@ class TFOptimizer(object):
node.fluid_code.layers[0].param_attr["shape"] = shape
node.fluid_code.layers[0].output = "nhwc_" + name
attr = {"perm": [0, 2, 3, 1]}
node.fluid_code.add_layer("transpose",
inputs="nhwc_" + name,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"transpose",
inputs="nhwc_" + name,
output=node,
param_attr=attr)
self.graph.input_nodes[i] = "nhwc_" + name
for i, name in enumerate(self.graph.output_nodes):
node = self.graph.get_node(name)
......@@ -767,8 +768,8 @@ class TFOptimizer(object):
is_prelu = False
continue
if len(in_nodes0[0].outputs) != 1 or len(
in_nodes0[1].outputs) != 1:
if len(in_nodes0[0].outputs) != 1 or len(in_nodes0[1]
.outputs) != 1:
is_prelu = False
continue
......@@ -777,8 +778,8 @@ class TFOptimizer(object):
self.graph.get_node(in_name)
for in_name in in_nodes0[1].inputs
]
if in_nodes2[1].layer_type != "Const" or numpy.fabs(
in_nodes2[1].value - 0.5) > 1e-06:
if in_nodes2[1].layer_type != "Const" or numpy.fabs(in_nodes2[
1].value - 0.5) > 1e-06:
is_prelu = False
continue
if in_nodes2[0].layer_type != "Mul":
......@@ -787,8 +788,8 @@ class TFOptimizer(object):
if exist_act(in_nodes2[0]):
is_prelu = False
continue
if len(in_nodes2[1].outputs) != 1 or len(
in_nodes2[0].outputs) != 1:
if len(in_nodes2[1].outputs) != 1 or len(in_nodes2[0]
.outputs) != 1:
is_prelu = False
continue
......@@ -803,8 +804,8 @@ class TFOptimizer(object):
if exist_act(in_nodes3[1]):
is_prelu = False
continue
if len(in_nodes3[0].outputs) != 1 or len(
in_nodes3[1].outputs) != 1:
if len(in_nodes3[0].outputs) != 1 or len(in_nodes3[1]
.outputs) != 1:
is_prelu = False
continue
......@@ -856,12 +857,12 @@ class TFOptimizer(object):
mode = "element"
elif len(in_nodes3[0].value.shape) == 0:
mode = "all"
elif len(in_nodes3[0].value.shape
) == 1 and in_nodes3[0].value.shape[0] == 1:
elif len(in_nodes3[0].value.shape) == 1 and in_nodes3[
0].value.shape[0] == 1:
mode = "all"
elif len(in_shape) == 4 and len(
in_nodes3[0].value.shape
) == 1 and in_nodes3[0].value.shape[0] == in_shape[-1]:
elif len(in_shape) == 4 and len(in_nodes3[
0].value.shape) == 1 and in_nodes3[0].value.shape[
0] == in_shape[-1]:
mode = "channel"
weight = self.op_mapper.weights[in_nodes3[0].layer_name]
weight = numpy.expand_dims(weight, 0)
......@@ -916,14 +917,15 @@ class TFOptimizer(object):
self.graph.get_node(in_name) for in_name in node.inputs
]
if in_nodes0[0].layer_type != "Mul" or in_nodes0[
1].layer_type != "Const" or in_nodes0[1].value.size != 1:
1].layer_type != "Const" or in_nodes0[
1].value.size != 1:
is_scale = False
continue
if exist_act(in_nodes0[0]):
is_scale = False
continue
if len(in_nodes0[0].outputs) != 1 or len(
in_nodes0[1].outputs) != 1:
if len(in_nodes0[0].outputs) != 1 or len(in_nodes0[1]
.outputs) != 1:
is_scale = False
continue
......@@ -939,8 +941,8 @@ class TFOptimizer(object):
if exist_act(in_nodes1[1]):
is_scale = False
continue
if len(in_nodes1[0].outputs) != 1 or len(
in_nodes1[1].outputs) != 1:
if len(in_nodes1[0].outputs) != 1 or len(in_nodes1[1]
.outputs) != 1:
is_scale = False
continue
......@@ -962,8 +964,8 @@ class TFOptimizer(object):
scale = 1.0 / in_nodes2[1].value * in_nodes1[0].value
act = None
if node.fluid_code.layers[0].param_attr is not None:
act = node.fluid_code.layers[0].param_attr.get(
"act", None)
act = node.fluid_code.layers[0].param_attr.get("act",
None)
node.fluid_code.clear()
attr = {
......@@ -972,10 +974,8 @@ class TFOptimizer(object):
"bias_after_scale": True,
"act": act
}
node.fluid_code.add_layer("scale",
inputs=in_node,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"scale", inputs=in_node, output=node, param_attr=attr)
del self.graph.node_map[in_nodes0[0].layer_name]
del self.graph.node_map[in_nodes0[1].layer_name]
......@@ -1004,17 +1004,17 @@ class TFOptimizer(object):
if exist_act(in_nodes0[0]):
is_affine_channel = False
continue
if len(in_nodes0[0].outputs) != 1 or len(
in_nodes0[1].outputs) != 1:
if len(in_nodes0[0].outputs) != 1 or len(in_nodes0[1]
.outputs) != 1:
is_affine_channel = False
continue
in_nodes1 = [
self.graph.get_node(in_name)
for in_name in in_nodes0[0].inputs
]
if len(in_nodes1[0].out_shapes[0]
) != 4 or in_nodes1[1].layer_type != "Const" or len(
in_nodes1[1].value.shape) != 3:
if len(in_nodes1[0].out_shapes[0]) != 4 or in_nodes1[
1].layer_type != "Const" or len(in_nodes1[1]
.value.shape) != 3:
is_affine_channel = False
continue
if len(in_nodes1[1].outputs) != 1:
......@@ -1037,8 +1037,8 @@ class TFOptimizer(object):
node.layer_type = "AffineChannel"
node.inputs = [in_node.layer_name]
scale = 1.0 / in_nodes0[1].value.flatten()
bias = in_nodes1[1].value.flatten(
) / in_nodes0[1].value.flatten()
bias = in_nodes1[1].value.flatten() / in_nodes0[
1].value.flatten()
if not bias_add:
bias *= -1.0
self.op_mapper.weights[node.layer_name + "_scale"] = scale
......@@ -1046,8 +1046,8 @@ class TFOptimizer(object):
act = None
if node.fluid_code.layers[0].param_attr is not None:
act = node.fluid_code.layers[0].param_attr.get(
"act", None)
act = node.fluid_code.layers[0].param_attr.get("act",
None)
node.fluid_code.clear()
attr = {
......@@ -1055,29 +1055,32 @@ class TFOptimizer(object):
"shape": [channel],
"name": string(node.layer_name + "_scale")
}
node.fluid_code.add_layer("create_parameter",
inputs=None,
output=node.layer_name + "_scale",
param_attr=attr)
node.fluid_code.add_layer(
"create_parameter",
inputs=None,
output=node.layer_name + "_scale",
param_attr=attr)
attr = {
"dtype": string(scale.dtype),
"shape": [channel],
"name": string(node.layer_name + "_bias")
}
node.fluid_code.add_layer("create_parameter",
inputs=None,
output=node.layer_name + "_bias",
param_attr=attr)
node.fluid_code.add_layer(
"create_parameter",
inputs=None,
output=node.layer_name + "_bias",
param_attr=attr)
inputs = {
"x": in_node,
"scale": node.layer_name + "_scale",
"bias": node.layer_name + "_bias"
}
attr = {"act": act}
node.fluid_code.add_layer("affine_channel",
inputs=inputs,
output=node,
param_attr=attr)
node.fluid_code.add_layer(
"affine_channel",
inputs=inputs,
output=node,
param_attr=attr)
del self.graph.node_map[in_nodes0[0].layer_name]
del self.graph.node_map[in_nodes0[1].layer_name]
......