Commit cb974168 authored by SunAhong1993

modify caffe static

Parent 09816290
...@@ -1092,11 +1092,7 @@ class CaffeOpMapper(OpMapper):
             **layer_attrs)
 
     def ReLU6(self, node):
-        if "relu6" in self.nn_name2id:
-            self.nn_name2id["relu6"] += 1
-        else:
-            self.nn_name2id["relu6"] = 0
-        relu6_name = "relu6" + str(self.nn_name2id["relu6"])
+        relu6_name = name_generator("relu6", self.nn_name2id)
         output_name = node.layer_name
         layer_outputs = [relu6_name, output_name]
         assert len(
...@@ -1124,7 +1120,7 @@ class CaffeOpMapper(OpMapper):
             "pooled_width": params.pooled_w,
             "spatial_scale": params.spatial_scale}
         self.paddle_graph.add_layer(
-            "custom_layer:ROIPooling",
+            "custom_layer:roipooling",
             inputs=inputs_dict,
             outputs=layer_outputs,
             **layer_attrs)
......
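The refactor above swaps the inline counter bookkeeping for a shared name_generator helper. A minimal sketch of that helper, reconstructed from the removed lines (the real x2paddle utility may differ in detail):

def name_generator(nick_name, nn_name2id):
    # Keep one counter per prefix so every generated layer name is unique:
    # "relu6" -> "relu60", "relu61", ... exactly like the removed inline code.
    if nick_name in nn_name2id:
        nn_name2id[nick_name] += 1
    else:
        nn_name2id[nick_name] = 0
    return nick_name + str(nn_name2id[nick_name])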
__init__.py, new version (added):

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .detectionoutput import detectionoutput
from .normalize import normalize
from .priorbox import priorbox
from .roipooling import roipooling
from .select import select

__init__.py, old version (removed):

from .register import get_registered_layers

#custom layer import begins
from . import roipooling
from . import priorbox
from . import permute
from . import detectionoutput
from . import normalize
from . import select
from . import shufflechannel
from . import convolutiondepthwise
from . import axpy
from . import upsample
from . import relu6
#custom layer import ends

custom_layers = get_registered_layers()


def set_args(f, params):
    """ set args for function 'f' using the parameters in node.layer.param
Args:
f (function): a python function object
params (object): a object contains attributes needed by f's arguments
Returns:
arg_names (list): a list of argument names
kwargs (dict): a dict contains needed arguments
"""
argc = f.__code__.co_argcount
arg_list = f.__code__.co_varnames[0:argc]
kwargs = {}
for arg_name in arg_list:
if hasattr(params, arg_name) and params is not None:
kwargs[arg_name] = getattr(params, arg_name)
return arg_list, kwargs
def has_layer(layer_type):
""" test whether this layer exists in custom layer
"""
return layer_type in custom_layers
def get_params(layer, layer_type):
import re
if layer_type.lower() == "deconvolution" or layer_type.lower(
) == "convolutiondepthwise":
param_name = '_'.join(('convolution', 'param'))
elif layer_type.lower() == "normalize":
param_name = '_'.join(('norm', 'param'))
elif len(layer_type) - len(re.sub("[A-Z]", "", layer_type)) >= 2:
s = ''
tmp_name = ''
for i, ch in enumerate(layer_type):
if i == 0:
s += ch.lower()
continue
elif ch.isupper() and layer_type[i - 1].islower():
tmp_name += (s + '_')
s = ''
s += ch.lower()
tmp_name += s
param_name = '_'.join((tmp_name, 'param'))
else:
param_name = '_'.join((layer_type.lower(), 'param'))
return getattr(layer, param_name, None)
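The branches above derive the prototxt parameter-field name from the layer type. A self-contained check of that mapping; _param_field is an illustrative stand-in, not an x2paddle function:

import re

def _param_field(layer_type):
    # Mirrors get_params' name derivation.
    if layer_type.lower() in ("deconvolution", "convolutiondepthwise"):
        return "convolution_param"
    if layer_type.lower() == "normalize":
        return "norm_param"
    if len(layer_type) - len(re.sub("[A-Z]", "", layer_type)) >= 2:
        # Two or more capitals: split CamelCase into snake_case.
        return re.sub(r"(?<=[a-z])(?=[A-Z])", "_", layer_type).lower() + "_param"
    return layer_type.lower() + "_param"

assert _param_field("ShuffleChannel") == "shuffle_channel_param"
assert _param_field("PriorBox") == "prior_box_param"
assert _param_field("Upsample") == "upsample_param"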
def compute_output_shape(node):
""" compute the output shape of custom layer
"""
layer_type = node.layer_type
assert layer_type in custom_layers, "layer[%s] not exist in custom layers" % (
layer_type)
shape_func = custom_layers[layer_type]['shape']
layer = node.layer
params = get_params(layer, layer_type)
arg_names, kwargs = set_args(shape_func, params)
input_shape = node.input_shape
return shape_func(input_shape, **kwargs)
def make_custom_layer(node):
""" get the code which implement the custom layer function
"""
layer_type = node.layer_type
assert layer_type in custom_layers, "layer[%s] not exist in custom layers" % (
layer_type)
layer_func = custom_layers[layer_type]['layer']
import inspect
return inspect.getsource(layer_func), layer_func
def deal_weights(node, data=None):
""" deal the weights of the custom layer
"""
layer_type = node.layer_type
weights_func = custom_layers[layer_type]['weights']
name = node.layer_name
return weights_func(name, data)
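Together these helpers form the lookup path the static Caffe mapper uses for custom layers. A hypothetical end-to-end walk-through; SimpleNamespace stands in for x2paddle's real node class, and register comes from the register module listed further down:

from types import SimpleNamespace
# from .register import register  (needed alongside the imports above)

def identity_shape(input_shape):
    return input_shape

def identity_layer(inputs, input_shape=None, name=None):
    return inputs[0]

def identity_weights(name, data=None):
    return []

register(kind='Identity', shape=identity_shape,
         layer=identity_layer, weights=identity_weights)

# A stand-in node: no identity_param message exists, so get_params returns
# None and set_args passes no keyword arguments to the shape function.
node = SimpleNamespace(layer_type='Identity', layer=SimpleNamespace(),
                       layer_name='identity_0', input_shape=[[1, 3, 224, 224]])
assert has_layer('Identity')
assert compute_output_shape(node) == [[1, 3, 224, 224]]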
from .register import register
from x2paddle.core.util import *
def axpy_shape(input_shapes):
assert len(input_shapes) == 3, "not valid input shape for axpy layer"
assert len(input_shapes[0]) == len(input_shapes[1]), 'should have same dims'
output_shape = input_shapes[1]
assert (input_shapes[2] == output_shape),\
"shape not consistent for axpy[%s <--> %s]" \
% (str(output_shape), str(input_shapes[2]))
return [output_shape]
def axpy_layer(inputs, input_shape=None, name=None):
alpha = inputs[0]
x = inputs[1]
y = inputs[2]
out = fluid.layers.elementwise_mul(x, alpha, axis=0)
out = fluid.layers.elementwise_add(out, y, name=name)
return out
def axpy_weights(name, data=None):
weights_name = []
return weights_name
register(kind='Axpy', shape=axpy_shape, layer=axpy_layer, weights=axpy_weights)
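Axpy fuses a per-channel scale with an addition, out = alpha * x + y. A numpy sketch of the broadcast that elementwise_mul(..., axis=0) performs, assuming alpha arrives as [N, C, 1, 1] as in Caffe's Axpy layer:

import numpy as np

alpha = np.random.rand(2, 3, 1, 1).astype("float32")  # per-channel scale
x = np.random.rand(2, 3, 8, 8).astype("float32")
y = np.random.rand(2, 3, 8, 8).astype("float32")
out = alpha * x + y          # alpha broadcasts over H and W
assert out.shape == (2, 3, 8, 8)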
from .register import register
from x2paddle.core.util import *
import numbers
def convolutiondepthwise_shape(input_shape,
num_output=None,
pad=None,
kernel_size=None,
stride=None,
dilation=None,
pad_h=None,
pad_w=None,
kernel_h=None,
kernel_w=None,
stride_h=None,
stride_w=None):
[k_h, k_w] = [1, 1]
if isinstance(kernel_size, numbers.Number):
[k_h, k_w] = [kernel_size] * 2
elif len(kernel_size) > 0:
k_h = kernel_h if kernel_h > 0 else kernel_size[0]
k_w = kernel_w if kernel_w > 0 else kernel_size[len(kernel_size) - 1]
elif kernel_h > 0 or kernel_w > 0:
k_h = kernel_h
k_w = kernel_w
[s_h, s_w] = [1, 1]
if isinstance(stride, numbers.Number):
[s_h, s_w] = [stride] * 2
elif len(stride) > 0:
s_h = stride_h if stride_h > 0 else stride[0]
s_w = stride_w if stride_w > 0 else stride[len(stride) - 1]
elif stride_h > 0 or stride_w > 0:
s_h = stride_h
s_w = stride_w
[p_h, p_w] = [0, 0]
if isinstance(pad, numbers.Number):
[p_h, p_w] = [pad] * 2
elif len(pad) > 0:
p_h = pad_h if pad_h > 0 else pad[0]
p_w = pad_w if pad_w > 0 else pad[len(pad) - 1]
elif pad_h > 0 or pad_w > 0:
p_h = pad_h
p_w = pad_w
dila_len = len(dilation)
dila_h = 1
dila_w = 1
if dila_len == 2:
dila_h = dilation[0]
dila_w = dilation[1]
elif dila_len == 1:
dila_h = dila_w = dilation[0]
else:
assert dila_len == 0, "invalid length[%s] of dilation in convolution" % (
dila_len)
    i_h = input_shape[0][2]
    i_w = input_shape[0][3]
o_h = (i_h + 2 * p_h - (dila_h * (k_h - 1) + 1)) / float(s_h) + 1
o_w = (i_w + 2 * p_w - (dila_w * (k_w - 1) + 1)) / float(s_w) + 1
import math
o_h = int(math.floor(o_h))
o_w = int(math.floor(o_w))
c = num_output if num_output is not None else input_shape[0][1]
return [[input_shape[0][0], c, o_h, o_w]]
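As a quick sanity check of the spatial-size formula above, a 224-pixel dimension with kernel 3, stride 2, padding 1 and dilation 1 comes out to 112:

import math

i, k, s, p, d = 224, 3, 2, 1, 1
o = math.floor((i + 2 * p - (d * (k - 1) + 1)) / float(s) + 1)
assert o == 112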
def convolutiondepthwise_layer(inputs,
num_output=None,
pad=None,
kernel_size=None,
stride=None,
dilation=None,
pad_h=None,
pad_w=None,
kernel_h=None,
kernel_w=None,
stride_h=None,
stride_w=None,
input_shape=None,
name=None):
import numbers
[k_h, k_w] = [1, 1]
if isinstance(kernel_size, numbers.Number):
[k_h, k_w] = [kernel_size] * 2
elif len(kernel_size) > 0:
k_h = kernel_h if kernel_h > 0 else kernel_size[0]
k_w = kernel_w if kernel_w > 0 else kernel_size[len(kernel_size) - 1]
elif kernel_h > 0 or kernel_w > 0:
k_h = kernel_h
k_w = kernel_w
[s_h, s_w] = [1, 1]
if isinstance(stride, numbers.Number):
[s_h, s_w] = [stride] * 2
elif len(stride) > 0:
s_h = stride_h if stride_h > 0 else stride[0]
s_w = stride_w if stride_w > 0 else stride[len(stride) - 1]
elif stride_h > 0 or stride_w > 0:
s_h = stride_h
s_w = stride_w
[p_h, p_w] = [0, 0]
if isinstance(pad, numbers.Number):
[p_h, p_w] = [pad] * 2
elif len(pad) > 0:
p_h = pad_h if pad_h > 0 else pad[0]
p_w = pad_w if pad_w > 0 else pad[len(pad) - 1]
elif pad_h > 0 or pad_w > 0:
p_h = pad_h
p_w = pad_w
input = inputs[0]
dila_len = len(dilation)
dila_h = 1
dila_w = 1
if dila_len == 2:
dila_h = dilation[0]
dila_w = dilation[1]
elif dila_len == 1:
dila_h = dila_w = dilation[0]
else:
assert dila_len == 0, "invalid length[%s] of dilation in convolution" % (
dila_len)
c_in = input_shape[0][1]
c_out = num_output if num_output is not None else input_shape[0][1]
group = int(c_in / (c_in / c_out)) if c_in > c_out else int(c_in /
(c_out / c_in))
out = fluid.layers.conv2d(
input,
dilation=[dila_h, dila_w],
filter_size=[k_h, k_w],
stride=[s_h, s_w],
padding=[p_h, p_w],
groups=group,
num_filters=c_out,
param_attr=name + '_weights',
bias_attr=name + '_bias',
name=name)
return out
def convolutiondepthwise_weights(name, data=None):
weights_name = []
weights_name.append(name + '_weights')
weights_name.append(name + '_bias')
return weights_name
register(
kind='ConvolutionDepthwise',
shape=convolutiondepthwise_shape,
layer=convolutiondepthwise_layer,
weights=convolutiondepthwise_weights)
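The group count is what makes this a depthwise (grouped) convolution: with equal channel counts every channel gets its own filter, and otherwise channels are grouped by their ratio. The helper below just re-evaluates the expression from convolutiondepthwise_layer for a few illustrative channel counts:

def depthwise_group(c_in, c_out):
    # Same expression as in convolutiondepthwise_layer above.
    return int(c_in / (c_in / c_out)) if c_in > c_out else int(c_in /
                                                               (c_out / c_in))

assert depthwise_group(32, 32) == 32  # classic depthwise: one filter per channel
assert depthwise_group(64, 32) == 32  # channels halved   -> 32 groups
assert depthwise_group(32, 64) == 16  # channels doubled  -> 16 groups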
detectionoutput.py, old version (removed):

from .register import register
from x2paddle.core.util import *


def detectionoutput_shape(input_shape):
    return [[-1, 6]]


def detectionoutput_layer(inputs,
                          nms_param=None,
                          background_label_id=0,
                          share_location=True,
                          keep_top_k=100,
                          confidence_threshold=0.1,
                          input_shape=None,
                          name=None):
    if nms_param is None:
        nms_param = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
    mbox_conf_flatten = inputs[1]
    mbox_priorbox = inputs[2]
    mbox_priorbox_list = fluid.layers.split(mbox_priorbox, 2, dim=1)
    pb = mbox_priorbox_list[0]
    pbv = mbox_priorbox_list[1]
    pb = fluid.layers.reshape(x=pb, shape=[-1, 4])
    pbv = fluid.layers.reshape(x=pbv, shape=[-1, 4])
    mbox_loc = inputs[0]
    mbox_loc = fluid.layers.reshape(x=mbox_loc, shape=[-1, pb.shape[0], 4])
    mbox_conf_flatten = fluid.layers.reshape(
        x=mbox_conf_flatten, shape=[0, pb.shape[0], -1])

    default = {"nms_threshold": 0.3, "top_k": 10, "eta": 1.0}
    fields = ['eta', 'top_k', 'nms_threshold']
    for f in default.keys():
        if f not in nms_param:
            nms_param[f] = default[f]
    out = fluid.layers.detection_output(
        scores=mbox_conf_flatten,
        loc=mbox_loc,
        prior_box=pb,
        prior_box_var=pbv,
        background_label=background_label_id,
        nms_threshold=nms_param["nms_threshold"],
        nms_top_k=nms_param["top_k"],
        keep_top_k=keep_top_k,
        score_threshold=confidence_threshold,
        nms_eta=nms_param["eta"])
    return out


def detectionoutput_weights(name, data=None):
    weights_name = []
    return weights_name


register(
    kind='DetectionOutput',
    shape=detectionoutput_shape,
    layer=detectionoutput_layer,
    weights=detectionoutput_weights)

detectionoutput.py, new version (added):

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid as fluid


def detectionoutput(x0,
                    x1,
                    x2,
                    nms_threshold,
                    nms_top_k,
                    keep_top_k,
                    nms_eta,
                    score_threshold,
                    background_label):
    detection_output_layer_attrs = {
        "background_label": background_label,
        "nms_threshold": nms_threshold,
        "nms_top_k": nms_top_k,
        "keep_top_k": keep_top_k,
        "score_threshold": score_threshold,
        "nms_eta": nms_eta}
    priorbox_list = paddle.split(x2, num_or_sections=2, axis=1)
    pb = priorbox_list[0]
    pbv = priorbox_list[1]
    pb = paddle.reshape(x=pb, shape=[-1, 4])
    pbv = paddle.reshape(x=pbv, shape=[-1, 4])
    pb_dim = fluid.layers.shape(pb)[0]
    loc = paddle.reshape(x0, shape=[-1, pb_dim, 4])
    conf_flatten = paddle.reshape(x1, shape=[0, pb_dim, -1])
    out = fluid.layers.detection_output(loc=loc,
                                        scores=conf_flatten,
                                        prior_box=pb,
                                        prior_box_var=pbv,
                                        **detection_output_layer_attrs)
    return out
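Both the old and new implementations unpack the PriorBox input the same way: the third input stacks boxes and variances along axis 1 (shape [1, 2, 4 * num_priors], as the PriorBox shape function later in this commit shows), so one split plus reshape yields a [num_priors, 4] tensor for each. A numpy sketch with 4 priors, illustrative only:

import numpy as np

mbox_priorbox = np.zeros((1, 2, 16), dtype="float32")  # 4 priors
pb, pbv = np.split(mbox_priorbox, 2, axis=1)           # two [1, 1, 16] halves
pb = pb.reshape(-1, 4)                                 # [4, 4] prior boxes
pbv = pbv.reshape(-1, 4)                               # [4, 4] variances
assert pb.shape == pbv.shape == (4, 4)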
normalize.py, old version (removed):

from .register import register
from x2paddle.core.util import *


def normalize_shape(input_shape):
    return input_shape


def normalize_layer(inputs,
                    across_spatial=None,
                    channel_shared=None,
                    input_shape=None,
                    name=None):
    assert across_spatial == False, "Only support across_spatial == False for Normalize"
    input = inputs[0]
    l2_norm = fluid.layers.l2_normalize(input, axis=1, name=name + '_l2')
    scale_param = fluid.layers.create_parameter(
        shape=[1] if channel_shared else [1, 1, 1, input_shape[0][1]],
        dtype=input.dtype,
        attr=fluid.ParamAttr(name=name + '_scale'))
    scale_param = fluid.layers.reshape(x=scale_param, \
        shape=[1] if channel_shared else [input_shape[0][1]])
    out = fluid.layers.elementwise_mul(
        x=l2_norm, y=scale_param, axis=-1 if channel_shared else 1)
    return out


def normalize_weights(name, data=None):
    weights_name = [name + '_scale']
    return weights_name


register(
    kind='Normalize',
    shape=normalize_shape,
    layer=normalize_layer,
    weights=normalize_weights)

normalize.py, new version (added):

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid as fluid


def normalize(x, axis, param_name, param_shape, param_dtype):
    # L2-normalize over the channel axis (p=2, axis=1), mirroring the removed
    # normalize_layer; paddle.nn.functional.normalize is assumed here, since
    # fluid.layers.prior_box takes no (x, p, axis) arguments.
    l2 = paddle.nn.functional.normalize(x=x, p=2, axis=1)
    param = paddle.static.nn.create_parameter(shape=param_shape,
                                              dtype=string(param_dtype),
                                              name=string(param_name))
    out = paddle.multiply(x=l2, y=param, axis=axis)
    return out
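Old and new code both implement SSD-style Normalize: L2-normalize across the channel axis, then multiply by a learned per-channel scale. A numpy equivalence sketch, assuming x is [N, C, H, W] and the scale is [C]:

import numpy as np

x = np.random.rand(1, 3, 2, 2).astype("float32")
scale = np.array([1.0, 2.0, 3.0], dtype="float32")
l2 = x / np.sqrt((x ** 2).sum(axis=1, keepdims=True))  # normalize over C
out = l2 * scale[None, :, None, None]                  # per-channel scale
assert out.shape == x.shape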
from .register import register
from x2paddle.core.util import *
def permute_shape(input_shape, order=None):
inshape = input_shape[0]
output_shape = []
for ii in order:
        assert ii < len(inshape), "invalid order for permute[%s]" % (str(order))
output_shape.append(inshape[ii])
return [output_shape]
def permute_layer(inputs, order=None, input_shape=None, name=None):
input = inputs[0]
order = list(order)
out = fluid.layers.transpose(input, perm=order, name=name)
return out
def permute_weights(name, data=None):
weights_name = []
return weights_name
register(
kind='Permute',
shape=permute_shape,
layer=permute_layer,
weights=permute_weights)
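Permute maps directly onto transpose; order = [0, 2, 3, 1], for instance, turns NCHW into NHWC. A quick check against permute_shape above:

assert permute_shape([[1, 3, 224, 224]], order=[0, 2, 3, 1]) == [[1, 224, 224, 3]]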
priorbox.py, old version (removed):

from .register import register
from x2paddle.core.util import *


def priorbox_shape(input_shape, max_size=None, aspect_ratio=None):
    fc_shape = input_shape[0]
    N = 1
    if max_size is not None:
        N += 1
    if aspect_ratio is not None:
        N += 2 * len(aspect_ratio)
    N_bbx = fc_shape[2] * fc_shape[3] * N
    output_shape = [1, 2, 4 * N_bbx]
    return [output_shape]


def priorbox_layer(inputs,
                   step=0.0,
                   offset=0.5,
                   min_size=None,
                   max_size=[],
                   aspect_ratio=[1.0],
                   flip=False,
                   clip=False,
                   variance=[0.1, 0.1, 0.2, 0.2],
                   input_shape=None,
                   name=None):
    input = inputs[0]
    image = inputs[1]
    steps = tuple(step) if type(step) is list or type(step) is tuple \
        else (step, step)

    box, variance_ = fluid.layers.prior_box(
        input,
        image,
        min_sizes=min_size,
        max_sizes=max_size,
        aspect_ratios=aspect_ratio,
        variance=variance,
        flip=flip,
        clip=clip,
        steps=steps,
        offset=offset,
        name=name,
        min_max_aspect_ratios_order=True)
    box = fluid.layers.reshape(box, [1, 1, -1])
    variance_ = fluid.layers.reshape(variance_, [1, 1, -1])
    out = fluid.layers.concat([box, variance_], axis=1)
    return out


def priorbox_weights(name, data=None):
    weights_name = []
    return weights_name


register(
    kind='PriorBox',
    shape=priorbox_shape,
    layer=priorbox_layer,
    weights=priorbox_weights)

priorbox.py, new version (added):

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid as fluid


def priorbox(x0,
             x1,
             min_sizes,
             max_sizes,
             aspect_ratios,
             variance,
             flip,
             clip,
             steps,
             offset,
             min_max_aspect_ratios_order):
    priorbox_layer_attrs = {
        "min_sizes": min_sizes,
        "max_sizes": max_sizes,
        "aspect_ratios": aspect_ratios,
        "variance": variance,
        "flip": flip,
        "clip": clip,
        "steps": steps,
        "offset": offset,
        "min_max_aspect_ratios_order": min_max_aspect_ratios_order}
    box, var = fluid.layers.prior_box(input=x0,
                                      image=x1,
                                      **priorbox_layer_attrs)
    box = paddle.reshape(x=box, shape=[1, 1, -1])
    var = paddle.reshape(x=var, shape=[1, 1, -1])
    out = paddle.concat(x=[box, var], axis=1)
    return out
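priorbox_shape counts boxes per feature-map cell: one for the min size, one more when max_size is set, and two for every extra aspect ratio (the ratio and its flip). A 10x10 feature map with one max size and two aspect ratios therefore yields N = 1 + 1 + 2 * 2 = 6 boxes per cell, 600 in total:

shape = priorbox_shape([[1, 512, 10, 10]], max_size=[300], aspect_ratio=[2.0, 3.0])
assert shape == [[1, 2, 4 * 600]]  # 10 * 10 * 6 boxes, 4 coordinates each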
""" this module provides 'register' for registering customized layers
"""
g_custom_layers = {}
def register(kind, shape, layer, weights):
""" register a custom layer or a list of custom layers
Args:
@kind (str or list): type name of the layer
@shape (function): a function to generate the shape of layer's output
@layer (function): a function to generate the paddle code of layer
@weights (function): a function to deal with weights data
Returns:
None
"""
assert type(shape).__name__ == 'function', 'shape should be a function'
assert type(layer).__name__ == 'function', 'layer should be a function'
if type(kind) is str:
kind = [kind]
else:
assert type(
kind) is list, 'invalid param "kind" for register, not a list or str'
for k in kind:
assert type(
k) is str, 'invalid param "kind" for register, not a list of str'
assert k not in g_custom_layers, 'this type[%s] has already been registered' % (
k)
g_custom_layers[k] = {
'shape': shape,
'layer': layer,
'weights': weights
}
def get_registered_layers():
return g_custom_layers
from .register import register
from x2paddle.core.util import *
def relu6_shape(input_shape):
return input_shape
def relu6_layer(inputs, input_shape=None, name=None):
input = inputs[0]
out = fluid.layers.relu6(x=input)
return out
def relu6_weights(name, data=None):
weights_name = []
return weights_name
register(
kind='ReLU6', shape=relu6_shape, layer=relu6_layer, weights=relu6_weights)
roipooling.py, old version (removed):

from .register import register
from x2paddle.core.util import *


def roipooling_shape(input_shape, pooled_w=None, pooled_h=None):
    base_fea_shape = input_shape[0]
    rois_shape = input_shape[1]
    output_shape = base_fea_shape
    output_shape[0] = rois_shape[0]
    output_shape[2] = pooled_h
    output_shape[3] = pooled_w
    return [output_shape]


def roipooling_layer(inputs,
                     pooled_w=None,
                     pooled_h=None,
                     spatial_scale=None,
                     input_shape=None,
                     name=None):
    input = inputs[0]
    roi = inputs[1]
    roi = fluid.layers.slice(roi, axes=[1], starts=[1], ends=[5])
    out = fluid.layers.roi_pool(
        input,
        roi,
        pooled_height=pooled_h,
        pooled_width=pooled_w,
        spatial_scale=spatial_scale)
    return out


def roipooling_weights(name, data=None):
    weights_name = []
    return weights_name


register(
    kind='ROIPooling',
    shape=roipooling_shape,
    layer=roipooling_layer,
    weights=roipooling_weights)

roipooling.py, new version (added):

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid as fluid


def roipooling(x0,
               x1,
               pooled_height,
               pooled_width,
               spatial_scale):
    roipooling_layer_attrs = {
        "pooled_height": pooled_height,
        "pooled_width": pooled_width,
        "spatial_scale": spatial_scale}
    slice_x1 = paddle.slice(input=x1, axes=[1],
                            starts=[1], ends=[5])
    out = fluid.layers.roi_pool(input=x0,
                                rois=slice_x1,
                                **roipooling_layer_attrs)
    return out
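The slice on the second input drops Caffe's leading batch index from each ROI record [batch_idx, x1, y1, x2, y2]; fluid.layers.roi_pool expects plain [x1, y1, x2, y2] boxes. Illustrative:

import numpy as np

rois = np.array([[0, 4, 4, 60, 60],
                 [0, 10, 10, 80, 80]], dtype="float32")  # [batch_idx, x1, y1, x2, y2]
boxes = rois[:, 1:5]                                     # what the slice keeps
assert boxes.shape == (2, 4)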
select.py, old version (removed):

from .register import register
from x2paddle.core.util import *


def select_shape(input_shape, axis=None, slice_point=None):
    inshape = input_shape[0]
    start = slice_point[0]
    if len(slice_point) == 2:
        end = slice_point[1]
    else:
        end = inshape[axis]
    assert end > start, "invalid slice_point with [start:%d, end:%d]" % (start,
                                                                         end)
    output_shape = inshape
    output_shape[axis] = end - start
    return [output_shape]


def select_layer(inputs,
                 axis=None,
                 slice_point=None,
                 input_shape=None,
                 name=None):
    input = inputs[0]
    maxint32 = 2147483647
    slice_point = [0] + slice_point
    slice_point.append(maxint32)
    out = []
    for i in range(len(slice_point)):
        out.append(
            fluid.layers.slice(
                input,
                axes=[axis],
                starts=[slice_point[i]],
                ends=[slice_point[i + 1]],
                name=name + '_' + str(i)))
        if i == len(slice_point) - 2:
            break
    return out


def select_weights(name, data=None):
    weights_name = []
    return weights_name


register(
    kind='Select',
    shape=select_shape,
    layer=select_layer,
    weights=select_weights)

select.py, new version (added):

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle
import paddle.fluid as fluid


def select(x,
           input_shape,
           point,
           axis):
    start = point[0]
    if len(point) == 2:
        end = point[1]
    else:
        end = input_shape[axis]
    # paddle.slice takes list-valued axes/starts/ends.
    out = paddle.slice(input=x,
                       axes=[axis],
                       starts=[start],
                       ends=[end])
    return out
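select_layer pads slice_point with 0 in front and INT32_MAX at the end, so k slice points yield k + 1 outputs along the axis. A numpy sketch for slice_point = [2] on axis 1 of a [1, 6, 4, 4] input (ends are clamped to the dimension, as fluid.layers.slice also does):

import numpy as np

x = np.random.rand(1, 6, 4, 4)
points = [0] + [2] + [2147483647]
outs = [x[:, points[i]:min(points[i + 1], x.shape[1]), :, :] for i in range(2)]
assert outs[0].shape == (1, 2, 4, 4) and outs[1].shape == (1, 4, 4, 4)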
from .register import register
from x2paddle.core.util import *
def shufflechannel_shape(input_shape):
return input_shape
def shufflechannel_layer(inputs, group=None, input_shape=None, name=None):
input = inputs[0]
out = fluid.layers.shuffle_channel(x=input, group=group)
return out
def shufflechannel_weights(name, data=None):
weights_name = []
return weights_name
register(
kind='ShuffleChannel',
shape=shufflechannel_shape,
layer=shufflechannel_layer,
weights=shufflechannel_weights)
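ShuffleChannel is the ShuffleNet trick: reshape [N, C, H, W] to [N, g, C // g, H, W], swap the two group axes, and flatten back. A numpy sketch of what fluid.layers.shuffle_channel computes:

import numpy as np

def shuffle_channel_np(x, group):
    n, c, h, w = x.shape
    return x.reshape(n, group, c // group, h, w) \
            .transpose(0, 2, 1, 3, 4) \
            .reshape(n, c, h, w)

x = np.arange(8).reshape(1, 8, 1, 1)
print(shuffle_channel_np(x, 2).ravel())  # [0 4 1 5 2 6 3 7]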
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
Author: Drift
Email: wutuobang@baidu.com
Date: 2020/04/22 18:45
"""
from .register import register
from x2paddle.core.util import *
def upsample_shape(input_shapes, scale):
"""
:param input_shapes:
:param scale:
:return:
"""
assert len(input_shapes) == 1, "not valid input shape for upsample layer"
assert type(scale) is int
input_shape = input_shapes[0]
new_h = scale * input_shape[2]
new_w = scale * input_shape[3]
output_shape = [input_shape[0], input_shape[1], new_h, new_w]
return [output_shape]
def upsample_layer(inputs, scale, input_shape=None, name=None):
"""
:param inputs:
:param scale:
:param input_shape:
:param name:
:return:
"""
x = inputs[0]
out = fluid.layers.resize_nearest(
x, align_corners=False, scale=scale, name=name)
return out
def upsample_weights(name, data=None):
"""
:param name:
:param data:
:return:
"""
weights_name = []
return weights_name
register(
kind='Upsample',
shape=upsample_shape,
layer=upsample_layer,
weights=upsample_weights)
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numbers
from functools import reduce
def get_kernel_parameters(params):
[k_h, k_w] = [1, 1]
if isinstance(params.kernel_size, numbers.Number):
[k_h, k_w] = [params.kernel_size] * 2
elif len(params.kernel_size) > 0:
k_h = params.kernel_h if params.kernel_h > 0 else params.kernel_size[0]
k_w = params.kernel_w if params.kernel_w > 0 else params.kernel_size[
len(params.kernel_size) - 1]
elif params.kernel_h > 0 or params.kernel_w > 0:
k_h = params.kernel_h
k_w = params.kernel_w
[s_h, s_w] = [1, 1]
if isinstance(params.stride, numbers.Number):
[s_h, s_w] = [params.stride] * 2
elif len(params.stride) > 0:
s_h = params.stride_h if params.stride_h > 0 else params.stride[0]
s_w = params.stride_w if params.stride_w > 0 else params.stride[len(
params.stride) - 1]
elif params.stride_h > 0 or params.stride_w > 0:
s_h = params.stride_h
s_w = params.stride_w
[p_h, p_w] = [0, 0]
if isinstance(params.pad, numbers.Number):
[p_h, p_w] = [params.pad] * 2
elif len(params.pad) > 0:
p_h = params.pad_h if params.pad_h > 0 else params.pad[0]
p_w = params.pad_w if params.pad_w > 0 else params.pad[len(params.pad) -
1]
elif params.pad_h > 0 or params.pad_w > 0:
p_h = params.pad_h
p_w = params.pad_w
dila_h = dila_w = 1
if hasattr(params, 'dilation'):
dila_len = len(params.dilation)
if dila_len == 2:
dila_h = params.dilation[0]
dila_w = params.dilation[1]
elif dila_len == 1:
dila_h = dila_w = params.dilation[0]
else:
assert dila_len == 0, "invalid length[%s] of dilation in convolution" % (
dila_len)
return dila_h, dila_w, p_h, p_w, k_h, k_w, s_h, s_w
def get_strided_kernel_output_shape(params, input_shape, round_func):
i_h = input_shape[2]
i_w = input_shape[3]
dila_h, dila_w, pad_h, pad_w, kernel_h, kernel_w, stride_h, stride_w = get_kernel_parameters(
params)
o_h = (i_h + 2 * pad_h - (dila_h *
(kernel_h - 1) + 1)) / float(stride_h) + 1
o_w = (i_w + 2 * pad_w - (dila_w *
(kernel_w - 1) + 1)) / float(stride_w) + 1
o_h = int(round_func(o_h))
o_w = int(round_func(o_w))
has_c_o = hasattr(params, 'num_output')
c = params.num_output if has_c_o else input_shape[1]
return [[input_shape[0], c, o_h, o_w]]
def shape_convolution(layer, input_shape):
params = layer.convolution_param
return get_strided_kernel_output_shape(params, input_shape[0], math.floor)
def shape_deconvolution(layer, input_shape):
h_i = input_shape[0][2]
w_i = input_shape[0][3]
params = layer.convolution_param
dila_h, dila_w, pad_h, pad_w, kernel_h, kernel_w, stride_h, stride_w = get_kernel_parameters(
params)
h_o = (h_i - 1) * stride_h - 2 * pad_h + dila_h * (kernel_h - 1) + 1
w_o = (w_i - 1) * stride_w - 2 * pad_w + dila_w * (kernel_w - 1) + 1
has_c_o = hasattr(params, 'num_output')
    c = params.num_output if has_c_o else input_shape[0][1]
return [[input_shape[0][0], c, h_o, w_o]]
def shape_pooling(layer, input_shape):
params = layer.pooling_param
global_pool = getattr(params, 'global_pooling', False)
if global_pool:
return [[input_shape[0][0], input_shape[0][1], 1, 1]]
ceil_mode = getattr(params, 'ceil_mode', True)
if ceil_mode is True:
method = math.ceil
else:
method = math.floor
return get_strided_kernel_output_shape(params, input_shape[0], method)
def shape_innerproduct(layer, input_shape):
params = layer.inner_product_param
return [[input_shape[0][0], params.num_output]]
def shape_lrn(layer, input_shape):
return input_shape
def shape_relu(layer, input_shape):
return input_shape
def shape_softmax(layer, input_shape):
return input_shape
def shape_input(layer, input_shape):
return [list(layer.input_param.shape[0].dim)]
def shape_memorydata(layer, input_shape):
params = layer.memory_data_param
shape = []
shape.append(int(params.batch_size))
shape.append(int(params.channels))
shape.append(int(params.height))
shape.append(int(params.width))
return [shape]
def shape_concat(layer, input_shape):
params = layer.concat_param
axis = params.axis
output_shape = None
for shape in input_shape:
if output_shape is None:
output_shape = []
for i in range(len(shape)):
output_shape.append(shape[i])
else:
output_shape[axis] += shape[axis]
return [output_shape]
def shape_slice(layer, input_shape):
inshape = input_shape[0]
top_len = len(layer.top)
params = layer.slice_param
axis = params.axis
slice_dim = params.slice_dim
if slice_dim != 1 and axis == 1:
axis = slice_dim
points = list(params.slice_point)
count = inshape[axis]
if len(points) == 0:
assert count % top_len == 0, "the parameter of Slice is wrong"
        part = count // top_len
t = part
while t < count:
points.append(int(t))
t += part
points = [0] + points + [count]
output_shape = []
for i in range(len(points)):
shape = []
for ii in range(len(inshape)):
shape.append(inshape[ii])
size = points[i + 1] - points[i]
shape[axis] = size
output_shape.append(shape)
if i == len(points) - 2:
break
return output_shape
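When slice_point is empty, shape_slice splits the axis evenly across the layer's tops; the arithmetic for 12 channels and 3 tops, illustrative:

count, top_len = 12, 3
part = count // top_len
points = [0] + [part * i for i in range(1, top_len)] + [count]
assert points == [0, 4, 8, 12]
sizes = [points[i + 1] - points[i] for i in range(top_len)]
assert sizes == [4, 4, 4]   # three equal output slices along the axis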
def shape_prelu(layer, input_shape):
return input_shape
def shape_sigmoid(layer, input_shape):
return input_shape
def shape_absval(layer, input_shape):
return input_shape
def shape_accuracy(layer, input_shape):
return [[1]]
def shape_tanh(layer, input_shape):
return input_shape
def shape_eltwise(layer, input_shape):
return [input_shape[0]]
def shape_batchnorm(layer, input_shape):
return input_shape
def shape_scale(layer, input_shape):
return input_shape
def shape_reshape(layer, input_shape):
def count(num_list):
return reduce(lambda a, b: a * b, num_list)
inshape = input_shape[0]
params = layer.reshape_param
axis = params.axis if hasattr(params, 'axis') else 0
num_axes = params.num_axes if hasattr(params, 'num_axes') else -1
if inshape[0] == -1:
inshape[0] = 1
input_count = count(inshape)
input_num_axes = len(inshape)
input_start_axis = axis
start_axis = input_start_axis if input_start_axis >= 0 \
else input_num_axes + input_start_axis + 1
assert start_axis >= 0, "[Reshape]axis %d out of range" % (input_start_axis)
assert start_axis <= input_num_axes, "[Reshape]axis %d out of range for %d-D input data"\
% (input_start_axis, input_num_axes)
assert num_axes >= -1, "[Reshape]num_axes must be >= 0, or -1 for all"
end_axis = input_num_axes if num_axes == -1 else start_axis + num_axes
assert end_axis <= input_num_axes, "end_axis[%d] = axis[%d] + num_axes[%d] is out of range"\
% (end_axis, start_axis, num_axes)
num_axes_replaced = end_axis - start_axis
num_axes_retained = input_num_axes - num_axes_replaced
num_new_axes = len(list(params.shape.dim))
outshape = []
for i in range(start_axis):
outshape.append(inshape[i])
for i in range(num_new_axes):
outshape.append(params.shape.dim[i])
for i in range(end_axis, input_num_axes):
outshape.append(inshape[i])
assert len(outshape) == num_axes_retained + num_new_axes,\
"[Reshape]invalid dims of output shape[%s]" % (str(outshape))
inferred_axis = -1
copy_axes = []
constant_count = 1
for i in range(num_new_axes):
top_dim = params.shape.dim[i]
if top_dim == 0:
copy_axes.append(i)
copy_axis_index = start_axis + i
outshape[copy_axis_index] = inshape[copy_axis_index]
elif top_dim == -1:
assert inferred_axis == -1, "[Reshape]new shape contains multiple -1 dims"
inferred_axis = i
else:
constant_count *= top_dim
if inferred_axis >= 0:
explicit_count = constant_count
l = inshape[0:start_axis]
if len(l) > 0:
explicit_count *= count(l)
l = inshape[end_axis:]
if len(l) > 0:
explicit_count *= count(l)
for i in range(len(copy_axes)):
explicit_count *= outshape[start_axis + copy_axes[i]]
        assert input_count % explicit_count == 0, "[Reshape]bottom count[%d] "\
            "must be divisible by product of the specified dimensions[%d] "\
            % (input_count, explicit_count)
outshape[start_axis + inferred_axis] = int(input_count / explicit_count)
output_count = count(outshape)
assert output_count == input_count, "[Reshape]output count[%d] must match input count[%d]" % (
output_count, input_count)
outshape[0] = -1
return [outshape]
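The dim conventions follow Caffe's ReshapeParameter: a 0 copies the input dimension and the single -1 is inferred so the element count is preserved. A worked example for the rules above:

# Input [N, 8, 4] with ReshapeParameter shape { dim: 0  dim: -1  dim: 2 }:
#   dim 0  -> copy the input dim (the batch axis)
#   dim -1 -> inferred as 8 * 4 / 2 = 16
#   output -> [-1, 16, 2] (the batch dim is reported as -1)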
def shape_argmax(layer, input_shape):
inshape = input_shape[0]
params = layer.argmax_param
    out_max_val = params.out_max_val if hasattr(params, 'out_max_val') else False
    top_k = params.top_k if hasattr(params, 'top_k') else 1
    axis = params.axis if hasattr(params, 'axis') else -1
if axis < 0:
axis += len(inshape)
assert (axis + 1 == len(inshape)
), 'only can be applied on the last dimension[axis:%d, %s] now,'\
'make sure you have set axis param in xxx.prototxt file' \
% (axis, str(inshape))
outshape = inshape
outshape[-1] = top_k
if out_max_val is True:
outshape[-1] *= 2
return [outshape]
def shape_crop(layer, input_shape):
assert len(input_shape) == 2, "the number of crop's inputs must be 2"
return [input_shape[1]]
def shape_flatten(layer, input_shape):
assert len(input_shape) == 1, "the number of flatten's inputs must be 1"
inshape = input_shape[0]
params = layer.flatten_param
start_axis = params.axis
end_axis = params.end_axis
if start_axis < 0:
start_axis += len(inshape)
if end_axis < 0:
end_axis += len(inshape) + 1
assert start_axis <= end_axis, 'invalid axis[%d] or end_axis[%d] params'\
% (start_axis, end_axis)
output_shape = inshape[0:start_axis]
if len(inshape[start_axis:end_axis]) != 0:
flat_sz = reduce(lambda a, b: a * b, inshape[start_axis:end_axis])
output_shape += [flat_sz]
output_shape += inshape[end_axis:len(inshape)]
output_shape[0] = -1
return [output_shape]
def shape_power(layer, input_shape):
return input_shape
def shape_reduction(layer, input_shape):
params = layer.reduction_param
axis = params.axis
if axis < 0:
axis += len(input_shape[0]) + 1
assert axis <= len(input_shape[0]), 'invalid axis[%d] error' % (axis)
    return [input_shape[0][0:axis]]