提交 7ff5fb00 编写于 作者: R Renwb1991 提交者: qingqing01

caffe2fluid: support crop and reduction layer convertion (#1145)

* add crop and reduction layer
* caffe2fluid: support crop and reduction layer convertion
上级 b0b3803f
......@@ -14,12 +14,13 @@ import permute
import detection_out
import normalize
import select
import crop
import reduction
#custom layer import ends
custom_layers = get_registered_layers()
def set_args(f, params, node=None):
""" set args for function 'f' using the parameters in node.layer.parameters
......@@ -41,6 +42,9 @@ def set_args(f, params, node=None):
if arg_name in params:
kwargs[arg_name] = params[arg_name]
if node is not None and len(node.metadata):
kwargs.update(node.metadata)
return arg_list, kwargs
......
""" a custom layer for 'crop', maybe we should implement this in standard way.
more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/crop.html
"""
from .register import register
def crop_shape(input_shape, shape=None):
    """ calculate the output shape of this layer using input shape

    Args:
        @input_shape (num | list of num): either a list of two shapes
            (the blob to crop and the reference blob) or a single
            variable-like object exposing a `.shape` attribute
        @shape (list of integer): the explicitly requested output shape,
            used only when `input_shape` is not a two-element list

    Returns:
        @output_shape (list of num): a list of numbers representing the
            output shape

    Raises:
        Exception: when neither a two-element input list nor an explicit
            `shape` is provided
    """
    if isinstance(input_shape, list):
        assert len(input_shape) == 2, "the number of crop's inputs must be 2"
        # the second input is the reference blob; its shape is the output
        return input_shape[1]
    elif shape is not None:
        assert len(shape) == len(
            input_shape.shape), "input_shape is diff with output_shape"
        return shape
    # Py3-compatible raise (original used Py2 `raise Exception, msg`);
    # the unreachable `return None` that followed it has been dropped.
    raise Exception("crop_shape input error")
def crop_layer(input, name, shape=None, axis=2, offset=None):
    """ build a layer of type 'Crop' using fluid

    Args:
        @input (variables | list of variables): input fluid variable for
            this layer; a list of two variables means [blob to crop,
            reference blob]
        @name (str): name for this layer
        @shape (list of integer): the shape of output, used when `input`
            is a single variable
        @axis (integer): parameter from caffe's Crop layer; negative
            values count from the end
        @offset (Variable|list/tuple of integer|None): parameter from
            caffe's Crop layer

    Returns:
        output (variable): output variable for this layer

    Raises:
        Exception: when neither a two-element input list nor an explicit
            `shape` is provided
    """
    input_shape = None
    output_shape = None
    input_tensor = None
    if isinstance(input, list):
        assert len(input) == 2, "the number of crop's inputs must be 2"
        input_shape = input[0].shape
        output_shape = input[1].shape
        input_tensor = input[0]
    elif shape is not None:
        assert len(shape) == len(
            input.shape), "input_shape is diff with output_shape"
        input_shape = input.shape
        output_shape = shape
        input_tensor = input
    else:
        # Py3-compatible raise (original used Py2 `raise Exception, msg`)
        raise Exception("crop_layer input error")

    assert len(output_shape) == len(
        input_shape), "input_shape is diff with output_shape"
    if axis < 0:
        axis += len(input_shape)

    if offset is not None:
        assert (len(input_shape) - axis) == len(
            offset), "invalid offset[%s] in crop layer" % (str(offset))
        # pad the leading axes with zero offsets; list() also accepts the
        # tuple offsets that the docstring permits (plain `+` would raise)
        offset = [0] * axis + list(offset)

    import paddle.fluid as fluid
    output = fluid.layers.crop(
        input_tensor, shape=output_shape, offsets=offset, name=name)
    return output
# Register the Crop converter under Caffe's layer type name so the
# conversion framework can dispatch to crop_shape / crop_layer.
register(kind='Crop', shape=crop_shape, layer=crop_layer)
""" a custom layer for 'reduction', maybe we should implement this in standard way.
more info can be found here: http://caffe.berkeleyvision.org/tutorial/layers/reduction.html
"""
from .register import register
def reduction_shape(input_shape, axis=0):
    """ calculate the output shape of this layer using input shape

    Args:
        @input_shape (list of num): a list of number which represents the
            input shape
        @axis (int): parameter from caffe's reduction layer; negative
            values count from one past the end

    Returns:
        @output_shape (list of num): a list of numbers represent the
            output shape (the dims kept before the reduced axes)
    """
    ndim = len(input_shape)
    if axis < 0:
        axis = axis + ndim + 1
    assert axis <= ndim, 'invalid axis[%d] error' % (axis)
    # everything from `axis` onwards is reduced away
    return input_shape[:axis]
def reduction_layer(input, name, axis=0, operation=1, coeff=1.0):
    """ build a layer of type 'Reduction' using fluid

    Args:
        @input (variable): input fluid variable for this layer
        @name (str): name for this layer
        @axis (int): parameter from caffe's reduction layer; all dims from
            `axis` to the end are reduced. Negative values count from one
            past the last dim
        @operation (int): parameter from caffe's reduction layer
            (1=SUM, 2=ASUM, 3=SUMSQ, 4=MEAN)
        @coeff (float): scale factor applied to the reduced output

    Returns:
        output (variable): output variable for this layer
    """
    assert operation >= 1 and operation <= 4, \
        "reduction operation [%s] error" % (operation)
    input_len = len(input.shape)
    if axis < 0:
        axis += input_len + 1
    # list() keeps this Py2/Py3 compatible: slicing a Py3 `range` yields a
    # range object, while fluid's `dim` argument expects a list of ints
    dim = list(range(input_len))

    import paddle.fluid as fluid
    if operation == 1:  ## operation = SUM
        output = fluid.layers.reduce_sum(
            input, dim=dim[axis:], keep_dim=False, name=name)
    elif operation == 2:  ## operation = ASUM
        absout = fluid.layers.abs(input)
        output = fluid.layers.reduce_sum(
            absout, dim=dim[axis:], keep_dim=False, name=name)
    elif operation == 3:  ## operation = SUMSQ
        powout = fluid.layers.pow(x=input, factor=2.0)
        output = fluid.layers.reduce_sum(
            powout, dim=dim[axis:], keep_dim=False, name=name)
    else:  ## operation = MEAN
        output = fluid.layers.reduce_mean(
            input, dim=dim[axis:], keep_dim=False, name=name)

    # apply caffe's coeff multiplier
    mulout = fluid.layers.scale(x=output, scale=coeff)
    return mulout
# Register the Reduction converter under Caffe's layer type name so the
# conversion framework can dispatch to reduction_shape / reduction_layer.
register(kind='Reduction', shape=reduction_shape, layer=reduction_layer)
......@@ -20,6 +20,7 @@ LAYER_DESCRIPTORS = {
'Data': shape_data,
'Dropout': shape_identity,
'DummyData': shape_data,
'Crop': shape_crop,
'EuclideanLoss': shape_scalar,
'Eltwise': shape_identity,
'Exp': shape_identity,
......
......@@ -351,6 +351,24 @@ class Network(object):
x=output, y=i, name=self.get_unique_output_name(name, 'add'))
return output
@layer
def max(self, inputs, name):
    """Fold a list of input variables into one by element-wise maximum."""
    fluid = import_fluid()
    result = inputs[0]
    for tensor in inputs[1:]:
        result = fluid.layers.elementwise_max(
            x=result,
            y=tensor,
            name=self.get_unique_output_name(name, 'max'))
    return result
@layer
def multiply(self, inputs, name):
    """Fold a list of input variables into one by element-wise product."""
    fluid = import_fluid()
    result = inputs[0]
    for tensor in inputs[1:]:
        result = fluid.layers.elementwise_mul(
            x=result,
            y=tensor,
            name=self.get_unique_output_name(name, 'mul'))
    return result
@layer
def fc(self, input, num_out, name, relu=True, act=None):
fluid = import_fluid()
......
......@@ -5,7 +5,7 @@ from ..graph import GraphBuilder, NodeMapper
from ..layers import NodeKind
from ..transformers import (DataInjector, DataReshaper, NodeRenamer,
SubNodeFuser, ReLUFuser, BatchNormScaleBiasFuser,
BatchNormPreprocessor, ParameterNamer)
BatchNormPreprocessor, ParameterNamer, CropFuser)
from . import network
......@@ -325,7 +325,13 @@ class Transformer(object):
# Slashes are used for scoping in Paddle. Replace slashes
# in node names with underscores.
# (Caffe's GoogLeNet implementation uses slashes)
NodeRenamer(lambda node: node.name.replace('/', '_'))
NodeRenamer(lambda node: node.name.replace('/', '_')),
# Fuse Crop
# The Crop layer crops its first input to the shape of its second input.
# When the shape-reference input is an "Input" or "DummyData" node, we can
# remove that node and record its shape on the Crop layer instead.
CropFuser()
]
self.graph = graph.transformed(transformers)
......
......@@ -62,6 +62,8 @@ def shape_identity(node):
def shape_scalar(node):
    # Scalar-valued layers (e.g. losses) are modeled as a 1x1x1x1 tensor.
    return make_tensor(1, 1, 1, 1)
def shape_crop(node):
    """Crop shape inference is delegated to the custom-layer implementation;
    reaching this stub indicates a dispatch error."""
    # message fixed: 'had been' -> 'has been', 'customer_layers' ->
    # 'custom_layers' (the actual registry name)
    raise KaffeError('crop function has been defined in custom_layers')
def shape_data(node):
if node.output_shape:
......
......@@ -176,6 +176,63 @@ class DataReshaper(object):
del node.reshaped_data
return graph
class CropFuser(object):
    '''
    Fuse a Crop layer with its shape-providing parent.

    Caffe's Crop layer takes two inputs: the blob to crop and a reference
    blob whose shape determines the output size. When the reference input
    is a shape-only node (e.g. "Input" or "DummyData"), that node can be
    removed from the graph and its output shape recorded directly on the
    Crop layer's metadata.
    '''
    # maps a removed node's name -> list of names it was replaced by
    _traced_names = {}

    @classmethod
    def traced_names(cls):
        return cls._traced_names

    @classmethod
    def trace(cls, fname, tname):
        """ record a name mapping: occurrences of 'fname' will be
        replaced by the value of 'tname'
        """
        if fname not in cls._traced_names:
            cls._traced_names[fname] = []
        cls._traced_names[fname].append(tname)

    def __init__(self, allowed_parent_types=None):
        # None sentinel avoids the shared-mutable-default-argument pitfall;
        # passing nothing still yields the same allowed kinds as before.
        if allowed_parent_types is None:
            allowed_parent_types = [NodeKind.Input, NodeKind.DummyData]
        self.allowed_parent_types = allowed_parent_types

    def __call__(self, graph):
        nodes = graph.nodes
        fused_nodes = []
        for node in nodes:
            if len(node.parents) != 2:
                # a Crop layer must have exactly two parent layers
                continue
            # the second parent is the shape-reference blob
            parent = node.parents[1]
            if not self.is_eligible_pair(parent, node):
                continue
            # Detach the shape-reference parent from the graph.
            parent.children.remove(node)
            node.parents.remove(parent)
            # Drop the parent entirely once it feeds nothing else.
            if not len(parent.children):
                fused_nodes.append(parent)
            self.merge(parent, node)
        # Rebuild the graph without the fused-away nodes.
        transformed_nodes = [node for node in nodes if node not in fused_nodes]
        return graph.replaced(transformed_nodes)

    def is_eligible_pair(self, parent, child):
        '''Returns true if this parent/child pair is eligible for fusion.'''
        # NOTE(review): the stricter upstream check (parent.kind in
        # self.allowed_parent_types and len(parent.children) == 1) was
        # deliberately disabled; only the child kind is verified here.
        return child.kind == NodeKind.Crop

    def merge(self, parent, child):
        '''Record the parent's NCHW output shape on the child's metadata.'''
        child.metadata['shape'] = [
            parent.output_shape.batch_size, parent.output_shape.channels,
            parent.output_shape.height, parent.output_shape.width
        ]
class SubNodeFuser(object):
'''
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册