Commit 04996630 authored by SunAhong1993

add caffe emitter for alexnet

Parent 99582ade
...@@ -11,3 +11,283 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numbers
from x2paddle.parser.caffe_parser import CaffeGraph
from x2paddle.core.emitter import Emitter
from x2paddle.core.util import *
class CaffeEmitter(Emitter):
def __init__(self, parser):
super(CaffeEmitter, self).__init__()
self.parser = parser
self.graph = parser.caffe_graph
self.weights = dict()
self.resolver = parser.resolver
def run(self):
print("Total nodes: {}".format(len(self.graph.topo_sort)))
for node_name in self.graph.topo_sort:
node = self.graph.get_node(node_name)
op = node.layer_type
if hasattr(self, op):
emit_func = getattr(self, op)
emit_func(node)
for i in range(len(self.graph.topo_sort)):
node_name = self.graph.topo_sort[i]
node = self.graph.get_node(node_name)
for layer in node.fluid_code.layers:
print(layer.get_code())
for name, param in self.weights.items():
export_paddle_param(param, name.replace('/', '_'), "params1")
@staticmethod
def get_kernel_value(scalar, repeated, idx, default=None):
if scalar:
return scalar
if repeated:
if isinstance(repeated, numbers.Number):
return repeated
if len(repeated) == 1:
# Same value applies to all spatial dimensions
return int(repeated[0])
assert idx < len(repeated)
# Extract the value for the given spatial dimension
return repeated[idx]
if default is None:
raise ValueError('Unable to determine kernel parameter!')
return default
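# Example (hypothetical values): for a ConvolutionParameter whose repeated
# field kernel_size holds [3], get_kernel_value(0, [3], 0) and
# get_kernel_value(0, [3], 1) both return 3 (one value applied to every
# spatial dimension); if kernel_h is set to 11, the scalar takes
# precedence and 11 is returned directly.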
def get_kernel_parameters(self, kind, params):
assert kind in ['Convolution', 'Pooling', 'Deconvolution']
k_h = self.get_kernel_value(params.kernel_h, params.kernel_size, 0)
k_w = self.get_kernel_value(params.kernel_w, params.kernel_size, 1)
s_h = self.get_kernel_value(params.stride_h,
params.stride,
0,
default=1)
s_w = self.get_kernel_value(params.stride_w,
params.stride,
1,
default=1)
p_h = self.get_kernel_value(params.pad_h, params.pad, 0, default=0)
p_w = self.get_kernel_value(params.pad_w, params.pad, 1, default=0)
dila_h = dila_w = 1
group = 1
c_o = 1
if kind in ['Convolution', 'Deconvolution']:
c_o = params.num_output
group = params.group
dila_len = len(params.dilation)
if dila_len == 2:
dila_h = params.dilation[0]
dila_w = params.dilation[1]
elif dila_len == 1:
dila_h = dila_w = params.dilation[0]
else:
assert dila_len == 0, "invalid length[%s] of dilation in convolution" % (
dila_len)
kernel = [k_h, k_w]
stride = [s_h, s_w]
pad = [p_h, p_w]
dilation = [dila_h, dila_w]
return c_o, kernel, stride, pad, dilation, group
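# Worked example (AlexNet conv1: num_output: 96, kernel_size: 11,
# stride: 4, with pad, dilation and group left at their defaults):
# this returns (96, [11, 11], [4, 4], [0, 0], [1, 1], 1).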
def Input(self, node):
shape = list(node.layer.input_param.shape[0].dim)[1:]
dtype = 'float32'
attr = {
'dtype': string(dtype),
'shape': shape,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("data",
inputs=None,
output=node,
param_attr=attr)
def Convolution(self, node):
data = node.data
self.weights[node.layer_name + '_weights'] = data[0]
if len(data) == 2:
self.weights[node.layer_name + '_bias'] = data[1]
params = node.layer.convolution_param
channel, kernel, stride, pad, dilation, group = self.get_kernel_parameters(
node.layer_type, params)
assert len(node.inputs
) == 1, 'The count of Convolution node\'s input is not 1.'
input = self.graph.get_bottom_node(node, idx=0)
attr = {
'filter_size': kernel,
'num_filters': channel,
'stride': stride,
'padding': pad,
'dilation': dilation,
'groups': group,
'name': string(node.layer_name),
'param_attr': string(node.layer_name + '_weights'),
'bias_attr': string(node.layer_name + '_bias'),
}
node.fluid_code.add_layer("conv2d",
inputs=input,
output=node,
param_attr=attr)
def Deconvolution(self, node):
data = node.data
self.weights[node.layer_name + '_weights'] = data[0]
if len(data) == 2:
self.weights[node.layer_name + '_bias'] = data[1]
params = node.layer.convolution_param
channel, kernel, stride, pad, dilation, group = self.get_kernel_parameters(
node.layer_type, params)
assert len(node.inputs
) == 1, 'The count of Deconvolution node\'s input is not 1.'
input = self.graph.get_bottom_node(node, idx=0)
attr = {
'output_size': None,
'filter_size': kernel,
'num_filters': channel,
'stride': stride,
'padding': pad,
'dilation': dilation,
'groups': group,
'name': string(node.layer_name),
'param_attr': string(node.layer_name + '_weights'),
'bias_attr': string(node.layer_name + '_bias')
}
node.fluid_code.add_layer("conv2d_transpose",
inputs=input,
output=node,
param_attr=attr)
def Pooling(self, node):
params = node.layer.pooling_param
channel, kernel, stride, pad, dilation, group = self.get_kernel_parameters(
node.layer_type, params)
if params.pool == 0:
pool_type = 'max'
else:
pool_type = 'avg'
assert len(
node.inputs) == 1, 'The count of Pooling node\'s input is not 1.'
input = self.graph.get_bottom_node(node, idx=0)
attr = {
'pool_size': kernel,
'pool_stride': stride,
'pool_padding': pad,
'ceil_mode': True,
'pool_type': string(pool_type),
'exclusive': True,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("pool2d",
inputs=input,
output=node,
param_attr=attr)
def ReLU(self, node):
assert len(
node.inputs) == 1, 'The count of ReLU node\'s input is not 1.'
input = self.graph.get_bottom_node(node, idx=0)
attr = {'name': string(node.layer_name)}
node.fluid_code.add_layer("relu",
inputs=input,
output=node,
param_attr=attr)
def LRN(self, node):
assert len(node.inputs) == 1, 'The count of LRN node\'s input is not 1.'
params = node.layer.lrn_param
# The window size must be an odd value. For a window
# size of (2*n+1), Paddle defines depth_radius = n.
assert params.local_size % 2 == 1
# Caffe scales by (alpha/(2*n+1)), whereas Paddle
# just scales by alpha (as does Krizhevsky's paper).
# We'll account for that here.
alpha = params.alpha / float(params.local_size)
input = self.graph.get_bottom_node(node, idx=0)
attr = {
'n': params.local_size,
'k': 1.0,
'alpha': alpha,
'beta': params.beta,
'name': string(node.layer_name)
}
node.fluid_code.add_layer("lrn",
inputs=input,
output=node,
param_attr=attr)
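# Worked example (AlexNet's norm1: local_size: 5, alpha: 0.0001,
# beta: 0.75): the rescaled alpha passed to fluid's lrn is
# 0.0001 / 5 = 2e-05, with n = 5 and k = 1.0.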
def InnerProduct(self, node):
data = node.data
self.weights[node.layer_name + '_weights'] = data[0]
if len(data) == 2:
self.weights[node.layer_name + '_bias'] = data[1]
assert len(node.inputs
) == 1, 'The count of InnerProduct node\'s input is not 1.'
params = node.layer.inner_product_param
assert params.axis == 1
assert params.bias_term == True
input = self.graph.get_bottom_node(node, idx=0)
attr = {
'size': params.num_output,
'name': string(node.layer_name),
'act': None,
'param_attr': string(node.layer_name + '_weights'),
'bias_attr': string(node.layer_name + '_bias')
}
node.fluid_code.add_layer("fc",
inputs=input,
output=node,
param_attr=attr)
def Softmax(self, node):
assert len(
node.inputs) == 1, 'The count of Softmax node\'s input is not 1.'
input = self.graph.get_bottom_node(node, idx=0)
params = node.layer.softmax_param
axis = params.axis
shape = node.input_shape[0]
dims = len(shape)
axis = axis + dims if axis < 0 else axis
need_transpose = False
if axis + 1 != dims:
need_transpose = True
if need_transpose:
in_order = list(range(dims))
in_order.remove(axis)
in_order.append(axis)
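# e.g. dims = 4, axis = 1 (NCHW) -> in_order = [0, 2, 3, 1],
# moving the softmax axis to the innermost position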
attr = {
'perm': in_order,
'name': string(node.layer_name + '_transpose_in')
}
node.fluid_code.add_layer("transpose",
inputs=input,
output=node,
param_attr=attr)
attr = {'name': string(node.layer_name + '_softmax')}
node.fluid_code.add_layer("softmax",
inputs=node if need_transpose else input,
output=node,
param_attr=attr)
if need_transpose:
out_order = [
0,
] * dims
for id, v in enumerate(in_order):
out_order[v] = id
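# e.g. in_order = [0, 2, 3, 1] -> out_order = [0, 3, 1, 2],
# the inverse permutation that restores the original layout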
attr = {
'perm': out_order,
'name': string(node.layer_name + '_transpose_out')
}
node.fluid_code.add_layer("transpose",
inputs=node,
output=node,
param_attr=attr)
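Taken together, a minimal driver for this emitter might look like the sketch below. The CaffeParser signature comes from the parser file further down in this diff; the emitter's import path and the two model file names are assumptions for illustration.

from x2paddle.parser.caffe_parser import CaffeParser
from x2paddle.emitter.caffe_emitter import CaffeEmitter  # assumed module path

# hypothetical AlexNet model files
parser = CaffeParser('alexnet.prototxt', 'alexnet.caffemodel')
emitter = CaffeEmitter(parser)
# prints the generated fluid code and exports weights under "params1"
emitter.run()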
...@@ -17,6 +17,8 @@ import sys
from google.protobuf import text_format
import numpy as np
from x2paddle.core.graph import GraphNode, Graph
from x2paddle.core.fluid_code import FluidCode
from x2paddle.parser import caffe_shape
class CaffeResolver(object):
...@@ -62,10 +64,19 @@ class CaffeGraphNode(GraphNode):
else:
super(CaffeGraphNode, self).__init__(layer, layer_name)
self.layer_type = layer.type
self.fluid_code = FluidCode()
def set_params(self, params):
self.data = params
def set_output_shape(self, input_shape):
func_name = 'shape_' + self.layer_type.lower()
self.output_shape = getattr(caffe_shape, func_name)(self.layer,
input_shape)
def set_input_shape(self, input_shape):
self.input_shape = input_shape
class CaffeGraph(Graph):
def __init__(self, model, params):
...@@ -148,8 +159,34 @@ class CaffeGraph(Graph):
else:
notice('Ignoring parameters for non-existent layer: %s' % \
layer_name)
for layer_name in self.node_map:
node = self.node_map[layer_name]
inputs = node.inputs
i = 0
input_shape = []
for nm in inputs:
last_node = self.get_node(nm)
tmp = node.layer.bottom[i]
i = i + 1
idx = list(last_node.layer.top).index(tmp)
input_shape.append(last_node.output_shape[idx])
node.set_output_shape(input_shape)
node.set_input_shape(input_shape)
super(CaffeGraph, self).build()
def get_bottom_node(self, node, idx=0, copy=False):
input_node_name = node.inputs[idx]
assert input_node_name in self.node_map, 'The {} isn\'t a valid node'.format(
input_node_name)
input_node = self.node_map[input_node_name]
if len(input_node.layer.top) > 1:
idx = list(input_node.layer.top).index(node.layer.bottom[idx])
name = input_node_name + ':' + str(idx)
else:
name = input_node_name
return self.get_node(name, copy=copy)
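# Example (hypothetical topology): if layer "slice1" declares top: "a"
# and top: "b", a consumer whose bottom is "b" resolves here to the
# node named "slice1:1" (the top index appended after a colon).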
class CaffeParser(object):
def __init__(self, proto_path, model_path, use_caffe=True):
...
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
def get_params_w_h(params):
if hasattr(params, 'dilation'):
if len(params.dilation) == 0:
dila_h = 1
dila_w = 1
elif len(params.dilation) == 1:
dila_h = params.dilation[0]
dila_w = params.dilation[0]
else:
dila_h = params.dilation[0]
dila_w = params.dilation[1]
else:
dila_h = 1
dila_w = 1
if not isinstance(getattr(params, 'pad'), int):
if len(params.pad) == 0:
# Caffe's ConvolutionParameter defaults pad to 0, not 1
pad_h = 0
pad_w = 0
elif len(params.pad) == 1:
pad_h = params.pad[0]
pad_w = params.pad[0]
else:
pad_h = params.pad[0]
pad_w = params.pad[1]
if params.pad_h != 0 or params.pad_w != 0:
pad_h = params.pad_h
pad_w = params.pad_w
else:
if params.pad_h != 0 or params.pad_w != 0:
pad_h = params.pad_h
pad_w = params.pad_w
else:
pad_h = getattr(params, 'pad')
pad_w = getattr(params, 'pad')
if not isinstance(getattr(params, 'kernel_size'), int):
if len(params.kernel_size) == 0:
kernel_h = 1
kernel_w = 1
elif len(params.kernel_size) == 1:
kernel_h = params.kernel_size[0]
kernel_w = params.kernel_size[0]
else:
kernel_h = params.kernel_size[0]
kernel_w = params.kernel_size[1]
if params.kernel_h != 0 or params.kernel_w != 0:
kernel_h = params.kernel_h
kernel_w = params.kernel_w
else:
if params.kernel_h != 0 or params.kernel_w != 0:
kernel_h = params.kernel_h
kernel_w = params.kernel_w
else:
kernel_h = getattr(params, 'kernel_size')
kernel_w = getattr(params, 'kernel_size')
if not isinstance(getattr(params, 'stride'), int):
if len(params.stride) == 0:
stride_h = 1
stride_w = 1
elif len(params.stride) == 1:
stride_h = params.stride[0]
stride_w = params.stride[0]
else:
stride_h = params.stride[0]
stride_w = params.stride[1]
if params.stride_h != 0 or params.stride_w != 0:
stride_h = params.stride_h
stride_w = params.stride_w
else:
if params.stride_h != 0 or params.stride_w != 0:
stride_h = params.stride_h
stride_w = params.stride_w
else:
stride_h = getattr(params, 'stride')
stride_w = getattr(params, 'stride')
return dila_h, dila_w, pad_h, pad_w, kernel_h, kernel_w, stride_h, stride_w
def get_filter_output_shape(i_h, i_w, params, round_func):
dila_h, dila_w, pad_h, pad_w, kernel_h, kernel_w, stride_h, stride_w = get_params_w_h(
params)
o_h = (i_h + 2 * pad_h - (dila_h *
(kernel_h - 1) + 1)) / float(stride_h) + 1
o_w = (i_w + 2 * pad_w - (dila_w *
(kernel_w - 1) + 1)) / float(stride_w) + 1
return (int(round_func(o_h)), int(round_func(o_w)))
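# Worked example (AlexNet conv1: input 227x227, kernel 11, stride 4,
# pad 0, dilation 1): o_h = (227 + 0 - 11) / 4 + 1 = 55, giving a
# 55 x 55 output after math.floor.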
def get_strided_kernel_output_shape(params, input_shape, round_func):
o_h, o_w = get_filter_output_shape(input_shape[2], input_shape[3], params,
round_func)
has_c_o = hasattr(params, 'num_output')
c = params.num_output if has_c_o else input_shape[1]
return [[input_shape[0], c, o_h, o_w]]
def shape_convolution(layer, input_shape):
params = layer.convolution_param
return get_strided_kernel_output_shape(params, input_shape[0], math.floor)
def shape_deconvolution(layer, input_shape):
h_i = input_shape[2]
w_i = input_shape[3]
params = layer.convolution_param
dila_h, dila_w, pad_h, pad_w, kernel_h, kernel_w, stride_h, stride_w = get_params_w_h(
params)
h_o = (h_i - 1) * stride_h - 2 * pad_h + dila_h * (kernel_h - 1) + 1
w_o = (w_i - 1) * stride_w - 2 * pad_w + dila_w * (kernel_w - 1) + 1
has_c_o = hasattr(params, 'num_output')
# input_shape is a list of NCHW shape lists, so take channels from index 1
c = params.num_output if has_c_o else input_shape[0][1]
return [[input_shape[0][0], c, h_o, w_o]]
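# Worked example (hypothetical 2x upsampling deconv: input 4x4, kernel 4,
# stride 2, pad 1, dilation 1): h_o = (4 - 1) * 2 - 2 * 1 + 3 + 1 = 8.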
def shape_pooling(layer, input_shape):
params = layer.pooling_param
global_pool = getattr(params, 'global_pooling', False)
if global_pool:
return [[input_shape[0][0], input_shape[0][1], 1, 1]]
ceil_mode = getattr(params, 'ceil_mode', True)
if ceil_mode is True:
method = math.ceil
else:
method = math.floor
return get_strided_kernel_output_shape(params, input_shape[0], method)
def shape_innerproduct(layer, input_shape):
params = layer.inner_product_param
return [[input_shape[0][0], params.num_output]]
def shape_lrn(layer, input_shape):
return input_shape
def shape_relu(layer, input_shape):
return input_shape
def shape_softmax(layer, input_shape):
return input_shape
def shape_input(layer, input_shape):
return [list(layer.input_param.shape[0].dim)]
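As a usage note, these shape_* functions are dispatched by name from CaffeGraphNode.set_output_shape above ('shape_' + layer_type.lower()), so a Pooling layer resolves to shape_pooling. A minimal sketch with hypothetical shapes:

# input_shape is a list of NCHW shapes, one per bottom blob
input_shape = [[1, 96, 55, 55]]
# for a Pooling layer object `layer` with kernel_size: 3 and stride: 2
# (AlexNet's pool1), shape_pooling(layer, input_shape) returns
# [[1, 96, 27, 27]]: (55 + 2 * 0 - 3) / 2 + 1 = 27 under ceil rounding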