Unverified commit 48244d82, authored by J Jason, committed by GitHub

Merge pull request #4 from PaddlePaddle/develop

pull
@@ -10,5 +10,3 @@
| Normalize | [code](https://github.com/weiliu89/caffe/blob/ssd/src/caffe/layers/normalize_layer.cpp) |
| ROIPooling | [code](https://github.com/rbgirshick/caffe-fast-rcnn/blob/0dcd397b29507b8314e252e850518c5695efbb83/src/caffe/layers/roi_pooling_layer.cpp) |
| Axpy | [code](https://github.com/hujie-frank/SENet/blob/master/src/caffe/layers/axpy_layer.cpp) |
@@ -7,7 +7,6 @@ function abort(){
trap 'abort' 0
set -e
cd $TRAVIS_BUILD_DIR
export PATH=/usr/bin:$PATH
pre-commit install
......
@@ -179,6 +179,9 @@ def main():
x2paddle.__version__))
return
+assert args.framework is not None, "--framework is not defined(support tensorflow/caffe/onnx)"
+assert args.save_dir is not None, "--save_dir is not defined"
try:
import paddle
v0, v1, v2 = paddle.__version__.split('.')
@@ -187,8 +190,6 @@ def main():
return
except:
print("paddlepaddle not installed, use \"pip install paddlepaddle\"")
-assert args.framework is not None, "--framework is not defined(support tensorflow/caffe/onnx)"
-assert args.save_dir is not None, "--save_dir is not defined"
if args.framework == "tensorflow":
assert args.model is not None, "--model should be defined while translating tensorflow model"
......
@@ -13,8 +13,9 @@
# limitations under the License.
from x2paddle.core.graph import GraphNode
-import collections
from x2paddle.core.util import *
+import collections
+import six
class Layer(object):
@@ -28,7 +29,7 @@ class Layer(object):
def get_code(self):
layer_code = ""
if self.output is not None:
-if isinstance(self.output, str):
+if isinstance(self.output, six.string_types):
layer_code = self.output + " = "
else:
layer_code = self.output.layer_name + " = "
@@ -47,7 +48,7 @@ class Layer(object):
"[{}]".format(input.index) + ", ")
else:
in_list += (input.layer_name + ", ")
-elif isinstance(input, str):
+elif isinstance(input, six.string_types):
in_list += (input + ", ")
else:
raise Exception(
@@ -72,7 +73,7 @@ class Layer(object):
"[{}]".format(self.inputs.index) + ", ")
else:
layer_code += (self.inputs.layer_name + ", ")
-elif isinstance(self.inputs, str):
+elif isinstance(self.inputs, six.string_types):
layer_code += (self.inputs + ", ")
else:
raise Exception("Unknown type of inputs.")
@@ -119,6 +120,6 @@ class FluidCode(object):
for layer in self.layers:
if isinstance(layer, Layer):
codes.append(layer.get_code())
-elif isinstance(layer, str):
+elif isinstance(layer, six.string_types):
codes.append(layer)
return codes
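For context, replacing `str` with `six.string_types` keeps these `isinstance` checks correct under Python 2, where a layer name may arrive as `unicode` rather than `str`. A minimal standalone sketch (not part of the diff):

```python
import six

# On Python 2 a name may be either `str` or `unicode`; six.string_types covers both,
# while a plain `isinstance(name, str)` would reject the unicode case. On Python 3
# both literals below are simply `str`.
for name in ["conv1", u"conv1"]:
    assert isinstance(name, six.string_types)
```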
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from __future__ import print_function
+from __future__ import division
import collections
import copy as cp
......
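As a quick illustration of what the added `__future__` imports change (a standalone sketch, not taken from the converter):

```python
from __future__ import division, print_function

# With these imports, Python 2 matches Python 3 semantics: `/` is true division
# and `print` is a function rather than a statement.
print(7 / 2)   # 3.5 on both interpreters (plain Python 2 would print 3)
print(7 // 2)  # 3; floor division remains available via //
```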
@@ -11,6 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+import paddle.fluid as fluid
from paddle.fluid.proto import framework_pb2
from x2paddle.core.util import *
import inspect
@@ -46,6 +47,28 @@ def export_paddle_param(param, param_name, dir):
fp.close()
+# This func will copy to generate code file
+def run_net(param_dir="./"):
+import os
+inputs, outputs = x2paddle_net()
+for i, out in enumerate(outputs):
+if isinstance(out, list):
+for out_part in out:
+outputs.append(out_part)
+del outputs[i]
+exe = fluid.Executor(fluid.CPUPlace())
+exe.run(fluid.default_startup_program())
+def if_exist(var):
+b = os.path.exists(os.path.join(param_dir, var.name))
+return b
+fluid.io.load_vars(exe,
+param_dir,
+fluid.default_main_program(),
+predicate=if_exist)
class OpMapper(object):
def __init__(self):
self.paddle_codes = ""
......
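One detail of the relocated `run_net` worth noting is the loop that flattens list-valued outputs. A minimal standalone sketch of that behaviour (plain Python, no Paddle required):

```python
# Any output that is itself a list is expanded: its elements are appended to the end
# of `outputs` and the nested list is deleted, so the flattened entries end up after
# the remaining top-level outputs.
outputs = ["out_a", ["out_b", "out_c"], "out_d"]
for i, out in enumerate(outputs):
    if isinstance(out, list):
        for out_part in out:
            outputs.append(out_part)
        del outputs[i]
print(outputs)  # ['out_a', 'out_d', 'out_b', 'out_c']
```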
@@ -11,8 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-import paddle.fluid as fluid
import numpy
import math
import os
@@ -20,25 +18,3 @@ import os
def string(param):
return "\'{}\'".format(param)
-# This func will copy to generate code file
-def run_net(param_dir="./"):
-import os
-inputs, outputs = x2paddle_net()
-for i, out in enumerate(outputs):
-if isinstance(out, list):
-for out_part in out:
-outputs.append(out_part)
-del outputs[i]
-exe = fluid.Executor(fluid.CPUPlace())
-exe.run(fluid.default_startup_program())
-def if_exist(var):
-b = os.path.exists(os.path.join(param_dir, var.name))
-return b
-fluid.io.load_vars(exe,
-param_dir,
-fluid.default_main_program(),
-predicate=if_exist)
@@ -236,11 +236,7 @@ class CaffeDecoder(object):
data.MergeFromString(open(self.model_path, 'rb').read())
pair = lambda layer: (layer.name, self.normalize_pb_data(layer))
layers = data.layers or data.layer
-import time
-start = time.time()
self.params = [pair(layer) for layer in layers if layer.blobs]
-end = time.time()
-print('cost:', str(end - start))
def normalize_pb_data(self, layer):
transformed = []
......
The source diff for this file is too large to display. You can view the blob instead.
@@ -18,6 +18,8 @@ def normalize_layer(inputs,
shape=[1] if channel_shared else [input_shape[0][1]],
dtype=input.dtype,
attr=name + '_scale')
+scale_param = fluid.layers.reshape(x=scale_param, \
+shape=[1] if channel_shared else [input_shape[0][1]])
out = fluid.layers.elementwise_mul(x=l2_norm,
y=scale_param,
axis=-1 if channel_shared else 1)
......
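For reference, the `elementwise_mul(..., axis=1)` call that consumes the reshaped scale broadcasts a per-channel vector across an NCHW tensor. A small numpy sketch of that broadcast, with assumed shapes:

```python
import numpy as np

l2_norm = np.random.rand(2, 8, 4, 4).astype("float32")  # NCHW output of the L2-normalize step
scale = np.random.rand(8).astype("float32")             # one learned scale per channel
out = l2_norm * scale.reshape(1, -1, 1, 1)              # align the [C] vector with axis 1
print(out.shape)                                        # (2, 8, 4, 4)
```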
@@ -135,7 +135,8 @@ class CaffeOpMapper(OpMapper):
if isinstance(params.kernel_size, numbers.Number):
[k_h, k_w] = [params.kernel_size] * 2
elif len(params.kernel_size) > 0:
-k_h = params.kernel_h if params.kernel_h > 0 else params.kernel_size[0]
+k_h = params.kernel_h if params.kernel_h > 0 else params.kernel_size[
+0]
k_w = params.kernel_w if params.kernel_w > 0 else params.kernel_size[
len(params.kernel_size) - 1]
elif params.kernel_h > 0 or params.kernel_w > 0:
@@ -156,8 +157,8 @@ class CaffeOpMapper(OpMapper):
[p_h, p_w] = [params.pad] * 2
elif len(params.pad) > 0:
p_h = params.pad_h if params.pad_h > 0 else params.pad[0]
-p_w = params.pad_w if params.pad_w > 0 else params.pad[len(params.pad) -
-1]
+p_w = params.pad_w if params.pad_w > 0 else params.pad[
+len(params.pad) - 1]
elif params.pad_h > 0 or params.pad_w > 0:
p_h = params.pad_h
p_w = params.pad_w
@@ -225,12 +226,17 @@ class CaffeOpMapper(OpMapper):
node.layer_type, params)
if data is None:
data = []
-print('The parameter of {} (type is {}) is not set. So we set the parameters as 0'.format(
-node.layer_name, node.layer_type))
+print(
+'The parameter of {} (type is {}) is not set. So we set the parameters as 0'
+.format(node.layer_name, node.layer_type))
input_c = node.input_shape[0][1]
output_c = channel
-data.append(np.zeros([output_c, input_c, kernel[0], kernel[1]]).astype('float32'))
-data.append(np.zeros([output_c,])).astype('float32')
+data.append(
+np.zeros([output_c, input_c, kernel[0],
+kernel[1]]).astype('float32'))
+data.append(np.zeros([
+output_c,
+])).astype('float32')
else:
data = self.adjust_parameters(node)
self.weights[node.layer_name + '_weights'] = data[0]
@@ -272,12 +278,17 @@ class CaffeOpMapper(OpMapper):
node.layer_type, params)
if data is None:
data = []
-print('The parameter of {} (type is {}) is not set. So we set the parameters as 0'.format(
-node.layer_name, node.layer_type))
+print(
+'The parameter of {} (type is {}) is not set. So we set the parameters as 0'
+.format(node.layer_name, node.layer_type))
input_c = node.input_shape[0][1]
output_c = channel
-data.append(np.zeros([output_c, input_c, kernel[0], kernel[1]]).astype('float32'))
-data.append(np.zeros([output_c,]).astype('float32'))
+data.append(
+np.zeros([output_c, input_c, kernel[0],
+kernel[1]]).astype('float32'))
+data.append(np.zeros([
+output_c,
+]).astype('float32'))
else:
data = self.adjust_parameters(node)
self.weights[node.layer_name + '_weights'] = data[0]
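Both fallback branches above build a zero-filled `[output_c, input_c, k_h, k_w]` weight and an `[output_c]` bias when a layer ships without parameters; note that `.astype('float32')` only takes effect when applied to the array inside `append`, since `list.append` returns `None` (the second hunk does this, while the first still chains it onto the `append` call). A standalone numpy sketch with assumed sizes:

```python
import numpy as np

input_c, output_c, kernel = 3, 16, (3, 3)  # assumed channel counts and kernel size
data = []
data.append(np.zeros([output_c, input_c, kernel[0], kernel[1]]).astype('float32'))  # conv weight
data.append(np.zeros([output_c, ]).astype('float32'))                               # conv bias
print(data[0].shape, data[1].shape)  # (16, 3, 3, 3) (16,)
```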
@@ -369,13 +380,17 @@ class CaffeOpMapper(OpMapper):
data = node.data
params = node.layer.inner_product_param
if data is None:
-print('The parameter of {} (type is {}) is not set. So we set the parameters as 0.'.format(
-node.layer_name, node.layer_type))
+print(
+'The parameter of {} (type is {}) is not set. So we set the parameters as 0.'
+.format(node.layer_name, node.layer_type))
input_c = node.input_shape[0][1]
output_c = params.num_output
data = []
-data.append(np.zeros([input_c, output_c]).astype('float32').astype('float32'))
-data.append(np.zeros([output_c]).astype('float32').astype('float32'))
+data.append(
+np.zeros([input_c,
+output_c]).astype('float32').astype('float32'))
+data.append(
+np.zeros([output_c]).astype('float32').astype('float32'))
else:
data = self.adjust_parameters(node)
# Reshape the parameters to Paddle's ordering
@@ -616,7 +631,8 @@ class CaffeOpMapper(OpMapper):
param_attr=attr)
def BatchNorm(self, node):
-assert len(node.inputs) == 1, 'The count of BatchNorm node\'s input is not 1.'
+assert len(
+node.inputs) == 1, 'The count of BatchNorm node\'s input is not 1.'
input = self.graph.get_bottom_node(node, idx=0, copy=True)
params = node.layer.batch_norm_param
if hasattr(params, 'eps'):
@@ -624,11 +640,16 @@ class CaffeOpMapper(OpMapper):
else:
eps = 1e-5
if node.data is None or len(node.data) != 3:
-print('The parameter of {} (type is {}) is not set. So we set the parameters as 0'.format(
-node.layer_name, node.layer_type))
+print(
+'The parameter of {} (type is {}) is not set. So we set the parameters as 0'
+.format(node.layer_name, node.layer_type))
input_c = node.input_shape[0][1]
-mean = np.zeros([input_c,]).astype('float32')
-variance = np.zeros([input_c,]).astype('float32')
+mean = np.zeros([
+input_c,
+]).astype('float32')
+variance = np.zeros([
+input_c,
+]).astype('float32')
scale = 0
else:
node.data = [np.squeeze(i) for i in node.data]
@@ -655,11 +676,16 @@ class CaffeOpMapper(OpMapper):
def Scale(self, node):
if node.data is None:
-print('The parameter of {} (type is {}) is not set. So we set the parameters as 0'.format(
-node.layer_name, node.layer_type))
+print(
+'The parameter of {} (type is {}) is not set. So we set the parameters as 0'
+.format(node.layer_name, node.layer_type))
input_c = node.input_shape[0][1]
-self.weights[node.layer_name + '_scale'] = np.zeros([input_c,]).astype('float32')
-self.weights[node.layer_name + '_offset'] = np.zeros([input_c,]).astype('float32')
+self.weights[node.layer_name + '_scale'] = np.zeros([
+input_c,
+]).astype('float32')
+self.weights[node.layer_name + '_offset'] = np.zeros([
+input_c,
+]).astype('float32')
else:
self.weights[node.layer_name + '_scale'] = np.squeeze(node.data[0])
self.weights[node.layer_name + '_offset'] = np.squeeze(node.data[1])
......
@@ -43,7 +43,8 @@ def get_kernel_parameters(params):
[p_h, p_w] = [params.pad] * 2
elif len(params.pad) > 0:
p_h = params.pad_h if params.pad_h > 0 else params.pad[0]
-p_w = params.pad_w if params.pad_w > 0 else params.pad[len(params.pad) - 1]
+p_w = params.pad_w if params.pad_w > 0 else params.pad[len(params.pad) -
+1]
elif params.pad_h > 0 or params.pad_w > 0:
p_h = params.pad_h
p_w = params.pad_w
......
@@ -94,7 +94,7 @@ class ONNXOpMapper(OpMapper):
print(op)
return False
-def directly_map(self, node, *args, name='', **kwargs):
+def directly_map(self, node, name='', *args, **kwargs):
inputs = node.layer.input
outputs = node.layer.output
op_type = node.layer_type
......
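The reordered `directly_map` signature avoids a Python 2 syntax error: a default parameter written after `*args` is a keyword-only argument, which only Python 3 accepts. A standalone sketch of the portable form (the function name here is illustrative):

```python
# Portable on Python 2 and 3: the defaulted parameter comes before *args.
def directly_map_sketch(node, name='', *args, **kwargs):
    return name

# def py3_only(node, *args, name='', **kwargs): ...  # SyntaxError on Python 2

print(directly_map_sketch("node_0", name="Relu"))     # prints: Relu
```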
@@ -39,7 +39,7 @@
| ResNet50 | [code](https://github.com/soeaver/caffe-model/blob/master/cls/resnet/deploy_resnet50.prototxt) |
| Unet | [code](https://github.com/jolibrain/deepdetect/blob/master/templates/caffe/unet/deploy.prototxt) |
| VGGNet | [code](https://gist.github.com/ksimonyan/211839e770f7b538e2d8#file-vgg_ilsvrc_16_layers_deploy-prototxt) |
| FaceDetection | [code](https://github.com/ShiqiYu/libfacedetection/blob/master/models/caffe/yufacedetectnet-open-v1.prototxt) |
@@ -65,4 +65,3 @@
| mNASNet | [pytorch(personal practice)](https://github.com/rwightman/gen-efficientnet-pytorch) |9|
| EfficientNet | [pytorch(personal practice)](https://github.com/rwightman/gen-efficientnet-pytorch) |9|
| SqueezeNet | [onnx official](https://s3.amazonaws.com/download.onnx/models/opset_9/squeezenet.tar.gz) |9|