Commit cfd80849 authored by Channingss

merge paddle/develop

---
name: caffe2paddle
about: Converting a Caffe model to PaddlePaddle. Please describe the source of the Caffe model and its type (e.g., image classification, object detection).
---
Converting a Caffe model to PaddlePaddle: please describe the source of the Caffe model and its type (e.g., image classification, object detection).
If you have the original model file or a GitHub link, please attach it if convenient, so the developers can analyze it.
---
name: onnx2paddle
about: Converting an ONNX model to PaddlePaddle. Please describe the source of the ONNX model and its type (e.g., image classification, object detection).
---
Converting an ONNX model to PaddlePaddle: please describe the source of the ONNX model and its type (e.g., image classification, object detection).
If you have the original model file or a GitHub link, please attach it if convenient, so the developers can analyze it.
---
name: Others
about: For example, bug reports or feature requests
---
---
name: paddle2onnx
about: Converting a Paddle model to ONNX. Please describe the source of the Paddle model and its type (e.g., image classification, object detection).
---
Converting a Paddle model to ONNX: please describe the source of the Paddle model and its type (e.g., image classification, object detection).
If you have the original model file or a GitHub link, please attach it if convenient to help the developers analyze it; it also helps to describe the intended use case for the conversion :)
---
name: tensorflow2paddle
about: Converting a TensorFlow model to PaddlePaddle. Please describe the source of the TensorFlow model and its type (e.g., image classification, object detection).
---
Converting a TensorFlow model to PaddlePaddle: please describe the source of the TensorFlow model and its type (e.g., image classification, object detection).
If you have the original model file or a GitHub link, please attach it to help the developers analyze it.
Currently, the code provides conversions for 10 non-official ops (ops not documented on the [official site](http://caffe.berkeleyvision.org/tutorial/layers)). The Caffe implementation sources for these ops are listed below:
| op | Implementation source (this version) |
|-------|--------|
@@ -10,3 +10,5 @@
| Normalize | [code](https://github.com/weiliu89/caffe/blob/ssd/src/caffe/layers/normalize_layer.cpp) |
| ROIPooling | [code](https://github.com/rbgirshick/caffe-fast-rcnn/blob/0dcd397b29507b8314e252e850518c5695efbb83/src/caffe/layers/roi_pooling_layer.cpp) |
| Axpy | [code](https://github.com/hujie-frank/SENet/blob/master/src/caffe/layers/axpy_layer.cpp) |
| ReLU6 | [code](https://github.com/chuanqi305/ssd/blob/ssd/src/caffe/layers/relu6_layer.cpp) |
| Upsample | [code](https://github.com/eric612/MobileNet-YOLO/blob/master/src/caffe/layers/upsample_layer.cpp) |
@@ -34,6 +34,7 @@
| 21 | Axpy | 22 | ROIPooling | 23 | Permute | 24 | DetectionOutput |
| 25 | Normalize | 26 | Select | 27 | ShuffleChannel | 28 | ConvolutionDepthwise |
| 29 | ReLU | 30 | AbsVal | 31 | Sigmoid | 32 | TanH |
| 33 | ReLU6 | 34 | Upsample |
## ONNX
......
__version__ = "0.7.4"

from .core.program import PaddleProgram

program = PaddleProgram()
name_counter = dict()


def gen_name(op_name, var_name):
    # Build "<op_name>.<var_name>"; repeated requests for the same pair get
    # a numeric suffix: "<op>.<var>.1", "<op>.<var>.2", ...
    name = "{}.{}".format(op_name, var_name)
    if name not in name_counter:
        name_counter[name] = 0
    else:
        name_counter[name] += 1
        name = name + "." + str(name_counter[name])
    return name
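A quick sanity check of the naming scheme above (op and variable names here are hypothetical, assuming gen_name is in scope):

```python
print(gen_name("conv2d", "output"))  # -> conv2d.output
print(gen_name("conv2d", "output"))  # -> conv2d.output.1
print(gen_name("conv2d", "output"))  # -> conv2d.output.2
```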
@@ -192,7 +192,7 @@ def onnx2paddle(model_path, save_dir, params_merge=False):
print("Paddle model and code generated.")
def paddle2onnx(model_path, save_dir, opset_version=10):
from x2paddle.decoder.paddle_decoder import PaddleDecoder
from x2paddle.op_mapper.paddle2onnx.paddle_op_mapper import PaddleOpMapper
import paddle.fluid as fluid
......
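For reference, a hypothetical invocation of the updated signature; the paths below are placeholders, and opset_version now defaults to 10:

```python
paddle2onnx("./paddle_model", "./onnx_model")                    # default opset 10
paddle2onnx("./paddle_model", "./onnx_model", opset_version=11)  # pin a newer opset
```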
@@ -55,6 +55,24 @@ def export_paddle_param(param, param_name, dir):
def run_net(param_dir="./"):
import os
inputs, outputs = x2paddle_net()
ops = fluid.default_main_program().global_block().ops
used_vars = list()
for op in ops:
    used_vars += op.input_arg_names
# Keep only the declared inputs that at least one op actually consumes.
tmp = list()
for input in inputs:
    if isinstance(input, list):
        for ipt in input:
            if ipt.name not in used_vars:
                continue
            tmp.append(ipt)
    else:
        if input.name not in used_vars:
            continue
        tmp.append(input)
inputs = tmp
for i, out in enumerate(outputs):
if isinstance(out, list):
for out_part in out:
@@ -122,12 +140,30 @@ class OpMapper(object):
import model
try:
inputs, outputs = model.x2paddle_net()
ops = fluid.default_main_program().global_block().ops
used_vars = list()
for op in ops:
used_vars += op.input_arg_names
# Flatten nested output lists into a single flat list.
flat_outputs = list()
for out in outputs:
    if isinstance(out, list):
        flat_outputs.extend(out)
    else:
        flat_outputs.append(out)
outputs = flat_outputs
# Collect only the input names that at least one op actually consumes.
input_names = list()
for input in inputs:
if isinstance(input, list):
for ipt in input:
if ipt.name not in used_vars:
continue
input_names.append(ipt.name)
else:
if input.name not in used_vars:
continue
input_names.append(input.name)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
......
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
from __future__ import division
import collections
import os


class PaddleLayer(object):
    def __init__(self, kernel, inputs, outputs, **kwargs):
        assert isinstance(
            inputs,
            dict), "parameter 'inputs' for PaddleLayer should be of type dict"
        assert isinstance(
            outputs,
            list), "parameter 'outputs' for PaddleLayer should be of type list"
        self.kernel = kernel
        self.inputs = inputs
        self.outputs = outputs
        self.attrs = kwargs


class PaddleProgram(object):
    def __init__(self):
        self.layers = list()
        self.edges_out = dict()
        self.edges_in = dict()
        self.inputs = list()
        self.outputs = list()
        self.parameters = dict()

    def add_layer(self, kernel, inputs, outputs, **kwargs):
        layer = PaddleLayer(kernel, inputs, outputs, **kwargs)
        self.layers.append(layer)

    def build(self):
        outputs = dict()
        for i in range(len(self.layers)):
            layer = self.layers[i]
            for output in layer.outputs:
                outputs[output] = i

            for k, v in layer.inputs.items():
                assert v in outputs, "Couldn't find {} in previous layers; layers must be added in topological order".format(
                    v)
                in_layer_index = outputs[v]
                if in_layer_index not in self.edges_out:
                    self.edges_out[in_layer_index] = list()
                self.edges_out[in_layer_index].append(i)

                if i not in self.edges_in:
                    self.edges_in[i] = list()
                self.edges_in[i].append(in_layer_index)

    def get_layer_outputs(self, i):
        return self.edges_out[i]

    def get_layer_inputs(self, i):
        return self.edges_in[i]

    def gen_code(self, code_dir):
        def write_code(f, code_list, indent=0):
            indent_blank = "    " * indent
            for code_line in code_list:
                if code_line.strip() == "":
                    f.write('\n')
                else:
                    f.write(indent_blank + code_line + '\n')

        f = open(os.path.join(code_dir, 'model.py'), 'w')
        write_code(
            f, [
                "from paddle.fluid.initializer import Constant",
                "from paddle.fluid.param_attr import ParamAttr",
                "import paddle.fluid as fluid",
                "", "def x2paddle_net():"
            ],
            indent=0)

        for i, layer in enumerate(self.layers):
            # Skip isolated layers that neither feed nor consume anything.
            if self.edges_in.get(i, 0) == 0 and self.edges_out.get(i, 0) == 0:
                continue
            line = ""
            if len(layer.outputs) == 1:
                line = layer.outputs[0]
            else:
                for output in layer.outputs:
                    line += "{}, ".format(output)
                line = line.strip(", ")

            line += " = {}(".format(layer.kernel)
            for k, v in layer.inputs.items():
                line += "{}={}, ".format(k, v)
            for k, v in layer.attrs.items():
                line += "{}={}, ".format(k, v)
            line = line.strip(", ")
            line += ")"
            write_code(f, [line], indent=1)
        f.close()

    def gen_parameters(self, code_dir):
        pass

    def gen_inference_model(self, model_dir):
        pass
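A minimal sketch of how PaddleProgram might be driven (the kernels, names, and attribute values below are hypothetical, and code_dir must already exist):

```python
prog = PaddleProgram()
prog.add_layer(
    "fluid.layers.data", inputs={}, outputs=["x"],
    name="'x'", shape=[-1, 3, 224, 224], append_batch_size=False)
prog.add_layer("fluid.layers.relu", inputs={"x": "x"}, outputs=["relu_0"])
prog.build()             # wires edges_in/edges_out by matching variable names
prog.gen_code("./code")  # writes ./code/model.py defining x2paddle_net()
```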
@@ -88,6 +88,19 @@ class CaffeGraph(Graph):
# filter them out here.
if (not exclude) and (phase == 'test'):
exclude = (type_str == 'Dropout')
if layer.type == 'Dropout':
drop_layer_top = layer.top[0]
drop_layer_bottom = layer.bottom[0]
if drop_layer_top != drop_layer_bottom:
for next_layer in layers:
for next_layer_bottom_idx, next_layer_bottom in enumerate(
next_layer.bottom):
if drop_layer_top == next_layer_bottom:
next_layer.bottom.remove(drop_layer_top)
next_layer.bottom.insert(
next_layer_bottom_idx,
drop_layer_bottom)
if not exclude:
filtered_layers.append(layer)
# Guard against dupes.
......
@@ -48,7 +48,7 @@ class TFGraphNode(GraphNode):
@property
def out_shapes(self):
if self.layer_type == "OneShotIterator" or self.layer_type == "IteratorV2":
values = self.layer.attr["output_shapes"].list.shape
else:
values = self.layer.attr["_output_shapes"].list.shape
@@ -68,7 +68,8 @@ class TFGraphNode(GraphNode):
if dtype == 0:
dtype = self.layer.attr['output_types'].list.type[0]
if dtype not in self.dtype_map:
raise Exception("Dtype[{}] not in dtype_map".format(dtype))
raise Exception("Dtype[{}] of node({}) not in dtype_map".format(
dtype, self.layer.name))
return self.dtype_map[dtype]
@property
@@ -114,16 +115,20 @@ class TFGraph(Graph):
def __init__(self, model, data_format="NHWC"):
super(TFGraph, self).__init__(model)
self.identity_map = dict()
self.multi_out_ops = ['Split', 'SplitV', 'IteratorV2']
self.tf_data_format = data_format
def build(self):
for layer in self.model.node:
if layer.op == 'Assert':
continue
self.node_map[layer.name.replace('/', '_').replace(
'-', '_')] = TFGraphNode(
layer, data_format=self.tf_data_format)
for layer_name, node in self.node_map.items():
if node.layer_type == 'Const':
continue
for in_node in node.layer.input:
in_node = in_node.replace('/', '_').replace('-', '_').replace(
'^', '')
@@ -139,6 +144,14 @@
super(TFGraph, self).build()
for layer in self.model.node:
if layer.op == 'Assert':
for ipt in layer.input:
ipt_name = ipt.replace('-', '_').replace('/', '_')
if ipt_name in self.output_nodes:
idx = self.output_nodes.index(ipt_name)
del self.output_nodes[idx]
# tensorflow graph optimize
self._remove_isolated_node()
self._optimize_dialiation_conv()
@@ -272,7 +285,6 @@
def data_format_propagation(self, node):
current_node = self.node_map[node.layer_name]
outputs = current_node.outputs
if len(outputs) == 0:
return
@@ -322,7 +334,7 @@ class TFDecoder(object):
graph_def = cp.deepcopy(graph_def)
input_map = dict()
for layer in graph_def.node:
if layer.op != "Placeholder" and layer.op != "OneShotIterator":
if layer.op != "Placeholder" and layer.op != "OneShotIterator" and layer.op != "IteratorV2":
continue
graph_node = TFGraphNode(layer)
dtype = graph_node.layer.attr['dtype'].type
@@ -403,7 +415,7 @@
else:
value = graph_node.layer.attr["shape"].shape
shape = [dim.size for dim in value.dim]
self.input_info[layer.name] = (shape, dtype)
return input_map
......
@@ -10,6 +10,8 @@ from . import select
from . import shufflechannel
from . import convolutiondepthwise
from . import axpy
from . import upsample
from . import relu6
#custom layer import ends
custom_layers = get_registered_layers()
......
@@ -2,7 +2,7 @@ from .register import register
from x2paddle.core.util import *
def axpy_shape(input_shapes):
    assert len(input_shapes) == 3, "not valid input shape for axpy layer"
    assert len(input_shapes[0]) == len(input_shapes[1]), 'should have same dims'
    output_shape = input_shapes[1]
@@ -18,7 +18,7 @@ def axpy_layer(inputs, input_shape=None, name=None):
y = inputs[2]
out = fluid.layers.elementwise_mul(x, alpha, axis=0)
out = fluid.layers.elementwise_add(out, y, name=name)
return out
def axpy_weights(name, data=None):
......
from .register import register
from x2paddle.core.util import *


def relu6_shape(input_shape):
    # ReLU6 is elementwise, so the input shape passes through unchanged.
    return input_shape


def relu6_layer(inputs, input_shape=None, name=None):
    input = inputs[0]
    out = fluid.layers.relu6(x=input)
    return out


def relu6_weights(name, data=None):
    # ReLU6 has no learnable weights.
    weights_name = []
    return weights_name


register(
    kind='ReLU6', shape=relu6_shape, layer=relu6_layer, weights=relu6_weights)
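Since ReLU6 is elementwise, the shape hook is a pass-through; for example (hypothetical NCHW shape):

```python
print(relu6_shape([[1, 32, 56, 56]]))  # -> [[1, 32, 56, 56]]
```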
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2020 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
Author: Drift
Email: wutuobang@baidu.com
Date: 2020/04/22 18:45
"""

from .register import register
from x2paddle.core.util import *


def upsample_shape(input_shapes, scale):
    """Infer the NCHW output shape: spatial dims are multiplied by scale.

    :param input_shapes: list holding a single NCHW input shape
    :param scale: integer upsampling factor
    :return: list holding the single output shape
    """
    assert len(input_shapes) == 1, "not valid input shape for upsample layer"
    assert type(scale) is int

    input_shape = input_shapes[0]
    new_h = scale * input_shape[2]
    new_w = scale * input_shape[3]

    output_shape = [input_shape[0], input_shape[1], new_h, new_w]
    return [output_shape]


def upsample_layer(inputs, scale, input_shape=None, name=None):
    """Map Caffe Upsample to fluid.layers.resize_nearest.

    :param inputs: list holding the single input variable
    :param scale: integer upsampling factor
    :param input_shape: unused, kept for the custom-layer interface
    :param name: output variable name
    :return: the resized variable
    """
    x = inputs[0]
    out = fluid.layers.resize_nearest(
        x, align_corners=False, scale=scale, name=name)
    return out


def upsample_weights(name, data=None):
    """Upsample has no learnable weights.

    :param name: layer name, unused
    :param data: unused
    :return: empty list of weight names
    """
    weights_name = []
    return weights_name


register(
    kind='Upsample',
    shape=upsample_shape,
    layer=upsample_layer,
    weights=upsample_weights)
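A quick check of the shape rule above with a hypothetical NCHW input; only the two spatial dimensions are scaled:

```python
print(upsample_shape([[1, 256, 13, 13]], scale=2))  # -> [[1, 256, 26, 26]]
```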
@@ -23,7 +23,6 @@ from x2paddle.op_mapper.caffe_custom_layer import *
class CaffeOpMapper(OpMapper):
directly_map_ops = {
'AbsVal': 'abs',
'Sigmoid': 'sigmoid',
'TanH': 'tanh',
@@ -435,6 +434,26 @@ class CaffeOpMapper(OpMapper):
node.fluid_code.add_layer(
"concat", inputs=inputs, output=node, param_attr=attr)
def ReLU(self, node):
    """Map Caffe ReLU to fluid relu, or to leaky_relu when a nonzero
    negative_slope is given.
    """
    assert len(
        node.inputs) == 1, 'The count of ReLU node\'s input is not 1.'
    input = self.graph.get_bottom_node(node, idx=0, copy=True)

    params = node.layer.relu_param
    if params.HasField('negative_slope') and params.negative_slope != 0:
        negative_slope = float(params.negative_slope)

        attr = {'alpha': negative_slope}
        node.fluid_code.add_layer(
            'leaky_relu', inputs=input, output=node, param_attr=attr)
    else:
        node.fluid_code.add_layer('relu', inputs=input, output=node)
def PReLU(self, node):
assert len(
node.inputs) == 1, 'The count of PReLU node\'s input is not 1.'
......
@@ -41,6 +41,33 @@ class OpSet11(OpSet10):
outputs=op.output('Out'), )
return [min_node, max_node, node]
def pad2d(self, op, block):
    x_shape = block.var(op.input('X')[0]).shape
    paddings = op.attr('paddings')
    # TODO: support paddings passed as a Variable
    # Paddle's pad2d stores paddings as [top, bottom, left, right]; ONNX Pad
    # expects all begin-pads first, then all end-pads, one pair per axis.
    if op.attr('data_format') == 'NCHW':
        pads = [
            0, 0, paddings[0], paddings[2], 0, 0, paddings[1], paddings[3]
        ]
    else:
        pads = [
            0, paddings[0], paddings[2], 0, 0, paddings[1], paddings[3], 0
        ]
    pads_name = self.get_name(op.type, 'pads')
    pads_node = self.make_constant_node(pads_name,
                                        onnx_pb.TensorProto.INT64, pads)
    constant_value_name = self.get_name(op.type, 'constant_value')
    constant_value_node = self.make_constant_node(
        constant_value_name, onnx_pb.TensorProto.FLOAT, op.attr('pad_value'))
    # From opset 11 on, pads and the fill value are inputs rather than
    # node attributes.
    node = helper.make_node(
        'Pad',
        inputs=op.input('X') + [pads_name, constant_value_name],
        outputs=op.output('Out'),
        mode=op.attr('mode'))
    return [pads_node, constant_value_node, node]
def bilinear_interp(self, op, block):
input_names = op.input_names
coordinate_transformation_mode = ''
......
@@ -215,6 +215,28 @@ class OpSet9(object):
pads=op.attr('paddings') + op.attr('paddings'))
return node
def pad2d(self, op, block):
    x_shape = block.var(op.input('X')[0]).shape
    paddings = op.attr('paddings')
    # TODO: support paddings passed as a Variable
    if op.attr('data_format') == 'NCHW':
        pads = [
            0, 0, paddings[0], paddings[2], 0, 0, paddings[1], paddings[3]
        ]
    else:
        pads = [
            0, paddings[0], paddings[2], 0, 0, paddings[1], paddings[3], 0
        ]
    # Up to opset 9, pads and the fill value are attributes of the Pad node.
    node = helper.make_node(
        'Pad',
        inputs=op.input('X'),
        outputs=op.output('Out'),
        mode=op.attr('mode'),
        value=op.attr('pad_value'),
        pads=pads)
    return node
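To make the re-ordering concrete, here is the NCHW case, assuming Paddle's pad2d stores paddings as [top, bottom, left, right] and ONNX Pad expects all begin-pads followed by all end-pads:

```python
paddings = [1, 2, 3, 4]  # hypothetical [top, bottom, left, right]
pads = [0, 0, paddings[0], paddings[2],  # begins for (N, C, H, W)
        0, 0, paddings[1], paddings[3]]  # ends for (N, C, H, W)
print(pads)  # -> [0, 0, 1, 3, 0, 0, 2, 4]
```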
def softmax(self, op, block):
axis = op.attr('axis')
shape = block.var(op.output('Out')[0]).shape
......
@@ -15,6 +15,7 @@
from x2paddle.decoder.tf_decoder import TFGraph
from x2paddle.core.op_mapper import OpMapper
from x2paddle.core.util import *
import math
import inspect
import numpy
import sys
@@ -47,7 +48,8 @@ class TFOpMapperNHWC(OpMapper):
'LeakyRelu': ['leaky_relu', {
'alpha': 'alpha'
}],
'Floor': ['floor'],
'Erf': ['erf']
}
elementwise_ops = {
'Add': 'elementwise_add',
@@ -56,6 +58,7 @@
'Sub': 'elementwise_sub',
'Maximum': 'elementwise_max',
'Minimum': 'elementwise_min',
'LessEqual': 'less_equal',
'Mul': 'elementwise_mul',
'FloorDiv': 'elementwise_floordiv'
}
@@ -71,7 +74,11 @@
not_placeholder = list()
for name in self.graph.input_nodes:
if self.graph.get_node(name).layer_type not in [
        "Placeholder", "OneShotIterator", "IteratorV2"]:
not_placeholder.append(name)
for name in not_placeholder:
idx = self.graph.input_nodes.index(name)
@@ -631,10 +638,20 @@
attr = {"shape": shape}
node.fluid_code.add_layer(
"reshape", inputs=x, output=x, param_attr=attr)
if transpose_a is None:
transpose_a = node.get_attr('adj_x')
if transpose_b is None:
transpose_b = node.get_attr('adj_y')
attr = {"transpose_x": transpose_a, "transpose_y": transpose_b}
node.fluid_code.add_layer(
"matmul", inputs=inputs, output=node, param_attr=attr)
def BatchMatMul(self, node):
return self.MatMul(node)
def BatchMatMulV2(self, node):
return self.MatMul(node)
def ArgMax(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
axis = self.graph.get_node(node.layer.input[1], copy=True)
@@ -789,7 +806,7 @@
"transpose", inputs=input, output=node, param_attr=attr)
input = node
else:
self.graph.data_format_propagation(node)
attr = {
"bias_attr": False,
@@ -957,7 +974,9 @@
if y.layer_type == 'Const':
self.add_omit_nodes(y.layer_name, node.layer_name)
dim = y.value.tolist()
if not isinstance(dim, list):
    dim = [dim]
attr = {'axes': dim}
else:
attr = {'axes': y}
node.fluid_code.add_layer(
@@ -980,3 +999,95 @@
"=", inputs=x, output=node, param_attr=None)
else:
raise Exception("SpaceToBatchND is not supported")
def OneHot(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
depth = self.graph.get_node(node.layer.input[1], copy=True)
on_value = self.graph.get_node(node.layer.input[2], copy=True)
off_value = self.graph.get_node(node.layer.input[3], copy=True)
assert depth.layer_type == 'Const', 'Parameter depth should be Const in OneHot'
assert on_value.layer_type == 'Const', 'Parameter on_value should be Const in OneHot'
assert off_value.layer_type == 'Const', 'Parameter off_value should be Const in OneHot'
self.add_omit_nodes(depth.layer_name, node.layer_name)
self.add_omit_nodes(on_value.layer_name, node.layer_name)
self.add_omit_nodes(off_value.layer_name, node.layer_name)
depth = depth.value
on_value = on_value.value
off_value = off_value.value
assert math.fabs(on_value -
1.0) < 1e-06, "on_value should be 1 in OneHot"
assert math.fabs(off_value -
0.0) < 1e-06, "off_value should be 0 in OneHot"
attr = {'depth': depth}
node.fluid_code.add_layer(
"one_hot",
inputs=input,
output=node,
param_attr=attr,
use_fluid=True)
def Pow(self, node):
x = self.graph.get_node(node.layer.input[0], copy=True)
factor = self.graph.get_node(node.layer.input[1], copy=True)
self.add_omit_nodes(factor.layer_name, node.layer_name)
if factor.layer_type == 'Const':
factor = factor.value.tolist()
else:
factor = self.decoder.infer_tensor(factor)
attr = {'factor': factor}
node.fluid_code.add_layer("pow", inputs=x, output=node, param_attr=attr)
def All(self, node):
input = self.graph.get_node(node.layer.input[0], copy=True)
reduce_idx = self.graph.get_node(node.layer.input[1], copy=True)
self.add_omit_nodes(reduce_idx.layer_name, node.layer_name)
assert reduce_idx.layer_type == "Const", "Only support Const parameter[reduce_idx]"
dims = reduce_idx.value.tolist()
keep_dims = node.get_attr("keep_dims")
attr = {"dim": dims, "keep_dim": keep_dims}
node.fluid_code.add_layer(
"reduce_all", inputs=input, output=node, param_attr=attr)
def GatherV2(self, node):
embeddings = self.graph.get_node(node.layer.input[0], copy=True)
index = self.graph.get_node(node.layer.input[1], copy=True)
axis = self.graph.get_node(node.layer.input[2], copy=True)
self.add_omit_nodes(axis.layer_name, node.layer_name)
assert axis.layer_type == 'Const', "Only support Const parameter[axis]"
axis = axis.value.tolist()
assert axis == 0, "Only support axis=0 in GatherV2 OP"
attr = {'overwrite': False}
if len(index.out_shapes[0]) != 1:
reshape_attr = {"shape": [-1]}
node.fluid_code.add_layer(
"reshape", inputs=index, output=index, param_attr=reshape_attr)
inputs = {'input': embeddings, 'index': index}
node.fluid_code.add_layer(
"gather", inputs=inputs, output=node, param_attr=attr)
def OneShotIterator(self, node):
return self.Placeholder(node)
def IteratorV2(self, node):
dtype_map = {
1: "float32",
3: "int32",
4: "uint8",
9: "int64",
10: "bool"
}
shapes = node.out_shapes
dtypes = node.layer.attr['output_types'].list.type
node.fluid_code.add_note("{} = [0] * {}".format(node.layer_name,
len(shapes)))
for i, shape in enumerate(shapes):
attr = {
'dtype': string(dtype_map[dtypes[i]]),
'shape': shape,
'name': string("{}_{}".format(node.layer_name, i)),
'append_batch_size': False
}
output = "{}[{}]".format(node.layer_name, i)
node.fluid_code.add_layer(
"data", inputs=None, output=output, param_attr=attr)
@@ -33,6 +33,8 @@
| MobileNet_V1 | [code](https://github.com/shicai/MobileNet-Caffe) |
| MobileNet_V2 | [code](https://github.com/shicai/MobileNet-Caffe) |
| ShuffleNet_v2 | [code](https://github.com/miaow1988/ShuffleNet_V2_pytorch_caffe/releases/tag/v0.1.0) |
| InceptionV3 | [code](https://github.com/soeaver/caffe-model/blob/master/cls/inception/) |
| InceptionV4 | [code](https://github.com/soeaver/caffe-model/blob/master/cls/inception/) |
| mNASNet | [code](https://github.com/LiJianfei06/MnasNet-caffe) |
| MTCNN | [code](https://github.com/kpzhang93/MTCNN_face_detection_alignment/tree/master/code/codes/MTCNNv1/model) |
| Mobilenet_SSD | [code](https://github.com/chuanqi305/MobileNet-SSD) |
......