提交 2f9e2ca9 编写于 作者: S SunAhong1993

add caffe emitter

上级 04996630
......@@ -11,9 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.parser.tf_parser import TFParser
from x2paddle.optimizer.tf_optimizer import TFGraphOptimizer
from x2paddle.emitter.tf_emitter import TFEmitter
from six import text_type as _text_type
import argparse
......@@ -50,6 +48,9 @@ def arg_parser():
def tf2paddle(model, save_dir):
print("Now translating model from tensorflow to paddle.")
from x2paddle.parser.tf_parser import TFParser
from x2paddle.optimizer.tf_optimizer import TFGraphOptimizer
from x2paddle.emitter.tf_emitter import TFEmitter
parser = TFParser(model)
emitter = TFEmitter(parser)
emitter.run()
......@@ -57,7 +58,13 @@ def tf2paddle(model, save_dir):
def caffe2paddle(proto, weight, save_dir):
    """Convert a Caffe model into a Paddle python model.

    Args:
        proto: Path to the Caffe ``.prototxt`` network definition.
        weight: Path to the ``.caffemodel`` file holding trained weights.
        save_dir: Directory the generated Paddle model is written to.
    """
    # Fixed: dropped the stale 'Not implement yet.' print — the Caffe path
    # below is now fully implemented.
    print("Now translating model from caffe to paddle.")
    # Imported lazily so the TensorFlow conversion path does not require
    # the Caffe-side dependencies to be installed.
    from x2paddle.parser.caffe_parser import CaffeParser
    from x2paddle.emitter.caffe_emitter import CaffeEmitter
    parser = CaffeParser(proto, weight)
    emitter = CaffeEmitter(parser)
    emitter.run()
    emitter.save_python_model(save_dir)
def main():
......
......@@ -45,6 +45,8 @@ def export_paddle_param(param, param_name, dir):
assert param.size == 1, "Unexpected situation happend!"
shape = [1]
assert str(param.dtype) in dtype_map, "Unknown dtype of params."
if not os.path.exists(dir):
os.makedirs(dir)
fp = open(os.path.join(dir, param_name), 'wb')
fp.write(struct.pack('i', 0))
......
......@@ -24,7 +24,11 @@ class CaffeEmitter(Emitter):
self.parser = parser
self.graph = parser.caffe_graph
self.weights = dict()
self.resolver = parser.resolver
resolver = parser.resolver
if resolver.has_pycaffe():
self.did_use_pb = False
else:
self.did_use_pb = True
def run(self):
print("Total nodes: {}".format(len(self.graph.topo_sort)))
......@@ -38,11 +42,48 @@ class CaffeEmitter(Emitter):
for i in range(len(self.graph.topo_sort)):
node_name = self.graph.topo_sort[i]
node = self.graph.get_node(node_name)
for layer in node.fluid_code.layers:
print(layer.get_code())
self.net_code += node.fluid_code.gen_codes()
def adjust_parameters(self, node, data):
    """Remove the redundant singleton dimensions of protobuf-loaded params.

    When parameters are loaded through the protobuf backend each blob
    initially has four dimensions; biases (and, for InnerProduct layers,
    the FC weights) need their leading singleton axes squeezed away.
    Parameters loaded through pycaffe are already correctly shaped and are
    returned untouched. This handles the common cases but may leave
    potential for future issues; the Caffe backend does not suffer from
    this problem.

    Args:
        node: Graph node owning the parameters; its ``kind`` decides
            whether the weight blob (index 0) is squeezed as well.
        data: Sequence of numpy arrays (weights/biases) for the node.

    Returns:
        list of numpy arrays with the singleton axes removed.
    """
    if not self.did_use_pb:
        # pycaffe backend already delivers correctly shaped arrays.
        return data
    data = list(data)
    squeeze_indices = [1]  # Squeeze biases.
    if node.kind == NodeKind.InnerProduct:
        squeeze_indices.append(0)  # Squeeze FC weights too.
    # NOTE(review): removed a leftover debug loop that re-exported every
    # entry of self.weights to a hard-coded "params1" directory on each
    # iteration — an unrelated side effect of this adjustment routine.
    for idx in squeeze_indices:
        if idx >= len(data):
            continue
        d = data[idx]
        assert len(
            d.shape
        ) == 4, 'invalid shape[%s] from caffe when adjust_parameters' % (
            str(d.shape))
        shape_old = d.shape
        if idx == 0:
            sq_axis = (0, 1)
        elif idx == 1:
            sq_axis = (0, 1, 2)
        else:
            continue
        data[idx] = np.squeeze(d, axis=sq_axis)
        shape_new = data[idx].shape
        # Bug fix: the original compared len(shape_old) (an int) against the
        # shape tuple itself, which is always True; compare ranks instead so
        # the debug message fires only when a squeeze actually happened.
        if len(shape_old) != len(shape_new):
            debug('squeeze idx:%d, with kind:%s,name:%s' % \
                (idx, node.kind, node.name))
    return data
@staticmethod
def get_kernel_value(scalar, repeated, idx, default=None):
......@@ -114,6 +155,7 @@ class CaffeEmitter(Emitter):
def Convolution(self, node):
data = node.data
data = self.adjust_parameters(node, data)
self.weights[node.layer_name + '_weights'] = data[0]
if len(data) == 2:
self.weights[node.layer_name + '_bias'] = data[1]
......@@ -141,6 +183,7 @@ class CaffeEmitter(Emitter):
def Deconvolution(self, node):
data = node.data
data = self.adjust_parameters(node, data)
self.weights[node.layer_name + '_weights'] = data[0]
if len(data) == 2:
self.weights[node.layer_name + '_bias'] = data[1]
......@@ -227,6 +270,16 @@ class CaffeEmitter(Emitter):
def InnerProduct(self, node):
data = node.data
data = self.adjust_parameters(node, data)
# Reshape the parameters to Paddle's ordering
transpose_order = (1, 0)
w = data[0]
fc_shape = w.shape
output_channels = fc_shape[0]
w = w.reshape((output_channels, -1))
w = w.transpose(transpose_order)
data[0] = w
self.weights[node.layer_name + '_weights'] = data[0]
if len(data) == 2:
self.weights[node.layer_name + '_bias'] = data[1]
......
......@@ -212,6 +212,8 @@ class CaffeParser(object):
def load_using_caffe(self):
caffe = self.resolver.caffe
caffe.set_mode_cpu()
print(self.proto_path)
print(self.model_path)
net = caffe.Net(self.proto_path, self.model_path, caffe.TEST)
data = lambda blob: blob.data
self.params = [(k, list(map(data, v))) for k, v in net.params.items()]
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册