提交 162399b6 编写于 作者: J jiazhenwei

python3 compatible, update env requirement

上级 c8d90837
......@@ -21,3 +21,4 @@ mace/examples/android/macelibrary/src/main/cpp/mace/
\.project/
*swp
*~
.python-version
......@@ -14,7 +14,7 @@ Required dependencies
- Tested version
* - Python
-
- 2.7
- 2.7 or 3.6
* - Bazel
- `bazel installation guide <https://docs.bazel.build/versions/master/install.html>`__
- 0.13.0
......
......@@ -194,7 +194,7 @@ class CaffeConverter(base_converter.ConverterInterface):
caffe_weights = caffe_pb2.NetParameter()
# parse prototxt
with open(src_model_file, 'rb') as f:
with open(src_model_file, 'r') as f:
google.protobuf.text_format.Merge(
str(f.read()), self._caffe_layers)
self.filter_test_layers(self._caffe_layers)
......@@ -471,7 +471,7 @@ class CaffeConverter(base_converter.ConverterInterface):
type_arg = op.arg.add()
type_arg.name = MaceKeyword.mace_activation_type_str
type_arg.s = self.activation_type[caffe_op.type].name
type_arg.s = six.b(self.activation_type[caffe_op.type].name)
if caffe_op.type == 'PReLU':
alpha_tensor_name = caffe_op.name + '_alpha'
......
......@@ -122,10 +122,13 @@ class TensorflowConverter(base_converter.ConverterInterface):
'SAME': PaddingMode.SAME,
'FULL': PaddingMode.FULL
}
padding_mode = {six.b(k): v for k, v in six.iteritems(padding_mode)}
pooling_type_mode = {
TFOpType.AvgPool.name: PoolingType.AVG,
TFOpType.MaxPool.name: PoolingType.MAX
}
eltwise_type = {
TFOpType.Add.name: EltwiseType.SUM,
TFOpType.Sub.name: EltwiseType.SUB,
......@@ -144,6 +147,7 @@ class TensorflowConverter(base_converter.ConverterInterface):
TFOpType.Rsqrt.name: EltwiseType.POW,
TFOpType.Equal.name: EltwiseType.EQUAL,
}
activation_type = {
TFOpType.Relu.name: ActivationType.RELU,
TFOpType.Relu6.name: ActivationType.RELUX,
......@@ -245,13 +249,13 @@ class TensorflowConverter(base_converter.ConverterInterface):
def replace_input_output_tensor_name(self):
for op in self._mace_net_def.op:
for i in xrange(len(op.input)):
for i in six.moves.range(len(op.input)):
if op.input[i][-2:] == ':0':
op_name = op.input[i][:-2]
if op_name in self._option.input_nodes \
or op_name in self._option.output_nodes:
op.input[i] = op_name
for i in xrange(len(op.output)):
for i in six.moves.range(len(op.output)):
if op.output[i][-2:] == ':0':
op_name = op.output[i][:-2]
if op_name in self._option.output_nodes:
......@@ -432,7 +436,7 @@ class TensorflowConverter(base_converter.ConverterInterface):
if len(tf_op.inputs) == 1:
return len(tf_op.inputs[0].shape) == 0
elif len(tf_op.inputs) == 2:
return len(tf_op.inputs[0].shape) == 0 and\
return len(tf_op.inputs[0].shape) == 0 and \
len(tf_op.inputs[1].shape) == 0
if check_is_scalar(tf_op):
......@@ -456,8 +460,8 @@ class TensorflowConverter(base_converter.ConverterInterface):
EltwiseType.SUM, EltwiseType.PROD,
EltwiseType.MAX, EltwiseType.MIN]
if len(tf_op.inputs) > 1 and\
len(tf_op.inputs[1].shape) == 0 and\
if len(tf_op.inputs) > 1 and \
len(tf_op.inputs[1].shape) == 0 and \
tf_op.inputs[1].op.type == TFOpType.Const.name:
scalar = tf_op.inputs[1].eval().astype(np.float32)
value_arg = op.arg.add()
......@@ -465,20 +469,20 @@ class TensorflowConverter(base_converter.ConverterInterface):
value_arg.f = scalar
self._skip_tensor.add(tf_op.inputs[1].name)
value_index_arg = op.arg.add()
value_index_arg.name =\
value_index_arg.name = \
MaceKeyword.mace_scalar_input_index_str
value_index_arg.i = 1
self._skip_tensor.add(tf_op.inputs[1].name)
del op.input[1]
elif len(tf_op.inputs[0].shape) == 0 and\
tf_op.inputs[0].op.type == TFOpType.Const.name and\
elif len(tf_op.inputs[0].shape) == 0 and \
tf_op.inputs[0].op.type == TFOpType.Const.name and \
is_commutative(type_arg.i):
scalar = tf_op.inputs[0].eval().astype(np.float32)
value_arg = op.arg.add()
value_arg.name = MaceKeyword.mace_scalar_input_str
value_arg.f = scalar
value_index_arg = op.arg.add()
value_index_arg.name =\
value_index_arg.name = \
MaceKeyword.mace_scalar_input_index_str
value_index_arg.i = 0
self._skip_tensor.add(tf_op.inputs[0].name)
......@@ -503,7 +507,7 @@ class TensorflowConverter(base_converter.ConverterInterface):
type_arg = op.arg.add()
type_arg.name = MaceKeyword.mace_activation_type_str
type_arg.s = self.activation_type[tf_op.type].name
type_arg.s = six.b(self.activation_type[tf_op.type].name)
if tf_op.type == TFOpType.Relu6.name:
limit_arg = op.arg.add()
......@@ -520,7 +524,8 @@ class TensorflowConverter(base_converter.ConverterInterface):
is_training = tf_op.get_attr(tf_is_training_str)
assert is_training is False, 'Only support batch normalization ' \
'with is_training False, but got %s' % is_training
'with is_training False, but got %s' % \
is_training
gamma_value = tf_op.inputs[1].eval().astype(np.float32)
beta_value = tf_op.inputs[2].eval().astype(np.float32)
......@@ -531,8 +536,8 @@ class TensorflowConverter(base_converter.ConverterInterface):
scale_name = self.get_scope(tf_op.name) + '/scale:0'
offset_name = self.get_scope(tf_op.name) + '/offset:0'
scale_value = (
(1.0 / np.vectorize(math.sqrt)(
var_value + epsilon_value)) * gamma_value)
(1.0 / np.vectorize(math.sqrt)(
var_value + epsilon_value)) * gamma_value)
offset_value = (-mean_value * scale_value) + beta_value
self.add_tensor(scale_name, scale_value.shape, mace_pb2.DT_FLOAT,
scale_value)
......
......@@ -94,7 +94,7 @@ class MemoryOptimizer(object):
if output_type == mace_pb2.DT_UINT8:
data_type_size = 1
return MemoryBlock(mace_pb2.CPU_BUFFER,
[reduce(operator.mul, output_shape, 1) *
[six.moves.reduce(operator.mul, output_shape, 1) *
data_type_size])
def mem_size(self, memory_block):
......@@ -123,7 +123,9 @@ class MemoryOptimizer(object):
for op in self.net_def.op:
if not self.op_need_optimize_memory(op):
continue
origin_mem_size += reduce(operator.mul, op.output_shape[0].dims, 1)
origin_mem_size += six.moves.reduce(operator.mul,
op.output_shape[0].dims,
1)
return origin_mem_size
def get_total_optimized_mem_size(self):
......@@ -170,8 +172,8 @@ class MemoryOptimizer(object):
output_type)
mem_id = -1
if len(self.idle_mem) > 0:
best_mem_add_size = sys.maxint
best_mem_waste_size = sys.maxint
best_mem_add_size = six.MAXSIZE
best_mem_waste_size = six.MAXSIZE
for mid in self.idle_mem:
old_mem_block = self.mem_block[mid]
if old_mem_block.mem_type != op_mem_block.mem_type:
......
......@@ -191,7 +191,7 @@ def save_model_to_proto(net_def, model_tag, output_dir):
proto_file_path = output_dir + model_tag + '.pb'
with open(proto_file_path, "wb") as f:
f.write(net_def.SerializeToString())
with open(proto_file_path + '_txt', "wb") as f:
with open(proto_file_path + '_txt', "w") as f:
f.write(str(net_def))
......
......@@ -22,7 +22,7 @@ namespace mace {
extern const std::map<std::string, std::vector<{{data_type}}>> {{variable_name}} =
{
{% for key, value in maps.iteritems() %}
{% for key, value in maps.items() %}
{
"{{key}}",
{
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册