Commit 56216c59 authored by liuqi

Bug: fix buffer transform and Caffe duplicate name bugs.

1. Fix the data format bug in buffer_transformer.
2. Fix the duplicate tensor name bug caused by in-place Caffe ops.
Parent 5efbfbff
@@ -222,7 +222,11 @@ void MemoryOptimizer::Optimize(
idle_blocks_.insert(mem_id);
}
} else {
-      MACE_CHECK(tensor_ref_count_.at(input_name) >= 0);
+      MACE_CHECK(tensor_ref_count_.at(input_name) >= 0,
+                 "Reference count of tensor ",
+                 input_name,
+                 " is ",
+                 tensor_ref_count_.at(input_name));
}
}
}
......
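The new check above passes extra arguments after the condition so that a failure names the offending tensor and its reference count instead of only the failed expression. Below is a minimal, self-contained sketch of that style of variadic check; MakeMessage and CHECK_WITH_MSG are illustrative stand-ins, not MACE's actual definitions.

#include <cstdlib>
#include <iostream>
#include <sstream>
#include <string>

// Illustrative helper: stream every argument into one string, similar in
// spirit to how a variadic check macro can assemble its failure message.
template <typename... Args>
std::string MakeMessage(Args&&... args) {
  std::ostringstream ss;
  (ss << ... << args);  // C++17 fold expression
  return ss.str();
}

// Hypothetical stand-in for MACE_CHECK: abort with a descriptive message
// when the condition does not hold.
#define CHECK_WITH_MSG(condition, ...)                          \
  do {                                                          \
    if (!(condition)) {                                         \
      std::cerr << "Check failed: " #condition " "              \
                << MakeMessage(__VA_ARGS__) << std::endl;       \
      std::abort();                                             \
    }                                                           \
  } while (0)

int main() {
  std::string input_name = "conv1_output";
  int ref_count = -1;
  // Fails and prints the tensor name and its reference count, mirroring
  // the more descriptive message added in this commit.
  CHECK_WITH_MSG(ref_count >= 0,
                 "Reference count of tensor ", input_name,
                 " is ", ref_count);
  return 0;
}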
@@ -348,9 +348,10 @@ class Tensor {
MACE_CHECK(image_shape[0] <= buffer_->shape()[0] &&
image_shape[1] <= buffer_->shape()[1],
"tensor (source op ", name_,
"): current physical image shape: ", buffer_->shape()[0],
", ", buffer_->shape()[1], " < logical image shape: ",
image_shape[0], ", ", image_shape[1]);
"): current logical image shape:",
image_shape[0], ", ", image_shape[1],
" > physical image shape: ",
buffer_->shape()[0], ", ", buffer_->shape()[1]);
return MaceStatus::MACE_SUCCESS;
}
}
......
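The reworded message above describes the same constraint as before: when a tensor reuses an already-allocated OpenCL image, the requested logical image shape must fit inside the physical allocation in both dimensions. A tiny illustrative version of that fit test (FitsInPhysicalImage is a hypothetical name, not a member of MACE's Tensor class):

#include <cstdint>
#include <iostream>
#include <vector>

// A logical 2-D image shape can reuse a physical allocation only if it is
// no larger than the allocation in either dimension.
bool FitsInPhysicalImage(const std::vector<int64_t> &logical,
                         const std::vector<int64_t> &physical) {
  return logical[0] <= physical[0] && logical[1] <= physical[1];
}

int main() {
  std::vector<int64_t> physical = {512, 256};  // already-allocated image
  std::vector<int64_t> fits     = {448, 256};  // reuse is allowed
  std::vector<int64_t> too_big  = {512, 320};  // would trigger the check
  std::cout << FitsInPhysicalImage(fits, physical) << "\n";     // prints 1
  std::cout << FitsInPhysicalImage(too_big, physical) << "\n";  // prints 0
  return 0;
}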
@@ -66,7 +66,7 @@ class OpenCLBufferTransformer {
VLOG(2) << "Transform CPU Buffer " << input->name()
<< " to GPU Buffer " << internal_tensor->name()
<< " with data type " << dt;
-    if (data_format == DataFormat::NCHW && input->shape().size() == 4) {
+    if (data_format == DataFormat::NHWC && input->shape().size() == 4) {
// 1. (NCHW -> NHWC)
std::vector<int> dst_dims = {0, 2, 3, 1};
std::vector<index_t> output_shape =
......
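The permutation {0, 2, 3, 1} used in this hunk maps an NCHW-ordered shape to NHWC order, which is what the CPU-to-GPU buffer transform needs for 4-D tensors. A short standalone sketch of applying such a permutation to a shape follows; PermuteShape is an illustrative helper, not MACE's TransposeShape:

#include <cstdint>
#include <iostream>
#include <vector>

// dst_dims[i] names the source axis that supplies output axis i.
// With {0, 2, 3, 1}, an NCHW shape becomes an NHWC shape.
std::vector<int64_t> PermuteShape(const std::vector<int64_t> &shape,
                                  const std::vector<int> &dst_dims) {
  std::vector<int64_t> output(dst_dims.size());
  for (size_t i = 0; i < dst_dims.size(); ++i) {
    output[i] = shape[dst_dims[i]];
  }
  return output;
}

int main() {
  std::vector<int64_t> nchw = {1, 3, 224, 224};  // N, C, H, W
  std::vector<int> dst_dims = {0, 2, 3, 1};      // same permutation as above
  for (int64_t d : PermuteShape(nchw, dst_dims)) {
    std::cout << d << " ";                       // prints: 1 224 224 3
  }
  std::cout << std::endl;
  return 0;
}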
@@ -77,13 +77,6 @@ class ReshapeOp : public Operation {
}
Tensor *output = this->Output(OUTPUT);
-    // NCHW -> NHWC
-    if (D == DeviceType::GPU && out_shape.size() == 4) {
-      std::vector<int> dst_dims = {0, 2, 3, 1};
-      std::vector<index_t> out_shape_gpu = TransposeShape<index_t, index_t>(
-          out_shape, dst_dims);
-      out_shape = out_shape_gpu;
-    }
output->ReuseTensorBuffer(*input);
output->Reshape(out_shape);
......
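With the GPU-only shape transpose deleted, the reshape path keeps the requested output shape as given and simply reuses the input's storage: ReuseTensorBuffer shares the buffer and Reshape only updates shape metadata. A toy sketch of that reuse-then-reshape pattern, assuming a simplified Tensor that is not MACE's real class:

#include <cassert>
#include <cstdint>
#include <memory>
#include <vector>

// Toy tensor: storage is shared between tensors, shape is per-tensor metadata.
struct Tensor {
  std::shared_ptr<std::vector<float>> buffer;
  std::vector<int64_t> shape;

  // Share the other tensor's storage without copying any data.
  void ReuseTensorBuffer(const Tensor &other) { buffer = other.buffer; }

  // Reshaping only rewrites metadata; the element count must stay the same.
  void Reshape(const std::vector<int64_t> &new_shape) {
    int64_t count = 1;
    for (int64_t d : new_shape) count *= d;
    assert(buffer && static_cast<int64_t>(buffer->size()) == count);
    shape = new_shape;
  }
};

int main() {
  Tensor input;
  input.buffer = std::make_shared<std::vector<float>>(1 * 2 * 3 * 4, 0.0f);
  input.shape = {1, 2, 3, 4};

  Tensor output;
  output.ReuseTensorBuffer(input);  // no copy, same underlying storage
  output.Reshape({1, 24});          // metadata-only change
  return 0;
}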
@@ -137,7 +137,6 @@ class CaffeNet(object):
layer.top[i] = new_name
self._alias_op_output_name[old_name] = new_name
self._used_op_output_name.update([new_name])
for input_tensor in layer.bottom:
if input_tensor not in self._consumers:
self._consumers[input_tensor] = []
@@ -248,7 +247,8 @@ class CaffeConverter(base_converter.ConverterInterface):
for op in ops:
for i in six.moves.range(len(op.output)):
original_output_name = op.output[i].split('#')[0]
-                if original_output_name not in visited:
+                if original_output_name not in visited and\
+                        original_output_name not in self._option.input_nodes:
self.replace_input_name(
consumers.get(op.output[i], []),
op.output[i],
......
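The converter keeps op output names unique when Caffe layers write in place (top equals bottom) by appending a '#' suffix to each top blob, recording the alias, and tracking the set of names already used; the added condition above also skips the later rewrite for tensors whose original name is one of the model's input nodes. The converter itself is Python, but the unique-naming idea looks roughly like the C++ sketch below; NameTable and its methods are illustrative stand-ins, not the converter's code.

#include <iostream>
#include <set>
#include <string>
#include <unordered_map>

// Illustrative unique-name generator for in-place layers: append "#<idx>"
// until the name is unused, and remember the alias for later rewiring.
class NameTable {
 public:
  std::string MakeUniqueName(const std::string &old_name) {
    int idx = 0;
    std::string new_name = old_name + "#" + std::to_string(idx);
    while (used_.count(new_name) > 0) {
      new_name = old_name + "#" + std::to_string(++idx);
    }
    used_.insert(new_name);
    alias_[old_name] = new_name;
    return new_name;
  }

  // Recover the original blob name, as in op.output[i].split('#')[0].
  static std::string OriginalName(const std::string &name) {
    return name.substr(0, name.find('#'));
  }

 private:
  std::set<std::string> used_;
  std::unordered_map<std::string, std::string> alias_;
};

int main() {
  NameTable table;
  // Two in-place layers (e.g. Conv followed by ReLU) both writing to "conv1":
  std::cout << table.MakeUniqueName("conv1") << "\n";       // conv1#0
  std::cout << table.MakeUniqueName("conv1") << "\n";       // conv1#1
  std::cout << NameTable::OriginalName("conv1#1") << "\n";  // conv1
  return 0;
}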