From bec34bb7760322cdace879ab1cfd3014226b4e71 Mon Sep 17 00:00:00 2001
From: liuqi
Date: Mon, 8 Jan 2018 20:18:45 +0800
Subject: [PATCH] Fix memory optimization bug and add some tuning parameters.

---
 mace/kernels/opencl/addn.cc                   | 1 +
 mace/kernels/opencl/batch_norm_opencl.cc      | 1 +
 mace/kernels/opencl/concat.cc                 | 1 +
 mace/kernels/opencl/conv_2d_opencl_1x1.cc     | 1 +
 mace/kernels/opencl/conv_2d_opencl_3x3.cc     | 1 +
 mace/kernels/opencl/conv_2d_opencl_general.cc | 1 +
 mace/kernels/opencl/pooling_opencl.cc         | 1 +
 mace/kernels/opencl/relu_opencl.cc            | 1 +
 mace/kernels/opencl/resize_bilinear_opencl.cc | 1 +
 mace/kernels/opencl/softmax_opencl.cc         | 1 +
 mace/python/tools/memory_optimizer.py         | 7 ++-----
 tools/gcn.config                              | 2 +-
 12 files changed, 13 insertions(+), 6 deletions(-)

diff --git a/mace/kernels/opencl/addn.cc b/mace/kernels/opencl/addn.cc
index 514f0d2a..42359f45 100644
--- a/mace/kernels/opencl/addn.cc
+++ b/mace/kernels/opencl/addn.cc
@@ -54,6 +54,7 @@ static void AddN(const std::vector &input_tensors,
     local_ws[0] = std::min(width_pixels, kwg_size);
     local_ws[1] = std::min(batch_height_pixels, kwg_size / local_ws[0]);
     return {{local_ws[0], local_ws[1]},
+            {local_ws[1], local_ws[0]},
             {kwg_size / 16, 16},
             {kwg_size / 32, 32},
             {kwg_size / 64, 64},
diff --git a/mace/kernels/opencl/batch_norm_opencl.cc b/mace/kernels/opencl/batch_norm_opencl.cc
index c3dc8445..513d7366 100644
--- a/mace/kernels/opencl/batch_norm_opencl.cc
+++ b/mace/kernels/opencl/batch_norm_opencl.cc
@@ -67,6 +67,7 @@ void BatchNormFunctor::operator()(
     local_ws[2] = std::min(height * batch, kwg_size / (local_ws[0] * local_ws[1]));
     return {{8, 128, 1}, //SNPE size
             {local_ws[0], local_ws[1], local_ws[2]},
+            {local_ws[2], local_ws[1], local_ws[0]},
             {kwg_size / 16, 4, 4},
             {kwg_size / 32, 4, 8},
             {kwg_size / 32, 8, 4},
diff --git a/mace/kernels/opencl/concat.cc b/mace/kernels/opencl/concat.cc
index bea536b6..921b34ce 100644
--- a/mace/kernels/opencl/concat.cc
+++ b/mace/kernels/opencl/concat.cc
@@ -57,6 +57,7 @@ static void Concat2(const Tensor *input0,
     local_ws[2] = std::min(height * batch, kwg_size / (local_ws[0] * local_ws[1]));
     return {{4, 15, 8}, //SNPE size
             {local_ws[0], local_ws[1], local_ws[2]},
+            {local_ws[2], local_ws[1], local_ws[0]},
             {kwg_size / 16, 4, 4},
             {kwg_size / 32, 4, 8},
             {kwg_size / 32, 8, 4},
diff --git a/mace/kernels/opencl/conv_2d_opencl_1x1.cc b/mace/kernels/opencl/conv_2d_opencl_1x1.cc
index d56926a8..49eea13d 100644
--- a/mace/kernels/opencl/conv_2d_opencl_1x1.cc
+++ b/mace/kernels/opencl/conv_2d_opencl_1x1.cc
@@ -74,6 +74,7 @@ void Conv1x1(const Tensor *input,
     local_ws[2] = std::min(height * batch, kwg_size / (local_ws[0] * local_ws[1]));
     return {{4, 15, 8}, //SNPE size
             {local_ws[0], local_ws[1], local_ws[2]},
+            {local_ws[2], local_ws[1], local_ws[0]},
             {kwg_size/16, 4, 4},
             {kwg_size/32, 4, 8},
             {kwg_size/32, 8, 4},
diff --git a/mace/kernels/opencl/conv_2d_opencl_3x3.cc b/mace/kernels/opencl/conv_2d_opencl_3x3.cc
index f48c7fc3..d108dea1 100644
--- a/mace/kernels/opencl/conv_2d_opencl_3x3.cc
+++ b/mace/kernels/opencl/conv_2d_opencl_3x3.cc
@@ -68,6 +68,7 @@ static void Conv2d3x3S12(const Tensor *input, const Tensor *filter,
     local_ws[2] = std::min(height * batch, kwg_size / (local_ws[0] * local_ws[1]));
     return {{4, 15, 8}, //SNPE size
             {local_ws[0], local_ws[1], local_ws[2]},
+            {local_ws[2], local_ws[1], local_ws[0]},
             {kwg_size / 16, 4, 4},
             {kwg_size / 32, 4, 8},
             {kwg_size / 32, 8, 4},
diff --git a/mace/kernels/opencl/conv_2d_opencl_general.cc b/mace/kernels/opencl/conv_2d_opencl_general.cc
index 2bd897f0..89026e83 100644
--- a/mace/kernels/opencl/conv_2d_opencl_general.cc
+++ b/mace/kernels/opencl/conv_2d_opencl_general.cc
@@ -70,6 +70,7 @@ void Conv2dOpencl(const Tensor *input, const Tensor *filter,
     local_ws[2] = std::min(height * batch, kwg_size / (local_ws[0] * local_ws[1]));
     return {{4, 15, 8}, //SNPE size
             {local_ws[0], local_ws[1], local_ws[2]},
+            {local_ws[2], local_ws[1], local_ws[0]},
             {kwg_size / 16, 4, 4},
             {kwg_size / 32, 4, 8},
             {kwg_size / 32, 8, 4},
diff --git a/mace/kernels/opencl/pooling_opencl.cc b/mace/kernels/opencl/pooling_opencl.cc
index 991dcac3..6835af69 100644
--- a/mace/kernels/opencl/pooling_opencl.cc
+++ b/mace/kernels/opencl/pooling_opencl.cc
@@ -69,6 +69,7 @@ static void Pooling(const Tensor *input,
     local_ws[2] = std::min(out_height * batch, kwg_size / (local_ws[0] * local_ws[1]));
     return {{4, 15, 8}, //SNPE size
             {local_ws[0], local_ws[1], local_ws[2]},
+            {local_ws[2], local_ws[1], local_ws[0]},
             {kwg_size / 16, 4, 4},
             {kwg_size / 32, 4, 8},
             {kwg_size / 32, 8, 4},
diff --git a/mace/kernels/opencl/relu_opencl.cc b/mace/kernels/opencl/relu_opencl.cc
index 7561b1fa..831197f1 100644
--- a/mace/kernels/opencl/relu_opencl.cc
+++ b/mace/kernels/opencl/relu_opencl.cc
@@ -57,6 +57,7 @@ void ReluFunctor::operator()(const Tensor *input,
     local_ws[2] = std::min(height * batch, kwg_size / (local_ws[0] * local_ws[1]));
     return {{4, 15, 8}, //SNPE size
             {local_ws[0], local_ws[1], local_ws[2]},
+            {local_ws[2], local_ws[1], local_ws[0]},
             {kwg_size / 16, 4, 4},
             {kwg_size / 32, 4, 8},
             {kwg_size / 32, 8, 4},
diff --git a/mace/kernels/opencl/resize_bilinear_opencl.cc b/mace/kernels/opencl/resize_bilinear_opencl.cc
index 588d83c6..7d3af223 100644
--- a/mace/kernels/opencl/resize_bilinear_opencl.cc
+++ b/mace/kernels/opencl/resize_bilinear_opencl.cc
@@ -66,6 +66,7 @@ void ResizeBilinearFunctor::operator()(
     local_ws[2] = std::min(out_height * batch, kwg_size / (local_ws[0] * local_ws[1]));
     return {{4, 15, 8}, //SNPE size
             {local_ws[0], local_ws[1], local_ws[2]},
+            {local_ws[2], local_ws[1], local_ws[0]},
             {kwg_size / 16, 4, 4},
             {kwg_size / 32, 4, 8},
             {kwg_size / 32, 8, 4},
diff --git a/mace/kernels/opencl/softmax_opencl.cc b/mace/kernels/opencl/softmax_opencl.cc
index 147e53d5..407de210 100644
--- a/mace/kernels/opencl/softmax_opencl.cc
+++ b/mace/kernels/opencl/softmax_opencl.cc
@@ -48,6 +48,7 @@ void SoftmaxFunctor::operator()(const Tensor *logits,
     local_ws[2] = std::min(height * batch, kwg_size / (local_ws[0] * local_ws[1]));
     return {{4, 15, 8}, //SNPE size
             {local_ws[0], local_ws[1], local_ws[2]},
+            {local_ws[2], local_ws[1], local_ws[0]},
             {kwg_size / 16, 4, 4},
             {kwg_size / 32, 4, 8},
             {kwg_size / 32, 8, 4},
diff --git a/mace/python/tools/memory_optimizer.py b/mace/python/tools/memory_optimizer.py
index ac507145..109ec4c3 100644
--- a/mace/python/tools/memory_optimizer.py
+++ b/mace/python/tools/memory_optimizer.py
@@ -23,15 +23,12 @@ class MemoryOptimizer(object):
     for op in net_def.op:
       if self.is_buffer_image_op(op):
         continue
-      tensor_name = self._op_to_tensor(op)
+      tensor_name = op.output[0]
       if tensor_name in consumers:
         self.ref_counter[tensor_name] = len(consumers[tensor_name])
       else:
         self.ref_counter[tensor_name] = 0
 
-  def _op_to_tensor(self, op):
-    return op.name + ':0'
-
   def is_buffer_image_op(self, op):
     return op.type == 'BufferToImage' or op.type == 'ImageToBuffer'
 
@@ -51,7 +48,7 @@ class MemoryOptimizer(object):
         print('WARNING: There is no output shape information to do memory optimization.')
         return
       op.mem_id = mem_id
-      self.op_mem[self._op_to_tensor(op)] = mem_id
+      self.op_mem[op.output[0]] = mem_id
       if mem_id not in self.mem_block:
         self.mem_block[mem_id] = [0, 0]
       mem_size = self.mem_block[mem_id]
diff --git a/tools/gcn.config b/tools/gcn.config
index 304d7a29..85ea36b3 100644
--- a/tools/gcn.config
+++ b/tools/gcn.config
@@ -1,2 +1,2 @@
 TF_INPUT_NODE=input
-TF_OUTPUT_NODE=GCN/br_result_2/fcn_br
\ No newline at end of file
+TF_OUTPUT_NODE=softmax/Reshape_1
\ No newline at end of file
--
GitLab
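
Note on the patch above (illustrative sketch, not part of the patch itself): the memory-optimizer fix replaces the name-derived key op.name + ':0' with the op's real first output, op.output[0], so reference counting and memory-block reuse key on the actual tensor name even when an output is not literally named '<op name>:0'. The OpenCL kernel edits simply append the transposed local_ws as one extra candidate local work-group size for the tuner to try. Below is a minimal Python sketch of the corrected keying logic, assuming protobuf-style ops with .type and .output fields as in MACE's NetDef; the function names are illustrative, not the real MemoryOptimizer API.

# Sketch of the tensor-keying logic fixed in memory_optimizer.py.
# Assumes each op exposes .type and .output like MACE's NetDef proto;
# helper names here are illustrative, not the actual class methods.

def is_buffer_image_op(op):
    return op.type in ('BufferToImage', 'ImageToBuffer')

def build_ref_counters(net_def, consumers):
    """Count consumers per tensor, keyed by the op's real output name."""
    ref_counter = {}
    for op in net_def.op:
        if is_buffer_image_op(op):
            continue
        # The buggy version derived the key from the op name:
        #   tensor_name = op.name + ':0'
        # which breaks whenever the output tensor is not named '<op name>:0'.
        tensor_name = op.output[0]
        ref_counter[tensor_name] = len(consumers.get(tensor_name, []))
    return ref_counter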