From 313b4b5a8eaacef353d71980e625cb26bd5d1445 Mon Sep 17 00:00:00 2001
From: chonwhite
Date: Tue, 14 Aug 2018 16:06:10 +0800
Subject: [PATCH] tensor quant

---
 CMakeLists.txt                                |  6 +--
 src/fpga/fpga_quantilization.cpp              | 21 +++++-----
 src/fpga/fpga_quantilization.h                |  3 +-
 src/io/executor.cpp                           |  6 ++-
 src/memory/t_malloc.cpp                       |  2 +-
 src/operators/feed_op.h                       |  2 +-
 .../kernel/fpga/conv_add_bn_kernel.cpp        |  5 +--
 src/operators/kernel/fpga/conv_add_kernel.cpp | 42 +++++++++++++++++++
 src/operators/kernel/fpga/conv_kernel.cpp     |  2 +-
 src/operators/op_param.h                      |  2 +-
 test/CMakeLists.txt                           | 14 ++++---
 test/fpga/test_tensor_quant.cpp               | 33 ++++++++++-----
 tools/op.cmake                                |  2 +
 13 files changed, 100 insertions(+), 40 deletions(-)
 create mode 100644 src/operators/kernel/fpga/conv_add_kernel.cpp

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 4ccf73763c..635b7ef6f5 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -6,9 +6,9 @@ option(USE_OPENMP "openmp support" OFF)
 option(USE_EXCEPTION "use std exception" ON)
 option(LOG_PROFILE "log profile" ON)
 # select the platform to build
-option(CPU "armv7 with neon" ON)
+option(CPU "armv7 with neon" OFF)
 option(MALI_GPU "mali gpu" OFF)
-option(FPGA "fpga" OFF)
+option(FPGA "fpga" ON)
 
 file(GLOB_RECURSE PADDLE_MOBILE_CC src/*.cc src/*.cpp src/*.c src/*.mm)
 file(GLOB_RECURSE PADDLE_MOBILE_H src/*.h)
@@ -139,7 +139,7 @@ set(CMAKE_LIBRARY_OUTPUT_DIRECTORY build)
 set(CMAKE_RUNTIME_OUTPUT_DIRECTORY build)
 
 # NET default
-set(NET "default" CACHE STRING "select net type")
+set(NET "FPGAnets" CACHE STRING "select net type")
 set_property(CACHE NET PROPERTY STRINGS "default" "googlenet" "mobilenet" "yolo" "squeezenet" "FPGAnets")
 
 include("${CMAKE_CURRENT_LIST_DIR}/tools/op.cmake")
diff --git a/src/fpga/fpga_quantilization.cpp b/src/fpga/fpga_quantilization.cpp
index dee3d3abc1..bba462538b 100644
--- a/src/fpga/fpga_quantilization.cpp
+++ b/src/fpga/fpga_quantilization.cpp
@@ -46,8 +46,12 @@ static Dtype find_max(Dtype* data, int num) {
   return max;
 }
 
+
 // template
-framework::Tensor* quantify_filter(framework::Tensor* filter) {
+void quantify_filter(framework::Tensor* filter) {
+
+  DLOG << "quantilize_filter........";
+
   float scale = 0;
   float fix_range = static_cast<float>((1 << (8 - 1)) - 1);
@@ -62,25 +66,20 @@ framework::Tensor* quantify_filter(framework::Tensor* filter) {
 
   // 32bit filter -> 8bit filter;
   if (filter->type() == typeid(float)) {
     float* float_data = filter->data<float>();
-    float max = find_max<float>(float_data, filter->numel());
+    float max = find_max(float_data, filter->numel());
 
     scale = (max / fix_range);
 
-    framework::Tensor* filter = filter;
-    framework::Tensor* quant_filter = new framework::Tensor();
-
-    int_data = quant_filter->mutable_data<int8_t>();
     for (int i = 0; i < filter->numel(); ++i) {
-      tmp_data[i] = (int8_t)float_data[i] * scale;
+      tmp_data[i] = (int8_t)(float_data[i] / scale);
     }
-    filter = quant_filter;
+    int_data = filter->mutable_data<int8_t>();
   } else {
-    int8_t max = find_max<int8_t>(filter->data<int8_t>(), filter->numel());
+    int8_t max = find_max(filter->data<int8_t>(), filter->numel());
     scale = (max / fix_range);
-    int_data = filter->data<int8_t>();
 
     for (int i = 0; i < filter->numel(); ++i) {
-      tmp_data[i] = int_data[i];
+      tmp_data[i] = filter->data<int8_t>()[i];
     }
     int_data = filter->mutable_data<int8_t>();
   }
@@ -88,7 +87,7 @@ framework::Tensor* quantify_filter(framework::Tensor* filter) {
   chw_to_hwc(tmp_data, int_data, batch_size, channel, height, width);
   delete[] tmp_data;
   *(filter->fpga_args().scale_pointer()) = scale;
-  return filter;
+
 }
 
 }  // namespace fpga
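For reference, the conversion quantify_filter performs above is a symmetric max-abs mapping: scale = max / 127, and each float weight is divided by that scale before the cast to int8. A minimal standalone sketch of the same scheme, using plain vectors instead of framework::Tensor (quantize_weights and its signature are illustrative, not part of the paddle-mobile API):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Symmetric max-abs quantization: map [-max, max] onto [-127, 127].
    // The scale definition matches the patch: scale = max / ((1 << 7) - 1).
    std::vector<int8_t> quantize_weights(const std::vector<float>& w,
                                         float* scale_out) {
      float max = 0.0f;
      for (float v : w) max = std::max(max, std::fabs(v));
      const float fix_range = 127.0f;
      const float scale = (max > 0.0f) ? max / fix_range : 1.0f;  // guard max == 0
      std::vector<int8_t> q(w.size());
      for (std::size_t i = 0; i < w.size(); ++i) {
        // Divide by the scale (not multiply): w[i] / scale lands in [-127, 127].
        q[i] = static_cast<int8_t>(std::round(w[i] / scale));
      }
      *scale_out = scale;  // consumers dequantize with x ~= q * scale
      return q;
    }

Consumers recover approximate float weights as x ~= q * scale, which is why the patch stores the scale next to the tensor via fpga_args().scale_pointer().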
diff --git a/src/fpga/fpga_quantilization.h b/src/fpga/fpga_quantilization.h
index 56e14f89ac..4f1f6ad402 100644
--- a/src/fpga/fpga_quantilization.h
+++ b/src/fpga/fpga_quantilization.h
@@ -25,6 +25,7 @@ static void chw_to_hwc(Dtype* data_in, Dtype* data_out, int num, int channel,
                        int height, int width);
 
 // template
-framework::Tensor* quantify_filter(framework::Tensor* filter);
+void quantify_filter(framework::Tensor* filter);
+
 }  // namespace fpga
 }  // namespace paddle_mobile
diff --git a/src/io/executor.cpp b/src/io/executor.cpp
index d6434b64aa..4d73a4c085 100644
--- a/src/io/executor.cpp
+++ b/src/io/executor.cpp
@@ -77,6 +77,7 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
       auto op_base = framework::OpRegistry<Dtype>::CreateOp(
           op->Type(), op->GetInputs(), op->GetOutputs(), op->GetAttrMap(),
           program_.scope);
+      DLOG << "InferShape: ";
       op_base->InferShape();
       ops_of_block_[*block_desc.get()].push_back(op_base);
 #ifdef PADDLE_EXECUTOR_MULTITHREAD
@@ -84,16 +85,19 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
 #endif
     }
   }
+  DLOG << "InitMemory: ";
+
   if (program_.combined) {
     InitCombineMemory();
   } else {
     InitMemory();
   }
-
+  DLOG << "InitMemory end ";
   std::shared_ptr<framework::BlockDesc> to_predict_block =
       to_predict_program_->Block(0);
   auto &ops = ops_of_block_[*to_predict_block.get()];
   for (const auto &op : ops) {
+    DLOG << "Init op " << op->Type();
     op->Init();
   }
 }
diff --git a/src/memory/t_malloc.cpp b/src/memory/t_malloc.cpp
index 42b8c45518..8902543347 100644
--- a/src/memory/t_malloc.cpp
+++ b/src/memory/t_malloc.cpp
@@ -26,7 +26,7 @@ namespace paddle_mobile {
 namespace memory {
 const int MALLOC_ALIGN = 64;
 
-#ifdef PADDLE_MOBILE_FPGA
+#ifdef PADDLE_MOBILE_FPGA__VV
 namespace fpga = paddle_mobile::fpga;
 
 void Copy(void *dst, const void *src, size_t num) {
diff --git a/src/operators/feed_op.h b/src/operators/feed_op.h
index 4766d56d9a..a79c2f95fc 100644
--- a/src/operators/feed_op.h
+++ b/src/operators/feed_op.h
@@ -37,7 +37,7 @@ class FeedOp : public framework::OperatorBase<DeviceType> {
     param_.Out()->Resize(out_dims);
   }
 
-#ifdef PADDLE_MOBILE_FPGA
+#ifdef PADDLE_MOBILE_FPGA__VV
   void RunImpl() const { fpga::PerformBypass(param_.FpgaArgs()); }
 
   void Init() {
     const Tensor *input = param_.InputX();
diff --git a/src/operators/kernel/fpga/conv_add_bn_kernel.cpp b/src/operators/kernel/fpga/conv_add_bn_kernel.cpp
index 095ae4a6d0..91553a8aa3 100644
--- a/src/operators/kernel/fpga/conv_add_bn_kernel.cpp
+++ b/src/operators/kernel/fpga/conv_add_bn_kernel.cpp
@@ -60,10 +60,7 @@ bool ConvAddBNKernel<FPGA, float>::Init(FusionConvAddBNParam *param) {
   param->SetNewScale(new_scale);
   param->SetNewBias(new_bias);
 
-  Tensor *quant_filter = fpga::quantify_filter(filter);
-
-  // delete original filter?
-  filter = quant_filter;
+  fpga::quantify_filter(filter);
 
   auto filter_ptr = filter->data<int8_t>();
   fpga::ConvArgs convArgs;
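The chw_to_hwc helper declared in fpga_quantilization.h, and called at the end of quantify_filter, reorders the filter so the channel index becomes innermost (NCHW to NHWC), the layout the FPGA path consumes. A sketch of the index arithmetic that declaration implies; the repo's actual implementation may differ in details:

    // NCHW -> NHWC reorder for a [num, channel, height, width] blob.
    // One plausible implementation matching the declaration in
    // fpga_quantilization.h; data_in and data_out must not alias.
    template <typename Dtype>
    void chw_to_hwc(Dtype* data_in, Dtype* data_out, int num, int channel,
                    int height, int width) {
      for (int n = 0; n < num; ++n) {
        for (int c = 0; c < channel; ++c) {
          for (int h = 0; h < height; ++h) {
            for (int w = 0; w < width; ++w) {
              int src = ((n * channel + c) * height + h) * width + w;  // NCHW index
              int dst = ((n * height + h) * width + w) * channel + c;  // NHWC index
              data_out[dst] = data_in[src];
            }
          }
        }
      }
    }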
diff --git a/src/operators/kernel/fpga/conv_add_kernel.cpp b/src/operators/kernel/fpga/conv_add_kernel.cpp
new file mode 100644
index 0000000000..1f2697a9f6
--- /dev/null
+++ b/src/operators/kernel/fpga/conv_add_kernel.cpp
@@ -0,0 +1,42 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#ifdef FUSION_CONVADD_OP
+
+#include "operators/kernel/conv_add_kernel.h"
+#include "../central-arm-func/conv_add_arm_func.h"
+#include "fpga/fpga_quantilization.h"
+
+namespace paddle_mobile {
+namespace operators {
+
+template <>
+bool ConvAddKernel<FPGA, float>::Init(FusionConvAddParam *param) {
+  DLOG << ">>>>>>>>>>>>>>>>>>>> ConvKernel <<<<<<<<<<<<<<<<<<<<<<<";
+  Tensor *filter = param->Filter();
+  fpga::quantify_filter(filter);
+  return true;
+}
+
+template <>
+void ConvAddKernel<FPGA, float>::Compute(const FusionConvAddParam &param) const {
+  ConvAddCompute<float>(param);
+}
+
+template class ConvAddKernel<FPGA, float>;
+
+}  // namespace operators
+}  // namespace paddle_mobile
+
+#endif
diff --git a/src/operators/kernel/fpga/conv_kernel.cpp b/src/operators/kernel/fpga/conv_kernel.cpp
index 91d0f393fc..fc13bbda64 100644
--- a/src/operators/kernel/fpga/conv_kernel.cpp
+++ b/src/operators/kernel/fpga/conv_kernel.cpp
@@ -27,7 +27,7 @@ bool ConvKernel<FPGA, float>::Init(ConvParam *param) {
 
 template <>
 void ConvKernel<FPGA, float>::Compute(const ConvParam &param) const {
-  // ConvCompute(param);
+  ConvCompute<float>(param);
 }
 
 template class ConvKernel<FPGA, float>;
diff --git a/src/operators/op_param.h b/src/operators/op_param.h
index 4d1d5af29b..a139714b2c 100644
--- a/src/operators/op_param.h
+++ b/src/operators/op_param.h
@@ -210,7 +210,7 @@ class ConvParam : OpParam {
 
   const Tensor *Input() const { return input_; }
 
-  const Tensor *Filter() const { return filter_; }
+  Tensor *Filter() const { return filter_; }
 
   Tensor *Output() const { return output_; }
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 468cbd4ed6..1033cfa180 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -27,6 +27,11 @@ elseif("resnet" IN_LIST NET)
     ADD_EXECUTABLE(test-resnet net/test_resnet.cpp test_helper.h test_include.h executor_for_test.h)
     target_link_libraries(test-resnet paddle-mobile)
 elseif("FPGAnets" IN_LIST NET)
+    # ADD_EXECUTABLE(test-resnet net/test_resnet.cpp test_helper.h test_include.h executor_for_test.h)
+    # target_link_libraries(test-resnet paddle-mobile)
+
+    ADD_EXECUTABLE(test-tensor-quant fpga/test_tensor_quant.cpp test_helper.h test_include.h executor_for_test.h)
+    target_link_libraries(test-tensor-quant paddle-mobile)
+
 else ()
 
     # gen test
@@ -173,8 +178,7 @@ else ()
 
 endif()
 
-if(FPGA)
-    ADD_EXECUTABLE(test-tensor-quant fpga/test_tensor_quant.cpp test_helper.h test_include.h executor_for_test.h)
-    target_link_libraries(test-tensor-quant paddle-mobile)
-
-endif()
+# if(FPGA)
+#     ADD_EXECUTABLE(test-tensor-quant fpga/test_tensor_quant.cpp test_helper.h test_include.h executor_for_test.h)
+#     target_link_libraries(test-tensor-quant paddle-mobile)
+# endif()
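The rewritten test that follows uses the standard warm-up-then-measure benchmarking pattern: one untimed Predict to absorb one-time allocation and caching, then a timed loop of ten runs. The same pattern, distilled and independent of the paddle-mobile API (run_once is a stand-in for the Predict call; the helper name is illustrative):

    #include <chrono>

    // One untimed warm-up, then the average wall-clock time over `runs` calls.
    template <typename F>
    double benchmark_ms(F run_once, int runs = 10) {
      run_once();  // warm-up: the first call often pays setup costs
      auto start = std::chrono::steady_clock::now();
      for (int i = 0; i < runs; ++i) run_once();
      auto end = std::chrono::steady_clock::now();
      return std::chrono::duration<double, std::milli>(end - start).count() / runs;
    }

Note that the test below divides by nothing after the loop, so the "predict cost" it prints is the total for ten runs, not the per-run average.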
diff --git a/test/fpga/test_tensor_quant.cpp b/test/fpga/test_tensor_quant.cpp
index 3835c395a4..6cfc27e91c 100644
--- a/test/fpga/test_tensor_quant.cpp
+++ b/test/fpga/test_tensor_quant.cpp
@@ -12,23 +12,34 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <fstream>
+#include <iostream>
 #include "../test_helper.h"
 #include "../test_include.h"
 
 int main() {
-  paddle_mobile::PaddleMobile<paddle_mobile::FPGA> paddle_mobile;
-  bool optimize = false;
-  if (paddle_mobile.Load(g_googlenet, optimize)) {
-    auto time1 = time();
-    DLOG << "load cost: " << time_diff(time1, time1) << "ms";
-    std::vector<float> input;
-    std::vector<int64_t> dims{1, 3, 224, 224};
-    GetInput<float>(g_test_image_1x3x224x224, &input, dims);
+  paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile;
+  paddle_mobile.SetThreadNum(4);
+  auto time1 = time();
+  if (paddle_mobile.Load(g_resnet, true)) {
+    auto time2 = time();
+    std::cout << "load cost :" << time_diff(time1, time2) << "ms" << std::endl;
+    std::vector<int64_t> dims{1, 3, 32, 32};
+    Tensor input_tensor;
+    SetupTensor<float>(&input_tensor, {1, 3, 32, 32}, static_cast<float>(0),
+                       static_cast<float>(1));
+
+    std::vector<float> input(input_tensor.data<float>(),
+                             input_tensor.data<float>() + input_tensor.numel());
+    // warm up once
+    paddle_mobile.Predict(input, dims);
     auto time3 = time();
-    auto vec_result = paddle_mobile.Predict(input, dims);
+    for (int i = 0; i < 10; ++i) {
+      paddle_mobile.Predict(input, dims);
+    }
     auto time4 = time();
-    DLOG << "predict cost :" << time_diff(time3, time4) << "ms";
+    std::cout << "predict cost :" << time_diff(time3, time4) << "ms"
+              << std::endl;
   }
+  return 0;
 }
diff --git a/tools/op.cmake b/tools/op.cmake
index 0eab672670..374a8d91d4 100644
--- a/tools/op.cmake
+++ b/tools/op.cmake
@@ -82,6 +82,8 @@ if ("FPGAnets" IN_LIST NET)
     set(CONCAT_OP ON)
     set(SOFTMAX_OP ON)
     set(DROPOUT_OP ON)
+    set(FUSION_CONVADD_OP ON)
+    # set(CONV_OP ON)
 
     set(FOUND_MATCH ON)
 endif()
-- 
GitLab
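The op.cmake change is what activates the new kernel: setting FUSION_CONVADD_OP ON becomes a compile definition, so the #ifdef FUSION_CONVADD_OP guard in conv_add_kernel.cpp compiles in. On the consuming side, weights quantized by quantify_filter are recovered by multiplying back with the stored scale. A hedged sketch of that inverse mapping (dequantize is illustrative, not a paddle-mobile function; on the FPGA path the scale travels with the tensor via fpga_args().scale_pointer()):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Inverse of the max-abs mapping: x ~= q * scale.
    std::vector<float> dequantize(const std::vector<int8_t>& q, float scale) {
      std::vector<float> out(q.size());
      for (std::size_t i = 0; i < q.size(); ++i) {
        out[i] = static_cast<float>(q[i]) * scale;
      }
      return out;
    }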