Commit 6f9a39b2 authored by zhaojiaying01

Merge branch 'develop' of https://github.com/PaddlePaddle/paddle-mobile into develop

@@ -50,6 +50,7 @@ const char *G_OP_TYPE_FUSION_ELEMENTWISE_ADD_RELU =
     "fusion_elementwise_add_relu";
 const char *G_OP_TYPE_FUSION_FC_RELU = "fusion_fc_relu";
 const char *G_OP_TYPE_REGION = "region";
+const char *G_OP_TYPE_FUSION_CONV_BN = "fusion_conv_bn";
 std::unordered_map<
     std::string, std::pair<std::vector<std::string>, std::vector<std::string>>>
@@ -85,6 +86,7 @@ std::unordered_map<
         {G_OP_TYPE_FUSION_POOL_BN, {{"X"}, {"Y"}}},
         {G_OP_TYPE_FUSION_ELEMENTWISE_ADD_RELU, {{"X", "Y"}, {"Out"}}},
         {G_OP_TYPE_FUSION_FC_RELU, {{"X", "Y", "Z"}, {"Out"}}},
-        {G_OP_TYPE_REGION, {{"X"}, {"Out"}}}};
+        {G_OP_TYPE_REGION, {{"X"}, {"Out"}}},
+        {G_OP_TYPE_FUSION_CONV_BN, {{"Input"}, {"Y"}}}};
 }  // namespace paddle_mobile
@@ -113,6 +113,7 @@ extern const char *G_OP_TYPE_FUSION_POOL_BN;
 extern const char *G_OP_TYPE_FUSION_ELEMENTWISE_ADD_RELU;
 extern const char *G_OP_TYPE_FUSION_FC_RELU;
 extern const char *G_OP_TYPE_REGION;
+extern const char *G_OP_TYPE_FUSION_CONV_BN;
 extern std::unordered_map<
     std::string, std::pair<std::vector<std::string>, std::vector<std::string>>>
...
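The unordered_map registered above pairs each op type with its canonical input and output variable names; the new fusion_conv_bn entry reads "Input" and writes "Y". As a rough illustration of how such a registry is consumed (the map's variable name is not visible in these hunks, so the signature below is an assumption, not the paddle-mobile API):

#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Sketch: fetch the canonical input keys for an op type from a registry
// shaped like the one above. The registry's real name is assumed here.
using IOKeys = std::pair<std::vector<std::string>, std::vector<std::string>>;

std::vector<std::string> InputKeysOf(
    const std::unordered_map<std::string, IOKeys> &registry,
    const std::string &op_type) {
  auto it = registry.find(op_type);
  if (it == registry.end()) return {};
  return it->second.first;  // e.g. {"Input"} for fusion_conv_bn
}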
@@ -47,7 +47,9 @@ static Dtype find_max(Dtype* data, int num) {
 }
 // template <typename Dtype>
-framework::Tensor* quantify_filter(framework::Tensor* filter) {
+void quantify_filter(framework::Tensor* filter) {
+  DLOG << "quantilize_filter........";
   float scale = 0;
   float fix_range = static_cast<float>((1 << (8 - 1)) - 1);
@@ -62,25 +64,20 @@ framework::Tensor* quantify_filter(framework::Tensor* filter) {
   // 32bit filter -> 8bit filter;
   if (filter->type() == typeid(float)) {
     float* float_data = filter->data<float>();
-    float max = find_max(float_data, filter->numel());
+    float max = find_max<float>(float_data, filter->numel());
     scale = (max / fix_range);
-    framework::Tensor* filter = filter;
-    framework::Tensor* quant_filter = new framework::Tensor();
-    int_data = quant_filter->mutable_data<int8_t>();
     for (int i = 0; i < filter->numel(); ++i) {
       tmp_data[i] = (int8_t)float_data[i] * scale;
     }
-    filter = quant_filter;
+    int_data = filter->mutable_data<int8_t>();
   } else {
-    int8_t max = find_max(filter->data<int8_t>(), filter->numel());
+    int8_t max = find_max<int8_t>(filter->data<int8_t>(), filter->numel());
     scale = (max / fix_range);
-    int_data = filter->data<int8_t>();
     for (int i = 0; i < filter->numel(); ++i) {
-      tmp_data[i] = int_data[i];
+      tmp_data[i] = filter->data<int8_t>()[i];
     }
     int_data = filter->mutable_data<int8_t>();
   }
@@ -88,7 +85,6 @@ framework::Tensor* quantify_filter(framework::Tensor* filter) {
   chw_to_hwc<int8_t>(tmp_data, int_data, batch_size, channel, height, width);
   delete tmp_data;
   *(filter->fpga_args().scale_pointer()) = scale;
-  return filter;
 }
 }  // namespace fpga
...
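For context: quantify_filter implements symmetric 8-bit weight quantization — it finds the largest weight, derives scale = max / 127 (the (1 << (8 - 1)) - 1 fix_range above), converts each float weight to int8, then rearranges the layout from CHW to HWC. Note that in the committed loop, the cast in `(int8_t)float_data[i] * scale` binds before the multiplication; the conventional formulation divides by the scale instead, as in this minimal standalone sketch (quantize_int8 is a hypothetical helper, not part of paddle-mobile):

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

// Hypothetical helper: symmetric int8 quantization of a float buffer.
// scale = max(|x|) / 127, q[i] = round(x[i] / scale).
std::vector<int8_t> quantize_int8(const std::vector<float> &x,
                                  float *scale_out) {
  float max_abs = 0.f;
  for (float v : x) max_abs = std::max(max_abs, std::fabs(v));
  const float fix_range = 127.f;  // (1 << (8 - 1)) - 1, as in quantify_filter
  const float scale = max_abs > 0.f ? max_abs / fix_range : 1.f;
  std::vector<int8_t> q(x.size());
  for (size_t i = 0; i < x.size(); ++i) {
    q[i] = static_cast<int8_t>(std::round(x[i] / scale));
  }
  *scale_out = scale;  // kept so the kernel can rescale outputs later
  return q;
}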
@@ -25,6 +25,7 @@ static void chw_to_hwc(Dtype* data_in, Dtype* data_out, int num, int channel,
                        int height, int width);
 // template <typename Dtype>
-framework::Tensor* quantify_filter(framework::Tensor* filter);
+void quantify_filter(framework::Tensor* filter);
 }  // namespace fpga
 }  // namespace paddle_mobile
@@ -89,7 +89,6 @@ Executor<Dtype, P>::Executor(const framework::Program<Dtype> p, int batch_size,
   } else {
     InitMemory();
   }
   std::shared_ptr<framework::BlockDesc> to_predict_block =
       to_predict_program_->Block(0);
   auto &ops = ops_of_block_[*to_predict_block.get()];
...
@@ -26,7 +26,7 @@ namespace paddle_mobile {
 namespace memory {
 const int MALLOC_ALIGN = 64;
-#ifdef PADDLE_MOBILE_FPGA
+#ifdef PADDLE_MOBILE_FPGA__VV
 namespace fpga = paddle_mobile::fpga;
 void Copy(void *dst, const void *src, size_t num) {
...
@@ -41,7 +41,7 @@ class FeedOp : public framework::OperatorBase<DeviceType> {
   void RunImpl() const { fpga::PerformBypass(param_.FpgaArgs()); }
   void Init() {
     const Tensor *input = param_.InputX();
-    auto input_ptr = input->data<float>();
+    auto input_ptr = input->mutable_data<float>();
     Tensor *output = param_.Out();
     auto output_ptr = output->mutable_data<half>();
     fpga::BypassArgs args;
...
@@ -28,7 +28,7 @@ using std::vector;
 class FusioneElementwiseAddReluMatcher : public framework::FusionOpMatcher {
  public:
   FusioneElementwiseAddReluMatcher() {
-    node_ = framework::Node(G_OP_TYPE_FUSION_ELEMENTWISE_ADD_RELU);
+    node_ = framework::Node(G_OP_TYPE_ELEMENTWISE_ADD);
     node_ > std::make_shared<framework::Node>(G_OP_TYPE_RELU);
   }
...
@@ -60,10 +60,7 @@ bool ConvAddBNKernel<FPGA, float>::Init(FusionConvAddBNParam *param) {
   param->SetNewScale(new_scale);
   param->SetNewBias(new_bias);
-  Tensor *quant_filter = fpga::quantify_filter(filter);
-  // delete original filter?
-  filter = quant_filter;
+  fpga::quantify_filter(filter);
   auto filter_ptr = filter->data<float>();
   fpga::ConvArgs convArgs;
...
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#ifdef CONV_OP
#include "operators/kernel/conv_kernel.h"
#include "operators/kernel/central-arm-func/conv_arm_func.h"
namespace paddle_mobile {
namespace operators {
template <>
bool ConvKernel<FPGA, float>::Init(ConvParam *param) {
return true;
}
template <>
void ConvKernel<FPGA, float>::Compute(const ConvParam &param) const {
// ConvCompute<float>(param);
}
template class ConvKernel<FPGA, float>;
} // namespace operators
} // namespace paddle_mobile
#endif
@@ -210,7 +210,7 @@ class ConvParam : OpParam {
   const Tensor *Input() const { return input_; }
-  const Tensor *Filter() const { return filter_; }
+  Tensor *Filter() const { return filter_; }
   Tensor *Output() const { return output_; }
...
@@ -27,6 +27,11 @@ elseif("resnet" IN_LIST NET)
     ADD_EXECUTABLE(test-resnet net/test_resnet.cpp test_helper.h test_include.h executor_for_test.h)
     target_link_libraries(test-resnet paddle-mobile)
 elseif("FPGAnets" IN_LIST NET)
+    # ADD_EXECUTABLE(test-resnet net/test_resnet.cpp test_helper.h test_include.h executor_for_test.h)
+    # target_link_libraries(test-resnet paddle-mobile)
+    ADD_EXECUTABLE(test-tensor-quant fpga/test_tensor_quant.cpp test_helper.h test_include.h executor_for_test.h)
+    target_link_libraries(test-tensor-quant paddle-mobile)
 else ()
     # gen test
@@ -173,8 +178,7 @@ else ()
 endif()
-if(FPGA)
-    ADD_EXECUTABLE(test-tensor-quant fpga/test_tensor_quant.cpp test_helper.h test_include.h executor_for_test.h)
-    target_link_libraries(test-tensor-quant paddle-mobile)
-endif()
+# if(FPGA)
+#     ADD_EXECUTABLE(test-tensor-quant fpga/test_tensor_quant.cpp test_helper.h test_include.h executor_for_test.h)
+#     target_link_libraries(test-tensor-quant paddle-mobile)
+# endif()
@@ -12,23 +12,34 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
-#include <fstream>
+#include <iostream>
 #include "../test_helper.h"
 #include "../test_include.h"
 int main() {
-  paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile;
-  bool optimize = false;
-  if (paddle_mobile.Load(g_googlenet, optimize)) {
-    auto time1 = time();
-    DLOG << "load cost: " << time_diff(time1, time1) << "ms";
-    std::vector<float> input;
-    std::vector<int64_t> dims{1, 3, 224, 224};
-    GetInput<float>(g_test_image_1x3x224x224, &input, dims);
+  paddle_mobile::PaddleMobile<paddle_mobile::FPGA> paddle_mobile;
+  paddle_mobile.SetThreadNum(4);
+  auto time1 = time();
+  if (paddle_mobile.Load(g_resnet, true)) {
+    auto time2 = time();
+    std::cout << "load cost :" << time_diff(time1, time1) << "ms" << std::endl;
+    std::vector<int64_t> dims{1, 3, 32, 32};
+    Tensor input_tensor;
+    SetupTensor<float>(&input_tensor, {1, 3, 32, 32}, static_cast<float>(0),
+                       static_cast<float>(1));
+    std::vector<float> input(input_tensor.data<float>(),
+                             input_tensor.data<float>() + input_tensor.numel());
+    // warm up once
+    paddle_mobile.Predict(input, dims);
     auto time3 = time();
-    auto vec_result = paddle_mobile.Predict(input, dims);
+    for (int i = 0; i < 10; ++i) {
+      paddle_mobile.Predict(input, dims);
+    }
     auto time4 = time();
-    DLOG << "predict cost :" << time_diff(time3, time4) << "ms";
+    std::cout << "predict cost :" << time_diff(time3, time4) << "ms"
+              << std::endl;
   }
   return 0;
 }
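The rewritten test follows the usual warm-up-then-measure pattern: one untimed Predict call absorbs one-off initialization, then ten timed iterations are averaged. (Note that `time_diff(time1, time1)` above always prints 0 ms; `time_diff(time1, time2)` was presumably intended.) A generic sketch of the pattern with std::chrono, where `predict` stands in for any inference call rather than a paddle-mobile API:

#include <chrono>
#include <iostream>

// Generic warm-up-then-measure helper; returns the mean latency in ms.
template <typename F>
double BenchmarkMs(F &&predict, int iters = 10) {
  predict();  // warm-up: pays lazy allocation / cache-fill costs once
  auto start = std::chrono::steady_clock::now();
  for (int i = 0; i < iters; ++i) predict();
  auto end = std::chrono::steady_clock::now();
  return std::chrono::duration<double, std::milli>(end - start).count() / iters;
}

// Hypothetical usage:
//   double ms = BenchmarkMs([&] { paddle_mobile.Predict(input, dims); });
//   std::cout << "predict cost: " << ms << " ms" << std::endl;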
@@ -82,6 +82,7 @@ if ("FPGAnets" IN_LIST NET)
     set(CONCAT_OP ON)
     set(SOFTMAX_OP ON)
     set(DROPOUT_OP ON)
+    # set(CONV_OP ON)
     set(FOUND_MATCH ON)
 endif()
...