Commit ca3f3f00 authored by liuruilong

add conv add bn relu op test, modify executor for test

Parent 90edd754
@@ -17,39 +17,39 @@ limitations under the License. */
 namespace paddle_mobile {
-const std::string G_OP_TYPE_CONV = "conv2d";
+const char *G_OP_TYPE_CONV = "conv2d";
-const std::string G_OP_TYPE_BATCHNORM = "batch_norm";
+const char *G_OP_TYPE_BATCHNORM = "batch_norm";
-const std::string G_OP_TYPE_BOX_CODER = "box_coder";
+const char *G_OP_TYPE_BOX_CODER = "box_coder";
-const std::string G_OP_TYPE_CONCAT = "concat";
+const char *G_OP_TYPE_CONCAT = "concat";
-const std::string G_OP_TYPE_ELEMENTWISE_ADD = "elementwise_add";
+const char *G_OP_TYPE_ELEMENTWISE_ADD = "elementwise_add";
-const std::string G_OP_TYPE_FUSION_CONV_ADD_RELU = "fusion_conv_add_relu";
+const char *G_OP_TYPE_FUSION_CONV_ADD_RELU = "fusion_conv_add_relu";
-const std::string G_OP_TYPE_FUSION_CONV_ADD_BN_RELU = "fusion_conv_add_bn_relu";
+const char *G_OP_TYPE_FUSION_CONV_ADD_BN_RELU = "fusion_conv_add_bn_relu";
-const std::string G_OP_TYPE_FUSION_DWCONV_BN_RELU = "fusion_dwconv_bn_relu";
+const char *G_OP_TYPE_FUSION_DWCONV_BN_RELU = "fusion_dwconv_bn_relu";
-const std::string G_OP_TYPE_FUSION_CONV_BN_RELU = "fusion_conv_bn_relu";
+const char *G_OP_TYPE_FUSION_CONV_BN_RELU = "fusion_conv_bn_relu";
-const std::string G_OP_TYPE_FC = "fusion_fc";
+const char *G_OP_TYPE_FC = "fusion_fc";
-const std::string G_OP_TYPE_FUSION_CONV_ADD = "fusion_conv_add";
+const char *G_OP_TYPE_FUSION_CONV_ADD = "fusion_conv_add";
-const std::string G_OP_TYPE_LRN = "lrn";
+const char *G_OP_TYPE_LRN = "lrn";
-const std::string G_OP_TYPE_MUL = "mul";
+const char *G_OP_TYPE_MUL = "mul";
-const std::string G_OP_TYPE_MULTICLASS_NMS = "multiclass_nms";
+const char *G_OP_TYPE_MULTICLASS_NMS = "multiclass_nms";
-const std::string G_OP_TYPE_POOL2D = "pool2d";
+const char *G_OP_TYPE_POOL2D = "pool2d";
-const std::string G_OP_TYPE_PRIOR_BOX = "prior_box";
+const char *G_OP_TYPE_PRIOR_BOX = "prior_box";
-const std::string G_OP_TYPE_RELU = "relu";
+const char *G_OP_TYPE_RELU = "relu";
-const std::string G_OP_TYPE_RESHAPE = "reshape";
+const char *G_OP_TYPE_RESHAPE = "reshape";
-const std::string G_OP_TYPE_SIGMOID = "sigmoid";
+const char *G_OP_TYPE_SIGMOID = "sigmoid";
-const std::string G_OP_TYPE_SOFTMAX = "softmax";
+const char *G_OP_TYPE_SOFTMAX = "softmax";
-const std::string G_OP_TYPE_TRANSPOSE = "transpose";
+const char *G_OP_TYPE_TRANSPOSE = "transpose";
-const std::string G_OP_TYPE_SPLIT = "split";
+const char *G_OP_TYPE_SPLIT = "split";
-const std::string G_OP_TYPE_FEED = "feed";
+const char *G_OP_TYPE_FEED = "feed";
-const std::string G_OP_TYPE_FETCH = "fetch";
+const char *G_OP_TYPE_FETCH = "fetch";
-const std::string G_OP_TYPE_DEPTHWISE_CONV = "depthwise_conv2d";
+const char *G_OP_TYPE_DEPTHWISE_CONV = "depthwise_conv2d";
-const std::string G_OP_TYPE_IM2SEQUENCE = "im2sequence";
+const char *G_OP_TYPE_IM2SEQUENCE = "im2sequence";
-const std::string G_OP_TYPE_DROPOUT = "dropout";
+const char *G_OP_TYPE_DROPOUT = "dropout";
-const std::string G_OP_TYPE_FUSION_CONV_ADD_BN = "fusion_conv_add_bn";
+const char *G_OP_TYPE_FUSION_CONV_ADD_BN = "fusion_conv_add_bn";
-const std::string G_OP_TYPE_FUSION_POOL_BN = "fusion_pool_bn";
+const char *G_OP_TYPE_FUSION_POOL_BN = "fusion_pool_bn";
-const std::string G_OP_TYPE_FUSION_ELEMENTWISE_ADD_RELU =
+const char *G_OP_TYPE_FUSION_ELEMENTWISE_ADD_RELU =
     "fusion_elementwise_add_relu";
-const std::string G_OP_TYPE_FUSION_FC_RELU = "fusion_fc_relu";
+const char *G_OP_TYPE_FUSION_FC_RELU = "fusion_fc_relu";
-const std::string G_OP_TYPE_REGION = "region";
+const char *G_OP_TYPE_REGION = "region";
 
 std::unordered_map<
     std::string, std::pair<std::vector<std::string>, std::vector<std::string>>>
...
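The commit does not say why the op-type globals move from std::string to const char *, but a common motivation is that a namespace-scope std::string runs a constructor (and may allocate) before main(), which costs startup time and is exposed to the static-initialization-order problem across translation units; a string literal needs neither. A minimal sketch of the pattern, assuming that rationale (the op_count map is hypothetical, not part of this commit):

#include <string>
#include <unordered_map>

// A string-literal global: no constructor runs at startup, no allocation, and
// no dependence on static-initialization order in other translation units.
const char *G_OP_TYPE_CONV = "conv2d";

int main() {
  // Call sites that want std::string still work: the literal converts
  // implicitly wherever a std::string is expected.
  std::unordered_map<std::string, int> op_count;  // hypothetical example map
  op_count[G_OP_TYPE_CONV] += 1;
  return op_count[G_OP_TYPE_CONV] == 1 ? 0 : 1;
}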
@@ -73,40 +73,40 @@ enum PMStatus {
   PMWrongDevice = 0x08 /*!< un-correct device. */
 };
 
-extern const std::string G_OP_TYPE_CONV;
+extern const char *G_OP_TYPE_CONV;
-extern const std::string G_OP_TYPE_BATCHNORM;
+extern const char *G_OP_TYPE_BATCHNORM;
-extern const std::string G_OP_TYPE_BOX_CODER;
+extern const char *G_OP_TYPE_BOX_CODER;
-extern const std::string G_OP_TYPE_CONCAT;
+extern const char *G_OP_TYPE_CONCAT;
-extern const std::string G_OP_TYPE_ELEMENTWISE_ADD;
+extern const char *G_OP_TYPE_ELEMENTWISE_ADD;
-extern const std::string G_OP_TYPE_FUSION_CONV_ADD_RELU;
+extern const char *G_OP_TYPE_FUSION_CONV_ADD_RELU;
-extern const std::string G_OP_TYPE_FC;
+extern const char *G_OP_TYPE_FC;
-extern const std::string G_OP_TYPE_FUSION_CONV_ADD;
+extern const char *G_OP_TYPE_FUSION_CONV_ADD;
-extern const std::string G_OP_TYPE_FUSION_CONV_ADD_BN_RELU;
+extern const char *G_OP_TYPE_FUSION_CONV_ADD_BN_RELU;
-extern const std::string G_OP_TYPE_FUSION_DWCONV_BN_RELU;
+extern const char *G_OP_TYPE_FUSION_DWCONV_BN_RELU;
-extern const std::string G_OP_TYPE_FUSION_CONV_BN_RELU;
+extern const char *G_OP_TYPE_FUSION_CONV_BN_RELU;
-extern const std::string G_OP_TYPE_LRN;
+extern const char *G_OP_TYPE_LRN;
-extern const std::string G_OP_TYPE_MUL;
+extern const char *G_OP_TYPE_MUL;
-extern const std::string G_OP_TYPE_MULTICLASS_NMS;
+extern const char *G_OP_TYPE_MULTICLASS_NMS;
-extern const std::string G_OP_TYPE_POOL2D;
+extern const char *G_OP_TYPE_POOL2D;
-extern const std::string G_OP_TYPE_PRIOR_BOX;
+extern const char *G_OP_TYPE_PRIOR_BOX;
-extern const std::string G_OP_TYPE_RELU;
+extern const char *G_OP_TYPE_RELU;
-extern const std::string G_OP_TYPE_RESHAPE;
+extern const char *G_OP_TYPE_RESHAPE;
-extern const std::string G_OP_TYPE_SIGMOID;
+extern const char *G_OP_TYPE_SIGMOID;
-extern const std::string G_OP_TYPE_SOFTMAX;
+extern const char *G_OP_TYPE_SOFTMAX;
-extern const std::string G_OP_TYPE_TRANSPOSE;
+extern const char *G_OP_TYPE_TRANSPOSE;
-extern const std::string G_OP_TYPE_SPLIT;
+extern const char *G_OP_TYPE_SPLIT;
-extern const std::string G_OP_TYPE_FEED;
+extern const char *G_OP_TYPE_FEED;
-extern const std::string G_OP_TYPE_FETCH;
+extern const char *G_OP_TYPE_FETCH;
-extern const std::string G_OP_TYPE_DEPTHWISE_CONV;
+extern const char *G_OP_TYPE_DEPTHWISE_CONV;
-extern const std::string G_OP_TYPE_IM2SEQUENCE;
+extern const char *G_OP_TYPE_IM2SEQUENCE;
-extern const std::string G_OP_TYPE_DROPOUT;
+extern const char *G_OP_TYPE_DROPOUT;
-extern const std::string G_OP_TYPE_FUSION_CONV_ADD_BN;
+extern const char *G_OP_TYPE_FUSION_CONV_ADD_BN;
-extern const std::string G_OP_TYPE_FUSION_POOL_BN;
+extern const char *G_OP_TYPE_FUSION_POOL_BN;
-extern const std::string G_OP_TYPE_FUSION_ELEMENTWISE_ADD_RELU;
+extern const char *G_OP_TYPE_FUSION_ELEMENTWISE_ADD_RELU;
-extern const std::string G_OP_TYPE_FUSION_FC_RELU;
+extern const char *G_OP_TYPE_FUSION_FC_RELU;
-extern const std::string G_OP_TYPE_REGION;
+extern const char *G_OP_TYPE_REGION;
 
 extern std::unordered_map<
     std::string, std::pair<std::vector<std::string>, std::vector<std::string>>>
...
@@ -145,6 +145,10 @@ else ()
     ADD_EXECUTABLE(test-conv-add-relu-op operators/test_conv_add_relu_op.cpp test_helper.h test_include.h executor_for_test.h)
     target_link_libraries(test-conv-add-relu-op paddle-mobile)
 
+    # gen test
+    ADD_EXECUTABLE(test-conv-add-bn-relu-op operators/test_fusion_conv_add_bn_relu_op.cpp test_helper.h test_include.h executor_for_test.h)
+    target_link_libraries(test-conv-add-bn-relu-op paddle-mobile)
+
     #add_library(test-lib-size SHARED common/test_lib_size.h common/test_lib_size.cpp)
 endif()
...
@@ -43,7 +43,7 @@ template <typename DeviceType, typename OpType>
 class Executor4Test : public Executor<DeviceType> {
  public:
   Executor4Test(Program<DeviceType> p, string op_type,
-                bool use_optimize = false)
+                bool use_optimize = false, int predict_op_count = 1)
       : Executor<DeviceType>() {
     this->use_optimize_ = use_optimize;
     this->program_ = p;
@@ -57,12 +57,14 @@ class Executor4Test : public Executor<DeviceType> {
       LOG(paddle_mobile::LogLevel::kLOG_ERROR)
           << "to_predict_program_ == nullptr";
     }
     const std::vector<std::shared_ptr<BlockDesc>> blocks =
         this->to_predict_program_->Blocks();
     for (std::shared_ptr<BlockDesc> block_desc : blocks) {
       std::vector<std::shared_ptr<OpDesc>> ops = block_desc->Ops();
-      for (std::shared_ptr<OpDesc> op : ops) {
-        if (op->Type() == op_type) {
+      for (int i = 0; i < ops.size(); ++i) {
+        auto op = ops[i];
+        if (op->Type() == op_type && i < predict_op_count) {
           DLOG << "matched: " << op->Type();
           /// test first meeting op in program
@@ -72,11 +74,17 @@ class Executor4Test : public Executor<DeviceType> {
               op->Type(), op->GetInputs(), op->GetOutputs(),
               op->GetAttrMap(), this->program_.scope);
           this->ops_of_block_[*block_desc.get()].push_back(op_ptr);
+          break;
         }
       }
     }
 
     this->InitMemory();
+
+    std::shared_ptr<paddle_mobile::framework::BlockDesc> to_predict_block =
+        this->to_predict_program_->Block(0);
+    auto &ops = this->ops_of_block_[*to_predict_block.get()];
+    for (const auto &op : ops) {
+      op->Init();
+    }
   }
 
   template <typename T = LoDTensor>
@@ -130,9 +138,6 @@ class Executor4Test : public Executor<DeviceType> {
     auto *output_tensor = con_output->GetMutable<LoDTensor>();
     output_tensor->mutable_data<float>(dDim);
 
-    std::shared_ptr<Tensor> out_tensor = std::make_shared<LoDTensor>();
-    out_tensor.reset(output_tensor);
-
     std::shared_ptr<paddle_mobile::framework::BlockDesc> to_predict_block =
         this->to_predict_program_->Block(0);
     for (int j = 0; j < this->ops_of_block_[*to_predict_block.get()].size();
@@ -141,6 +146,7 @@ class Executor4Test : public Executor<DeviceType> {
       op->Run();
     }
 
-    return out_tensor;
+    return std::make_shared<paddle_mobile::framework::Tensor>(
+        paddle_mobile::framework::Tensor(*output_tensor));
   }
 };
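The new return statement in Predict fixes a real ownership bug rather than just tidying up: output_tensor is a raw pointer owned by the scope's variable, and the old out_tensor.reset(output_tensor) handed that same memory to a shared_ptr, setting up a double delete. Returning a fresh copy gives the caller an independently owned tensor. A self-contained sketch of the hazard and the fix, with toy types standing in for the framework's:

#include <memory>

struct Tensor { float data[4]; };

struct Scope {            // toy stand-in for the framework scope
  Tensor owned;           // the scope owns this tensor
  Tensor *GetMutable() { return &owned; }
};

int main() {
  Scope scope;
  Tensor *output_tensor = scope.GetMutable();

  // Old pattern: the shared_ptr claims memory it does not own.
  // std::shared_ptr<Tensor> bad(output_tensor);  // double free / UB

  // New pattern: hand the caller an independent copy of the output.
  auto ok = std::make_shared<Tensor>(*output_tensor);
  return ok ? 0 : 1;
}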
...
@@ -20,22 +20,20 @@ int main() {
   paddle_mobile::PaddleMobile<paddle_mobile::CPU> paddle_mobile;
   paddle_mobile.SetThreadNum(4);
   auto time1 = time();
-  auto isok = paddle_mobile.Load(g_mobilenet_ssd_gesture + "/model",
-                                 g_mobilenet_ssd_gesture + "/params", true);
+  auto isok = paddle_mobile.Load(
+      std::string(g_mobilenet_ssd_gesture) + "/model",
+      std::string(g_mobilenet_ssd_gesture) + "/params", true);
 
   //  auto isok = paddle_mobile.Load(g_mobilenet_ssd, false);
   if (isok) {
     auto time2 = time();
     std::cout << "load cost :" << time_diff(time1, time2) << "ms" << std::endl;
 
+    std::vector<float> input;
     std::vector<int64_t> dims{1, 3, 300, 300};
-    Tensor input_tensor;
-    SetupTensor<float>(&input_tensor, {1, 3, 300, 300}, static_cast<float>(0),
-                       static_cast<float>(1));
-
-    std::vector<float> input(input_tensor.data<float>(),
-                             input_tensor.data<float>() + input_tensor.numel());
+    GetInput<float>(g_hand, &input, dims);
+
     auto time3 = time();
-    paddle_mobile.Predict(input, dims);
+    auto output = paddle_mobile.Predict(input, dims);
     auto time4 = time();
     std::cout << "predict cost :" << time_diff(time3, time4) << "ms"
               << std::endl;
...
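The std::string(...) wrappers in this hunk are forced by the types change above: with g_mobilenet_ssd_gesture now a const char *, the old expression g_mobilenet_ssd_gesture + "/model" would try to add two pointers and no longer compiles. A minimal sketch (the path is illustrative):

#include <string>

static const char *g_model_dir = "../models/example";  // illustrative path

int main() {
  // auto bad = g_model_dir + "/model";  // error: cannot add two pointers
  std::string ok = std::string(g_model_dir) + "/model";  // explicit conversion
  return ok.empty() ? 1 : 0;
}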
@@ -24,19 +24,21 @@ int main() {
     auto time2 = time();
     std::cout << "load cost :" << time_diff(time1, time1) << "ms" << std::endl;
 
+    std::vector<float> input;
     std::vector<int64_t> dims{1, 3, 224, 224};
-    Tensor input_tensor;
-    SetupTensor<float>(&input_tensor, {1, 3, 224, 224}, static_cast<float>(0),
-                       static_cast<float>(1));
-
-    std::vector<float> input(input_tensor.data<float>(),
-                             input_tensor.data<float>() + input_tensor.numel());
-    auto time3 = time();
-    auto vec_result = paddle_mobile.Predict(input, dims);
-    auto time4 = time();
+    GetInput<float>(g_test_image_1x3x224x224, &input, dims);
+
+    for (int i = 0; i < 10; ++i) {
+      auto time3 = time();
+      auto vec_result = paddle_mobile.Predict(input, dims);
+      auto time4 = time();
+      std::vector<float>::iterator biggest =
+          std::max_element(std::begin(vec_result), std::end(vec_result));
+      std::cout << " Max element is " << *biggest << " at position "
+                << std::distance(std::begin(vec_result), biggest) << std::endl;
       std::cout << "predict cost :" << time_diff(time3, time4) << "ms"
                 << std::endl;
+    }
   }
 
   return 0;
...
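Looping Predict ten times, as this hunk now does, is the usual way to get stable latency numbers on mobile CPUs, since the first runs are typically slower while caches and the thread pool warm up. If a single summary figure is wanted, the per-run costs can be averaged; a self-contained sketch using std::chrono as a stand-in for the test helpers' time()/time_diff():

#include <chrono>
#include <iostream>

int main() {
  using clock = std::chrono::steady_clock;
  const int runs = 10;
  double total_ms = 0;
  for (int i = 0; i < runs; ++i) {
    auto t0 = clock::now();
    // ... paddle_mobile.Predict(input, dims) would run here ...
    auto t1 = clock::now();
    total_ms += std::chrono::duration<double, std::milli>(t1 - t0).count();
  }
  std::cout << "mean predict cost: " << total_ms / runs << "ms" << std::endl;
  return 0;
}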
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "../test_include.h"
#include "operators/fusion_conv_add_bn_relu_op.h"
int main() {
  paddle_mobile::Loader<paddle_mobile::CPU> loader;
  //  ../models/image_classification_resnet.inference.model
  auto program = loader.Load(g_mobilenet, true);
  PADDLE_MOBILE_ENFORCE(program.originProgram != nullptr,
                        "program file read fail");

  Executor4Test<paddle_mobile::CPU,
                paddle_mobile::operators::FusionConvAddBNReluOp<
                    paddle_mobile::CPU, float>>
      executor(program, "fusion_conv_add_bn_relu", true);

  std::cout << "executor 4 test: " << std::endl;
  paddle_mobile::framework::Tensor input;
  GetInput<float>(g_test_image_1x3x224x224_banana, &input, {1, 3, 224, 224});
  //  use SetupTensor if there is no local input image:
  //  SetupTensor<float>(&input, {1, 3, 224, 224}, static_cast<float>(0),
  //                     static_cast<float>(1));
DLOG << " fuck: " << input;
  auto out_ddim = paddle_mobile::framework::make_ddim({1, 32, 112, 112});
  std::cout << "before predict: " << std::endl;
  auto output =
      executor.Predict(input, "data", "conv2_1_dw_bn.tmp_2", out_ddim);
  std::cout << "after predict " << std::endl;
  auto output_ptr = output->data<float>();

  int stride = output->numel() / 100;
  for (int i = 0; i < 100; i++) {
    DLOG << " index:" << i * stride << " value: " << output_ptr[i * stride];
  }

  //  for (int i = 0; i < 100; i++) {
  //    DLOG << " index:" << i << " value: " << output_ptr[i];
  //  }

  //  for (int j = 0; j < output->numel(); ++j) {
  //    std::cout << " (index: " << j << " value: " << output_ptr[j] << ") ";
  //  }
  std::cout << std::endl;
  return 0;
}
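This is the new test registered as test-conv-add-bn-relu-op in the CMake hunk above; it runs with the default predict_op_count = 1, so only the first matching fused op in the block is instantiated. The fetched shape {1, 32, 112, 112} is consistent with MobileNet's early layers for a 224x224 input, and the dump prints roughly 100 evenly spaced output values instead of all 401,408. A self-contained sketch of that strided-sampling pattern:

#include <iostream>
#include <vector>

int main() {
  std::vector<float> out(1 * 32 * 112 * 112, 0.5f);  // stand-in output buffer
  int stride = static_cast<int>(out.size()) / 100;   // ~100 evenly spaced taps
  for (int i = 0; i < 100; ++i) {
    std::cout << "index: " << i * stride << " value: " << out[i * stride]
              << std::endl;
  }
  return 0;
}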
@@ -24,18 +24,21 @@ limitations under the License. */
 #include "framework/ddim.h"
 #include "framework/tensor.h"
 
-static const std::string g_mobilenet_ssd = "../models/mobilenet+ssd";
+static const char *g_mobilenet_ssd = "../models/mobilenet+ssd";
-static const std::string g_mobilenet_ssd_gesture =
-    "../models/mobilenet+ssd_gesture";
+static const char *g_mobilenet_ssd_gesture = "../models/mobilenet+ssd_gesture";
-static const std::string g_squeezenet = "../models/squeezenet";
+static const char *g_squeezenet = "../models/squeezenet";
-static const std::string g_googlenet = "../models/googlenet";
+static const char *g_googlenet = "../models/googlenet";
-static const std::string g_mobilenet = "../models/mobilenet";
+static const char *g_mobilenet = "../models/mobilenet";
-static const std::string g_resnet_50 = "../models/resnet_50";
+static const char *g_resnet_50 = "../models/resnet_50";
-static const std::string g_resnet = "../models/resnet";
+static const char *g_resnet = "../models/resnet";
-static const std::string g_googlenet_combine = "../models/googlenet_combine";
+static const char *g_googlenet_combine = "../models/googlenet_combine";
-static const std::string g_yolo = "../models/yolo";
+static const char *g_yolo = "../models/yolo";
-static const std::string g_test_image_1x3x224x224 =
+static const char *g_test_image_1x3x224x224 =
     "../images/test_image_1x3x224x224_float";
+static const char *g_test_image_1x3x224x224_banana =
+    "../images/input_3x224x224_banana";
+static const char *g_hand = "../images/hand_image";
 
 using paddle_mobile::framework::DDim;
 using paddle_mobile::framework::Tensor;
...
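GetInput<float>, used throughout the updated tests, fills a buffer from one of these raw image files; its real implementation lives in test_helper.h. A minimal sketch of what such a reader plausibly looks like, assuming the files store plain host-endian float32 (ReadRawFloats is a hypothetical name, not the project's API):

#include <cstdint>
#include <fstream>
#include <functional>
#include <numeric>
#include <string>
#include <vector>

// Hypothetical stand-in for GetInput: read product(dims) float32 values from
// a binary file into out. Assumes host-endian float32 on disk.
void ReadRawFloats(const std::string &path, std::vector<float> *out,
                   const std::vector<int64_t> &dims) {
  int64_t count = std::accumulate(dims.begin(), dims.end(), int64_t{1},
                                  std::multiplies<int64_t>());
  out->resize(static_cast<size_t>(count));
  std::ifstream in(path, std::ios::binary);
  in.read(reinterpret_cast<char *>(out->data()),
          static_cast<std::streamsize>(count * sizeof(float)));
}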