提交 df028ce2 编写于 作者: H Houjiang Chen 提交者: GitHub

Fix compilation on Raspberry PI3, and fix some code style (#1566)

上级 b7aa9de2
......@@ -15,6 +15,7 @@ limitations under the License. */
#ifdef BEAM_SEARCH_DECODE_OP
#include "operators/kernel/beam_search_decode_kernel.h"
#include <algorithm>
#include "framework/data_type.h"
namespace paddle_mobile {
......
......@@ -15,6 +15,7 @@ limitations under the License. */
#ifdef BEAM_SEARCH_OP
#include "operators/kernel/beam_search_kernel.h"
#include <cmath>
#include <numeric>
namespace paddle_mobile {
......
......@@ -157,39 +157,34 @@ inline float PoolPost<AVG>(const float &x, const float &post) {
template <PoolingType P>
struct Pooling {
  // Generic pooling functor parameterized by pooling type P (e.g. AVG,
  // cf. PoolPost<AVG> above). Declaration only; defined in the .cpp.
  // The diff view had fused the old and new declarations into a duplicate
  // member declaration (ill-formed C++); only the committed form is kept.
  //
  // input:       source feature map.
  // kernel_size: pooling window sizes per spatial dim.
  // strides:     window strides per spatial dim.
  // paddings:    zero-paddings per spatial dim.
  // output:      destination tensor, written in place.
  void operator()(const framework::Tensor &input,
                  const std::vector<int> &kernel_size,
                  const std::vector<int> &strides,
                  const std::vector<int> &paddings, framework::Tensor *output);
};
template <PoolingType P, int Stride>
struct Pooling2x2 {
  // Specialized 2x2-kernel pooling functor; kernel size is fixed by the
  // struct name and stride by the template parameter, so only paddings are
  // passed at call time. Duplicate fused declaration removed (ill-formed).
  void operator()(const framework::Tensor &input,
                  const std::vector<int> &paddings, framework::Tensor *output);
};
template <PoolingType P, int Stride>
struct Pooling3x3 {
  // Specialized 3x3-kernel pooling functor; kernel size fixed by the struct,
  // stride by the template parameter. Duplicate fused declaration removed
  // (a member function may be declared only once within its class).
  void operator()(const framework::Tensor &input,
                  const std::vector<int> &paddings, framework::Tensor *output);
};
template <PoolingType P, int Stride>
struct Pooling5x5 {
  // Specialized 5x5-kernel pooling functor; kernel size fixed by the struct,
  // stride by the template parameter. Duplicate fused declaration removed.
  void operator()(const framework::Tensor &input,
                  const std::vector<int> &paddings, framework::Tensor *output);
};
template <PoolingType P, int Stride>
struct Pooling7x7 {
  // Specialized 7x7-kernel pooling functor; kernel size fixed by the struct,
  // stride by the template parameter. Duplicate fused declaration removed.
  void operator()(const framework::Tensor &input,
                  const std::vector<int> &paddings, framework::Tensor *output);
};
} // namespace math
......
......@@ -46,7 +46,7 @@ int main(int argc, char* argv[]) {
std::shared_ptr<paddle_mobile::framework::Tensor> output;
std::vector<int64_t> dims{1, 3, 224, 224};
if (feed_shape) {
sscanf(feed_shape, "%ld,%ld,%ld,%ld", &dims[0], &dims[1], &dims[2],
sscanf(feed_shape, "%lld,%lld,%lld,%lld", &dims[0], &dims[1], &dims[2],
&dims[3]);
}
std::cout << "feed shape: [" << dims[0] << ", " << dims[1] << ", "
......
......@@ -13,25 +13,31 @@ See the License for the specific language governing permissions and
limitations under the License. */
#include <iostream>
#include <sstream>
#include "../test_helper.h"
#include "../test_include.h"
int main(int argc, char* argv[]) {
if (argc < 2) {
std::cout << "Usage: ./test_benchmark feed_shape [thread_num] [use_fuse]\n"
<< "feed_shape: input tensor shape, such as 3,224,224.\n"
<< "thread_num: optional int, threads count, default is 1.\n"
<< "use_fuse: optional bool, default is 0.\n";
int main(int argc, char *argv[]) {
if (argc < 4) {
std::cout << "Usage: ./test_googlenet fluid-model input-image image-shape "
"[thread-num] [fusion]\n"
<< " fluid-model: fluid model path. \n"
<< " input-image: input raw image path. \n"
<< " image-shape: input tensor shape, such as 1,3,224,224.\n"
<< " thread-num: optional int, threads count, default is 1.\n"
<< " fusion: optional bool, default is 0.\n";
return 1;
}
int thread_num = 1;
bool optimize = false;
char* feed_shape = argv[1];
if (argc >= 3) {
thread_num = atoi(argv[2]);
char *fluid_model = argv[1];
char *input_img = argv[2];
char *feed_shape = argv[3];
if (argc >= 5) {
thread_num = atoi(argv[4]);
}
if (argc >= 4) {
optimize = atoi(argv[3]);
if (argc >= 6) {
optimize = atoi(argv[5]);
}
#ifdef PADDLE_MOBILE_FPGA
paddle_mobile::PaddleMobile<paddle_mobile::FPGA> paddle_mobile;
......@@ -42,18 +48,21 @@ int main(int argc, char* argv[]) {
paddle_mobile.SetThreadNum(thread_num);
auto time1 = time();
std::vector<float> output;
if (paddle_mobile.Load(g_googlenet, optimize, false, 1, true)) {
if (paddle_mobile.Load(fluid_model, optimize, false, 1, true)) {
auto time2 = paddle_mobile::time();
std::cout << "load cost :" << paddle_mobile::time_diff(time1, time2) << "ms"
<< std::endl;
std::vector<float> input;
std::vector<int64_t> dims{1, 3, 224, 224};
if (feed_shape) {
sscanf(feed_shape, "%d,%d,%d", &dims[1], &dims[2], &dims[3]);
sscanf(feed_shape, "%lld,%lld,%lld,%lld", &dims[0], &dims[1], &dims[2],
&dims[3]);
}
std::cout << "feed shape: [" << dims[0] << ", " << dims[1] << ", "
<< dims[2] << ", " << dims[3] << "]" << std::endl;
GetInput<float>(g_test_image_1x3x224x224, &input, dims);
GetInput<float>(input_img, &input, dims);
// warmup
for (int i = 0; i < 10; ++i) {
output = paddle_mobile.Predict(input, dims);
......@@ -64,6 +73,13 @@ int main(int argc, char* argv[]) {
}
auto time4 = time();
std::cout << "predict cost: " << time_diff(time3, time4) / 10 << "ms\n";
std::ostringstream os;
os << output[0];
for (int i = 1; i < output.size(); ++i) {
os << ", " << output[i];
}
DLOG << os.str();
}
return 0;
}
......@@ -118,7 +118,8 @@ build_for_arm_linux() {
fi
cd "../build/release/arm-linux"
make -j 8
make -j 2
cd "../../../test/"
DIRECTORY="models"
if [ "`ls -A $DIRECTORY`" = "" ]; then
......
......@@ -75,7 +75,6 @@ function build_android_armv7_cpu_only() {
-DANDROID=true \
-DWITH_LOGGING=OFF \
-DCPU=ON \
-DGPU_MALI=OFF \
-DGPU_CL=OFF \
-DFPGA=OFF
......@@ -95,7 +94,6 @@ function build_android_armv7_gpu() {
-DANDROID=true \
-DWITH_LOGGING=OFF \
-DCPU=ON \
-DGPU_MALI=ON \
-DGPU_CL=ON \
-DFPGA=OFF
......@@ -115,7 +113,6 @@ function build_android_armv8_cpu_only() {
-DANDROID=true \
-DWITH_LOGGING=OFF \
-DCPU=ON \
-DGPU_MALI=OFF \
-DGPU_CL=OFF \
-DFPGA=OFF
......@@ -135,7 +132,6 @@ function build_android_armv8_gpu() {
-DANDROID=true \
-DWITH_LOGGING=OFF \
-DCPU=ON \
-DGPU_MALI=ON \
-DGPU_CL=ON \
-DFPGA=OFF
......@@ -154,7 +150,6 @@ function build_ios_armv8_cpu_only() {
-DIS_IOS=true \
-DUSE_OPENMP=OFF \
-DCPU=ON \
-DGPU_MALI=OFF \
-DGPU_CL=OFF \
-DFPGA=OFF
......@@ -173,7 +168,6 @@ function build_ios_armv8_gpu() {
-DIS_IOS=true \
-DUSE_OPENMP=OFF \
-DCPU=ON \
-DGPU_MALI=OFF \
-DGPU_CL=ON \
-DFPGA=OFF
......@@ -188,7 +182,6 @@ function build_linux_armv7_cpu_only() {
-DCMAKE_BUILD_TYPE="MinSizeRel" \
-DCMAKE_TOOLCHAIN_FILE="./tools/toolchains/arm-linux-gnueabihf.cmake" \
-DCPU=ON \
-DGPU_MALI=OFF \
-DGPU_CL=OFF \
-DFPGA=OFF
......@@ -203,7 +196,6 @@ function build_linux_armv7_gpu() {
-DCMAKE_BUILD_TYPE="MinSizeRel" \
-DCMAKE_TOOLCHAIN_FILE="./tools/toolchains/arm-linux-gnueabihf.cmake" \
-DCPU=ON \
-DGPU_MALI=ON \
-DGPU_CL=ON \
-DFPGA=OFF
......
......@@ -4,8 +4,7 @@ set(CMAKE_SYSTEM_NAME Linux)
set(CMAKE_SYSTEM_PROCESSOR arm)
set(CMAKE_SYSTEM_VERSION 1)
message("if U build on platform . this is right.")
set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
\ No newline at end of file
set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY)
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册