From 047bba855b1cdd99142ec7b15a4f19015378c5b8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E7=9F=B3=E6=99=93=E4=BC=9F?= <39303645+Shixiaowei02@users.noreply.github.com>
Date: Wed, 3 Jul 2019 20:29:00 +0800
Subject: [PATCH] Remove the obsolete cmake options (#18481)

* remove the obsolete cmake options, test=develop

* remove unittests, test=develop
---
 CMakeLists.txt                                |  12 +-
 cmake/configure.cmake                         |  18 +-
 cmake/external/anakin.cmake                   |  76 ------
 cmake/inference_lib.cmake                     |  13 +-
 .../inference/api/paddle_inference_api.h      |   2 +-
 .../fluid/inference/tests/api/CMakeLists.txt  |  21 --
 .../tests/api/anakin_mobilenet_tester.cc      |  67 -----
 .../inference/tests/api/anakin_rnn1_tester.cc | 246 ------------------
 8 files changed, 12 insertions(+), 443 deletions(-)
 delete mode 100644 cmake/external/anakin.cmake
 delete mode 100644 paddle/fluid/inference/tests/api/anakin_mobilenet_tester.cc
 delete mode 100644 paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc

diff --git a/CMakeLists.txt b/CMakeLists.txt
index be0d5bf845..29e24ff724 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -59,7 +59,6 @@ option(WITH_DISTRIBUTE "Compile with distributed support" OFF)
 option(WITH_BRPC_RDMA "Use brpc rdma as the rpc protocal" OFF)
 option(ON_INFER "Turn on inference optimization." OFF)
 ################################ Internal Configurations #######################################
-option(WITH_ANAKIN "Compile with Anakin library" OFF)
 option(WITH_AMD_GPU "Compile PaddlePaddle with AMD GPU" OFF)
 option(WITH_NGRAPH "Compile PaddlePaddle with nGraph support." OFF)
 option(WITH_PROFILER "Compile PaddlePaddle with GPU profiler and gperftools" OFF)
@@ -68,9 +67,6 @@ option(COVERALLS_UPLOAD "Package code coverage data to coveralls" OFF)
 option(WITH_PSLIB "Compile with pslib support" OFF)
 option(WITH_CONTRIB "Compile the third-party contributation" OFF)
 option(REPLACE_ENFORCE_GLOG "Replace PADDLE_ENFORCE with glog/CHECK for better debug." OFF)
-# TODO(Superjomn) Remove WITH_ANAKIN option if not needed latter.
-option(ANAKIN_BUILD_FAT_BIN "Build anakin cuda fat-bin lib for all device plantform, ignored when WITH_ANAKIN=OFF" OFF)
-option(ANAKIN_BUILD_CROSS_PLANTFORM "Build anakin lib for any nvidia device plantform. ignored when WITH_ANAKIN=OFF" ON)
 option(WITH_GRPC "Use grpc as the default rpc framework" ${WITH_DISTRIBUTE})
 option(WITH_INFERENCE_API_TEST "Test fluid inference C++ high-level api interface" OFF)
 option(WITH_HIGH_LEVEL_API_TEST "Test fluid python high-level api interface" OFF)
@@ -184,6 +180,7 @@ if(WITH_BRPC_RDMA)
     endif()
 endif()
 
+include(anakin_subgraph)
 include(external/threadpool)
 include(flags) # set paddle compile flags
 
@@ -193,7 +190,6 @@ include(configure) # add paddle env configuration
 if(WITH_GPU)
     include(cuda)
     include(tensorrt)
-    include(anakin_subgraph)
 endif()
 
 if(WIN32 OR APPLE OR NOT WITH_GPU OR ON_INFER)
@@ -206,12 +202,6 @@ if(WITH_DGC)
     add_definitions(-DPADDLE_WITH_DGC)
 endif()
 
-if(WITH_MKL OR WITH_MKLML)
-    include(external/anakin)
-elseif()
-    set(WITH_ANAKIN OFF CACHE STRING "Anakin is used in MKL only now." FORCE)
-endif()
-
 if (WITH_PROFILER)
     find_package(Gperftools REQUIRED)
     include_directories(${GPERFTOOLS_INCLUDE_DIR})
diff --git a/cmake/configure.cmake b/cmake/configure.cmake
index 279f1eba3f..5f7b4a4698 100644
--- a/cmake/configure.cmake
+++ b/cmake/configure.cmake
@@ -99,23 +99,15 @@ if(WITH_GPU)
     endif()
     include_directories(${TENSORRT_INCLUDE_DIR})
   endif()
-  if(WITH_ANAKIN)
+  if(ANAKIN_FOUND)
     if(${CUDA_VERSION_MAJOR} VERSION_LESS 8)
-      message(WARNING "Anakin needs CUDA >= 8.0 to compile. Force WITH_ANAKIN=OFF")
-      set(WITH_ANAKIN OFF CACHE STRING "Anakin is valid only when CUDA >= 8.0." FORCE)
+      message(WARNING "Anakin needs CUDA >= 8.0 to compile. Force ANAKIN_FOUND = OFF")
+      set(ANAKIN_FOUND OFF CACHE STRING "Anakin is valid only when CUDA >= 8.0." FORCE)
     endif()
     if(${CUDNN_MAJOR_VERSION} VERSION_LESS 7)
-      message(WARNING "Anakin needs CUDNN >= 7.0 to compile. Force WITH_ANAKIN=OFF")
-      set(WITH_ANAKIN OFF CACHE STRING "Anakin is valid only when CUDNN >= 7.0." FORCE)
+      message(WARNING "Anakin needs CUDNN >= 7.0 to compile. Force ANAKIN_FOUND = OFF")
+      set(ANAKIN_FOUND OFF CACHE STRING "Anakin is valid only when CUDNN >= 7.0." FORCE)
     endif()
-    add_definitions(-DWITH_ANAKIN)
-  endif()
-  if(WITH_ANAKIN)
-    # NOTICE(minqiyang): the end slash is important because $CUDNN_INCLUDE_DIR
-    # is a softlink to real cudnn.h directory
-    set(ENV{CUDNN_INCLUDE_DIR} "${CUDNN_INCLUDE_DIR}/")
-    get_filename_component(CUDNN_LIBRARY_DIR ${CUDNN_LIBRARY} DIRECTORY)
-    set(ENV{CUDNN_LIBRARY} ${CUDNN_LIBRARY_DIR})
   endif()
 elseif(WITH_AMD_GPU)
   add_definitions(-DPADDLE_WITH_HIP)
diff --git a/cmake/external/anakin.cmake b/cmake/external/anakin.cmake
deleted file mode 100644
index 77f4b34537..0000000000
--- a/cmake/external/anakin.cmake
+++ /dev/null
@@ -1,76 +0,0 @@
-if (NOT WITH_ANAKIN)
-  return()
-endif()
-
-option(ANAKIN_ENABLE_OP_TIMER "Get more detailed information with Anakin op time" OFF)
-if(ANAKIN_ENABLE_OP_TIMER)
-  add_definitions(-DPADDLE_ANAKIN_ENABLE_OP_TIMER)
-endif()
-
-INCLUDE(ExternalProject)
-set(ANAKIN_SOURCE_DIR ${THIRD_PARTY_PATH}/anakin)
-# the anakin install dir is only default one now
-set(ANAKIN_INSTALL_DIR ${THIRD_PARTY_PATH}/anakin/src/extern_anakin/output)
-set(ANAKIN_INCLUDE ${ANAKIN_INSTALL_DIR})
-set(ANAKIN_LIBRARY ${ANAKIN_INSTALL_DIR})
-set(ANAKIN_SHARED_LIB ${ANAKIN_LIBRARY}/libanakin.so)
-set(ANAKIN_SABER_LIB ${ANAKIN_LIBRARY}/libanakin_saber_common.so)
-
-include_directories(${ANAKIN_INCLUDE})
-include_directories(${ANAKIN_INCLUDE}/saber/)
-include_directories(${ANAKIN_INCLUDE}/saber/core/)
-include_directories(${ANAKIN_INCLUDE}/saber/funcs/impl/x86/)
-include_directories(${ANAKIN_INCLUDE}/saber/funcs/impl/cuda/base/cuda_c/)
-
-set(ANAKIN_COMPILE_EXTRA_FLAGS
-    -Wno-error=unused-but-set-variable -Wno-unused-but-set-variable
-    -Wno-error=unused-variable -Wno-unused-variable
-    -Wno-error=format-extra-args -Wno-format-extra-args
-    -Wno-error=comment -Wno-comment
-    -Wno-error=format -Wno-format
-    -Wno-error=maybe-uninitialized -Wno-maybe-uninitialized
-    -Wno-error=switch -Wno-switch
-    -Wno-error=return-type -Wno-return-type
-    -Wno-error=non-virtual-dtor -Wno-non-virtual-dtor
-    -Wno-error=ignored-qualifiers
-    -Wno-ignored-qualifiers
-    -Wno-sign-compare
-    -Wno-reorder
-    -Wno-error=cpp)
-
-if(WITH_GPU)
-  set(CMAKE_ARGS_PREFIX -DUSE_GPU_PLACE=YES -DCUDNN_ROOT=${CUDNN_ROOT} -DCUDNN_INCLUDE_DIR=${CUDNN_INCLUDE_DIR})
-else()
-  set(CMAKE_ARGS_PREFIX -DUSE_GPU_PLACE=NO)
-endif()
-ExternalProject_Add(
-    extern_anakin
-    ${EXTERNAL_PROJECT_LOG_ARGS}
-    DEPENDS ${MKLML_PROJECT}
-    GIT_REPOSITORY "https://github.com/PaddlePaddle/Anakin"
-    GIT_TAG "3c8554f4978628183566ab7dd6c1e7e66493c7cd"
-    PREFIX ${ANAKIN_SOURCE_DIR}
-    UPDATE_COMMAND ""
-    CMAKE_ARGS ${CMAKE_ARGS_PREFIX}
-               -DUSE_LOGGER=YES
-               -DUSE_X86_PLACE=YES
-               -DBUILD_WITH_UNIT_TEST=NO
-               -DPROTOBUF_ROOT=${THIRD_PARTY_PATH}/install/protobuf
-               -DMKLML_ROOT=${THIRD_PARTY_PATH}/install/mklml
-               -DENABLE_OP_TIMER=${ANAKIN_ENABLE_OP_TIMER}
-               -DBUILD_FAT_BIN=${ANAKIN_BUILD_FAT_BIN}
-               -DBUILD_CROSS_PLANTFORM=${ANAKIN_BUILD_CROSS_PLANTFORM}
-               ${EXTERNAL_OPTIONAL_ARGS}
-    CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${ANAKIN_INSTALL_DIR}
-)
-
-message(STATUS "Anakin for inference is enabled")
-message(STATUS "Anakin is set INCLUDE:${ANAKIN_INCLUDE} LIBRARY:${ANAKIN_LIBRARY}")
-add_dependencies(extern_anakin protobuf mklml)
-add_library(anakin_shared SHARED IMPORTED GLOBAL)
-set_property(TARGET anakin_shared PROPERTY IMPORTED_LOCATION ${ANAKIN_SHARED_LIB})
-add_dependencies(anakin_shared extern_anakin)
-
-add_library(anakin_saber SHARED IMPORTED GLOBAL)
-set_property(TARGET anakin_saber PROPERTY IMPORTED_LOCATION ${ANAKIN_SABER_LIB})
-add_dependencies(anakin_saber extern_anakin)
diff --git a/cmake/inference_lib.cmake b/cmake/inference_lib.cmake
index a854e27c42..885103a7e9 100644
--- a/cmake/inference_lib.cmake
+++ b/cmake/inference_lib.cmake
@@ -207,14 +207,6 @@ copy(memory_lib
 set(inference_deps paddle_fluid_shared paddle_fluid)
 
 set(module "inference/api")
-if (WITH_ANAKIN AND WITH_MKL)
-    copy(anakin_inference_lib DEPS paddle_inference_api inference_anakin_api
-            SRCS
-            ${PADDLE_BINARY_DIR}/paddle/fluid/inference/api/libinference_anakin_api* # compiled anakin api
-            ${ANAKIN_INSTALL_DIR} # anakin release
-            DSTS ${FLUID_INSTALL_DIR}/third_party/install/anakin ${FLUID_INSTALL_DIR}/third_party/install/anakin)
-    list(APPEND inference_deps anakin_inference_lib)
-endif ()
 
 if (TENSORRT_FOUND)
     copy(tensorrt_lib DEPS ${inference_deps}
@@ -222,6 +214,11 @@ if (TENSORRT_FOUND)
             DSTS ${FLUID_INSTALL_DIR}/third_party/install/tensorrt/include ${FLUID_INSTALL_DIR}/third_party/install/tensorrt/lib)
 endif ()
 
+if (ANAKIN_FOUND)
+    copy(anakin_lib DEPS ${inference_deps}
+            SRCS ${ANAKIN_ROOT}/*
+            DSTS ${FLUID_INSTALL_DIR}/third_party/install/anakin)
+endif ()
 
 set(module "inference")
 if(WIN32)
diff --git a/paddle/fluid/inference/api/paddle_inference_api.h b/paddle/fluid/inference/api/paddle_inference_api.h
index 2906a4926f..feb5373c1d 100644
--- a/paddle/fluid/inference/api/paddle_inference_api.h
+++ b/paddle/fluid/inference/api/paddle_inference_api.h
@@ -28,6 +28,6 @@ limitations under the License. */
 
 #include "paddle_analysis_config.h"  // NOLINT
 #include "paddle_api.h"              // NOLINT
-#if (defined WITH_ANAKIN) || (defined PADDLE_WITH_ANAKIN)
+#if (defined PADDLE_WITH_ANAKIN)
 #include "paddle_anakin_config.h"  // NOLINT
 #endif
diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt
index 89e843a716..083e1bc59e 100644
--- a/paddle/fluid/inference/tests/api/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -254,27 +254,6 @@ set(BERT_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/bert_emb128")
 download_model_and_data(${BERT_INSTALL_DIR} "bert_emb128_model.tar.gz" "bert_data_len20.txt.tar.gz")
 inference_analysis_api_test(test_analyzer_bert ${BERT_INSTALL_DIR} analyzer_bert_tester.cc)
 
-# anakin
-if (ANAKIN_FOUND AND WITH_MKL) # only needed in CI
-    # anakin rnn1
-    set(ANAKIN_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/anakin")
-    set(ANAKIN_RNN1_INSTALL_DIR "${ANAKIN_INSTALL_DIR}/rnn1")
-    inference_download(${ANAKIN_RNN1_INSTALL_DIR} ${INFERENCE_URL} "anakin_test%2Fditu_rnn.anakin2.model.bin")
-    inference_download(${ANAKIN_RNN1_INSTALL_DIR} ${INFERENCE_URL} "anakin_test%2Fditu_rnn_data.txt")
-    cc_test(test_anakin_rnn1 SRCS anakin_rnn1_tester.cc
-            ARGS --model=${ANAKIN_RNN1_INSTALL_DIR}/anakin_test%2Fditu_rnn.anakin2.model.bin
-                 --datapath=${ANAKIN_RNN1_INSTALL_DIR}/anakin_test%2Fditu_rnn_data.txt
-            DEPS inference_anakin_api_shared)
-    # anakin mobilenet
-    if(WITH_GPU)
-        set(ANAKIN_MOBILENET_INSTALL_DIR "${ANAKIN_INSTALL_DIR}/mobilenet")
-        inference_download(${ANAKIN_MOBILENET_INSTALL_DIR} ${INFERENCE_URL} "mobilenet_v2.anakin.bin")
-        cc_test(test_anakin_mobilenet SRCS anakin_mobilenet_tester.cc
-                ARGS --model=${ANAKIN_MOBILENET_INSTALL_DIR}/mobilenet_v2.anakin.bin
-                DEPS inference_anakin_api_shared dynload_cuda)
-    endif()
-endif()
-
 if(WITH_GPU AND TENSORRT_FOUND)
     set(TRT_MODEL_INSTALL_DIR "${INFERENCE_DEMO_INSTALL_DIR}/trt")
     if (NOT EXISTS ${TRT_MODEL_INSTALL_DIR})
diff --git a/paddle/fluid/inference/tests/api/anakin_mobilenet_tester.cc b/paddle/fluid/inference/tests/api/anakin_mobilenet_tester.cc
deleted file mode 100644
index 48689486af..0000000000
--- a/paddle/fluid/inference/tests/api/anakin_mobilenet_tester.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include <glog/logging.h>
-#include <gtest/gtest.h>
-
-#include "gflags/gflags.h"
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-
-DEFINE_string(model, "", "Directory of the inference model(mobile_v2).");
-
-namespace paddle {
-
-contrib::AnakinConfig GetConfig() {
-  contrib::AnakinConfig config;
-  // using AnakinConfig::X86 if you need to use cpu to do inference
-  config.target_type = contrib::AnakinConfig::NVGPU;
-  config.model_file = FLAGS_model;
-  config.device_id = 0;
-  config.init_batch_size = 1;
-  return config;
-}
-
-TEST(inference, anakin) {
-  auto config = GetConfig();
-  auto predictor =
-      CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
-          config);
-
-  float data[1 * 3 * 224 * 224] = {1.0f};
-  PaddleTensor tensor;
-  tensor.name = "input_0";
-  tensor.shape = std::vector<int>({1, 3, 224, 224});
-  tensor.data = PaddleBuf(data, sizeof(data));
-  tensor.dtype = PaddleDType::FLOAT32;
-
-  // For simplicity, we set all the slots with the same data.
-  std::vector<PaddleTensor> paddle_tensor_feeds(1, tensor);
-
-  PaddleTensor tensor_out;
-  tensor_out.name = "prob_out";
-  tensor_out.shape = std::vector<int>({});
-  tensor_out.data = PaddleBuf();
-  tensor_out.dtype = PaddleDType::FLOAT32;
-
-  std::vector<PaddleTensor> outputs(1, tensor_out);
-
-  ASSERT_TRUE(predictor->Run(paddle_tensor_feeds, &outputs));
-
-  float* data_o = static_cast<float*>(outputs[0].data.data());
-  for (size_t j = 0; j < outputs[0].data.length(); ++j) {
-    LOG(INFO) << "output[" << j << "]: " << data_o[j];
-  }
-}
-
-}  // namespace paddle
diff --git a/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc b/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc
deleted file mode 100644
index db01cfebcb..0000000000
--- a/paddle/fluid/inference/tests/api/anakin_rnn1_tester.cc
+++ /dev/null
@@ -1,246 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include <gtest/gtest.h>
-#include <algorithm>
-#include <fstream>
-#include <iostream>
-#include <memory>
-#include <thread>  // NOLINT
-#include <vector>
-#include "paddle/fluid/inference/api/helper.h"
-#include "paddle/fluid/inference/api/paddle_inference_api.h"
-#include "utils/logger/logger.h"
-
-DEFINE_string(model, "", "Directory of the inference model.");
-DEFINE_string(datapath, "", "Path of the dataset.");
-DEFINE_int32(batch_size, 1, "batch size.");
-DEFINE_int32(repeat, 1, "Running the inference program repeat times.");
-
-class Data {
- public:
-  Data(std::string file_name, int batch_size)
-      : _batch_size(batch_size), _total_length(0) {
-    _file.open(file_name);
-    _file.seekg(_file.end);
-    _total_length = _file.tellg();
-    _file.seekg(_file.beg);
-  }
-  void get_batch_data(std::vector<std::vector<float>>& fea,        // NOLINT
-                      std::vector<std::vector<float>>& week_fea,   // NOLINT
-                      std::vector<std::vector<float>>& time_fea,   // NOLINT
-                      std::vector<long unsigned int>& seq_offset);  // NOLINT
-
- private:
-  std::fstream _file;
-  int _total_length;
-  int _batch_size;
-};
-
-void Data::get_batch_data(
-    std::vector<std::vector<float>>& fea,        // NOLINT
-    std::vector<std::vector<float>>& week_fea,   // NOLINT
-    std::vector<std::vector<float>>& time_fea,   // NOLINT
-    std::vector<long unsigned int>& seq_offset) {  // NOLINT
-  int seq_num = 0;
-  long unsigned int cum = 0;  // NOLINT
-
-  char buf[10000];
-  seq_offset.clear();
-  seq_offset.push_back(0);
-  fea.clear();
-  week_fea.clear();
-  time_fea.clear();
-  while (_file.getline(buf, 10000)) {
-    std::vector<std::string> data_vec;
-    paddle::inference::split(buf, ':', &data_vec);
-
-    std::vector<std::string> seq;
-    paddle::inference::split(data_vec[0], '|', &seq);
-
-    for (auto link : seq) {
-      std::vector<float> vec;
-      paddle::inference::split_to_float(link, ',', &vec);
-      fea.push_back(vec);
-    }
-
-    std::vector<float> vec_w;
-    paddle::inference::split_to_float(data_vec[2], ',', &vec_w);
-    week_fea.push_back(vec_w);
-
-    std::vector<float> vec_t;
-    paddle::inference::split_to_float(data_vec[1], ',', &vec_t);
-    time_fea.push_back(vec_t);
-
-    cum += seq.size();
-    seq_offset.push_back(cum);
-
-    seq_num++;
-    if (seq_num >= _batch_size) {
-      break;
-    }
-  }
-}
-
-namespace paddle {
-
-contrib::AnakinConfig GetConfig() {
-  contrib::AnakinConfig config;
-  // using AnakinConfig::X86 if you need to use cpu to do inference
-  config.target_type = contrib::AnakinConfig::X86;
-  config.model_file = FLAGS_model;
-  config.device_id = 0;
-  config.init_batch_size = 1000;  // the max number of token
-  return config;
-}
-
-void set_tensor(std::string name, std::vector<int> shape,
-                std::vector<PaddleTensor>& vec) {  // NOLINT
-  int sum = 1;
-  std::for_each(shape.begin(), shape.end(), [&](int n) { sum *= n; });
-  float* data = new float[sum];
-  PaddleTensor tensor;
-  tensor.name = name;
-  tensor.shape = shape;
-  tensor.data = PaddleBuf(data, sum);
-  tensor.dtype = PaddleDType::FLOAT32;
-  vec.push_back(tensor);
-}
-
-void single_test() {
-  auto config = GetConfig();
-  auto predictor =
-      CreatePaddlePredictor<contrib::AnakinConfig, PaddleEngineKind::kAnakin>(
-          config);
-
-  int max_batch_size = 1000;
-  std::string feature_file = FLAGS_datapath;
-  Data map_data(feature_file, FLAGS_batch_size);
-  std::vector<std::vector<float>> fea;
-  std::vector<std::vector<float>> week_fea;
-  std::vector<std::vector<float>> time_fea;
-  std::vector<long unsigned int> seq_offset;  // NOLINT
-
-  paddle::PaddleTensor tensor_0, tensor_1, tensor_2;
-  tensor_0.name = "input_0";
-  tensor_1.name = "input_4";
-  tensor_2.name = "input_5";
-
-  PaddleTensor tensor_out;
-  tensor_out.name = "final_output.tmp_1_gout";
-  tensor_out.shape = std::vector<int>({});
-  tensor_out.data = PaddleBuf();
-  tensor_out.dtype = PaddleDType::FLOAT32;
-
-  std::vector<PaddleTensor> inputs;
-  std::vector<PaddleTensor> outputs(1, tensor_out);
-
-  int data_0_dim = 38;
-  int data_1_dim = 10;
-  int data_2_dim = 10;
-  float data_0[max_batch_size * data_0_dim];  // NOLINT
-  float data_1[max_batch_size * data_1_dim];  // NOLINT
-  float data_2[max_batch_size * data_2_dim];  // NOLINT
-
-  int count = 0;
-  while (true) {
-    if (count++ > 0) break;  // only run the first batch in ci.
-    seq_offset.clear();
-    map_data.get_batch_data(fea, week_fea, time_fea, seq_offset);
-    if (seq_offset.size() <= 1) {
-      LOG(FATAL) << "seq_offset.size() <= 1, exit.";
-      break;
-    }
-
-    std::vector<std::vector<long unsigned int>> seq_offset_vec;  // NOLINT
-    seq_offset_vec.push_back(seq_offset);
-    tensor_0.lod = seq_offset_vec;
-
-    int p_shape_0[] = {(int)fea.size(), 1, 1, data_0_dim};       // NOLINT
-    int p_shape_1[] = {(int)week_fea.size(), data_1_dim, 1, 1};  // NOLINT
-    int p_shape_2[] = {(int)time_fea.size(), data_2_dim, 1, 1};  // NOLINT
-
-    std::vector<int> shape_0(p_shape_0, p_shape_0 + 4);
-    std::vector<int> shape_1(p_shape_1, p_shape_1 + 4);
-    std::vector<int> shape_2(p_shape_2, p_shape_2 + 4);
-
-    tensor_0.shape = shape_0;
-    tensor_1.shape = shape_1;
-    tensor_2.shape = shape_2;
-
-    for (int i = 0; i < fea.size(); i++) {
-      memcpy(data_0 + i * data_0_dim, &fea[i][0], sizeof(float) * data_0_dim);
-    }
-    for (int i = 0; i < week_fea.size(); i++) {
-      memcpy(data_1 + i * data_1_dim, &week_fea[i][0],
-             sizeof(float) * data_1_dim);
-    }
-    for (int i = 0; i < time_fea.size(); i++) {
-      memcpy(data_2 + i * data_2_dim, &time_fea[i][0],
-             sizeof(float) * data_2_dim);
-    }
-
-    tensor_0.data =
-        paddle::PaddleBuf(data_0, fea.size() * sizeof(float) * data_0_dim);
-    tensor_1.data =
-        paddle::PaddleBuf(data_1, week_fea.size() * sizeof(float) * data_1_dim);
-    tensor_2.data =
-        paddle::PaddleBuf(data_2, time_fea.size() * sizeof(float) * data_2_dim);
-
-    tensor_0.dtype = paddle::PaddleDType::FLOAT32;
-    tensor_1.dtype = paddle::PaddleDType::FLOAT32;
-    tensor_2.dtype = paddle::PaddleDType::FLOAT32;
-
-    inputs.clear();
-    inputs.push_back(tensor_1);
-    inputs.push_back(tensor_2);
-    inputs.push_back(tensor_0);
-
-    paddle::inference::Timer timer;
-    timer.tic();
-    for (int i = 0; i < FLAGS_repeat; i++) predictor->Run(inputs, &outputs);
-
-    paddle::inference::PrintTime(FLAGS_batch_size, FLAGS_repeat, 1, 0,
-                                 timer.toc() / FLAGS_repeat);
-    LOG(INFO) << "sequence_length = " << seq_offset[seq_offset.size() - 1];
-
-    float* data_o = static_cast<float*>(outputs[0].data.data());
-    VLOG(3) << "outputs[0].data.length() = " << outputs[0].data.length();
-    for (size_t j = 0; j < outputs[0].data.length(); ++j) {
-      VLOG(3) << "output[" << j << "]: " << data_o[j];
-    }
-  }
-}
-}  // namespace paddle
-
-int main(int argc, char** argv) {
-  google::ParseCommandLineFlags(&argc, &argv, true);
-  logger::init(argv[0]);
-
-  paddle::single_test();
-  /* multi-threads
-  std::vector<std::thread> threads;
-  int num = 1;
-  for (int i = 0; i < num; i++) {
-    LOG(INFO) << " thread id : " << i;
-    threads.emplace_back(paddle::single_test);
-  }
-  for (int i = 0; i < num; i++) {
-    threads[i].join();
-  }
-  threads.clear();
-  */
-
-  return 0;
-}
-- 
GitLab