diff --git a/paddle/fluid/inference/capi/paddle_c_api.h b/paddle/fluid/inference/capi/paddle_c_api.h
index e9157c06dec3659e3287d8157b6b29a10a7830da..bfac4baef09dbff4a39d18d682645a17071dfad7 100644
--- a/paddle/fluid/inference/capi/paddle_c_api.h
+++ b/paddle/fluid/inference/capi/paddle_c_api.h
@@ -117,8 +117,8 @@ PADDLE_CAPI_EXPORT extern PD_DataType PD_GetPaddleTensorDType(
 PADDLE_CAPI_EXPORT extern PD_PaddleBuf* PD_GetPaddleTensorData(
     const PD_Tensor* tensor);
 
-PADDLE_CAPI_EXPORT extern int* PD_GetPaddleTensorShape(const PD_Tensor* tensor,
-                                                       int** size);
+PADDLE_CAPI_EXPORT extern const int* PD_GetPaddleTensorShape(
+    const PD_Tensor* tensor, int* size);
 
 // AnalysisPredictor
 PADDLE_CAPI_EXPORT extern bool PD_PredictorRun(const PD_AnalysisConfig* config,
@@ -262,22 +262,32 @@ PADDLE_CAPI_EXPORT extern bool PD_ProfileEnabled(
 PADDLE_CAPI_EXPORT extern void PD_SetInValid(PD_AnalysisConfig* config);
 
 PADDLE_CAPI_EXPORT extern bool PD_IsValid(const PD_AnalysisConfig* config);
+
 PADDLE_CAPI_EXPORT extern void PD_DisableGlogInfo(PD_AnalysisConfig* config);
+
 PADDLE_CAPI_EXPORT extern void PD_DeletePass(PD_AnalysisConfig* config,
                                              char* pass_name);
 
 PADDLE_CAPI_EXPORT extern PD_Predictor* PD_NewPredictor(
     const PD_AnalysisConfig* config);
+
 PADDLE_CAPI_EXPORT extern void PD_DeletePredictor(PD_Predictor* predictor);
+
 PADDLE_CAPI_EXPORT extern int PD_GetInputNum(const PD_Predictor*);
+
 PADDLE_CAPI_EXPORT extern int PD_GetOutputNum(const PD_Predictor*);
+
 PADDLE_CAPI_EXPORT extern const char* PD_GetInputName(const PD_Predictor*, int);
+
 PADDLE_CAPI_EXPORT extern const char* PD_GetOutputName(const PD_Predictor*,
                                                        int);
+
 PADDLE_CAPI_EXPORT extern void PD_SetZeroCopyInput(
     PD_Predictor* predictor, const PD_ZeroCopyTensor* tensor);
+
 PADDLE_CAPI_EXPORT extern void PD_GetZeroCopyOutput(PD_Predictor* predictor,
                                                     PD_ZeroCopyTensor* tensor);
+
 PADDLE_CAPI_EXPORT extern void PD_ZeroCopyRun(PD_Predictor* predictor);
 
 #ifdef __cplusplus
diff --git a/paddle/fluid/inference/capi/pd_predictor.cc b/paddle/fluid/inference/capi/pd_predictor.cc
index 389f3cf5c08444412d9398a2b4ac6a1a5ed0396b..8aa1e2a7b7f9b99a1636ca2e7396089ab2ae7e15 100644
--- a/paddle/fluid/inference/capi/pd_predictor.cc
+++ b/paddle/fluid/inference/capi/pd_predictor.cc
@@ -180,7 +180,8 @@ PD_Predictor* PD_NewPredictor(const PD_AnalysisConfig* config) {
 }
 
 void PD_DeletePredictor(PD_Predictor* predictor) {
-  if (predictor == nullptr) {
+  if (predictor) {
+    predictor->predictor = nullptr;
     delete predictor;
     predictor = nullptr;
   }
@@ -232,7 +233,8 @@ void PD_SetZeroCopyInput(PD_Predictor* predictor,
 
   if (tensor->lod.length) {
     auto* lod_ptr = reinterpret_cast<size_t*>(tensor->lod.data);
-    std::vector<size_t> lod(lod_ptr, lod_ptr + tensor->lod.length);
+    std::vector<size_t> lod;
+    lod.assign(lod_ptr, lod_ptr + tensor->lod.length / sizeof(size_t));
     input->SetLoD({std::move(lod)});
   }
 }
@@ -265,17 +267,19 @@ void PD_GetZeroCopyOutput(PD_Predictor* predictor, PD_ZeroCopyTensor* tensor) {
   tensor->data.length = length;
   auto lod = output->lod();
-  tensor->lod.length = lod.front().size() * sizeof(size_t);
-  if (tensor->lod.capacity < lod.front().size()) {
-    if (tensor->lod.data) {
-      std::free(tensor->lod.data);
-    }
+  if (!lod.empty()) {
+    tensor->lod.length = lod.front().size() * sizeof(size_t);
+    if (tensor->lod.capacity < lod.front().size()) {
+      if (tensor->lod.data) {
+        std::free(tensor->lod.data);
+      }
 
-    tensor->lod.data = std::malloc(lod.front().size() * sizeof(size_t));
-    tensor->lod.capacity = lod.front().size() * sizeof(size_t);
+      tensor->lod.data = std::malloc(lod.front().size() * sizeof(size_t));
+      tensor->lod.capacity = lod.front().size() * sizeof(size_t);
+    }
+    std::copy(lod.front().begin(), lod.front().end(),
+              reinterpret_cast<size_t*>(tensor->lod.data));
   }
-  std::copy(lod.front().begin(), lod.front().end(),
-            reinterpret_cast<size_t*>(tensor->lod.data));
 
   switch (tensor->dtype) {
     case PD_FLOAT32:
       output->copy_to_cpu(reinterpret_cast<float*>(tensor->data.data));
diff --git a/paddle/fluid/inference/capi/pd_tensor.cc b/paddle/fluid/inference/capi/pd_tensor.cc
index db428785047558e4aaf850149a94edaaedd420a3..b4811f1d6ff192659fa12b33008fe5ac07e6a6c5 100644
--- a/paddle/fluid/inference/capi/pd_tensor.cc
+++ b/paddle/fluid/inference/capi/pd_tensor.cc
@@ -73,11 +73,10 @@ PD_PaddleBuf* PD_GetPaddleTensorData(const PD_Tensor* tensor) {
   return ret;
 }
 
-int* PD_GetPaddleTensorShape(const PD_Tensor* tensor, int** size) {
+const int* PD_GetPaddleTensorShape(const PD_Tensor* tensor, int* size) {
   PADDLE_ENFORCE_NOT_NULL(tensor);
-  std::vector<int> shape = tensor->tensor.shape;
-  int s = shape.size();
-  *size = &s;
+  const std::vector<int>& shape = tensor->tensor.shape;
+  *size = shape.size();
   return shape.data();
 }
diff --git a/paddle/fluid/inference/tests/api/CMakeLists.txt b/paddle/fluid/inference/tests/api/CMakeLists.txt
index 79a74ee9ae4cb624a6e09729b5aaa6af11ff30e1..9a4237349c0d277d5305546046d64a15cd1d3f4b 100644
--- a/paddle/fluid/inference/tests/api/CMakeLists.txt
+++ b/paddle/fluid/inference/tests/api/CMakeLists.txt
@@ -387,3 +387,7 @@ if(WITH_MKLDNN)
         EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
         ARGS --infer_model=${INT8_DATA_DIR}/resnet50/model)
 endif()
+
+inference_analysis_test(test_analyzer_capi_ner SRCS analyzer_capi_ner_tester.cc
+        EXTRA_DEPS ${INFERENCE_EXTRA_DEPS} paddle_fluid_c
+        ARGS --infer_model=${CHINESE_NER_INSTALL_DIR}/model)
diff --git a/paddle/fluid/inference/tests/api/analyzer_capi_gpu_tester.cc b/paddle/fluid/inference/tests/api/analyzer_capi_gpu_tester.cc
index 4fa58df09d53fdcbd06c39634ea6ff9c100af466..85bd5bafd99abcde98c583cd3a3c15bf3aefa85e 100644
--- a/paddle/fluid/inference/tests/api/analyzer_capi_gpu_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_capi_gpu_tester.cc
@@ -15,8 +15,6 @@ limitations under the License. */
 #include <glog/logging.h>
 #include <gtest/gtest.h>
 #include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
 #include <string>
 #include <vector>
 #include "paddle/fluid/inference/capi/paddle_c_api.h"
@@ -93,6 +91,8 @@ TEST(PD_AnalysisConfig, trt_fp16) {
                              false);
   bool trt_enable = PD_TensorrtEngineEnabled(config);
   CHECK(trt_enable) << "NO";
+  PD_Predictor *predictor = PD_NewPredictor(config);
+  PD_DeletePredictor(predictor);
   PD_DeleteAnalysisConfig(config);
 }
diff --git a/paddle/fluid/inference/tests/api/analyzer_capi_int_tester.cc b/paddle/fluid/inference/tests/api/analyzer_capi_int_tester.cc
index f619b3759dd65c343f5e678b96db8bac0f299d1f..c0c8ff083de57fb26578cfda4533e74ad52dba15 100644
--- a/paddle/fluid/inference/tests/api/analyzer_capi_int_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_capi_int_tester.cc
@@ -15,8 +15,6 @@ limitations under the License. */
 #include <glog/logging.h>
 #include <gtest/gtest.h>
 #include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
 #include <string>
 #include <vector>
 #include "paddle/fluid/inference/capi/paddle_c_api.h"
diff --git a/paddle/fluid/inference/tests/api/analyzer_capi_ner_tester.cc b/paddle/fluid/inference/tests/api/analyzer_capi_ner_tester.cc
new file mode 100644
index 0000000000000000000000000000000000000000..bf0576f9f93b19221b147137b47bd0944ccf4479
--- /dev/null
+++ b/paddle/fluid/inference/tests/api/analyzer_capi_ner_tester.cc
@@ -0,0 +1,117 @@
+// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gtest/gtest.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <vector>
+#include "paddle/fluid/inference/capi/paddle_c_api.h"
+#include "paddle/fluid/inference/tests/api/tester_helper.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+void SetConfig(PD_AnalysisConfig *config) {
+  auto model_dir = FLAGS_infer_model;
+  PD_SetModel(config, (model_dir + "/__model__").c_str(),
+              (model_dir + "/param").c_str());
+  PD_SwitchUseFeedFetchOps(config, false);
+  PD_SwitchSpecifyInputNames(config, true);
+  PD_DisableGpu(config);
+}
+
+TEST(PD_ZeroCopyRun, zero_copy_run) {
+  PD_AnalysisConfig *config = PD_NewAnalysisConfig();
+  SetConfig(config);
+  PD_Predictor *predictor = PD_NewPredictor(config);
+
+  int input_num = PD_GetInputNum(predictor);
+  printf("Input num: %d\n", input_num);
+  int output_num = PD_GetOutputNum(predictor);
+  printf("Output num: %d\n", output_num);
+
+  PD_ZeroCopyTensor inputs[2];
+
+  // inputs[0]: word
+  PD_InitZeroCopyTensor(&inputs[0]);
+  inputs[0].name = new char[5];
+  snprintf(inputs[0].name, strlen(PD_GetInputName(predictor, 0)) + 1, "%s",
+           PD_GetInputName(predictor, 0));
+
+  inputs[0].data.capacity = sizeof(int64_t) * 11 * 1;
+  inputs[0].data.length = inputs[0].data.capacity;
+  inputs[0].data.data = malloc(inputs[0].data.capacity);
+  std::vector<int64_t> ref_word(
+      {12673, 9763, 905, 284, 45, 7474, 20, 17, 1, 4, 9});
+  inputs[0].data.data = reinterpret_cast<void *>(ref_word.data());
+
+  int shape0[] = {11, 1};
+  inputs[0].shape.data = reinterpret_cast<void *>(shape0);
+  inputs[0].shape.capacity = sizeof(shape0);
+  inputs[0].shape.length = sizeof(shape0);
+  inputs[0].dtype = PD_INT64;
+
+  size_t lod0[] = {0, 11};
+  inputs[0].lod.data = reinterpret_cast<void *>(lod0);
+  inputs[0].lod.capacity = sizeof(size_t) * 2;
+  inputs[0].lod.length = sizeof(size_t) * 2;
+
+  PD_SetZeroCopyInput(predictor, &inputs[0]);
+
+  // inputs[1]: mention
+  PD_InitZeroCopyTensor(&inputs[1]);
+  inputs[1].name = new char[8];
+  snprintf(inputs[1].name, strlen(PD_GetInputName(predictor, 1)) + 1, "%s",
+           PD_GetInputName(predictor, 1));
+
+  inputs[1].data.capacity = sizeof(int64_t) * 11 * 1;
+  inputs[1].data.length = inputs[1].data.capacity;
+  inputs[1].data.data = malloc(inputs[1].data.capacity);
+  std::vector<int64_t> ref_mention({27, 0, 0, 33, 34, 33, 0, 0, 0, 1, 2});
+  inputs[1].data.data = reinterpret_cast<void *>(ref_mention.data());
+
+  int shape1[] = {11, 1};
+  inputs[1].shape.data = reinterpret_cast<void *>(shape1);
+  inputs[1].shape.capacity = sizeof(shape1);
+  inputs[1].shape.length = sizeof(shape1);
+  inputs[1].dtype = PD_INT64;
+
+  size_t lod1[] = {0, 11};
+  inputs[1].lod.data = reinterpret_cast<void *>(lod1);
+  inputs[1].lod.capacity = sizeof(size_t) * 2;
+  inputs[1].lod.length = sizeof(size_t) * 2;
+
+  PD_SetZeroCopyInput(predictor, &inputs[1]);
+
+  PD_ZeroCopyRun(predictor);
+  PD_ZeroCopyTensor output;
+  PD_InitZeroCopyTensor(&output);
+  output.name = new char[21];
+  snprintf(output.name, strlen(PD_GetOutputName(predictor, 0)) + 1, "%s",
+           PD_GetOutputName(predictor, 0));
+
+  // not necessary, just for coverage tests
+  output.lod.data = std::malloc(sizeof(size_t));
+
+  PD_GetZeroCopyOutput(predictor, &output);
+  PD_DestroyZeroCopyTensor(&output);
+  PD_DeleteAnalysisConfig(config);
+  PD_DeletePredictor(predictor);
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/tests/api/analyzer_capi_pd_tensor_tester.cc b/paddle/fluid/inference/tests/api/analyzer_capi_pd_tensor_tester.cc
index 9edf04c4cfd02eb43bf5a392582529b134d0198b..0bc67aff7af1be9f34ffa2bb71c25d2964a62521 100644
--- a/paddle/fluid/inference/tests/api/analyzer_capi_pd_tensor_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_capi_pd_tensor_tester.cc
@@ -67,8 +67,14 @@ void PD_run() {
   float* result = static_cast<float*>(PD_PaddleBufData(b));
   LOG(INFO) << *result;
   PD_DeletePaddleTensor(input);
-  int* size;
-  PD_GetPaddleTensorShape(out_data, &size);
+  int size;
+  const int* out_shape = PD_GetPaddleTensorShape(out_data, &size);
+  CHECK(size == 2) << "The output shape's size does NOT match.";
+  std::vector<int> ref_outshape_size({9, 6});
+  for (int i = 0; i < 2; ++i) {
+    CHECK(out_shape[i] == ref_outshape_size[i])
+        << "The output's shape does NOT match.";
+  }
   PD_DeletePaddleBuf(buf);
 }
diff --git a/paddle/fluid/inference/tests/api/analyzer_capi_tester.cc b/paddle/fluid/inference/tests/api/analyzer_capi_tester.cc
index 8ec70ff6ed5d862c2a885e4e650842b9d7f8c6a7..93fcb43447d01dcafa10d8c85234d243d5095d4e 100644
--- a/paddle/fluid/inference/tests/api/analyzer_capi_tester.cc
+++ b/paddle/fluid/inference/tests/api/analyzer_capi_tester.cc
@@ -15,8 +15,6 @@ limitations under the License. */
 #include <glog/logging.h>
 #include <gtest/gtest.h>
 #include <stddef.h>
-#include <stdint.h>
-#include <stdio.h>
 #include <string>
 #include <vector>
 #include "paddle/fluid/inference/capi/paddle_c_api.h"
@@ -71,7 +69,7 @@ void zero_copy_run() {
   delete[] outputs;
 }
 
-TEST(PD_ZeroCopyRun, zero_copy_run) { zero_copy_run(); }
+TEST(PD_PredictorZeroCopyRun, zero_copy_run) { zero_copy_run(); }
 
 #ifdef PADDLE_WITH_MKLDNN
 TEST(PD_AnalysisConfig, profile_mkldnn) {
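
Usage sketch (illustrative, not part of the patch): the snippet below shows how a caller would drive the new predictor lifecycle and introspection entry points declared in paddle_c_api.h above. The model directory "my_model" is a placeholder; everything else uses only functions this patch declares or that already exist in the C API.

    #include <stdio.h>
    #include "paddle/fluid/inference/capi/paddle_c_api.h"

    int main() {
      PD_AnalysisConfig* config = PD_NewAnalysisConfig();
      // "my_model" is a placeholder directory, laid out as the testers expect.
      PD_SetModel(config, "my_model/__model__", "my_model/param");
      PD_SwitchUseFeedFetchOps(config, false);
      PD_DisableGpu(config);

      PD_Predictor* predictor = PD_NewPredictor(config);

      // New introspection helpers: enumerate inputs and outputs by index.
      for (int i = 0; i < PD_GetInputNum(predictor); ++i) {
        printf("input %d: %s\n", i, PD_GetInputName(predictor, i));
      }
      for (int i = 0; i < PD_GetOutputNum(predictor); ++i) {
        printf("output %d: %s\n", i, PD_GetOutputName(predictor, i));
      }

      // ... fill PD_ZeroCopyTensor inputs via PD_SetZeroCopyInput, call
      // PD_ZeroCopyRun, and read results with PD_GetZeroCopyOutput, as
      // analyzer_capi_ner_tester.cc does above.

      // New teardown entry point; pairs with PD_NewPredictor.
      PD_DeletePredictor(predictor);
      PD_DeleteAnalysisConfig(config);
      return 0;
    }

The PD_GetPaddleTensorShape change is worth reading together with its new callers: the old signature wrote the address of a stack local through int** and returned a pointer into a vector copy that died on return, while the new one writes the dimension count through a plain int* and returns a read-only pointer into the tensor's own shape storage. A caller fragment (out_tensor stands for any live PD_Tensor*, e.g. one produced by PD_PredictorRun):

    int ndim = 0;
    const int* dims = PD_GetPaddleTensorShape(out_tensor, &ndim);
    for (int i = 0; i < ndim; ++i) {
      printf("dim[%d] = %d\n", i, dims[i]);
    }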