From d97475d53bbd7757a6fce2a790a6e2b30b11250e Mon Sep 17 00:00:00 2001
From: flame
Date: Fri, 21 Feb 2020 16:04:45 +0800
Subject: [PATCH] fix CPU C inference API compile bug (#22702)

---
 go/paddle/common.go                        |  8 --------
 go/paddle/config.go                        |  8 ++++++++
 go/paddle/predictor.go                     |  4 ++--
 go/paddle/tensor.go                        |  8 ++++----
 paddle/fluid/inference/CMakeLists.txt      | 25 ++++++++++++++-----------
 paddle/fluid/inference/capi/CMakeLists.txt |  4 ++--
 6 files changed, 30 insertions(+), 27 deletions(-)

diff --git a/go/paddle/common.go b/go/paddle/common.go
index 346dc3f395b..b29efbdf302 100644
--- a/go/paddle/common.go
+++ b/go/paddle/common.go
@@ -21,14 +21,6 @@ package paddle
 import "C"
 import "fmt"
 
-type Precision C.Precision
-
-const (
-	kFloat32 Precision = C.kFloat32
-	kInt8    Precision = C.kInt8
-	kHalf    Precision = C.kHalf
-)
-
 func ConvertCBooleanToGo(b C.bool) bool {
 	var c_false C.bool
 	if b != c_false {
diff --git a/go/paddle/config.go b/go/paddle/config.go
index 5480365d580..05e126114b1 100644
--- a/go/paddle/config.go
+++ b/go/paddle/config.go
@@ -24,6 +24,14 @@ import "C"
 import "runtime"
 import "unsafe"
 
+type Precision C.Precision
+
+const (
+	Precision_FLOAT32 Precision = C.kFloat32
+	Precision_INT8    Precision = C.kInt8
+	Precision_HALF    Precision = C.kHalf
+)
+
 type AnalysisConfig struct {
 	c *C.PD_AnalysisConfig
 }
diff --git a/go/paddle/predictor.go b/go/paddle/predictor.go
index 8d034e0d6ee..2bae7854c31 100644
--- a/go/paddle/predictor.go
+++ b/go/paddle/predictor.go
@@ -102,12 +102,12 @@ func (predictor *Predictor) SetZeroCopyInput(tensor *ZeroCopyTensor) {
 func (predictor *Predictor) GetZeroCopyOutput(tensor *ZeroCopyTensor) {
 	C.PD_GetZeroCopyOutput(predictor.c, tensor.c)
 	tensor.name = C.GoString(tensor.c.name)
-	var shape []int32
+	var shape []int32
 	shape_hdr := (*reflect.SliceHeader)(unsafe.Pointer(&shape))
 	shape_hdr.Data = uintptr(unsafe.Pointer(tensor.c.shape.data))
 	shape_hdr.Len = int(tensor.c.shape.length / C.sizeof_int)
 	shape_hdr.Cap = int(tensor.c.shape.length / C.sizeof_int)
-	tensor.Reshape(shape)
+	tensor.Reshape(shape)
 }
 
 func (predictor *Predictor) ZeroCopyRun() {
diff --git a/go/paddle/tensor.go b/go/paddle/tensor.go
index 3a77d92bee8..4da99ea840f 100644
--- a/go/paddle/tensor.go
+++ b/go/paddle/tensor.go
@@ -137,16 +137,16 @@ func (tensor *ZeroCopyTensor) SetValue(value interface{}) {
 	tensor.c.data.length = length
 
 	switch dtype {
-	case PaddleDType(UINT8):
+	case PaddleDType(UINT8):
 		data := val.Interface().([]uint8)
 		C.memcpy(tensor.c.data.data, unsafe.Pointer(&data[0]), length)
-	case PaddleDType(INT32):
+	case PaddleDType(INT32):
 		data := val.Interface().([]int32)
 		C.memcpy(tensor.c.data.data, unsafe.Pointer(&data[0]), length)
-	case PaddleDType(INT64):
+	case PaddleDType(INT64):
 		data := val.Interface().([]int64)
 		C.memcpy(tensor.c.data.data, unsafe.Pointer(&data[0]), length)
-	case PaddleDType(FLOAT32):
+	case PaddleDType(FLOAT32):
 		data := val.Interface().([]float32)
 		C.memcpy(tensor.c.data.data, unsafe.Pointer(&data[0]), length)
 	}
diff --git a/paddle/fluid/inference/CMakeLists.txt b/paddle/fluid/inference/CMakeLists.txt
index e557df123c6..aa2fce14fa2 100644
--- a/paddle/fluid/inference/CMakeLists.txt
+++ b/paddle/fluid/inference/CMakeLists.txt
@@ -39,6 +39,20 @@ get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
 get_property(cuda_modules GLOBAL PROPERTY CUDA_MODULES)
 
 add_subdirectory(api)
+
+# Create static inference library if needed
+# All static libs in inference/api
+set(STATIC_INFERENCE_API paddle_inference_api analysis_predictor zero_copy_tensor reset_tensor_array
+    analysis_config paddle_pass_builder activation_functions ${mkldnn_quantizer_cfg})
+create_static_lib(paddle_fluid ${fluid_modules} ${STATIC_INFERENCE_API})
+
+if(NOT APPLE)
+  # TODO(liuyiqun): Temporarily disable the link flag because it is not support on Mac.
+  set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym")
+  set_target_properties(paddle_fluid PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
+endif()
+
+# C inference API
 add_subdirectory(capi)
 
 if(WITH_TESTING)
@@ -53,17 +67,6 @@ if(NOT ON_INFER)
     return()
 endif()
 
-# Create static inference library if needed
-# All static libs in inference/api
-set(STATIC_INFERENCE_API paddle_inference_api analysis_predictor zero_copy_tensor reset_tensor_array
-    analysis_config paddle_pass_builder activation_functions ${mkldnn_quantizer_cfg})
-create_static_lib(paddle_fluid ${fluid_modules} ${STATIC_INFERENCE_API})
-if(NOT APPLE)
-  # TODO(liuyiqun): Temporarily disable the link flag because it is not support on Mac.
-  set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym")
-  set_target_properties(paddle_fluid PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
-endif()
-
 set(SHARED_INFERENCE_SRCS
     io.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/../framework/data_feed.cc
diff --git a/paddle/fluid/inference/capi/CMakeLists.txt b/paddle/fluid/inference/capi/CMakeLists.txt
index 92be4b850fa..7a555279f85 100644
--- a/paddle/fluid/inference/capi/CMakeLists.txt
+++ b/paddle/fluid/inference/capi/CMakeLists.txt
@@ -15,14 +15,14 @@ set(C_API_SRCS
     pd_config.cc
     pd_predictor.cc
     pd_tensor.cc
     c_api.cc)
-cc_library(paddle_fluid_c SRCS ${C_API_SRCS} DEPS ${fluid_modules} analysis_predictor)
+cc_library(paddle_fluid_c SRCS ${C_API_SRCS} DEPS paddle_fluid)
 if(NOT ON_INFER)
     return()
 endif()
 
 # Create inference capi shared library
-cc_library(paddle_fluid_c_shared SHARED SRCS ${C_API_SRCS} DEPS ${fluid_modules} analysis_predictor)
+cc_library(paddle_fluid_c_shared SHARED SRCS ${C_API_SRCS} DEPS paddle_fluid)
 set_target_properties(paddle_fluid_c_shared PROPERTIES OUTPUT_NAME paddle_fluid_c)
 if(WIN32)
   target_link_libraries(paddle_fluid_c_shared shlwapi.lib)
--
GitLab
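
For reference, the constants renamed in go/paddle/config.go above (Precision_FLOAT32, Precision_INT8, Precision_HALF) are ordinary exported identifiers of the paddle package; the old kFloat32/kInt8/kHalf names were unexported, which is the user-visible part of this change. Below is a minimal sketch of consuming them from Go. The PrecisionString helper and its file name are hypothetical and not part of this patch.

// precision_string.go: hypothetical helper placed alongside go/paddle/config.go.
// It uses only the Precision type and the exported constants introduced above.
package paddle

// PrecisionString maps a Precision value to a readable name.
func PrecisionString(p Precision) string {
	switch p {
	case Precision_FLOAT32:
		return "float32"
	case Precision_INT8:
		return "int8"
	case Precision_HALF:
		return "half"
	default:
		return "unknown"
	}
}

Code outside the package would refer to the same values as paddle.Precision_FLOAT32, paddle.Precision_INT8, and paddle.Precision_HALF.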