Unverified commit 2fe032a1, authored by liu zhengxi, committed by GitHub

fix CPU C inference API compile bug (#22702) (#22752)

Co-authored-by: Nflame <fuchang1991@gmail.com>
Parent 78716128
@@ -21,14 +21,6 @@ package paddle
 import "C"
 import "fmt"
 
-type Precision C.Precision
-
-const (
-    kFloat32 Precision = C.kFloat32
-    kInt8    Precision = C.kInt8
-    kHalf    Precision = C.kHalf
-)
-
 func ConvertCBooleanToGo(b C.bool) bool {
     var c_false C.bool
     if b != c_false {
@@ -24,6 +24,14 @@ import "C"
 import "runtime"
 import "unsafe"
 
+type Precision C.Precision
+
+const (
+    Precision_FLOAT32 Precision = C.kFloat32
+    Precision_INT8    Precision = C.kInt8
+    Precision_HALF    Precision = C.kHalf
+)
+
 type AnalysisConfig struct {
     c *C.PD_AnalysisConfig
 }
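A note on the rename above: Go treats only identifiers that begin with an upper-case letter as exported, so the new Precision_FLOAT32 / Precision_INT8 / Precision_HALF constants are visible to code outside the package, whereas the removed kFloat32 / kInt8 / kHalf were package-private. The following is a minimal, self-contained sketch of that pattern; the int32 underlying type and the literal values are stand-ins for the cgo-backed C.Precision enum, chosen only so the example compiles without cgo.

```go
package main

import "fmt"

// Stand-in for `type Precision C.Precision` in the diff; int32 replaces
// the cgo-backed enum so this sketch builds without cgo.
type Precision int32

// Exported constants (upper-case first letter), mirroring the renamed
// Precision_FLOAT32 / Precision_INT8 / Precision_HALF above. The numeric
// values here are illustrative, not the real C enum values.
const (
	Precision_FLOAT32 Precision = 0
	Precision_INT8    Precision = 1
	Precision_HALF    Precision = 2
)

func main() {
	// Any importer of the package can reference the exported names;
	// the old kFloat32-style constants were not visible outside it.
	p := Precision_FLOAT32
	fmt.Println("precision:", p)
}
```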
@@ -102,12 +102,12 @@ func (predictor *Predictor) SetZeroCopyInput(tensor *ZeroCopyTensor) {
 func (predictor *Predictor) GetZeroCopyOutput(tensor *ZeroCopyTensor) {
     C.PD_GetZeroCopyOutput(predictor.c, tensor.c)
     tensor.name = C.GoString(tensor.c.name)
     var shape []int32
     shape_hdr := (*reflect.SliceHeader)(unsafe.Pointer(&shape))
     shape_hdr.Data = uintptr(unsafe.Pointer(tensor.c.shape.data))
     shape_hdr.Len = int(tensor.c.shape.length / C.sizeof_int)
     shape_hdr.Cap = int(tensor.c.shape.length / C.sizeof_int)
     tensor.Reshape(shape)
 }
 
 func (predictor *Predictor) ZeroCopyRun() {
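The GetZeroCopyOutput body above builds a Go slice header directly over the C-owned shape buffer, so the shape can be read without copying it element by element. Below is a runnable sketch of that reflect.SliceHeader technique; a plain Go array stands in for tensor.c.shape.data so the example works without cgo.

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

func main() {
	// Backing storage standing in for the C-owned shape buffer
	// (tensor.c.shape.data in the diff).
	raw := [4]int32{1, 3, 224, 224}

	// Point an empty []int32 at that memory by filling in its header,
	// exactly as GetZeroCopyOutput does with shape_hdr.
	var shape []int32
	hdr := (*reflect.SliceHeader)(unsafe.Pointer(&shape))
	hdr.Data = uintptr(unsafe.Pointer(&raw[0]))
	hdr.Len = len(raw) // in the diff: tensor.c.shape.length / C.sizeof_int
	hdr.Cap = len(raw)

	fmt.Println(shape) // [1 3 224 224]
}
```

On Go 1.17 and newer the same view can be built with unsafe.Slice((*int32)(unsafe.Pointer(&raw[0])), len(raw)), which avoids filling in the header fields by hand.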
@@ -137,16 +137,16 @@ func (tensor *ZeroCopyTensor) SetValue(value interface{}) {
     tensor.c.data.length = length
     switch dtype {
     case PaddleDType(UINT8):
         data := val.Interface().([]uint8)
         C.memcpy(tensor.c.data.data, unsafe.Pointer(&data[0]), length)
     case PaddleDType(INT32):
         data := val.Interface().([]int32)
         C.memcpy(tensor.c.data.data, unsafe.Pointer(&data[0]), length)
     case PaddleDType(INT64):
         data := val.Interface().([]int64)
         C.memcpy(tensor.c.data.data, unsafe.Pointer(&data[0]), length)
     case PaddleDType(FLOAT32):
         data := val.Interface().([]float32)
         C.memcpy(tensor.c.data.data, unsafe.Pointer(&data[0]), length)
     }
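SetValue above switches on the tensor's dtype to pick the concrete Go slice type and then memcpys the slice's backing array into the C-owned data buffer. A small cgo sketch of that copy for the FLOAT32 case follows; the malloc'd buffer is a stand-in for tensor.c.data.data.

```go
package main

/*
#include <stdlib.h>
#include <string.h>
*/
import "C"

import (
	"fmt"
	"unsafe"
)

func main() {
	data := []float32{0.1, 0.2, 0.3, 0.4}

	// Byte length of the slice contents, matching the length passed
	// to C.memcpy in the diff.
	length := C.size_t(len(data)) * C.size_t(unsafe.Sizeof(data[0]))

	// C-owned buffer standing in for tensor.c.data.data.
	buf := C.malloc(length)
	defer C.free(buf)

	// Same call shape as the FLOAT32 case above: copy the Go slice's
	// backing array into the C buffer.
	C.memcpy(buf, unsafe.Pointer(&data[0]), length)

	// Read back the first element through the C pointer to confirm the copy.
	fmt.Println(*(*float32)(buf)) // 0.1
}
```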
@@ -39,6 +39,20 @@ get_property(fluid_modules GLOBAL PROPERTY FLUID_MODULES)
 get_property(cuda_modules GLOBAL PROPERTY CUDA_MODULES)
 add_subdirectory(api)
 
+# Create static inference library if needed
+# All static libs in inference/api
+set(STATIC_INFERENCE_API paddle_inference_api analysis_predictor zero_copy_tensor reset_tensor_array
+    analysis_config paddle_pass_builder activation_functions ${mkldnn_quantizer_cfg})
+create_static_lib(paddle_fluid ${fluid_modules} ${STATIC_INFERENCE_API})
+
+if(NOT APPLE)
+  # TODO(liuyiqu: Temporarily disable the link flag because it is not support on Mac.
+  set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym")
+  set_target_properties(paddle_fluid PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
+endif()
+
+# C inference API
 add_subdirectory(capi)
 
 if(WITH_TESTING)

@@ -53,17 +67,6 @@ if(NOT ON_INFER)
   return()
 endif()
 
-# Create static inference library if needed
-# All static libs in inference/api
-set(STATIC_INFERENCE_API paddle_inference_api analysis_predictor zero_copy_tensor reset_tensor_array
-    analysis_config paddle_pass_builder activation_functions ${mkldnn_quantizer_cfg})
-create_static_lib(paddle_fluid ${fluid_modules} ${STATIC_INFERENCE_API})
-
-if(NOT APPLE)
-  # TODO(liuyiqu: Temporarily disable the link flag because it is not support on Mac.
-  set(LINK_FLAGS "-Wl,--retain-symbols-file ${CMAKE_CURRENT_SOURCE_DIR}/paddle_fluid.sym")
-  set_target_properties(paddle_fluid PROPERTIES LINK_FLAGS "${LINK_FLAGS}")
-endif()
-
 set(SHARED_INFERENCE_SRCS
     io.cc
     ${CMAKE_CURRENT_SOURCE_DIR}/../framework/data_feed.cc
@@ -15,14 +15,14 @@
 set(C_API_SRCS pd_config.cc pd_predictor.cc pd_tensor.cc c_api.cc)
 
-cc_library(paddle_fluid_c SRCS ${C_API_SRCS} DEPS ${fluid_modules} analysis_predictor)
+cc_library(paddle_fluid_c SRCS ${C_API_SRCS} DEPS paddle_fluid)
 
 if(NOT ON_INFER)
   return()
 endif()
 
 # Create inference capi shared library
-cc_library(paddle_fluid_c_shared SHARED SRCS ${C_API_SRCS} DEPS ${fluid_modules} analysis_predictor)
+cc_library(paddle_fluid_c_shared SHARED SRCS ${C_API_SRCS} DEPS paddle_fluid)
 set_target_properties(paddle_fluid_c_shared PROPERTIES OUTPUT_NAME paddle_fluid_c)
 if(WIN32)
   target_link_libraries(paddle_fluid_c_shared shlwapi.lib)