From f6dd20144c64280a771c27ceb693db7c34dd820e Mon Sep 17 00:00:00 2001
From: Yuanle Liu
Date: Sat, 17 Sep 2022 11:27:35 +0800
Subject: [PATCH] fix compilation errors on mac arm64 (#46135)

---
 cmake/external/protobuf.cmake                   |  2 --
 cmake/external/xxhash.cmake                     |  1 -
 paddle/fluid/eager/pylayer/py_layer_node.h      |  2 +-
 paddle/fluid/inference/capi_exp/CMakeLists.txt  | 12 ++++++++++++
 paddle/fluid/operators/abs_op.cc                |  2 +-
 paddle/fluid/operators/activation_op.cc         |  2 +-
 paddle/fluid/operators/cross_entropy_op.cc      |  2 +-
 paddle/fluid/operators/dequantize_abs_max_op.cc |  2 +-
 paddle/fluid/operators/dequantize_log_op.cc     |  2 +-
 paddle/fluid/operators/kron_op.cc               |  4 ++--
 paddle/fluid/operators/matmul_op.cc             |  2 +-
 paddle/fluid/operators/matmul_v2_op.cc          |  4 ++--
 paddle/fluid/operators/pad3d_op.cc              |  2 +-
 paddle/fluid/operators/prelu_op.cc              |  4 ++--
 paddle/phi/core/string_tensor.h                 |  2 +-
 paddle/phi/core/tensor_array.h                  |  2 +-
 16 files changed, 28 insertions(+), 19 deletions(-)

diff --git a/cmake/external/protobuf.cmake b/cmake/external/protobuf.cmake
index 6f9078c8eee..5903edebae8 100755
--- a/cmake/external/protobuf.cmake
+++ b/cmake/external/protobuf.cmake
@@ -268,8 +268,6 @@ function(build_protobuf TARGET_NAME BUILD_FOR_HOST)
       DOWNLOAD_DIR ${PROTOBUF_SOURCE_DIR}
       DOWNLOAD_COMMAND rm -rf arm_protobuf.tar.gz && wget --no-check-certificate
                        ${ARM_PROTOBUF_URL} && tar zxvf arm_protobuf.tar.gz
-      #DOWNLOAD_COMMAND cp /home/wangbin44/Paddle/build/arm_protobuf.tar.gz .
-      #                 && tar zxvf arm_protobuf.tar.gz
       UPDATE_COMMAND ""
       CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR}
                  -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
diff --git a/cmake/external/xxhash.cmake b/cmake/external/xxhash.cmake
index 6e685bbde40..648a060f9b7 100644
--- a/cmake/external/xxhash.cmake
+++ b/cmake/external/xxhash.cmake
@@ -97,5 +97,4 @@ endif()
 
 add_library(xxhash STATIC IMPORTED GLOBAL)
 set_property(TARGET xxhash PROPERTY IMPORTED_LOCATION ${XXHASH_LIBRARIES})
-include_directories(${XXHASH_INCLUDE_DIR})
 add_dependencies(xxhash extern_xxhash)
diff --git a/paddle/fluid/eager/pylayer/py_layer_node.h b/paddle/fluid/eager/pylayer/py_layer_node.h
index f1166c362e1..18c48b62c4f 100644
--- a/paddle/fluid/eager/pylayer/py_layer_node.h
+++ b/paddle/fluid/eager/pylayer/py_layer_node.h
@@ -45,7 +45,7 @@ class GradNodePyLayer : public GradNodeBase {
 
   void ClearTensorWrappers() override { VLOG(6) << "Do nothing here now"; }
 
-  std::string name() {
+  std::string name() override {
     return "GradNodePyLayer_" + std::string(Py_TYPE(ctx_)->tp_name);
   }
 
diff --git a/paddle/fluid/inference/capi_exp/CMakeLists.txt b/paddle/fluid/inference/capi_exp/CMakeLists.txt
index 089a766b91c..26d76c280bd 100644
--- a/paddle/fluid/inference/capi_exp/CMakeLists.txt
+++ b/paddle/fluid/inference/capi_exp/CMakeLists.txt
@@ -31,6 +31,18 @@ cc_library(
   DEPS paddle_inference)
 set_target_properties(paddle_inference_c_shared PROPERTIES OUTPUT_NAME
                                                            paddle_inference_c)
+
+if(APPLE)
+  target_link_libraries(
+    paddle_inference_c_shared
+    xxhash
+    utf8proc
+    cryptopp
+    protobuf
+    gflags
+    cblas)
+endif()
+
 if(WIN32)
   target_link_libraries(paddle_inference_c_shared shlwapi.lib)
 endif()
diff --git a/paddle/fluid/operators/abs_op.cc b/paddle/fluid/operators/abs_op.cc
index 465637f3ed6..9a2a75a642a 100644
--- a/paddle/fluid/operators/abs_op.cc
+++ b/paddle/fluid/operators/abs_op.cc
@@ -154,7 +154,7 @@ class AbsDoubleGradOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const framework::Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
     return framework::OpKernelType(
         framework::TransToProtoVarType(tensor.dtype()),
         tensor.place(),
diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc
index 1337533f3bb..29e5e9c0ace 100644
--- a/paddle/fluid/operators/activation_op.cc
+++ b/paddle/fluid/operators/activation_op.cc
@@ -125,7 +125,7 @@ class ActivationOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
 #ifdef PADDLE_WITH_MKLDNN
     // When activation is first oneDNN op (there was some non oneDNN op
     // previously)
diff --git a/paddle/fluid/operators/cross_entropy_op.cc b/paddle/fluid/operators/cross_entropy_op.cc
index 0d98f5b75e4..a02bf699b32 100644
--- a/paddle/fluid/operators/cross_entropy_op.cc
+++ b/paddle/fluid/operators/cross_entropy_op.cc
@@ -142,7 +142,7 @@ class CrossEntropyGradientOpBase : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
 
-  void InferShape(framework::InferShapeContext* ctx) const {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     OP_INOUT_CHECK(
         ctx->HasInput("Label"), "Input", "Label", "CrossEntropyGradientOpBase");
     OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")),
diff --git a/paddle/fluid/operators/dequantize_abs_max_op.cc b/paddle/fluid/operators/dequantize_abs_max_op.cc
index 64807329a40..ff4bb5f5334 100644
--- a/paddle/fluid/operators/dequantize_abs_max_op.cc
+++ b/paddle/fluid/operators/dequantize_abs_max_op.cc
@@ -69,7 +69,7 @@ class DequantizeMaxAbsOp : public framework::OperatorWithKernel {
   }
 
   framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const {
+      const framework::ExecutionContext& ctx) const override {
     auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
     auto type = framework::OpKernelType(data_type, ctx.device_context());
     return type;
diff --git a/paddle/fluid/operators/dequantize_log_op.cc b/paddle/fluid/operators/dequantize_log_op.cc
index c80c050b14a..b3c1770493c 100644
--- a/paddle/fluid/operators/dequantize_log_op.cc
+++ b/paddle/fluid/operators/dequantize_log_op.cc
@@ -76,7 +76,7 @@ class DequantizeLogOp : public framework::OperatorWithKernel {
   }
 
   framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const {
+      const framework::ExecutionContext& ctx) const override {
     auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
     auto type = framework::OpKernelType(data_type, ctx.device_context());
     return type;
diff --git a/paddle/fluid/operators/kron_op.cc b/paddle/fluid/operators/kron_op.cc
index cede00d5b01..5f7606265e4 100644
--- a/paddle/fluid/operators/kron_op.cc
+++ b/paddle/fluid/operators/kron_op.cc
@@ -39,7 +39,7 @@ class KronOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const framework::Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
     if (framework::IsComplexType(expected_kernel_type.data_type_)) {
       // only promote inputs’s types when contains complex input
       return framework::OpKernelType(
@@ -121,7 +121,7 @@ class KronGradOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const framework::Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
     if (framework::IsComplexType(expected_kernel_type.data_type_)) {
       // only promote inputs’s types when contains complex input
       return framework::OpKernelType(
diff --git a/paddle/fluid/operators/matmul_op.cc b/paddle/fluid/operators/matmul_op.cc
index a31c218307b..a49ceb42559 100644
--- a/paddle/fluid/operators/matmul_op.cc
+++ b/paddle/fluid/operators/matmul_op.cc
@@ -714,7 +714,7 @@ class MatMulOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string &var_name,
       const framework::Tensor &tensor,
-      const framework::OpKernelType &expected_kernel_type) const {
+      const framework::OpKernelType &expected_kernel_type) const override {
     if (framework::IsComplexType(expected_kernel_type.data_type_)) {
       // only promote inputs’s types when contains complex input
       return framework::OpKernelType(
diff --git a/paddle/fluid/operators/matmul_v2_op.cc b/paddle/fluid/operators/matmul_v2_op.cc
index 8f233d7650d..b1483c3fd6e 100644
--- a/paddle/fluid/operators/matmul_v2_op.cc
+++ b/paddle/fluid/operators/matmul_v2_op.cc
@@ -150,7 +150,7 @@ class MatMulV2Op : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const framework::Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
     if (framework::IsComplexType(expected_kernel_type.data_type_)) {
       // only promote inputs’s types when contains complex input
       return framework::OpKernelType(
@@ -225,7 +225,7 @@ class MatMulV2OpGrad : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const framework::Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
     if (framework::IsComplexType(expected_kernel_type.data_type_)) {
       // only promote inputs’s types when contains complex input
       return framework::OpKernelType(
diff --git a/paddle/fluid/operators/pad3d_op.cc b/paddle/fluid/operators/pad3d_op.cc
index 301c21b2fcd..02d2eab181b 100644
--- a/paddle/fluid/operators/pad3d_op.cc
+++ b/paddle/fluid/operators/pad3d_op.cc
@@ -54,7 +54,7 @@ class Pad3dOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
 #ifdef PADDLE_WITH_MKLDNN
     if ((expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
         (tensor.layout() != framework::DataLayout::kMKLDNN)) {
diff --git a/paddle/fluid/operators/prelu_op.cc b/paddle/fluid/operators/prelu_op.cc
index df58a2abe87..f7abaf648eb 100644
--- a/paddle/fluid/operators/prelu_op.cc
+++ b/paddle/fluid/operators/prelu_op.cc
@@ -71,7 +71,7 @@ class PReluOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string &var_name,
       const Tensor &tensor,
-      const framework::OpKernelType &expected_kernel_type) const {
+      const framework::OpKernelType &expected_kernel_type) const override {
     return innerGetKernelTypeForVar(tensor, expected_kernel_type);
   }
 };
@@ -150,7 +150,7 @@ class PReluGradOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string &var_name,
       const Tensor &tensor,
-      const framework::OpKernelType &expected_kernel_type) const {
+      const framework::OpKernelType &expected_kernel_type) const override {
     return innerGetKernelTypeForVar(tensor, expected_kernel_type);
   }
 };
diff --git a/paddle/phi/core/string_tensor.h b/paddle/phi/core/string_tensor.h
index 0391099faab..80d6b69aa6c 100644
--- a/paddle/phi/core/string_tensor.h
+++ b/paddle/phi/core/string_tensor.h
@@ -123,7 +123,7 @@ class StringTensor : public TensorBase,
   }
   void* AllocateFrom(Allocator* allocator,
                      DataType dtype,
-                     size_t requested_size = 0);
+                     size_t requested_size = 0) override;
 
   dtype::pstring* mutable_data(const phi::Place& place,
                                size_t requested_size = 0);
diff --git a/paddle/phi/core/tensor_array.h b/paddle/phi/core/tensor_array.h
index ade33099eee..6d834a9375a 100644
--- a/paddle/phi/core/tensor_array.h
+++ b/paddle/phi/core/tensor_array.h
@@ -83,7 +83,7 @@ class TensorArray : public TensorBase,
   /// \return Void pointer
   void* AllocateFrom(Allocator* allocator,
                      DataType dtype,
-                     size_t requested_size = 0);
+                     size_t requested_size = 0) override;
 
   bool empty() const { return tensors_.empty(); }
 
--
GitLab
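
Note on the recurring change: nearly every C++ hunk above adds a missing
"override" specifier. Below is a minimal sketch of why that fixes the build
(hypothetical names, not Paddle code; it assumes the mac arm64 build treats
warnings as errors, e.g. via -Werror, which matches the "compilation errors"
in the subject line):

// Clang enables -Winconsistent-missing-override by default: once one
// member of a class is marked `override`, every other member that also
// overrides a virtual function must be marked too.
//
// Reproduce with:  clang++ -std=c++14 -Werror -c override_sketch.cc
#include <string>

struct Base {
  virtual ~Base() = default;
  virtual void Clear() {}
  virtual std::string name() { return "Base"; }
};

struct Derived : Base {
  // Marked `override`, so Clang now expects it on every sibling override.
  void Clear() override {}

  // Without `override` here (the pre-patch state), Clang warns
  // "'name' overrides a member function but is not marked 'override'",
  // and -Werror turns that warning into a hard compilation error.
  std::string name() override { return "Derived"; }
};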