Unverified · Commit abe1dca3 · Authored by Yuanle Liu, committed by GitHub

fix compilation errors on mac arm64 (#46117)

Parent: b680fb80
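Nearly every C++ hunk below makes the same one-token change: adding the `override` specifier to a member function that overrides a virtual from its base class. Apple clang on arm64 presumably promotes the resulting -Winconsistent-missing-override warning to an error under Paddle's -Werror build, since sibling members of these classes are already marked `override`. The CMake hunks drop leftover local debug commands, remove a global xxhash include directive, and explicitly link the C inference shared library's dependencies under APPLE, where the linker must resolve every symbol when the library is linked rather than when it is loaded. Below is a minimal sketch of the `override` pattern; NodeBase and PyLayerNode are hypothetical names, not the real Paddle classes.

    #include <string>

    struct NodeBase {
      virtual ~NodeBase() = default;
      virtual void ClearTensorWrappers() {}
      virtual std::string name() { return "NodeBase"; }
    };

    struct PyLayerNode : NodeBase {
      // One override in the class is already marked...
      void ClearTensorWrappers() override {}
      // ...so omitting the specifier on this one is what trips clang's
      // -Winconsistent-missing-override (fatal under -Werror). Marking it,
      // as the diff does throughout, also makes the compiler verify that
      // the signature really matches the base-class virtual.
      std::string name() override { return "PyLayerNode"; }
    };

    int main() {
      PyLayerNode node;
      return node.name() == "PyLayerNode" ? 0 : 1;
    }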
@@ -268,8 +268,6 @@ function(build_protobuf TARGET_NAME BUILD_FOR_HOST)
     DOWNLOAD_DIR ${PROTOBUF_SOURCE_DIR}
     DOWNLOAD_COMMAND rm -rf arm_protobuf.tar.gz && wget --no-check-certificate
       ${ARM_PROTOBUF_URL} && tar zxvf arm_protobuf.tar.gz
-    #DOWNLOAD_COMMAND cp /home/wangbin44/Paddle/build/arm_protobuf.tar.gz .
-    # && tar zxvf arm_protobuf.tar.gz
     UPDATE_COMMAND ""
     CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${PROTOBUF_INSTALL_DIR}
                -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
...
@@ -97,5 +97,4 @@ endif()
 add_library(xxhash STATIC IMPORTED GLOBAL)
 set_property(TARGET xxhash PROPERTY IMPORTED_LOCATION ${XXHASH_LIBRARIES})
-include_directories(${XXHASH_INCLUDE_DIR})
 add_dependencies(xxhash extern_xxhash)
@@ -45,7 +45,7 @@ class GradNodePyLayer : public GradNodeBase {
   void ClearTensorWrappers() override { VLOG(6) << "Do nothing here now"; }

-  std::string name() {
+  std::string name() override {
     return "GradNodePyLayer_" + std::string(Py_TYPE(ctx_)->tp_name);
   }
...
@@ -31,6 +31,18 @@ cc_library(
   DEPS paddle_inference)
 set_target_properties(paddle_inference_c_shared PROPERTIES OUTPUT_NAME
                                                 paddle_inference_c)
+if(APPLE)
+  target_link_libraries(
+    paddle_inference_c_shared
+    xxhash
+    utf8proc
+    cryptopp
+    protobuf
+    gflags
+    cblas)
+endif()
 if(WIN32)
   target_link_libraries(paddle_inference_c_shared shlwapi.lib)
 endif()
@@ -154,7 +154,7 @@ class AbsDoubleGradOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const framework::Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
     return framework::OpKernelType(
         framework::TransToProtoVarType(tensor.dtype()),
         tensor.place(),
...
@@ -125,7 +125,7 @@ class ActivationOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
 #ifdef PADDLE_WITH_MKLDNN
     // When activation is first oneDNN op (there was some non oneDNN op
     // previously)
...
@@ -142,7 +142,7 @@ class CrossEntropyGradientOpBase : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;

-  void InferShape(framework::InferShapeContext* ctx) const {
+  void InferShape(framework::InferShapeContext* ctx) const override {
     OP_INOUT_CHECK(
         ctx->HasInput("Label"), "Input", "Label", "CrossEntropyGradientOpBase");
     OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Y")),
...
@@ -69,7 +69,7 @@ class DequantizeMaxAbsOp : public framework::OperatorWithKernel {
   }

   framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const {
+      const framework::ExecutionContext& ctx) const override {
     auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
     auto type = framework::OpKernelType(data_type, ctx.device_context());
     return type;
...
@@ -76,7 +76,7 @@ class DequantizeLogOp : public framework::OperatorWithKernel {
   }

   framework::OpKernelType GetExpectedKernelType(
-      const framework::ExecutionContext& ctx) const {
+      const framework::ExecutionContext& ctx) const override {
     auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
     auto type = framework::OpKernelType(data_type, ctx.device_context());
     return type;
...
@@ -39,7 +39,7 @@ class KronOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const framework::Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
     if (framework::IsComplexType(expected_kernel_type.data_type_)) {
       // only promote inputs’s types when contains complex input
       return framework::OpKernelType(
@@ -121,7 +121,7 @@ class KronGradOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const framework::Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
     if (framework::IsComplexType(expected_kernel_type.data_type_)) {
       // only promote inputs’s types when contains complex input
       return framework::OpKernelType(
...
@@ -714,7 +714,7 @@ class MatMulOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string &var_name,
       const framework::Tensor &tensor,
-      const framework::OpKernelType &expected_kernel_type) const {
+      const framework::OpKernelType &expected_kernel_type) const override {
     if (framework::IsComplexType(expected_kernel_type.data_type_)) {
       // only promote inputs’s types when contains complex input
       return framework::OpKernelType(
...
@@ -150,7 +150,7 @@ class MatMulV2Op : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const framework::Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
     if (framework::IsComplexType(expected_kernel_type.data_type_)) {
       // only promote inputs’s types when contains complex input
       return framework::OpKernelType(
@@ -225,7 +225,7 @@ class MatMulV2OpGrad : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const framework::Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
     if (framework::IsComplexType(expected_kernel_type.data_type_)) {
       // only promote inputs’s types when contains complex input
       return framework::OpKernelType(
...
@@ -54,7 +54,7 @@ class Pad3dOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string& var_name,
       const Tensor& tensor,
-      const framework::OpKernelType& expected_kernel_type) const {
+      const framework::OpKernelType& expected_kernel_type) const override {
 #ifdef PADDLE_WITH_MKLDNN
     if ((expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
         (tensor.layout() != framework::DataLayout::kMKLDNN)) {
...
@@ -71,7 +71,7 @@ class PReluOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string &var_name,
       const Tensor &tensor,
-      const framework::OpKernelType &expected_kernel_type) const {
+      const framework::OpKernelType &expected_kernel_type) const override {
     return innerGetKernelTypeForVar(tensor, expected_kernel_type);
   }
 };
@@ -150,7 +150,7 @@ class PReluGradOp : public framework::OperatorWithKernel {
   framework::OpKernelType GetKernelTypeForVar(
       const std::string &var_name,
       const Tensor &tensor,
-      const framework::OpKernelType &expected_kernel_type) const {
+      const framework::OpKernelType &expected_kernel_type) const override {
     return innerGetKernelTypeForVar(tensor, expected_kernel_type);
   }
 };
...
@@ -123,7 +123,7 @@ class StringTensor : public TensorBase,
   }
   void* AllocateFrom(Allocator* allocator,
                      DataType dtype,
-                     size_t requested_size = 0);
+                     size_t requested_size = 0) override;

   dtype::pstring* mutable_data(const phi::Place& place,
                                size_t requested_size = 0);
...
@@ -83,7 +83,7 @@ class TensorArray : public TensorBase,
   /// \return Void pointer
   void* AllocateFrom(Allocator* allocator,
                      DataType dtype,
-                     size_t requested_size = 0);
+                     size_t requested_size = 0) override;

   bool empty() const { return tensors_.empty(); }
...
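The two header hunks above apply the same fix to declarations rather than definitions: `override` attaches to the in-class declaration of AllocateFrom, and the defaulted requested_size parameter is unaffected by it. A minimal sketch of that form, with hypothetical TensorBaseLike/StringTensorLike names standing in for the phi classes:

    #include <cstddef>

    struct TensorBaseLike {
      virtual ~TensorBaseLike() = default;
      virtual void* AllocateFrom(void* allocator, int dtype,
                                 std::size_t requested_size = 0) = 0;
    };

    struct StringTensorLike : TensorBaseLike {
      // `override` goes on the declaration; the out-of-line definition
      // below must not repeat the default argument.
      void* AllocateFrom(void* allocator, int dtype,
                         std::size_t requested_size = 0) override;
    };

    void* StringTensorLike::AllocateFrom(void* allocator, int dtype,
                                         std::size_t requested_size) {
      static char pool[256];  // toy backing store for the sketch
      (void)allocator;
      (void)dtype;
      return requested_size <= sizeof(pool) ? pool : nullptr;
    }

    int main() {
      StringTensorLike t;
      return t.AllocateFrom(nullptr, 0) != nullptr ? 0 : 1;
    }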