From 882053dc30867b406ba7016e97287c80cd677962 Mon Sep 17 00:00:00 2001
From: WangZhen <23097963+0x45f@users.noreply.github.com>
Date: Thu, 4 Aug 2022 10:57:08 +0800
Subject: [PATCH] [JitLayer]Move Function classes to a sub dir (#44844)

* Move Function classes to a sub dir

* Format code
---
 paddle/fluid/jit/CMakeLists.txt               |  10 +-
 paddle/fluid/jit/all.h                        |   8 +-
 paddle/fluid/jit/compilation_unit.cc          |   2 +-
 paddle/fluid/jit/function/CMakeLists.txt      |   9 ++
 .../fluid/jit/{ => function}/base_function.h  |   0
 .../fluid/jit/function/executor_function.cc   |  63 ++++++++
 .../jit/{ => function}/executor_function.h    |  39 +----
 paddle/fluid/jit/function/pe_function.cc      | 115 ++++++++++++++
 paddle/fluid/jit/function/pe_function.h       |  60 ++++++++
 paddle/fluid/jit/layer.cc                     |   2 +-
 paddle/fluid/jit/layer.h                      |   2 +-
 paddle/fluid/jit/pe_function.h                | 144 ------------------
 paddle/fluid/jit/serializer.cc                |   4 +-
 paddle/fluid/pybind/eager_utils.cc            |   4 +-
 paddle/fluid/pybind/eager_utils.h             |   2 +-
 paddle/fluid/pybind/jit.cc                    |   4 +-
 python/setup.py.in                            |   2 +-
 17 files changed, 275 insertions(+), 195 deletions(-)
 create mode 100644 paddle/fluid/jit/function/CMakeLists.txt
 rename paddle/fluid/jit/{ => function}/base_function.h (100%)
 create mode 100644 paddle/fluid/jit/function/executor_function.cc
 rename paddle/fluid/jit/{ => function}/executor_function.h (51%)
 create mode 100644 paddle/fluid/jit/function/pe_function.cc
 create mode 100644 paddle/fluid/jit/function/pe_function.h
 delete mode 100644 paddle/fluid/jit/pe_function.h

diff --git a/paddle/fluid/jit/CMakeLists.txt b/paddle/fluid/jit/CMakeLists.txt
index 38d2ae54de..85148f1387 100644
--- a/paddle/fluid/jit/CMakeLists.txt
+++ b/paddle/fluid/jit/CMakeLists.txt
@@ -1,3 +1,4 @@
+add_subdirectory(function)
 proto_library(paddle_jit_property_proto SRCS property.proto)
 
 cc_library(
@@ -33,8 +34,13 @@ cc_library(
 cc_library(
   jit_layer
   SRCS layer.cc
-  DEPS jit_serializer jit_function_utils jit_serializer_utils
-       jit_compilation_unit jit_function_schema)
+  DEPS jit_serializer
+       jit_function_utils
+       jit_serializer_utils
+       jit_compilation_unit
+       jit_function_schema
+       jit_executor_function
+       jit_pe_function)
 
 if(WITH_TESTING AND NOT WIN32)
   add_custom_target(
diff --git a/paddle/fluid/jit/all.h b/paddle/fluid/jit/all.h
index 5a571a72a2..6e768b66f8 100644
--- a/paddle/fluid/jit/all.h
+++ b/paddle/fluid/jit/all.h
@@ -14,7 +14,7 @@
 
 #pragma once
 
-#include "base_function.h"
-#include "layer.h"
-#include "serializer.h"
-#include "serializer_utils.h"
+#include "function/base_function.h"  // NOLINT
+#include "layer.h"                   // NOLINT
+#include "serializer.h"              // NOLINT
+#include "serializer_utils.h"        // NOLINT
diff --git a/paddle/fluid/jit/compilation_unit.cc b/paddle/fluid/jit/compilation_unit.cc
index 5a434fba17..19c9d38034 100644
--- a/paddle/fluid/jit/compilation_unit.cc
+++ b/paddle/fluid/jit/compilation_unit.cc
@@ -16,7 +16,7 @@
 
 #include "paddle/phi/core/enforce.h"
 
-#include "paddle/fluid/jit/base_function.h"
+#include "paddle/fluid/jit/function/base_function.h"
 
 namespace paddle {
 namespace jit {
diff --git a/paddle/fluid/jit/function/CMakeLists.txt b/paddle/fluid/jit/function/CMakeLists.txt
new file mode 100644
index 0000000000..7726ea2386
--- /dev/null
+++ b/paddle/fluid/jit/function/CMakeLists.txt
@@ -0,0 +1,9 @@
+cc_library(
+  jit_executor_function
+  SRCS executor_function.cc
+  DEPS executor)
+
+cc_library(
+  jit_pe_function
+  SRCS pe_function.cc
+  DEPS parallel_executor)
diff --git a/paddle/fluid/jit/base_function.h b/paddle/fluid/jit/function/base_function.h
similarity index 100%
rename from paddle/fluid/jit/base_function.h
rename to paddle/fluid/jit/function/base_function.h
diff --git a/paddle/fluid/jit/function/executor_function.cc b/paddle/fluid/jit/function/executor_function.cc
new file mode 100644
index 0000000000..01e58776b3
--- /dev/null
+++ b/paddle/fluid/jit/function/executor_function.cc
@@ -0,0 +1,63 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/jit/function/executor_function.h"
+
+#include "paddle/fluid/framework/program_desc.h"
+#include "paddle/fluid/framework/variable.h"
+#include "paddle/phi/core/enforce.h"
+
+namespace paddle {
+namespace jit {
+
+ExecutorFunction::ExecutorFunction(const std::shared_ptr<FunctionInfo> &info,
+                                   const Name2VariableMap &params_dict,
+                                   const phi::Place &place)
+    : info_(info), place_(place), inner_exe_(place_) {
+  info_->RemoveDescFeedFetch();
+  PADDLE_ENFORCE_GT(
+      static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
+      0,
+      platform::errors::PreconditionNotMet(
+          "There is no operator in ProgramDesc."));
+  utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
+  VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
+}
+
+std::vector<Tensor> ExecutorFunction::operator()(
+    const std::vector<Tensor> &inputs) {
+  auto dense_tensors = utils::ToDenseTensors(inputs);
+  return utils::ToTensors(this->operator()(dense_tensors));
+}
+
+std::vector<DenseTensor> ExecutorFunction::operator()(
+    const std::vector<DenseTensor> &inputs) {
+  utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
+  inner_exe_.Run(info_->ProgramDesc(),
+                 &scope_,
+                 /*blockID=*/0,
+                 false,
+                 true,
+                 info_->OutputArgNames());
+  std::vector<DenseTensor> outputs;
+  utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
+  return outputs;
+}
+
+const std::shared_ptr<FunctionInfo> &ExecutorFunction::Info() const {
+  return info_;
+}
+
+}  // namespace jit
+}  // namespace paddle
diff --git a/paddle/fluid/jit/executor_function.h b/paddle/fluid/jit/function/executor_function.h
similarity index 51%
rename from paddle/fluid/jit/executor_function.h
rename to paddle/fluid/jit/function/executor_function.h
index 87a31a9194..5136afaf02 100644
--- a/paddle/fluid/jit/executor_function.h
+++ b/paddle/fluid/jit/function/executor_function.h
@@ -14,17 +14,12 @@
 
 #pragma once
 
-#include <memory>
-#include <vector>
 
 #include "paddle/fluid/framework/executor.h"
-#include "paddle/fluid/framework/program_desc.h"
 #include "paddle/fluid/framework/scope.h"
-#include "paddle/fluid/framework/variable.h"
-#include "paddle/phi/core/enforce.h"
 
-#include "paddle/fluid/jit/base_function.h"
+#include "paddle/fluid/jit/function/base_function.h"
 #include "paddle/fluid/jit/function_schema.h"
 #include "paddle/fluid/jit/function_utils.h"
@@ -35,39 +30,15 @@ class ExecutorFunction : public BaseFunction {
  public:
   ExecutorFunction(const std::shared_ptr<FunctionInfo> &info,
                    const Name2VariableMap &params_dict,
-                   const phi::Place &place)
-      : info_(info), place_(place), inner_exe_(place_) {
-    info_->RemoveDescFeedFetch();
-    PADDLE_ENFORCE_GT(
-        static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
-        0,
-        platform::errors::PreconditionNotMet(
-            "There is no operator in ProgramDesc."));
-    utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
-    VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
-  }
+                   const phi::Place &place);
 
   ~ExecutorFunction() noexcept {}
 
-  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) {
-    auto dense_tensors = utils::ToDenseTensors(inputs);
-    return utils::ToTensors(this->operator()(dense_tensors));
-  }
+  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs);
 
-  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs) {
-    utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
-    inner_exe_.Run(info_->ProgramDesc(),
-                   &scope_,
-                   /*blockID=*/0,
-                   false,
-                   true,
-                   info_->OutputArgNames());
-    std::vector<DenseTensor> outputs;
-    utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
-    return outputs;
-  }
+  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs);
 
-  const std::shared_ptr<FunctionInfo> &Info() const { return info_; }
+  const std::shared_ptr<FunctionInfo> &Info() const;
 
  private:
   std::shared_ptr<FunctionInfo> info_;
diff --git a/paddle/fluid/jit/function/pe_function.cc b/paddle/fluid/jit/function/pe_function.cc
new file mode 100644
index 0000000000..1632001844
--- /dev/null
+++ b/paddle/fluid/jit/function/pe_function.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/jit/function/pe_function.h"
+
+#include "paddle/fluid/framework/block_desc.h"
+#include "paddle/fluid/framework/details/build_strategy.h"
+#include "paddle/fluid/framework/program_desc.h"
+#include "paddle/phi/core/enforce.h"
+
+namespace paddle {
+namespace jit {
+
+static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
+  ExecutionStrategy execution_strategy;
+
+  auto device_type = platform::Place2DeviceType(place);
+  switch (device_type) {
+    case platform::DeviceType::CPU: {
+      execution_strategy.num_threads_ = 2;
+      break;
+    }
+    case platform::DeviceType::CUDA: {
+      // NOTE: According experiments, one thread is faster in
+      // most model training.
+      execution_strategy.num_threads_ = 1;
+      break;
+    }
+    case platform::DeviceType::XPU: {
+      execution_strategy.num_threads_ = 1;
+      break;
+    }
+    case platform::DeviceType::IPU: {
+      execution_strategy.num_threads_ = 1;
+      break;
+    }
+    default:
+      PADDLE_THROW(platform::errors::Unavailable("Unsupported Device type %d.",
+                                                 device_type));
+  }
+  execution_strategy.use_device_ = device_type;
+
+  return execution_strategy;
+}
+
+PEFunction::PEFunction(const std::shared_ptr<FunctionInfo> &info,
+                       const Name2VariableMap &params_dict,
+                       const phi::Place &place)
+    : info_(info), place_(place) {
+  info_->RemoveDescFeedFetch();
+  PADDLE_ENFORCE_GT(
+      static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
+      0,
+      platform::errors::PreconditionNotMet(
+          "There is no operator in ProgramDesc."));
+  utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
+  VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
+  CreateGraphAndPE();
+}
+
+void PEFunction::CreateGraphAndPE() {
+  framework::details::BuildStrategy build_strategy;
+  auto execution_strategy = GetExecutionStrategy(place_);
+
+  auto &program_desc = info_->ProgramDesc();
+  const framework::BlockDesc &global_block = program_desc.Block(0);
+  int64_t start_op_index = 0;
+  int64_t end_op_index = static_cast<int64_t>(global_block.OpSize());
+
+  graph_ = std::make_shared<Graph>(program_desc, start_op_index, end_op_index);
+  inner_pe_ = std::make_shared<ParallelExecutor>(
+      place_, &scope_, execution_strategy, build_strategy, graph_.get());
+  inner_pe_->PrepareVariables(&scope_);
+  inner_pe_->SkipMemoryReuse(/*scope_idx=*/0, info_->InputArgNames());
+}
+
+std::vector<Tensor> PEFunction::operator()(const std::vector<Tensor> &inputs) {
+  auto dense_tensors = utils::ToDenseTensors(inputs);
+  return utils::ToTensors(this->operator()(dense_tensors));
+}
+
+std::vector<DenseTensor> PEFunction::operator()(
+    const std::vector<DenseTensor> &inputs) {
+  utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
+
+  // update op_handle scope_map in pe->executor_->Graph
+  std::unordered_map<framework::Scope *, framework::Scope *> scope_map = {
+      {inner_pe_->GetLocalScopes().front(), &scope_}};
+  inner_pe_->ResetOpHandleScopeMapOfGraphs(scope_map);
+  // need to recreate tmp variables in new scope
+  inner_pe_->PrepareVariables(&scope_);
+
+  inner_pe_->RunWithoutFetch(info_->OutputArgNames());
+
+  std::vector<DenseTensor> outputs;
+  utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
+  scope_.DropKids();
+  return outputs;
+}
+
+const std::shared_ptr<FunctionInfo> &PEFunction::Info() const { return info_; }
+
+}  // namespace jit
+}  // namespace paddle
diff --git a/paddle/fluid/jit/function/pe_function.h b/paddle/fluid/jit/function/pe_function.h
new file mode 100644
index 0000000000..f847b3702d
--- /dev/null
+++ b/paddle/fluid/jit/function/pe_function.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <memory>
+
+#include "paddle/fluid/framework/details/execution_strategy.h"
+#include "paddle/fluid/framework/ir/graph.h"
+#include "paddle/fluid/framework/parallel_executor.h"
+#include "paddle/fluid/framework/scope.h"
+
+#include "paddle/fluid/jit/function/base_function.h"
+#include "paddle/fluid/jit/function_schema.h"
+#include "paddle/fluid/jit/function_utils.h"
+
+namespace paddle {
+namespace jit {
+
+using ExecutionStrategy = framework::details::ExecutionStrategy;
+using ParallelExecutor = framework::ParallelExecutor;
+using Graph = framework::ir::Graph;
+
+class PEFunction : public BaseFunction {
+ public:
+  PEFunction(const std::shared_ptr<FunctionInfo> &info,
+             const Name2VariableMap &params_dict,
+             const phi::Place &place);
+
+  ~PEFunction() noexcept {}
+
+  void CreateGraphAndPE();
+
+  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs);
+
+  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs);
+
+  const std::shared_ptr<FunctionInfo> &Info() const;
+
+ private:
+  std::shared_ptr<FunctionInfo> info_;
+  framework::Scope scope_;
+  phi::Place place_;
+  std::shared_ptr<ParallelExecutor> inner_pe_;
+  std::shared_ptr<Graph> graph_;
+};
+
+}  // namespace jit
+}  // namespace paddle
diff --git a/paddle/fluid/jit/layer.cc b/paddle/fluid/jit/layer.cc
index a80b05e45c..e41f78a487 100644
--- a/paddle/fluid/jit/layer.cc
+++ b/paddle/fluid/jit/layer.cc
@@ -16,8 +16,8 @@
 
 #include "paddle/fluid/framework/variable.h"
 
-#include "paddle/fluid/jit/base_function.h"
 #include "paddle/fluid/jit/compilation_unit.h"
+#include "paddle/fluid/jit/function/base_function.h"
 #include "paddle/fluid/jit/function_schema.h"
 #include "paddle/phi/core/enforce.h"
 #include "paddle/phi/core/errors.h"
diff --git a/paddle/fluid/jit/layer.h b/paddle/fluid/jit/layer.h
index 4c6c714d37..69304c800f 100644
--- a/paddle/fluid/jit/layer.h
+++ b/paddle/fluid/jit/layer.h
@@ -21,7 +21,7 @@
 #include "paddle/phi/api/include/tensor.h"
 #include "paddle/phi/common/place.h"
 
-#include "base_function.h"  //NOLINT
+#include "function/base_function.h"  //NOLINT
 
 namespace paddle {
diff --git a/paddle/fluid/jit/pe_function.h b/paddle/fluid/jit/pe_function.h
deleted file mode 100644
index 809ad5ecbe..0000000000
--- a/paddle/fluid/jit/pe_function.h
+++ /dev/null
@@ -1,144 +0,0 @@
-// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#pragma once
-
-#include <memory>
-#include <string>
-#include <vector>
-
-#include "paddle/fluid/framework/block_desc.h"
-#include "paddle/fluid/framework/details/build_strategy.h"
-#include "paddle/fluid/framework/details/execution_strategy.h"
-#include "paddle/fluid/framework/executor_cache.h"
-#include "paddle/fluid/framework/ir/graph.h"
-#include "paddle/fluid/framework/program_desc.h"
-#include "paddle/fluid/framework/scope.h"
-#include "paddle/fluid/framework/variable.h"
-#include "paddle/phi/core/enforce.h"
-
-#include "paddle/fluid/jit/base_function.h"
-#include "paddle/fluid/jit/function_schema.h"
-#include "paddle/fluid/jit/function_utils.h"
-
-namespace paddle {
-namespace jit {
-
-using ExecutionStrategy = framework::details::ExecutionStrategy;
-using ParallelExecutor = framework::ParallelExecutor;
-using Graph = framework::ir::Graph;
-
-class PEFunction : public BaseFunction {
- public:
-  PEFunction(const std::shared_ptr<FunctionInfo> &info,
-             const Name2VariableMap &params_dict,
-             const phi::Place &place)
-      : info_(info), place_(place) {
-    info_->RemoveDescFeedFetch();
-    PADDLE_ENFORCE_GT(
-        static_cast<int64_t>(info_->ProgramDesc().Block(0).OpSize()),
-        0,
-        platform::errors::PreconditionNotMet(
-            "There is no operator in ProgramDesc."));
-    utils::ShareParamsIntoScope(info_->ParamNames(), params_dict, &scope_);
-    VLOG(6) << framework::GenScopeTreeDebugInfo(&scope_);
-    CreateGraphAndPE();
-  }
-
-  ~PEFunction() noexcept {}
-
-  static ExecutionStrategy GetExecutionStrategy(const platform::Place &place) {
-    ExecutionStrategy execution_strategy;
-
-    auto device_type = platform::Place2DeviceType(place);
-    switch (device_type) {
-      case platform::DeviceType::CPU: {
-        execution_strategy.num_threads_ = 2;
-        break;
-      }
-      case platform::DeviceType::CUDA: {
-        // NOTE: According experiments, one thread is faster in
-        // most model training.
-        execution_strategy.num_threads_ = 1;
-        break;
-      }
-      case platform::DeviceType::XPU: {
-        execution_strategy.num_threads_ = 1;
-        break;
-      }
-      case platform::DeviceType::IPU: {
-        execution_strategy.num_threads_ = 1;
-        break;
-      }
-      default:
-        PADDLE_THROW(platform::errors::Unavailable(
-            "Unsupported Device type %d.", device_type));
-    }
-    execution_strategy.use_device_ = device_type;
-
-    return execution_strategy;
-  }
-
-  void CreateGraphAndPE() {
-    framework::details::BuildStrategy build_strategy;
-    auto execution_strategy = GetExecutionStrategy(place_);
-
-    auto &program_desc = info_->ProgramDesc();
-    const framework::BlockDesc &global_block = program_desc.Block(0);
-    int64_t start_op_index = 0;
-    int64_t end_op_index = static_cast<int64_t>(global_block.OpSize());
-
-    graph_ =
-        std::make_shared<Graph>(program_desc, start_op_index, end_op_index);
-    inner_pe_ = std::make_shared<ParallelExecutor>(
-        place_, &scope_, execution_strategy, build_strategy, graph_.get());
-    inner_pe_->PrepareVariables(&scope_);
-    inner_pe_->SkipMemoryReuse(/*scope_idx=*/0, info_->InputArgNames());
-  }
-
-  std::vector<Tensor> operator()(const std::vector<Tensor> &inputs) {
-    auto dense_tensors = utils::ToDenseTensors(inputs);
-    return utils::ToTensors(this->operator()(dense_tensors));
-  }
-
-  std::vector<DenseTensor> operator()(const std::vector<DenseTensor> &inputs) {
-    utils::ShareIntoScope(info_->InputArgNames(), inputs, &scope_);
-
-    // update op_handle scope_map in pe->executor_->Graph
-    std::unordered_map<framework::Scope *, framework::Scope *> scope_map = {
-        {inner_pe_->GetLocalScopes().front(), &scope_}};
-    inner_pe_->ResetOpHandleScopeMapOfGraphs(scope_map);
-    // need to recreate tmp variables in new scope
-    inner_pe_->PrepareVariables(&scope_);
-
-    inner_pe_->RunWithoutFetch(info_->OutputArgNames());
-
-    std::vector<DenseTensor> outputs;
-    utils::FetchOuts(info_->OutputArgNames(), scope_, &outputs);
-    scope_.DropKids();
-    return outputs;
-  }
-
-  const std::shared_ptr<FunctionInfo> &Info() const { return info_; }
-
- private:
-  std::shared_ptr<FunctionInfo> info_;
-  framework::Scope scope_;
-  phi::Place place_;
-  std::shared_ptr<ParallelExecutor> inner_pe_;
-  std::shared_ptr<Graph> graph_;
-};
-
-}  // namespace jit
-}  // namespace paddle
diff --git a/paddle/fluid/jit/serializer.cc b/paddle/fluid/jit/serializer.cc
index a9bd5676ad..6ec321168c 100644
--- a/paddle/fluid/jit/serializer.cc
+++ b/paddle/fluid/jit/serializer.cc
@@ -20,9 +20,9 @@
 #include "paddle/fluid/framework/variable.h"
 #include "paddle/fluid/platform/device_context.h"
 
-#include "paddle/fluid/jit/executor_function.h"
+#include "paddle/fluid/jit/function/executor_function.h"
+#include "paddle/fluid/jit/function/pe_function.h"
 #include "paddle/fluid/jit/layer.h"
-#include "paddle/fluid/jit/pe_function.h"
 #include "paddle/fluid/jit/property.h"
 #include "paddle/fluid/jit/serializer_utils.h"
 
diff --git a/paddle/fluid/pybind/eager_utils.cc b/paddle/fluid/pybind/eager_utils.cc
index 253291256e..3516a6a963 100644
--- a/paddle/fluid/pybind/eager_utils.cc
+++ b/paddle/fluid/pybind/eager_utils.cc
@@ -22,8 +22,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/convert_utils.h"
 #include "paddle/fluid/framework/scope.h"
 #include "paddle/fluid/framework/scope_guard.h"
-#include "paddle/fluid/jit/executor_function.h"
-#include "paddle/fluid/jit/pe_function.h"
+#include "paddle/fluid/jit/function/executor_function.h"
+#include "paddle/fluid/jit/function/pe_function.h"
 #include "paddle/fluid/memory/allocation/allocator.h"
 #include "paddle/fluid/operators/py_func_op.h"
 #include "paddle/fluid/operators/utils.h"
diff --git a/paddle/fluid/pybind/eager_utils.h b/paddle/fluid/pybind/eager_utils.h
index 4fab8534b7..9d261c8a95 100644
--- a/paddle/fluid/pybind/eager_utils.h
+++ b/paddle/fluid/pybind/eager_utils.h
@@ -20,7 +20,7 @@ typedef SSIZE_T ssize_t;
 #include "paddle/fluid/eager/hooks.h"
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/framework/tensor.h"
-#include "paddle/fluid/jit/base_function.h"
+#include "paddle/fluid/jit/function/base_function.h"
 #include "paddle/fluid/platform/place.h"
 #include "paddle/phi/common/backend.h"
 #include "paddle/phi/common/data_type.h"
diff --git a/paddle/fluid/pybind/jit.cc b/paddle/fluid/pybind/jit.cc
index 79576e6547..4ddae883d1 100644
--- a/paddle/fluid/pybind/jit.cc
+++ b/paddle/fluid/pybind/jit.cc
@@ -18,10 +18,10 @@ limitations under the License. */
 #include "paddle/fluid/imperative/layer.h"
 #include "paddle/fluid/platform/place.h"
 
-#include "paddle/fluid/jit/executor_function.h"
+#include "paddle/fluid/jit/function/executor_function.h"
+#include "paddle/fluid/jit/function/pe_function.h"
 #include "paddle/fluid/jit/function_schema.h"
 #include "paddle/fluid/jit/layer.h"
-#include "paddle/fluid/jit/pe_function.h"
 #include "paddle/fluid/jit/serializer.h"
 
 namespace py = pybind11;
diff --git a/python/setup.py.in b/python/setup.py.in
index 864a0a417c..287cb2e08e 100755
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -630,7 +630,7 @@ headers = (
 
 jit_layer_headers = ['layer.h', 'serializer.h', 'serializer_utils.h', 'all.h', 'base_function.h']
 for f in jit_layer_headers:
-    headers += list(find_files(f, '@PADDLE_SOURCE_DIR@/paddle/fluid/jit', recursive=False))
+    headers += list(find_files(f, '@PADDLE_SOURCE_DIR@/paddle/fluid/jit', recursive=True))
 
 if '${WITH_MKLDNN}' == 'ON':
     headers += list(find_files('*', '${MKLDNN_INSTALL_DIR}/include')) # mkldnn
-- 
GitLab
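
Downstream note: out-of-tree code that included `paddle/fluid/jit/executor_function.h`, `paddle/fluid/jit/pe_function.h`, or `paddle/fluid/jit/base_function.h` directly must switch to the new `function/` paths above; the umbrella header `paddle/fluid/jit/all.h` keeps working, since it now re-exports `function/base_function.h`, and the `setup.py.in` change to `recursive=True` keeps the relocated headers in the packaged wheel. A minimal consumer sketch follows — the `jit::Load` and `Layer::forward` calls are assumptions based on the `serializer.h`/`layer.h` headers this patch touches (their exact signatures are not shown in this diff), and the model path is a placeholder:

```cpp
// Sketch only: the include path below is what this patch guarantees;
// paddle::jit::Load and Layer::forward are assumed from serializer.h /
// layer.h at this commit, and "/path/to/model" is hypothetical.
#include "paddle/fluid/jit/all.h"  // now pulls in function/base_function.h

int main() {
  phi::CPUPlace place;
  // Deserialize a saved jit::Layer (assumed API from serializer.h).
  auto layer = paddle::jit::Load("/path/to/model", place);
  // Run the layer's forward function; a real caller passes input Tensors.
  auto outputs = layer.forward({});
  return 0;
}
```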