diff --git a/paddle/fluid/distributed/store/tcp_store.cc b/paddle/fluid/distributed/store/tcp_store.cc
index 25b66718e4a92ea33693375164d29f34af7816a7..0ecfcef42458d18a482860579111031f9b3ab787 100644
--- a/paddle/fluid/distributed/store/tcp_store.cc
+++ b/paddle/fluid/distributed/store/tcp_store.cc
@@ -20,7 +20,7 @@
 #include "paddle/fluid/distributed/store/tcp_utils.h"
 #include "paddle/fluid/platform/enforce.h"
-#include "paddle/fluid/platform/flags.h"
+#include "paddle/phi/core/flags.h"
 namespace paddle {
 namespace distributed {
diff --git a/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc b/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc
index 273d0555288888f53e12932d940db7d7f061f40e..0474f253dece30a5c77490b162278d5a05407f35 100644
--- a/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc
+++ b/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc
@@ -25,7 +25,7 @@
 #include "paddle/fluid/eager/tests/performance_tests/benchmark_utils.h"
 #include "paddle/fluid/eager/tests/test_utils.h"
 #include "paddle/fluid/imperative/tracer.h"
-#include "paddle/fluid/platform/flags.h"
+#include "paddle/phi/core/flags.h"
 #ifdef WITH_GPERFTOOLS
 #include "gperftools/profiler.h"
diff --git a/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc b/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc
index 93260a315b32e30d158d09aec43d67845097cbfa..72dd1cc8ff56a74fd6e893ab704c8af6b6aca367 100644
--- a/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc
+++ b/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc
@@ -24,7 +24,7 @@
 #include "paddle/fluid/eager/tests/performance_tests/benchmark_utils.h"
 #include "paddle/fluid/eager/tests/test_utils.h"
 #include "paddle/fluid/imperative/tracer.h"
-#include "paddle/fluid/platform/flags.h"
+#include "paddle/phi/core/flags.h"
 #ifdef WITH_GPERFTOOLS
 #include "gperftools/profiler.h"
diff --git a/paddle/fluid/framework/new_executor/executor_statistics.cc b/paddle/fluid/framework/new_executor/executor_statistics.cc
index c1ba3b193f1deac0e3060c15be9b537e4d505872..f773a386af47358493aba88b62d8359a50e91a82 100644
--- a/paddle/fluid/framework/new_executor/executor_statistics.cc
+++ b/paddle/fluid/framework/new_executor/executor_statistics.cc
@@ -24,9 +24,9 @@
 #include
 #include "glog/logging.h"
-#include "paddle/fluid/platform/flags.h"
 #include "paddle/fluid/platform/os_info.h"
 #include "paddle/fluid/platform/profiler/utils.h"
+#include "paddle/phi/core/flags.h"
 DECLARE_bool(use_stream_safe_cuda_allocator);
 PADDLE_DEFINE_EXPORTED_string(static_executor_perfstat_filepath,
diff --git a/paddle/fluid/imperative/flags.cc b/paddle/fluid/imperative/flags.cc
index f66aacc89ec506e4054eef9864f938c930b28cfe..06c2719ebfa6399511817792e7a808349ffb9641 100644
--- a/paddle/fluid/imperative/flags.cc
+++ b/paddle/fluid/imperative/flags.cc
@@ -14,7 +14,7 @@
 #include "paddle/fluid/imperative/flags.h"
-#include "paddle/fluid/platform/flags.h"
+#include "paddle/phi/core/flags.h"
 PADDLE_DEFINE_EXPORTED_uint64(dygraph_debug,
                               0,
diff --git a/paddle/fluid/imperative/profiler.cc b/paddle/fluid/imperative/profiler.cc
index 3365d5239da96b1f351d3991ad7680eb1defc5d3..fc3066051d8533618130c9c5c7eb8929564e4a58 100644
--- a/paddle/fluid/imperative/profiler.cc
+++ b/paddle/fluid/imperative/profiler.cc
@@ -21,7 +21,7 @@
 #include  // NOLINT
-#include "paddle/fluid/platform/flags.h"
+#include "paddle/phi/core/flags.h"
 PADDLE_DEFINE_EXPORTED_string(
     tracer_profile_fname,
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index bff18b330081d76c1e1892a04d56a3f5ee30133e..af4d83f55a6ee2fef289e9e693fb7190cfcb2c4a 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -1422,8 +1422,7 @@ CreatePaddlePredictor(
   }
   // support set flags from enviorment.
-  const platform::ExportedFlagInfoMap &env_map =
-      platform::GetExportedFlagInfoMap();
+  const phi::ExportedFlagInfoMap &env_map = phi::GetExportedFlagInfoMap();
   std::ostringstream os;
   os << "--tryfromenv=";
   for (auto &pair : env_map) {
diff --git a/paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.cc b/paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.cc
index 972665562c30fa98ab88b933ec40fe5b8b68ddb9..309f6c3fdd124043ee582e682502c513f1b5283f 100644
--- a/paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.cc
+++ b/paddle/fluid/memory/allocation/auto_growth_best_fit_allocator.cc
@@ -18,8 +18,8 @@
 #include  // NOLINT
 #include "paddle/fluid/memory/allocation/aligned_allocator.h"
-#include "paddle/fluid/platform/flags.h"
 #include "paddle/fluid/platform/profiler/event_tracing.h"
+#include "paddle/phi/core/flags.h"
 PADDLE_DEFINE_EXPORTED_READONLY_bool(
     free_idle_chunk,
diff --git a/paddle/fluid/operators/controlflow/conditional_block_op.cc b/paddle/fluid/operators/controlflow/conditional_block_op.cc
index 1efc5085165776a276c145d23e8f4c22841cb5e0..f11bf6612c8cab0131a20d1e08aaa1776ffddacf 100644
--- a/paddle/fluid/operators/controlflow/conditional_block_op.cc
+++ b/paddle/fluid/operators/controlflow/conditional_block_op.cc
@@ -17,7 +17,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/new_executor/standalone_executor.h"
 #include "paddle/fluid/operators/assign_op.h"
 #include "paddle/fluid/operators/controlflow/control_flow_op_helper.h"
-#include "paddle/fluid/platform/flags.h"
+#include "paddle/phi/core/flags.h"
 #include "paddle/phi/kernels/funcs/math_function.h"
 #ifdef PADDLE_WITH_MKLDNN
diff --git a/paddle/fluid/operators/conv_op.cc b/paddle/fluid/operators/conv_op.cc
index 107e3b5a3de495016ded58c45a835cea836ba999..0262c74923e1e80b8c5e4f5b61106682c26853fb 100644
--- a/paddle/fluid/operators/conv_op.cc
+++ b/paddle/fluid/operators/conv_op.cc
@@ -25,7 +25,6 @@ limitations under the License. */
 #include "paddle/fluid/platform/mkldnn_helper.h"
 #endif
 #include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/platform/cudnn_workspace_helper.h"
 #include "paddle/phi/infermeta/binary.h"
 namespace paddle {
diff --git a/paddle/fluid/operators/conv_transpose_op.cc b/paddle/fluid/operators/conv_transpose_op.cc
index 99ec8d0a8b11dcc048041228fdd2cd2b5fe69df0..ebc9f8afdb0b7f84644d718bd3f3427ac8d9938c 100644
--- a/paddle/fluid/operators/conv_transpose_op.cc
+++ b/paddle/fluid/operators/conv_transpose_op.cc
@@ -21,7 +21,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/op_version_registry.h"
-#include "paddle/fluid/platform/cudnn_workspace_helper.h"
 #include "paddle/phi/core/infermeta_utils.h"
 #include "paddle/phi/infermeta/backward.h"
 #include "paddle/phi/infermeta/binary.h"
diff --git a/paddle/fluid/operators/fused/fusion_conv_inception_op.cc b/paddle/fluid/operators/fused/fusion_conv_inception_op.cc
index 4dd0b964838b777ddf1c7241ec68259cbc6fad27..9df22199106726bc1f3922459bf6e9066b4d6ef5 100644
--- a/paddle/fluid/operators/fused/fusion_conv_inception_op.cc
+++ b/paddle/fluid/operators/fused/fusion_conv_inception_op.cc
@@ -16,7 +16,7 @@ limitations under the License. */
 #include
 #include "paddle/fluid/framework/op_registry.h"
-#include "paddle/fluid/platform/cudnn_workspace_helper.h"
+#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"
 namespace paddle {
 namespace operators {
@@ -113,7 +113,7 @@ class ConvInceptionFusionOpMaker : public framework::OpProtoAndCheckerMaker {
              "allocated/freed each time the operator runs, larger "
              "workspace size can increase performance but also requires "
             "better hardware. This size should be chosen carefully.")
-        .SetDefault(platform::GetDefaultConvWorkspaceSizeLimitMB());
+        .SetDefault(phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB());
     AddComment(R"DOC(
 )DOC");
   }
diff --git a/paddle/fluid/platform/CMakeLists.txt b/paddle/fluid/platform/CMakeLists.txt
index 2db144f423fc7cbfb0e2144cb349aad3875284cd..1dc762b9e185466c39ee7b6388ab34aea499c152 100644
--- a/paddle/fluid/platform/CMakeLists.txt
+++ b/paddle/fluid/platform/CMakeLists.txt
@@ -31,10 +31,6 @@ if(WITH_PYTHON)
   endif()
 endif()
-cc_library(
-  flags
-  SRCS flags.cc
-  DEPS gflags)
 cc_library(
   denormal
   SRCS denormal.cc
@@ -178,11 +174,6 @@ if(WITH_GLOO)
     DEPS framework_proto gloo_wrapper enforce)
 endif()
-cc_library(
-  cudnn_workspace_helper
-  SRCS cudnn_workspace_helper.cc
-  DEPS)
-
 # separate init from device_context to avoid cycle dependencies
 cc_library(
   init
diff --git a/paddle/fluid/platform/cpu_info.cc b/paddle/fluid/platform/cpu_info.cc
index 8ce9dc54b021d4a6033f9189c2e5bb6bb4f4c409..7edc322a90f5aef6d37cdd0ce8edd4dfee9613cf 100644
--- a/paddle/fluid/platform/cpu_info.cc
+++ b/paddle/fluid/platform/cpu_info.cc
@@ -32,7 +32,7 @@ limitations under the License. */
 #include
-#include "paddle/fluid/platform/flags.h"
+#include "paddle/phi/core/flags.h"
 DECLARE_double(fraction_of_cpu_memory_to_use);
 DECLARE_uint64(initial_cpu_memory_in_mb);
diff --git a/paddle/fluid/platform/device/gpu/gpu_info.cc b/paddle/fluid/platform/device/gpu/gpu_info.cc
index b25f15d688730f182e6d195bc8c9ae052eb1354d..6952ce33a9318626fd2d9675b264bc77819c7a74 100644
--- a/paddle/fluid/platform/device/gpu/gpu_info.cc
+++ b/paddle/fluid/platform/device/gpu/gpu_info.cc
@@ -24,7 +24,6 @@ limitations under the License. */
 #include "paddle/fluid/memory/memory.h"
 #include "paddle/fluid/platform/cuda_device_guard.h"
 #include "paddle/fluid/platform/enforce.h"
-#include "paddle/fluid/platform/flags.h"
 #include "paddle/fluid/platform/lock_guard_ptr.h"
 #include "paddle/fluid/platform/macros.h"
 #include "paddle/fluid/platform/monitor.h"
@@ -32,6 +31,7 @@ limitations under the License. */
 #include "paddle/fluid/platform/profiler/mem_tracing.h"
 #include "paddle/fluid/string/split.h"
 #include "paddle/phi/backends/gpu/gpu_info.h"
+#include "paddle/phi/core/flags.h"
 #ifdef PADDLE_WITH_HIP
 #include "paddle/fluid/platform/dynload/miopen.h"
diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h
index 4be05f24bc70adbafb903c9620cb929c8cac00d8..eff252b7d79701e46ad42def45385ee4bb85e6b2 100644
--- a/paddle/fluid/platform/enforce.h
+++ b/paddle/fluid/platform/enforce.h
@@ -101,7 +101,7 @@ limitations under the License. */
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 #include "paddle/fluid/platform/device/gpu/gpu_types.h"
 #endif
-#include "paddle/fluid/platform/flags.h"
+#include "paddle/phi/core/flags.h"
 namespace phi {
 class ErrorSummary;
diff --git a/paddle/fluid/platform/profiler/host_tracer.cc b/paddle/fluid/platform/profiler/host_tracer.cc
index db452b3427fd1a68280ca62a2496b273c7392cf7..185792f6750cdba1fdcdcb4ea617a82a3ca08ded 100644
--- a/paddle/fluid/platform/profiler/host_tracer.cc
+++ b/paddle/fluid/platform/profiler/host_tracer.cc
@@ -17,9 +17,9 @@
 #include "glog/logging.h"
 #include "paddle/fluid/framework/op_proto_maker.h"
-#include "paddle/fluid/platform/flags.h"
 #include "paddle/fluid/platform/profiler/common_event.h"
 #include "paddle/fluid/platform/profiler/host_event_recorder.h"
+#include "paddle/phi/core/flags.h"
 // Used to filter events, works like glog VLOG(level).
 // RecordEvent will works if host_trace_level >= level.
diff --git a/paddle/fluid/pybind/global_value_getter_setter.cc b/paddle/fluid/pybind/global_value_getter_setter.cc
index c45566ba356732bd9ada901c4e92aee4bbe5085c..e6935b577d737705bffabb8f47c43b6cd5d21ce0 100644
--- a/paddle/fluid/pybind/global_value_getter_setter.cc
+++ b/paddle/fluid/pybind/global_value_getter_setter.cc
@@ -252,7 +252,7 @@ static void RegisterGlobalVarGetterSetter() {
   REGISTER_PUBLIC_GLOBAL_VAR(FLAGS_rpc_prefetch_thread_num);
 #endif
-  const auto &flag_map = platform::GetExportedFlagInfoMap();
+  const auto &flag_map = phi::GetExportedFlagInfoMap();
   for (const auto &pair : flag_map) {
     const std::string &name = pair.second.name;
     bool is_writable = pair.second.is_writable;
diff --git a/paddle/phi/api/yaml/generator/ops_extra_info_gen.py b/paddle/phi/api/yaml/generator/ops_extra_info_gen.py
index 34b1f76338110257b93425635bc10c43cc924b03..9109399f9276ba4247eeeedd63dd07e591ab9669 100644
--- a/paddle/phi/api/yaml/generator/ops_extra_info_gen.py
+++ b/paddle/phi/api/yaml/generator/ops_extra_info_gen.py
@@ -22,7 +22,7 @@ def map_code_template(attrs_str, attrs_checker_str):
     return f"""// This file is generated by paddle/phi/api/yaml/generator/ops_extra_info_gen.py
 #include "paddle/fluid/operators/ops_extra_info.h"
-#include "paddle/fluid/platform/cudnn_workspace_helper.h"
+#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"
 namespace paddle {{
 namespace operators {{
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index 843ff811f911424be50855df7088a603a8eae10d..7d69e45638beb02b536fa667294485dc5687460d 100644
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -202,7 +202,7 @@
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f, float Scale_in_eltwise = 1.0f,
             'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
-            int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
+            int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
 - op : conv2d_fusion
   extra :
@@ -211,7 +211,7 @@
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f, float Scale_in_eltwise = 1.0f,
             'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
-            int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
+            int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
 - op : conv2d_transpose
   backward : conv2d_transpose_grad
@@ -219,7 +219,7 @@
     attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false, str fuse_activation = "",
             float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
-            int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
+            int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB()]
 - op : conv3d
   backward : conv3d_grad
@@ -227,12 +227,12 @@
     attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32",
             bool fuse_relu = false, str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
-            int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
+            int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
 - op : conv3d_transpose
   backward : conv3d_transpose_grad
   extra :
-    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
+    attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB()]
 - op : cos
   backward : cos_grad, cos_double_grad, cos_triple_grad
@@ -273,7 +273,7 @@
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f, bool use_addto = false,
             bool fuse_residual_connection = false, float Scale_in = 1.0f, float Scale_out = 1.0f, float Scale_in_eltwise = 1.0f,
             'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
-            int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
+            int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
 - op : depthwise_conv2d_transpose
   backward : depthwise_conv2d_transpose_grad
@@ -281,7 +281,7 @@
     attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
             str mkldnn_data_type = "float32", bool fuse_relu = false, str fuse_activation = "",
             float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
-            int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
+            int workspace_size_MB = phi::backends::gpu::GetDefaultConvWorkspaceSizeLimitMB()]
 - op : dequantize_linear
   extra :
diff --git a/paddle/phi/backends/CMakeLists.txt b/paddle/phi/backends/CMakeLists.txt
index f8a6b2174a830acf212e26bb289204ede08943e6..ddb7adaa92b83897158fc91db10bfae798103355 100644
--- a/paddle/phi/backends/CMakeLists.txt
+++ b/paddle/phi/backends/CMakeLists.txt
@@ -1,4 +1,5 @@
 add_subdirectory(dynload)
+add_subdirectory(gpu)
 set(BACKENDS_SRCS all_context.cc cpu/cpu_context.cc)
 set(BACKENDS_DEPS enforce place flags eigen3 phi_device_context)
diff --git a/paddle/phi/backends/gpu/CMakeLists.txt b/paddle/phi/backends/gpu/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..85d57c9b14482bbeaaf8476acaee9a50efb2daa0
--- /dev/null
+++ b/paddle/phi/backends/gpu/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(cuda)
diff --git a/paddle/phi/backends/gpu/cuda/CMakeLists.txt b/paddle/phi/backends/gpu/cuda/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7768cdd116122a140d67246b0d7fc8450b4cae22
--- /dev/null
+++ b/paddle/phi/backends/gpu/cuda/CMakeLists.txt
@@ -0,0 +1 @@
+cc_library(cudnn_workspace_helper SRCS cudnn_workspace_helper.cc)
diff --git a/paddle/fluid/platform/cudnn_workspace_helper.cc b/paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.cc
similarity index 79%
rename from paddle/fluid/platform/cudnn_workspace_helper.cc
rename to paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.cc
index bb0e9a226d15001d8f19eff136cb29152e1906fb..d5b484c8eeb561a9bbd652e8e39f887c41bc4a27 100644
--- a/paddle/fluid/platform/cudnn_workspace_helper.cc
+++ b/paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.cc
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -12,13 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "paddle/fluid/platform/cudnn_workspace_helper.h"
+#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"
 #include
 #include
-namespace paddle {
-namespace platform {
+namespace phi {
+namespace backends {
+namespace gpu {
 static int GetDefaultConvWorkspaceSizeLimitMBImpl() {
   const char *env_str = std::getenv("FLAGS_conv_workspace_size_limit");
@@ -30,6 +31,6 @@ int GetDefaultConvWorkspaceSizeLimitMB() {
   static auto workspace_size = GetDefaultConvWorkspaceSizeLimitMBImpl();
   return workspace_size;
 }
-
-}  // namespace platform
-}  // namespace paddle
+}  // namespace gpu
+}  // namespace backends
+}  // namespace phi
diff --git a/paddle/fluid/platform/cudnn_workspace_helper.h b/paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h
similarity index 78%
rename from paddle/fluid/platform/cudnn_workspace_helper.h
rename to paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h
index 766dbe113c33705d54ce9e25aae5c20ecaf2a068..6b7a1f6eec7bb1bb170594a8d58a21b300346ef9 100644
--- a/paddle/fluid/platform/cudnn_workspace_helper.h
+++ b/paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
+// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -14,12 +14,13 @@
 #pragma once
-namespace paddle {
-namespace platform {
+namespace phi {
+namespace backends {
+namespace gpu {
 static constexpr int kDefaultConvWorkspaceSizeLimitMB = 512;
 int GetDefaultConvWorkspaceSizeLimitMB();
-
-}  // namespace platform
-}  // namespace paddle
+}  // namespace gpu
+}  // namespace backends
+}  // namespace phi
diff --git a/paddle/phi/backends/xpu/xpu_info.cc b/paddle/phi/backends/xpu/xpu_info.cc
index 89ebce438a46492db369345434d8ac577fc634d5..997d3cd9d11bb4df39c0691f70cb30e38fc569cb 100644
--- a/paddle/phi/backends/xpu/xpu_info.cc
+++ b/paddle/phi/backends/xpu/xpu_info.cc
@@ -21,7 +21,7 @@ limitations under the License. */
 // TODO(wilber): The phi computing library requires a component to manage
 // flags.
-#include "paddle/fluid/platform/flags.h"
+#include "paddle/phi/core/flags.h"
 PADDLE_DEFINE_EXPORTED_string(
     selected_xpus,
diff --git a/paddle/phi/core/CMakeLists.txt b/paddle/phi/core/CMakeLists.txt
index 6dc43ff633f1910d89c70d05b23f22260fc32be5..92d87cf79bd7045abc7b336d54f914960dd464ce 100644
--- a/paddle/phi/core/CMakeLists.txt
+++ b/paddle/phi/core/CMakeLists.txt
@@ -5,6 +5,11 @@ if(WITH_GPU)
   proto_library(external_error_proto SRCS external_error.proto)
 endif()
+cc_library(
+  flags
+  SRCS flags.cc
+  DEPS gflags)
+
 cc_library(errors SRCS errors.cc)
 set(phi_enforce_deps errors flags)
 if(WITH_GPU)
diff --git a/paddle/fluid/platform/flags.cc b/paddle/phi/core/flags.cc
similarity index 98%
rename from paddle/fluid/platform/flags.cc
rename to paddle/phi/core/flags.cc
index b809e026544bb8715508e9582087c4bd6d272f06..f11e09cf890e892745b2bd903e28112e85d635f9 100644
--- a/paddle/fluid/platform/flags.cc
+++ b/paddle/phi/core/flags.cc
@@ -13,13 +13,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "paddle/fluid/platform/flags.h"
+#include "paddle/phi/core/flags.h"
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
-#include "paddle/fluid/platform/cudnn_workspace_helper.h"
+#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"
 #endif
-namespace paddle {
-namespace platform {
+namespace phi {
 const ExportedFlagInfoMap &GetExportedFlagInfoMap() {
   return *GetMutableExportedFlagInfoMap();
@@ -30,8 +29,7 @@ ExportedFlagInfoMap *GetMutableExportedFlagInfoMap() {
   return &g_exported_flag_info_map;
 }
-}  // namespace platform
-}  // namespace paddle
+}  // namespace phi
 PADDLE_DEFINE_EXPORTED_int32(inner_op_parallelism,
                              0,
@@ -261,9 +259,10 @@
  * increased.
  * Users need to balance memory and speed.
 */
-PADDLE_DEFINE_EXPORTED_int64(conv_workspace_size_limit,
-                             paddle::platform::kDefaultConvWorkspaceSizeLimitMB,
-                             "cuDNN convolution workspace limit in MB unit.");
+PADDLE_DEFINE_EXPORTED_int64(
+    conv_workspace_size_limit,
+    phi::backends::gpu::kDefaultConvWorkspaceSizeLimitMB,
+    "cuDNN convolution workspace limit in MB unit.");
 /**
  * CUDNN related FLAG
diff --git a/paddle/fluid/platform/flags.h b/paddle/phi/core/flags.h
similarity index 95%
rename from paddle/fluid/platform/flags.h
rename to paddle/phi/core/flags.h
index 6db5e710b8dc83f6614f7cc6e284f046e4d74908..e9ace6206255d4992ecee894dde4a97bbd034c09 100644
--- a/paddle/fluid/platform/flags.h
+++ b/paddle/phi/core/flags.h
@@ -20,12 +20,11 @@
 #include
 #include "gflags/gflags.h"
-#include "paddle/fluid/platform/macros.h"
+#include "paddle/phi/core/macros.h"
 #include "paddle/utils/variant.h"
-namespace paddle {
-namespace platform {
+namespace phi {
 struct FlagInfo {
   using ValueType =
@@ -51,7 +50,7 @@ ExportedFlagInfoMap *GetMutableExportedFlagInfoMap();
       static_assert(std::is_same::value ||                                  \
                         std::is_arithmetic::value,                          \
                     "FLAGS should be std::string or arithmetic type");      \
-      auto *instance = ::paddle::platform::GetMutableExportedFlagInfoMap(); \
+      auto *instance = ::phi::GetMutableExportedFlagInfoMap();              \
       auto &info = (*instance)[#__name];                                    \
       info.name = #__name;                                                  \
       info.value_ptr = &(FLAGS_##__name);                                   \
@@ -96,5 +95,4 @@ ExportedFlagInfoMap *GetMutableExportedFlagInfoMap();
   __PADDLE_DEFINE_EXPORTED_FLAG(                                            \
       name, true, ::std::string, string, default_value, doc)
-}  // namespace platform
-}  // namespace paddle
+}  // namespace phi
diff --git a/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu b/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu
index fd6e92b2ffe06df67056874c545b9264b76a743d..252fbe7d21b74406fa2a89d10f1ed77230555f09 100644
--- a/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu
+++ b/paddle/phi/kernels/gpu/batch_norm_grad_kernel.cu
@@ -14,11 +14,11 @@
 #include "paddle/fluid/operators/layout_utils.h"
 #include "paddle/fluid/operators/norm_utils.cu.h"
-#include "paddle/fluid/platform/flags.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_dnn.h"
 #include "paddle/phi/common/layout.h"
 #include "paddle/phi/core/enforce.h"
+#include "paddle/phi/core/flags.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/batch_norm_kernel.h"
 #include "paddle/phi/kernels/empty_kernel.h"
diff --git a/paddle/phi/kernels/gpu/batch_norm_kernel.cu b/paddle/phi/kernels/gpu/batch_norm_kernel.cu
index cd1665ba9032e6b9c19c4629268ab7dfce5a5615..d01397c1fa0665a2059ed0e11a1c306acddfb98e 100644
--- a/paddle/phi/kernels/gpu/batch_norm_kernel.cu
+++ b/paddle/phi/kernels/gpu/batch_norm_kernel.cu
@@ -22,11 +22,11 @@ namespace cub = hipcub;
 #include "paddle/fluid/operators/layout_utils.h"
 #include "paddle/fluid/operators/norm_utils.cu.h"
-#include "paddle/fluid/platform/flags.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/backends/gpu/gpu_dnn.h"
 #include "paddle/phi/common/layout.h"
 #include "paddle/phi/core/enforce.h"
+#include "paddle/phi/core/flags.h"
 #include "paddle/phi/core/kernel_registry.h"
 #include "paddle/phi/kernels/batch_norm_kernel.h"
 #include "paddle/phi/kernels/funcs/batch_norm_utils.h"
diff --git a/paddle/phi/kernels/gpu/gelu_funcs.h b/paddle/phi/kernels/gpu/gelu_funcs.h
index 81f5354d42c5666590e4cc9e284c3ad3615512ae..42b5322426f70f85d4fcff283d344f138cd0f634 100644
--- a/paddle/phi/kernels/gpu/gelu_funcs.h
+++ b/paddle/phi/kernels/gpu/gelu_funcs.h
@@ -14,10 +14,10 @@
 #pragma once
-#include "paddle/fluid/platform/flags.h"
 #include "paddle/phi/backends/gpu/gpu_context.h"
 #include "paddle/phi/common/amp_type_traits.h"
 #include "paddle/phi/common/place.h"
+#include "paddle/phi/core/flags.h"
 #include "paddle/phi/kernels/funcs/aligned_vector.h"
 DECLARE_bool(use_fast_math);
diff --git a/paddle/phi/kernels/gpudnn/conv_grad_kernel.cu b/paddle/phi/kernels/gpudnn/conv_grad_kernel.cu
index f1114e59bad75454e63e1a8d4aba1d7a40482f2a..166403454a4176be8610dce0d26085de55ab8814 100644
--- a/paddle/phi/kernels/gpudnn/conv_grad_kernel.cu
+++ b/paddle/phi/kernels/gpudnn/conv_grad_kernel.cu
@@ -23,8 +23,8 @@
 #include "paddle/phi/kernels/gpudnn/conv_cudnn_v7.h"
 #endif
-#include "paddle/fluid/platform/cudnn_workspace_helper.h"
 #include "paddle/fluid/platform/profiler.h"
+#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"
 #include "paddle/phi/common/bfloat16.h"
 #include "paddle/phi/common/float16.h"
 #include "paddle/phi/kernels/cpu/conv_util.h"
diff --git a/paddle/phi/kernels/gpudnn/conv_kernel.cu b/paddle/phi/kernels/gpudnn/conv_kernel.cu
index 04ac84d606effd425df0aaf645bc7bce1d0003d0..40d75ffcdf1f94eaacfe65e06ecd568d94a882bc 100644
--- a/paddle/phi/kernels/gpudnn/conv_kernel.cu
+++ b/paddle/phi/kernels/gpudnn/conv_kernel.cu
@@ -24,8 +24,8 @@
 #include "paddle/phi/kernels/gpudnn/conv_cudnn_v7.h"
 #endif
-#include "paddle/fluid/platform/cudnn_workspace_helper.h"
 #include "paddle/fluid/platform/profiler.h"
+#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"
 #include "paddle/phi/common/bfloat16.h"
 #include "paddle/phi/common/float16.h"
 #include "paddle/phi/kernels/cpu/conv_util.h"
diff --git a/paddle/phi/kernels/impl/conv_cudnn_impl.h b/paddle/phi/kernels/impl/conv_cudnn_impl.h
index cf419cd1fd4b38a26e8888b401899ff26b9c1405..300976fbef6b13f2acb57de81c7f4671ad540f90 100644
--- a/paddle/phi/kernels/impl/conv_cudnn_impl.h
+++ b/paddle/phi/kernels/impl/conv_cudnn_impl.h
@@ -23,9 +23,9 @@
 #include "paddle/phi/kernels/gpudnn/conv_cudnn_v7.h"
 #endif
-#include "paddle/fluid/platform/cudnn_workspace_helper.h"
 #include "paddle/fluid/platform/profiler.h"
 #include "paddle/phi/backends/dynload/cudnn.h"
+#include "paddle/phi/backends/gpu/cuda/cudnn_workspace_helper.h"
 #include "paddle/phi/common/float16.h"
 #include "paddle/phi/kernels/cpu/conv_util.h"
 #include "paddle/phi/kernels/funcs/batch_norm_utils.h"
diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc
index 16c683e39fa8cd4b0e3ed469d6ea46ba221e5ee9..6a57737ac4127da6a30b635f0c72932ba228d091 100644
--- a/paddle/testing/paddle_gtest_main.cc
+++ b/paddle/testing/paddle_gtest_main.cc
@@ -17,8 +17,8 @@ limitations under the License. */
 #include "paddle/fluid/framework/phi_utils.h"
 #include "paddle/fluid/memory/allocation/allocator_strategy.h"
 #include "paddle/fluid/platform/device/npu/npu_info.h"
-#include "paddle/fluid/platform/flags.h"
 #include "paddle/fluid/platform/init.h"
+#include "paddle/phi/core/flags.h"
 #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
 DECLARE_bool(enable_gpu_memory_usage_log);
@@ -43,7 +43,7 @@ int main(int argc, char** argv) {
   }
 #endif
-  const auto& flag_map = paddle::platform::GetExportedFlagInfoMap();
+  const auto& flag_map = phi::GetExportedFlagInfoMap();
   for (const auto& pair : flag_map) {
     const std::string& name = pair.second.name;
     // NOTE(zhiqiu): some names may not linked in some tests, so add to
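
For reference, a minimal caller-side sketch of the relocated flag API follows. This is an illustration only, not part of the patch: the flag name "demo_limit_mb" and the standalone main() are hypothetical, and building it assumes linking against the relocated "flags" target from paddle/phi/core/CMakeLists.txt. The header path, the PADDLE_DEFINE_EXPORTED_int64 macro, and phi::GetExportedFlagInfoMap() are taken from the diff above.

// Hypothetical sketch: define one exported flag through the relocated macro,
// then walk the exported-flag registry the way paddle_gtest_main.cc and
// analysis_predictor.cc do after this change. "demo_limit_mb" is invented
// for the example; the rest mirrors the patch.
#include <iostream>
#include <string>

#include "paddle/phi/core/flags.h"

// Registers FLAGS_demo_limit_mb and records it in phi's exported-flag map.
PADDLE_DEFINE_EXPORTED_int64(demo_limit_mb,
                             512,
                             "Hypothetical flag used only for illustration.");

int main() {
  // GetExportedFlagInfoMap() now lives directly in namespace phi.
  const phi::ExportedFlagInfoMap &flag_map = phi::GetExportedFlagInfoMap();
  for (const auto &pair : flag_map) {
    const std::string &name = pair.second.name;
    std::cout << "exported flag: " << name
              << " (writable: " << pair.second.is_writable << ")" << std::endl;
  }
  return 0;
}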