diff --git a/paddle/fluid/inference/analysis/passes/CMakeLists.txt b/paddle/fluid/inference/analysis/passes/CMakeLists.txt
index 9d74dc6c211e4fcb6d1e7de5369eee847f49fc78..a8d0c69a54ab39781613d26474098450398d4c1b 100644
--- a/paddle/fluid/inference/analysis/passes/CMakeLists.txt
+++ b/paddle/fluid/inference/analysis/passes/CMakeLists.txt
@@ -3,11 +3,13 @@ cc_library(ir_analysis_pass SRCS ir_analysis_pass.cc DEPS analysis_pass argument
 cc_library(memory_optim_pass SRCS memory_optimize_pass.cc DEPS analysis_pass zero_copy_tensor)
 cc_library(ir_params_sync_among_devices_pass SRCS ir_params_sync_among_devices_pass.cc DEPS analysis_pass argument ir_pass_manager)
 cc_library(ir_graph_to_program_pass SRCS ir_graph_to_program_pass.cc DEPS analysis_pass graph_to_program_pass)
+cc_library(adjust_cudnn_workspace_size_pass SRCS adjust_cudnn_workspace_size_pass.cc DEPS analysis_pass graph_to_program_pass)
 
 cc_library(analysis_passes SRCS passes.cc DEPS
            ir_graph_build_pass
            ir_analysis_pass
            ir_params_sync_among_devices_pass
+           adjust_cudnn_workspace_size_pass
            memory_optim_pass
            ir_graph_to_program_pass
            )
diff --git a/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.cc b/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.cc
new file mode 100644
index 0000000000000000000000000000000000000000..0470e0d5a247163ecd7e7dd1e8f88e6b71ae93d7
--- /dev/null
+++ b/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.cc
@@ -0,0 +1,43 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+void AdjustCudnnWorkSpacePass::RunImpl(Argument* argument) {
+  if (!argument->use_gpu()) return;
+  auto& graph = argument->main_graph();
+  auto nodes = graph.Nodes();
+  const int cudnn_workspace_size_MB = 64;
+  const std::string attr_name = "workspace_size_MB";
+
+  for (auto& node : nodes) {
+    if (!node->IsOp()) continue;
+    auto* op_desc = node->Op();
+    if (!op_desc->HasAttr(attr_name)) continue;
+    op_desc->SetAttr(attr_name, cudnn_workspace_size_MB);
+    op_desc->Flush();
+  }
+}
+
+std::string AdjustCudnnWorkSpacePass::repr() const {
+  return "adjust-cudnn-work-space-pass";
+}
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.h b/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.h
new file mode 100644
index 0000000000000000000000000000000000000000..65d1c545313e110028a92776e73a070d32010420
--- /dev/null
+++ b/paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.h
@@ -0,0 +1,41 @@
+// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include "paddle/fluid/framework/ir/fuse_pass_base.h"
+#include "paddle/fluid/framework/scope.h"
+#include "paddle/fluid/inference/analysis/analysis_pass.h"
+#include "paddle/fluid/platform/place.h"
+
+namespace paddle {
+namespace inference {
+namespace analysis {
+
+/*
+ * The default cuDNN workspace is 4 GB; this pass sets it to 64 MB, which
+ * is sufficient for most inference tasks.
+ */
+class AdjustCudnnWorkSpacePass : public AnalysisPass {
+ public:
+  void RunImpl(Argument *argument) override;
+  std::string repr() const override;
+};
+
+}  // namespace analysis
+}  // namespace inference
+}  // namespace paddle
diff --git a/paddle/fluid/inference/analysis/passes/passes.cc b/paddle/fluid/inference/analysis/passes/passes.cc
index 161b127d6d5ceb3e8b9c1cf98c69eb0387bfb905..a55904ed536bad31c82888ede2db3178f3fd5e47 100644
--- a/paddle/fluid/inference/analysis/passes/passes.cc
+++ b/paddle/fluid/inference/analysis/passes/passes.cc
@@ -13,6 +13,7 @@
 // limitations under the License.
 
 #include "paddle/fluid/inference/analysis/passes/passes.h"
+#include "paddle/fluid/inference/analysis/passes/adjust_cudnn_workspace_size_pass.h"
 #include "paddle/fluid/inference/analysis/passes/ir_analysis_pass.h"
 #include "paddle/fluid/inference/analysis/passes/ir_graph_build_pass.h"
 #include "paddle/fluid/inference/analysis/passes/ir_graph_to_program_pass.h"
@@ -35,6 +36,8 @@ PassRegistry::PassRegistry() {
   passes_.emplace(
       "ir_params_sync_among_devices_pass",
       std::unique_ptr<AnalysisPass>(new IrParamsSyncAmongDevicesPass));
+  passes_.emplace("adjust_cudnn_workspace_size_pass",
+                  std::unique_ptr<AnalysisPass>(new AdjustCudnnWorkSpacePass));
   passes_.emplace(
       "ir_graph_to_program_pass",
       std::unique_ptr<AnalysisPass>(new IrGraphToProgramPass));
diff --git a/paddle/fluid/inference/analysis/passes/passes.h b/paddle/fluid/inference/analysis/passes/passes.h
index ea07e0dcbd992c9d10c6662909798ef79a01e3a7..8a13091d083e51ecc84e6790f973ffa39ba5a6b9 100644
--- a/paddle/fluid/inference/analysis/passes/passes.h
+++ b/paddle/fluid/inference/analysis/passes/passes.h
@@ -14,7 +14,9 @@
 
 #pragma once
 
+#include <memory>
 #include <string>
+#include <unordered_map>
 #include "paddle/fluid/inference/analysis/analysis_pass.h"
 
 namespace paddle {
diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc
index 321107377c286eb4212b62be481a3417ef3f72d2..e57d3a80456767848143412b2524f94fa09c7c13 100644
--- a/paddle/fluid/inference/api/analysis_predictor.cc
+++ b/paddle/fluid/inference/api/analysis_predictor.cc
@@ -120,7 +120,11 @@ bool AnalysisPredictor::PrepareScope(
     scope_ = parent_scope;
     status_is_cloned_ = true;
   } else {
-    paddle::framework::InitDevices(false);
+    if (config_.use_gpu_) {
+      paddle::framework::InitDevices(false, {config_.device_id_});
+    } else {
+      paddle::framework::InitDevices(false, {});
+    }
     scope_.reset(new paddle::framework::Scope());
     status_is_cloned_ = false;
   }
@@ -459,6 +463,8 @@ std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
     std::string flag = "--fraction_of_gpu_memory_to_use=" +
                        std::to_string(fraction_of_gpu_memory);
     flags.push_back(flag);
+    flags.push_back("--selected_gpus=" +
+                    std::to_string(config.gpu_device_id()));
     VLOG(3) << "set flag: " << flag;
     framework::InitGflags(flags);
   }
diff --git a/paddle/fluid/inference/api/paddle_pass_builder.h b/paddle/fluid/inference/api/paddle_pass_builder.h
index 09ef195d5e66aff0cef17f1594de34c656187a35..057e7dc65d5fd41212cbee77a2a4f4431b011182 100644
--- a/paddle/fluid/inference/api/paddle_pass_builder.h
+++ b/paddle/fluid/inference/api/paddle_pass_builder.h
@@ -73,7 +73,8 @@ class PaddlePassBuilder {
 
  protected:
   std::vector<std::string> analysis_passes_{
       {"ir_graph_build_pass", "ir_analysis_pass",
-       "ir_params_sync_among_devices_pass"}};
+       "ir_params_sync_among_devices_pass",
+       "adjust_cudnn_workspace_size_pass"}};
   std::vector<std::string> passes_;
 };
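
Note (not part of the patch): a minimal sketch of how an application exercises these changes through the public C++ inference API, assuming a hypothetical model directory "./mobilenet_v1" and GPU device 0. Creating the predictor runs the analysis passes listed in PaddlePassBuilder::analysis_passes_, which now include adjust_cudnn_workspace_size_pass, and the configured device id is the one forwarded to InitDevices and --selected_gpus.

#include <memory>

#include "paddle/fluid/inference/api/paddle_inference_api.h"

int main() {
  paddle::AnalysisConfig config;
  // Hypothetical model directory; replace with a real saved inference model.
  config.SetModel("./mobilenet_v1");
  // 100 MB initial GPU memory pool on device 0; this device id is what the
  // patched AnalysisPredictor::PrepareScope() and --selected_gpus now honor.
  config.EnableUseGpu(100, 0);

  // Building the predictor triggers the analysis passes, including the new
  // adjust_cudnn_workspace_size_pass, which sets workspace_size_MB to 64 on
  // every op that declares that attribute.
  auto predictor = paddle::CreatePaddlePredictor(config);
  return predictor != nullptr ? 0 : 1;
}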