From 5f98500009e058a22211e55b7d9c5884d4d7a89f Mon Sep 17 00:00:00 2001
From: dzhwinter
Date: Wed, 10 Jan 2018 13:05:56 +0800
Subject: [PATCH] Make init device on all gpu by default (#7345)

* "init use all default devices"

* "fix init test"
---
 .../framework/device_data_transform_test.cu |  3 +-
 paddle/framework/init.cc                    | 39 ++++++-------------
 paddle/framework/init.h                     |  2 +-
 paddle/framework/init_test.cc               | 17 +++-----
 paddle/framework/operator_test.cc           |  8 ++--
 paddle/inference/inference.cc               |  2 +-
 paddle/platform/device_context.h            |  2 +
 paddle/testing/paddle_gtest_main.cc         |  6 +--
 python/paddle/v2/fluid/__init__.py          |  6 +--
 .../v2/fluid/tests/test_batch_norm_op.py    |  3 --
 10 files changed, 30 insertions(+), 58 deletions(-)

diff --git a/paddle/framework/device_data_transform_test.cu b/paddle/framework/device_data_transform_test.cu
index 9fb26f09c7..5d89f5546f 100644
--- a/paddle/framework/device_data_transform_test.cu
+++ b/paddle/framework/device_data_transform_test.cu
@@ -105,8 +105,7 @@ static void BuildVar(const std::string& param_name,
 TEST(Operator, CPUtoGPU) {
   using namespace paddle::framework;
   using namespace paddle::platform;
-
-  ASSERT_EQ(InitDevices({"CPU", "GPU:0"}), true);
+  InitDevices();
 
   paddle::framework::Scope scope;
   paddle::platform::CPUPlace cpu_place;
diff --git a/paddle/framework/init.cc b/paddle/framework/init.cc
index e7087e063c..e12bac1d78 100644
--- a/paddle/framework/init.cc
+++ b/paddle/framework/init.cc
@@ -40,40 +40,23 @@ void InitGflags(std::vector<std::string> &argv) {
   });
 }
 
-bool InitDevices(const std::vector<std::string> &devices) {
-  // device format
-  // CPU
-  // GPU:1
-  // TODO(dzhwinter) : add device format annotation for users.
+void InitDevices() {
+  /*Init all avaiable devices by default */
+
   std::vector<platform::Place> places;
-  for (auto &device : devices) {
-    auto p = string::Piece(device);
-    if (string::HasPrefix(p, "CPU")) {
-      places.emplace_back(platform::CPUPlace());
-    } else if (string::HasPrefix(p, "GPU")) {
+  places.emplace_back(platform::CPUPlace());
+
 #ifdef PADDLE_WITH_CUDA
-      auto pos = string::RFind(p, ':', string::Piece::npos);
-      auto number = device.substr(pos + 1);
-      places.emplace_back(platform::CUDAPlace(std::stoi(number)));
+  int count = platform::GetCUDADeviceCount();
+  for (int i = 0; i < count; ++i) {
+    places.emplace_back(platform::CUDAPlace(i));
+  }
 #else
-      LOG(WARNING)
-          << "'GPU' is not supported, Please re-compile with WITH_GPU option";
+  LOG(WARNING)
+      << "'GPU' is not supported, Please re-compile with WITH_GPU option";
 #endif
-    } else {
-      return false;
-    }
-  }
 
-  if (std::find_if(places.begin(), places.end(),
-                   [&](const platform::Place &place) {
-                     return platform::is_cpu_place(place);
-                   }) == places.end()) {
-    places.emplace_back(platform::CPUPlace());
-    LOG(WARNING) << "Not specified CPU device, create CPU by Default.";
-  }
   platform::DeviceContextPool::Init(places);
-  // framework::UseALL();
-  return true;
 }
 
 void InitGLOG(const std::string &prog_name) {
diff --git a/paddle/framework/init.h b/paddle/framework/init.h
index 9c84a03ded..c8fd964d00 100644
--- a/paddle/framework/init.h
+++ b/paddle/framework/init.h
@@ -24,7 +24,7 @@ void InitGflags(std::vector<std::string> &argv);
 
 void InitGLOG(const std::string &prog_name);
 
-bool InitDevices(const std::vector<std::string> &devices);
+void InitDevices();
 
 }  // namespace framework
 }  // namespace paddle
diff --git a/paddle/framework/init_test.cc b/paddle/framework/init_test.cc
index f0788051d4..f837a965d3 100644
--- a/paddle/framework/init_test.cc
+++ b/paddle/framework/init_test.cc
@@ -14,18 +14,13 @@ limitations under the License.
 */
 #include "gtest/gtest.h"
 #include "paddle/framework/init.h"
+#include "paddle/platform/device_context.h"
 
-TEST(Init, InitDevices) {
+TEST(InitDevices, CPU) {
   using paddle::framework::InitDevices;
-  std::vector<std::string> ds1 = {"CPU"};
-  ASSERT_EQ(InitDevices(ds1), true);
+  using paddle::platform::DeviceContextPool;
 
-#ifdef PADDLE_WITH_CUDA
-  std::vector<std::string> ds2 = {"CPU", "GPU:0", "GPU:1"};
-  ASSERT_EQ(InitDevices(ds2), true);
-
-  // test re-init
-  std::vector<std::string> ds3 = {"GPU:0", "GPU:1"};
-  ASSERT_EQ(InitDevices(ds3), true);
-#endif
+  InitDevices();
+  DeviceContextPool& pool = DeviceContextPool::Instance();
+  ASSERT_GE(pool.size(), 1U);
 }
diff --git a/paddle/framework/operator_test.cc b/paddle/framework/operator_test.cc
index d002f3f238..b69d7c7a74 100644
--- a/paddle/framework/operator_test.cc
+++ b/paddle/framework/operator_test.cc
@@ -69,7 +69,7 @@ REGISTER_OP_WITHOUT_GRADIENT(test_operator,
                              paddle::framework::OpWithoutKernelCheckerMaker);
 
 TEST(OperatorBase, all) {
-  paddle::framework::InitDevices({"CPU"});
+  paddle::framework::InitDevices();
   paddle::framework::proto::OpDesc op_desc;
   op_desc.set_type("test_operator");
   BuildVar("input", {"IN1"}, op_desc.add_inputs());
@@ -195,7 +195,7 @@ REGISTER_OP_CPU_KERNEL(op_with_kernel,
 
 // test with single input
 TEST(OpKernel, all) {
-  paddle::framework::InitDevices({"CPU"});
+  paddle::framework::InitDevices();
   paddle::framework::proto::OpDesc op_desc;
   op_desc.set_type("op_with_kernel");
   BuildVar("x", {"IN1"}, op_desc.add_inputs());
@@ -225,7 +225,7 @@ REGISTER_OP_CPU_KERNEL(op_multi_inputs_with_kernel,
 
 TEST(OpKernel, multi_inputs) {
   using namespace paddle::framework;
-  paddle::framework::InitDevices({"CPU"});
+  paddle::framework::InitDevices();
   proto::OpDesc op_desc;
 
   op_desc.set_type("op_multi_inputs_with_kernel");
@@ -264,7 +264,7 @@ class OperatorClone : public paddle::framework::OperatorBase {
 };
 
 TEST(Operator, Clone) {
-  paddle::framework::InitDevices({"CPU"});
+  paddle::framework::InitDevices();
   OperatorClone a("ABC", paddle::framework::VariableNameMap{},
                   paddle::framework::VariableNameMap{},
                   paddle::framework::AttributeMap{});
diff --git a/paddle/inference/inference.cc b/paddle/inference/inference.cc
index 49e39358e8..37b8b20ddf 100644
--- a/paddle/inference/inference.cc
+++ b/paddle/inference/inference.cc
@@ -169,7 +169,7 @@ void InferenceEngine::Execute(const std::vector& feeds,
   }
 
   auto* place = new platform::CPUPlace();
-  framework::InitDevices({"CPU"});
+  framework::InitDevices();
   framework::Executor* executor = new framework::Executor(*place);
   framework::Scope* scope = new framework::Scope();
 
diff --git a/paddle/platform/device_context.h b/paddle/platform/device_context.h
index 7a0040c9c2..9826a64276 100644
--- a/paddle/platform/device_context.h
+++ b/paddle/platform/device_context.h
@@ -185,6 +185,8 @@ class DeviceContextPool {
         const typename DefaultDeviceContextType<Place>::TYPE*>(Get(place));
   }
 
+  size_t size() const { return device_contexts_.size(); }
+
  private:
   static DeviceContextPool* pool;
   constexpr static int LEFT_SHIFT = 8;
diff --git a/paddle/testing/paddle_gtest_main.cc b/paddle/testing/paddle_gtest_main.cc
index 108ff335bf..a7fb50ee41 100644
--- a/paddle/testing/paddle_gtest_main.cc
+++ b/paddle/testing/paddle_gtest_main.cc
@@ -34,11 +34,11 @@ int main(int argc, char** argv) {
   google::ParseCommandLineFlags(&new_argc, &new_argv_address, false);
   testing::InitGoogleTest(&argc, argv);
   paddle::memory::Used(paddle::platform::CPUPlace());
-  std::vector<std::string> devs = {"CPU"};
+
 #ifdef PADDLE_WITH_CUDA
   paddle::memory::Used(paddle::platform::CUDAPlace(0));
-  devs.push_back("GPU:0");
 #endif
-  paddle::framework::InitDevices(devs);
+
+  paddle::framework::InitDevices();
   return RUN_ALL_TESTS();
 }
diff --git a/python/paddle/v2/fluid/__init__.py b/python/paddle/v2/fluid/__init__.py
index 3f178e252c..ccd5998e35 100644
--- a/python/paddle/v2/fluid/__init__.py
+++ b/python/paddle/v2/fluid/__init__.py
@@ -62,11 +62,7 @@ def __bootstrap__():
     core.init_gflags([sys.argv[0]] +
                      ["--tryfromenv=" + ",".join(read_env_flags)])
     core.init_glog(sys.argv[0])
-
-    if core.is_compile_gpu():
-        core.init_devices(["CPU", "GPU:0"])
-    else:
-        core.init_devices(["CPU"])
+    core.init_devices()
 
 
 __bootstrap__()
diff --git a/python/paddle/v2/fluid/tests/test_batch_norm_op.py b/python/paddle/v2/fluid/tests/test_batch_norm_op.py
index abbd48d2b8..ac9418549f 100644
--- a/python/paddle/v2/fluid/tests/test_batch_norm_op.py
+++ b/python/paddle/v2/fluid/tests/test_batch_norm_op.py
@@ -341,9 +341,6 @@ class TestBatchNormOp(OpTest):
 
         if core.is_compile_gpu() and core.op_support_gpu("batch_norm"):
            places.append(core.CUDAPlace(0))
-            core.init_devices(["CPU", "GPU:0"])
-        else:
-            core.init_devices(["CPU"])
        for place in places:
            for data_format in ["NCHW", "NHWC"]:
                test_with_place(place, data_format, [2, 3, 4, 5])
-- 
GitLab
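Usage sketch (not part of the patch; assumes the Paddle headers touched above): after this change a caller no longer passes device strings. InitDevices() registers the CPU place plus every visible CUDA device, so DeviceContextPool ends up with at least one context. The hypothetical standalone program below mirrors the updated init_test.cc.

    // Hypothetical caller of the new zero-argument InitDevices().
    #include "paddle/framework/init.h"
    #include "paddle/platform/device_context.h"

    int main() {
      paddle::framework::InitDevices();  // no device-string list anymore
      auto& pool = paddle::platform::DeviceContextPool::Instance();
      // Pool now holds a CPU context, plus one per visible GPU when built
      // with PADDLE_WITH_CUDA.
      return pool.size() >= 1 ? 0 : 1;
    }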