Commit 5f985000 authored by dzhwinter, committed by Yang Yang(Tony)

Make init use all GPU devices by default (#7345)

* Init uses all available devices by default

* Fix init test
Parent f8d13c2a
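For orientation before the hunks: every call site that previously enumerated devices by hand now makes one parameterless call. A before/after sketch of a typical caller, using only names that appear in this diff:

    // Before: callers listed devices explicitly and checked a bool result.
    bool ok = paddle::framework::InitDevices({"CPU", "GPU:0"});

    // After: one call registers the CPU place plus every CUDA device
    // visible at runtime (or only the CPU place in non-GPU builds).
    paddle::framework::InitDevices();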
@@ -105,8 +105,7 @@ static void BuildVar(const std::string& param_name,
 TEST(Operator, CPUtoGPU) {
   using namespace paddle::framework;
   using namespace paddle::platform;
-  ASSERT_EQ(InitDevices({"CPU", "GPU:0"}), true);
+  InitDevices();
   paddle::framework::Scope scope;
   paddle::platform::CPUPlace cpu_place;
...
@@ -40,40 +40,23 @@ void InitGflags(std::vector<std::string> &argv) {
   });
 }
 
-bool InitDevices(const std::vector<std::string> &devices) {
-  // device format
-  // CPU
-  // GPU:1
-  // TODO(dzhwinter) : add device format annotation for users.
+void InitDevices() {
+  /* Init all available devices by default */
   std::vector<platform::Place> places;
-  for (auto &device : devices) {
-    auto p = string::Piece(device);
-    if (string::HasPrefix(p, "CPU")) {
   places.emplace_back(platform::CPUPlace());
-    } else if (string::HasPrefix(p, "GPU")) {
 #ifdef PADDLE_WITH_CUDA
-      auto pos = string::RFind(p, ':', string::Piece::npos);
-      auto number = device.substr(pos + 1);
-      places.emplace_back(platform::CUDAPlace(std::stoi(number)));
+  int count = platform::GetCUDADeviceCount();
+  for (int i = 0; i < count; ++i) {
+    places.emplace_back(platform::CUDAPlace(i));
+  }
 #else
   LOG(WARNING)
       << "'GPU' is not supported, Please re-compile with WITH_GPU option";
 #endif
-    } else {
-      return false;
-    }
-  }
-
-  if (std::find_if(places.begin(), places.end(),
-                   [&](const platform::Place &place) {
-                     return platform::is_cpu_place(place);
-                   }) == places.end()) {
-    places.emplace_back(platform::CPUPlace());
-    LOG(WARNING) << "Not specified CPU device, create CPU by Default.";
-  }
   platform::DeviceContextPool::Init(places);
-  // framework::UseALL();
-  return true;
 }
 
 void InitGLOG(const std::string &prog_name) {
...
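Once InitDevices() has run, contexts are retrieved from the singleton pool rather than created ad hoc. A minimal consumer sketch, assuming only the DeviceContextPool::Instance() and Get(place) members visible elsewhere in this diff:

    paddle::framework::InitDevices();
    auto& pool = paddle::platform::DeviceContextPool::Instance();
    // Get(place) returns the context that Init() constructed for that place.
    auto* cpu_ctx = pool.Get(paddle::platform::CPUPlace());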
@@ -24,7 +24,7 @@ void InitGflags(std::vector<std::string> &argv);
 void InitGLOG(const std::string &prog_name);
 
-bool InitDevices(const std::vector<std::string> &devices);
+void InitDevices();
 
 }  // namespace framework
 }  // namespace paddle
@@ -14,18 +14,13 @@ limitations under the License. */
 #include "gtest/gtest.h"
 
 #include "paddle/framework/init.h"
 #include "paddle/platform/device_context.h"
 
-TEST(Init, InitDevices) {
+TEST(InitDevices, CPU) {
   using paddle::framework::InitDevices;
-  std::vector<std::string> ds1 = {"CPU"};
-  ASSERT_EQ(InitDevices(ds1), true);
+  using paddle::platform::DeviceContextPool;
 
-#ifdef PADDLE_WITH_CUDA
-  std::vector<std::string> ds2 = {"CPU", "GPU:0", "GPU:1"};
-  ASSERT_EQ(InitDevices(ds2), true);
-
-  // test re-init
-  std::vector<std::string> ds3 = {"GPU:0", "GPU:1"};
-  ASSERT_EQ(InitDevices(ds3), true);
-#endif
+  InitDevices();
+  DeviceContextPool& pool = DeviceContextPool::Instance();
+  ASSERT_GE(pool.size(), 1U);
 }
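Since InitDevices() now discovers GPUs itself, a GPU build could additionally assert that the pool grew by one context per device. A hypothetical companion test, not part of this commit, reusing GetCUDADeviceCount() from the init.cc hunk above:

    #ifdef PADDLE_WITH_CUDA
    TEST(InitDevices, CUDA) {
      using paddle::framework::InitDevices;
      using paddle::platform::DeviceContextPool;

      InitDevices();
      DeviceContextPool& pool = DeviceContextPool::Instance();
      // One context per CUDA device, plus the default CPU context.
      ASSERT_GE(pool.size(),
                1U + static_cast<size_t>(paddle::platform::GetCUDADeviceCount()));
    }
    #endif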
@@ -69,7 +69,7 @@ REGISTER_OP_WITHOUT_GRADIENT(test_operator,
                              paddle::framework::OpWithoutKernelCheckerMaker);
 
 TEST(OperatorBase, all) {
-  paddle::framework::InitDevices({"CPU"});
+  paddle::framework::InitDevices();
   paddle::framework::proto::OpDesc op_desc;
   op_desc.set_type("test_operator");
   BuildVar("input", {"IN1"}, op_desc.add_inputs());
@@ -195,7 +195,7 @@ REGISTER_OP_CPU_KERNEL(op_with_kernel,
 
 // test with single input
 TEST(OpKernel, all) {
-  paddle::framework::InitDevices({"CPU"});
+  paddle::framework::InitDevices();
   paddle::framework::proto::OpDesc op_desc;
   op_desc.set_type("op_with_kernel");
   BuildVar("x", {"IN1"}, op_desc.add_inputs());
@@ -225,7 +225,7 @@ REGISTER_OP_CPU_KERNEL(op_multi_inputs_with_kernel,
 TEST(OpKernel, multi_inputs) {
   using namespace paddle::framework;
 
-  paddle::framework::InitDevices({"CPU"});
+  paddle::framework::InitDevices();
   proto::OpDesc op_desc;
   op_desc.set_type("op_multi_inputs_with_kernel");
@@ -264,7 +264,7 @@ class OperatorClone : public paddle::framework::OperatorBase {
 };
 
 TEST(Operator, Clone) {
-  paddle::framework::InitDevices({"CPU"});
+  paddle::framework::InitDevices();
   OperatorClone a("ABC", paddle::framework::VariableNameMap{},
                   paddle::framework::VariableNameMap{},
                   paddle::framework::AttributeMap{});
...
@@ -169,7 +169,7 @@ void InferenceEngine::Execute(const std::vector<framework::LoDTensor>& feeds,
   }
 
   auto* place = new platform::CPUPlace();
-  framework::InitDevices({"CPU"});
+  framework::InitDevices();
   framework::Executor* executor = new framework::Executor(*place);
   framework::Scope* scope = new framework::Scope();
...
@@ -185,6 +185,8 @@ class DeviceContextPool {
         const typename DefaultDeviceContextType<Place>::TYPE*>(Get(place));
   }
 
+  size_t size() const { return device_contexts_.size(); }
+
  private:
   static DeviceContextPool* pool;
   constexpr static int LEFT_SHIFT = 8;
...
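The new size() accessor is what the reworked init_test.cc above relies on: it reports how many contexts Init() populated. A small usage sketch, assuming InitDevices() has already been called so Instance() is valid:

    paddle::framework::InitDevices();
    const auto& pool = paddle::platform::DeviceContextPool::Instance();
    // At minimum the CPU context exists; GPU builds add one per device.
    LOG(INFO) << "DeviceContextPool holds " << pool.size() << " context(s).";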
@@ -34,11 +34,11 @@ int main(int argc, char** argv) {
   google::ParseCommandLineFlags(&new_argc, &new_argv_address, false);
   testing::InitGoogleTest(&argc, argv);
   paddle::memory::Used(paddle::platform::CPUPlace());
-  std::vector<std::string> devs = {"CPU"};
 
 #ifdef PADDLE_WITH_CUDA
   paddle::memory::Used(paddle::platform::CUDAPlace(0));
-  devs.push_back("GPU:0");
 #endif
 
-  paddle::framework::InitDevices(devs);
+  paddle::framework::InitDevices();
   return RUN_ALL_TESTS();
 }
@@ -62,11 +62,7 @@ def __bootstrap__():
     core.init_gflags([sys.argv[0]] +
                      ["--tryfromenv=" + ",".join(read_env_flags)])
     core.init_glog(sys.argv[0])
-
-    if core.is_compile_gpu():
-        core.init_devices(["CPU", "GPU:0"])
-    else:
-        core.init_devices(["CPU"])
+    core.init_devices()
 
 
 __bootstrap__()
@@ -341,9 +341,6 @@ class TestBatchNormOp(OpTest):
         if core.is_compile_gpu() and core.op_support_gpu("batch_norm"):
             places.append(core.CUDAPlace(0))
-            core.init_devices(["CPU", "GPU:0"])
-        else:
-            core.init_devices(["CPU"])
 
         for place in places:
             for data_format in ["NCHW", "NHWC"]:
                 test_with_place(place, data_format, [2, 3, 4, 5])
...