提交 b5dbe88b 编写于 作者: Q qijun

follow comments

上级 f29a6b02
......@@ -44,5 +44,5 @@ add_custom_command(TARGET framework_py_proto POST_BUILD
cc_library(backward SRCS backward.cc DEPS net_op)
cc_test(backward_test SRCS backward_test.cc DEPS backward recurrent_op device_context)
cc_library(executor SRCS executor.cc DEPS op_registry device_context scope framework_proto)
cc_library(executor SRCS executor.cc DEPS op_registry device scope framework_proto)
cc_test(executor_test SRCS executor_test.cc DEPS executor)
......@@ -15,162 +15,31 @@ limitations under the License. */
#include "paddle/framework/executor.h"
#include <memory>
#include "paddle/framework/op_registry.h"
#include "paddle/framework/operator.h"
#include "paddle/framework/scope.h"
#include "paddle/platform/device_context.h"
namespace paddle {
namespace framework {
class LinearListView;
class GraphView;
// Immutable view of a ProgramDesc organized for efficient execution.
// Abstract, execution-oriented view over a ProgramDesc. Concrete views
// (LinearListView, GraphView) decide how the program's ops are organized.
class ProgramDescView {
public:
virtual ~ProgramDescView() {}
// Builds the view's internal structures from `pdesc`.
virtual void Initialize(const ProgramDesc*) = 0;
// Factory: LinearListView when is_linear, GraphView otherwise.
// The caller owns the returned object and must delete it.
static ProgramDescView* Create(bool is_linear);
};
// View that flattens every op of every block into one sequential list.
class LinearListView : public ProgramDescView {
public:
void Initialize(const ProgramDesc*) override;
private:
// Instantiated operators in program order; owned by this view.
std::vector<std::unique_ptr<OperatorBase>> ops_;
};
// Graph-structured view of a ProgramDesc. Initialize is currently an
// empty stub (see its definition below).
class GraphView : public ProgramDescView {
public:
void Initialize(const ProgramDesc*) override;
};
ProgramDescView* ProgramDescView::Create(bool is_linear) {
if (is_linear) {
return new LinearListView();
} else {
return new GraphView();
Executor::Executor(const std::vector<platform::Place>& places) {
devices_.resize(places.size());
for (size_t i = 0; i < places.size(); i++) {
devices_[i] = platform::GetDevice(places[i]);
}
}
// Flattens the program into ops_: walks every block in order and, within
// each block, instantiates every op in order via the operator registry.
void LinearListView::Initialize(const ProgramDesc* pdesc) {
  for (const auto& block : pdesc->blocks()) {
    for (const auto& op : block.ops()) {
      ops_.emplace_back(OpRegistry::CreateOp(op));
    }
  }
}
void GraphView::Initialize(const ProgramDesc* pdesc) {
// TODO: build a graph-structured view of the ProgramDesc; intentionally a
// no-op for now.
}
// Aggregate of the device contexts an executor runs against. Holds
// non-owning pointers: the contexts are process-wide singletons handed out
// by GetCPUDeviceContext/GetCUDADeviceContext below, and this struct never
// deletes them.
struct Device {
// CPU context; every construction path below supplies one.
platform::CPUDeviceContext* cpu_device_context;
#ifndef PADDLE_ONLY_CPU
// CUDA context; nullptr when the target place is a CPU place (see GetDevice).
platform::CUDADeviceContext* cuda_device_context;
#endif
#ifndef PADDLE_ONLY_CPU
Device(platform::CPUDeviceContext* cpu, platform::CUDADeviceContext* gpu)
: cpu_device_context(cpu), cuda_device_context(gpu) {}
#else
// CPU-only build: there is no CUDA member at all.
explicit Device(platform::CPUDeviceContext* cpu) : cpu_device_context(cpu) {}
#endif
};
class ExecutorImpl : public Executor {
public:
ExecutorImpl(Scope* scope, const Device* device, const ProgramDesc* pdesc,
bool is_linear)
: scope_(scope),
device_(device),
program_desc_(pdesc),
view_(ProgramDescView::Create(is_linear)) {}
virtual ~ExecutorImpl() {
if (view_) delete view_;
}
void Run() override;
void Initialize();
private:
Scope* scope_;
const Device* device_;
const ProgramDesc* program_desc_;
ProgramDescView* view_;
};
// Backport of C++14 std::make_unique: perfect-forwards `args` to T's
// constructor and wraps the result in a unique_ptr.
// NOTE(review): presumably the build targets C++11 -- if it is C++14 or
// later, prefer std::make_unique and delete this helper.
template <typename T, typename... Args>
std::unique_ptr<T> make_unique(Args&&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
// Returns the process-wide CPU device context, lazily constructed on the
// first call; the function-local static retains ownership.
// NOTE(review): only the first caller's `place` is honored -- later calls
// return the same context regardless of their argument.
platform::CPUDeviceContext* GetCPUDeviceContext(
    const platform::CPUPlace& place) {
  static auto g_cpu_ctx = make_unique<platform::CPUDeviceContext>(place);
  return g_cpu_ctx.get();
}
#ifndef PADDLE_ONLY_CPU
// CUDA counterpart of GetCPUDeviceContext (CUDA-enabled builds only).
// NOTE(review): only the first caller's `place` is honored.
platform::CUDADeviceContext* GetCUDADeviceContext(
    const platform::GPUPlace& place) {
  static auto g_cuda_ctx = make_unique<platform::CUDADeviceContext>(place);
  return g_cuda_ctx.get();
}
#endif
// Returns a process-wide Device bundle for `place`.
// NOTE(review): each branch caches one static Device, so only the first
// place seen per branch is honored -- with several distinct GPU places,
// every later call returns the Device built for the first one. Confirm this
// is acceptable before relying on it for multi-GPU execution.
Device* GetDevice(const platform::Place& place) {
platform::CPUPlace cpu_place;
#ifndef PADDLE_ONLY_CPU
if (platform::is_gpu_place(place)) {
platform::GPUPlace gpu_place = boost::get<platform::GPUPlace>(place);
// GPU path: pair the shared CPU context with the (first) GPU context.
static std::unique_ptr<Device> g_device = make_unique<Device>(
GetCPUDeviceContext(cpu_place), GetCUDADeviceContext(gpu_place));
return g_device.get();
} else {
// CPU place in a CUDA build: no CUDA context.
static std::unique_ptr<Device> g_device =
make_unique<Device>(GetCPUDeviceContext(cpu_place), nullptr);
return g_device.get();
}
#else
// CPU-only build: Device has only the CPU context member.
static std::unique_ptr<Device> g_device =
make_unique<Device>(GetCPUDeviceContext(cpu_place));
return g_device.get();
#endif
}
// Returns the process-wide global Scope, created lazily on first use and
// destroyed automatically at program exit.
framework::Scope* GetScope() {
  static auto g_scope = make_unique<framework::Scope>();
  return g_scope.get();
}
// Factory: builds an ExecutorImpl bound to the global scope and the device
// matching `place`. The caller owns the returned executor; `pdesc` must
// outlive it.
Executor* NewLocalExecutor(const platform::Place& place,
                           const ProgramDesc& pdesc, bool is_linear) {
  auto* device = GetDevice(place);
  return new ExecutorImpl(GetScope(), device, &pdesc, is_linear);
}
void ExecutorImpl::Run() {
void Executor::Run(const ProgramDesc& pdesc, Scope* scope,
std::vector<Tensor>* outputs) {
// operators running
scope_->NewVar();
device_->cpu_device_context->Wait();
Scope& local_scope = scope->NewScope();
local_scope.NewVar();
for (auto device : devices_) {
device->cpu_device_context->Wait();
#ifndef PADDLE_ONLY_CPU
if (device_->cuda_device_context) {
device_->cuda_device_context->Wait();
if (device->cuda_device_context) {
device->cuda_device_context->Wait();
}
#endif
}
void ExecutorImpl::Initialize() {
// Build the execution view (linear list or graph, chosen at construction)
// from the bound ProgramDesc.
view_->Initialize(program_desc_);
}
}
} // namespace framework
......
......@@ -15,18 +15,22 @@ limitations under the License. */
#pragma once
#include "paddle/framework/framework.pb.h"
#include "paddle/platform/place.h"
#include "paddle/framework/scope.h"
#include "paddle/framework/tensor.h"
#include "paddle/platform/device.h"
namespace paddle {
namespace framework {
// Abstract interface for running a program; concrete instances are obtained
// through NewLocalExecutor().
class Executor {
public:
virtual ~Executor() {}
virtual void Run() = 0;
};
explicit Executor(const std::vector<platform::Place>& places);
~Executor() {}
void Run(const ProgramDesc&, Scope*, std::vector<Tensor>*);
Executor* NewLocalExecutor(const platform::Place&, const ProgramDesc&, bool);
private:
std::vector<platform::Device*> devices_;
};
} // namespace framework
} // namespace paddle
......@@ -19,9 +19,15 @@ using namespace paddle::platform;
using namespace paddle::framework;
TEST(Executor, Init) {
CPUPlace cpu_place1, cpu_place2;
std::vector<Place> places;
places.push_back(cpu_place1);
places.push_back(cpu_place2);
Executor* executor = new Executor(places);
ProgramDesc pdesc;
CPUPlace cpu_place;
Executor* executor = NewLocalExecutor(cpu_place, pdesc, true);
executor->Run();
Scope s;
std::vector<Tensor>* outputs{nullptr};
executor->Run(pdesc, &s, outputs);
delete executor;
}
\ No newline at end of file
......@@ -23,5 +23,7 @@ cc_library(device_context SRCS device_context.cc DEPS memory buddy_allocator
system_allocator memory_block meta_data meta_cache place eigen3 ${GPU_CTX_DEPS})
nv_test(device_context_test SRCS device_context_test.cc DEPS device_context gpu_info)
cc_library(device SRCS device.cc DEPS device_context)
nv_test(cudnn_helper_test SRCS cudnn_helper_test.cc DEPS dynload_cuda)
nv_test(transform_test SRCS transform_test.cu DEPS paddle_memory place device_context)
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/platform/device.h"
namespace paddle {
namespace platform {
// Backport of C++14 std::make_unique: perfect-forwards `args` to T's
// constructor. NOTE(review): duplicated from framework/executor.cc --
// consider hoisting into one shared header, or use std::make_unique if the
// build is C++14 or later.
template <typename T, typename... Args>
std::unique_ptr<T> make_unique(Args&&... args) {
return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}
// Returns the single, process-wide CPU device context, created on the first
// call and released at program exit.
// NOTE(review): only the first caller's `place` is ever used.
CPUDeviceContext* GetCPUDeviceContext(const CPUPlace& place) {
  static auto g_ctx = make_unique<CPUDeviceContext>(place);
  return g_ctx.get();
}
#ifndef PADDLE_ONLY_CPU
// Returns the single, process-wide CUDA device context (CUDA builds only).
// NOTE(review): only the first caller's `place` is ever used.
CUDADeviceContext* GetCUDADeviceContext(const GPUPlace& place) {
  static auto g_ctx = make_unique<CUDADeviceContext>(place);
  return g_ctx.get();
}
#endif
// Returns a process-wide Device bundle for `place`.
// NOTE(review): each branch caches one static Device, so only the first
// place seen per branch is honored. The Executor constructor calls this once
// per place in its list; with several distinct GPU places all entries end up
// pointing at the Device built for the first GPU place. Verify this is
// intended before multi-GPU use.
Device* GetDevice(const Place& place) {
CPUPlace cpu_place;
#ifndef PADDLE_ONLY_CPU
if (is_gpu_place(place)) {
GPUPlace gpu_place = boost::get<GPUPlace>(place);
// GPU path: pair the shared CPU context with the (first) GPU context.
static std::unique_ptr<Device> g_device = make_unique<Device>(
GetCPUDeviceContext(cpu_place), GetCUDADeviceContext(gpu_place));
return g_device.get();
} else {
// CPU place in a CUDA build: no CUDA context.
static std::unique_ptr<Device> g_device =
make_unique<Device>(GetCPUDeviceContext(cpu_place), nullptr);
return g_device.get();
}
#else
// CPU-only build: Device has only the CPU context member.
static std::unique_ptr<Device> g_device =
make_unique<Device>(GetCPUDeviceContext(cpu_place));
return g_device.get();
#endif
}
} // namespace platform
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "paddle/platform/device_context.h"
#include "paddle/platform/place.h"
namespace paddle {
namespace platform {
// Aggregate of the per-place device contexts. Holds non-owning pointers:
// the contexts are the process-wide singletons returned by
// GetCPUDeviceContext/GetCUDADeviceContext, and this struct never deletes
// them.
struct Device {
// CPU context; every construction path supplies one.
CPUDeviceContext* cpu_device_context;
#ifndef PADDLE_ONLY_CPU
// CUDA context; nullptr for CPU places (see GetDevice in device.cc).
CUDADeviceContext* cuda_device_context;
#endif
#ifndef PADDLE_ONLY_CPU
Device(CPUDeviceContext* cpu, CUDADeviceContext* gpu)
: cpu_device_context(cpu), cuda_device_context(gpu) {}
#else
// CPU-only build: there is no CUDA member at all.
explicit Device(CPUDeviceContext* cpu) : cpu_device_context(cpu) {}
#endif
};
// Accessors for the lazily-created, process-wide device contexts. The callee
// retains ownership of every returned pointer -- do not delete.
CPUDeviceContext* GetCPUDeviceContext(const platform::CPUPlace& place);
#ifndef PADDLE_ONLY_CPU
CUDADeviceContext* GetCUDADeviceContext(const platform::GPUPlace& place);
#endif
// Returns a Device bundling the context(s) appropriate for `place`.
Device* GetDevice(const platform::Place& place);
} // namespace platform
} // namespace paddle
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册