Commit 4a12e221 authored by hedaoyuan

Merge branch 'develop' of https://github.com/baidu/Paddle into armv8

@@ -93,7 +93,7 @@ include_directories(${CMAKE_CURRENT_BINARY_DIR})
 if(NOT APPLE AND NOT ANDROID)
     find_package(Threads REQUIRED)
     link_libraries(${CMAKE_THREAD_LIBS_INIT})
-    set(CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} -ldl")
+    set(CMAKE_CXX_LINK_EXECUTABLE "${CMAKE_CXX_LINK_EXECUTABLE} -ldl -lrt")
 endif(NOT APPLE)

 function(merge_static_libs TARGET_NAME)
......
@@ -15,7 +15,6 @@ if(Boost_FOUND)
     add_subdirectory(memory)
     add_subdirectory(platform)
     add_subdirectory(framework)
-    add_subdirectory(operators)
     add_subdirectory(pybind)
 endif()
......
@@ -12,7 +12,7 @@ cc_test(op_proto_test SRCS op_proto_test.cc DEPS op_proto protobuf)
 proto_library(op_desc SRCS op_desc.proto DEPS attr_type)
 cc_test(op_desc_test SRCS op_desc_test.cc DEPS op_desc protobuf)
 cc_library(operator SRCS operator.cc DEPS op_desc protobuf)
-cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry)
+cc_test(operator_test SRCS operator_test.cc DEPS operator op_registry place)
 cc_library(op_registry SRCS op_registry.cc DEPS op_proto op_desc)
 cc_test(op_registry_test SRCS op_registry_test.cc DEPS op_registry operator)
 py_proto_compile(framework_py_proto SRCS attr_type.proto op_proto.proto op_desc.proto)
......
@@ -147,13 +147,13 @@ class OpRegisterHelper {
   }
 };

-#define REGISTER_OP(__op_class, __op_maker_class, __op_type)          \
-  class __op_class##Register {                                        \
-   private:                                                           \
-    const static OpRegisterHelper<__op_class, __op_maker_class> reg;  \
-  };                                                                  \
-  const OpRegisterHelper<__op_class, __op_maker_class>                \
-      __op_class##Register::reg(#__op_type);
+#define REGISTER_OP(type, op_class, op_maker_class)                          \
+  class op_class##Register {                                                 \
+   private:                                                                  \
+    const static OpRegisterHelper<op_class, op_maker_class> reg;             \
+  };                                                                         \
+  const OpRegisterHelper<op_class, op_maker_class> op_class##Register::reg(  \
+      #type)

 }  // namespace framework
 }  // namespace paddle
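The macro's argument order is reversed here: the operator's type name now comes first, followed by the operator class and its proto/checker maker, and call sites now end with a semicolon (the macro body deliberately omits the trailing one). For example, the registration in op_registry_test.cc below becomes:

    REGISTER_OP(cos_sim, CosineOp, CosineOpProtoAndCheckerMaker);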
#include "paddle/framework/op_registry.h" #include "paddle/framework/op_registry.h"
#include <gtest/gtest.h> #include <gtest/gtest.h>
#include "paddle/framework/operator.h"
#include "paddle/operators/demo_op.h"
using namespace paddle::framework; using namespace paddle::framework;
namespace paddle { namespace paddle {
namespace framework { namespace framework {
class CosineOp : public OperatorWithKernel { class CosineOp : public OperatorBase {
public: public:
void Run(const OpRunContext* context) const override { void Run(const std::shared_ptr<Scope>& scope,
printf("%s\n", DebugString().c_str()); const platform::DeviceContext& dev_ctx) const override {}
} void InferShape(const std::shared_ptr<Scope>& scope) const override {}
}; };
class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
...@@ -28,14 +26,15 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -28,14 +26,15 @@ class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
} }
}; };
REGISTER_OP(CosineOp, CosineOpProtoAndCheckerMaker, cos_sim) REGISTER_OP(cos_sim, CosineOp, CosineOpProtoAndCheckerMaker);
class MyTestOp : public OperatorBase {
public:
void InferShape(const std::shared_ptr<Scope>& scope) const override {}
void Run(const std::shared_ptr<Scope>& scope,
const platform::DeviceContext& dev_ctx) const override {}
class MyTestOp : public OperatorWithKernel {
public: public:
void Run(const OpRunContext* ctx) const override {
printf("%s\n", DebugString().c_str());
printf("test_attr = %d\n", ctx->op_->GetAttr<int>("test_attr"));
}
}; };
class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
...@@ -54,7 +53,7 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker { ...@@ -54,7 +53,7 @@ class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
} }
}; };
REGISTER_OP(MyTestOp, MyTestOpProtoAndCheckerMaker, my_test_op) REGISTER_OP(my_test_op, MyTestOp, MyTestOpProtoAndCheckerMaker);
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
@@ -73,8 +72,8 @@ TEST(OpRegistry, CreateOp) {
   paddle::framework::OperatorBase* op =
       paddle::framework::OpRegistry::CreateOp(op_desc);
   auto scope = std::make_shared<Scope>();
-  auto dev_ctx = DeviceContext();
-  op->Run(scope, &dev_ctx);
+  paddle::platform::CPUDeviceContext dev_ctx;
+  op->Run(scope, dev_ctx);
   float scale_get = op->GetAttr<float>("scale");
   ASSERT_EQ(scale_get, scale);
 }
@@ -116,8 +115,8 @@ TEST(OpRegistry, DefaultValue) {
   paddle::framework::OperatorBase* op =
       paddle::framework::OpRegistry::CreateOp(op_desc);
   auto scope = std::make_shared<Scope>();
-  auto dev_ctx = DeviceContext();
-  op->Run(scope, &dev_ctx);
+  paddle::platform::CPUDeviceContext dev_ctx;
+  op->Run(scope, dev_ctx);
   ASSERT_EQ(op->GetAttr<float>("scale"), 1.0);
 }
@@ -169,9 +168,9 @@ TEST(OpRegistry, CustomChecker) {
   attr->set_i(4);
   paddle::framework::OperatorBase* op =
       paddle::framework::OpRegistry::CreateOp(op_desc);
-  auto dev_ctx = DeviceContext();
+  paddle::platform::CPUDeviceContext dev_ctx;
   auto scope = std::make_shared<Scope>();
-  op->Run(scope, &dev_ctx);
+  op->Run(scope, dev_ctx);
   int test_attr = op->GetAttr<int>("test_attr");
   ASSERT_EQ(test_attr, 4);
 }
......
@@ -39,13 +39,5 @@ std::string OperatorBase::DebugString() const {
   return ss.str();
 }

-const Variable* OpRunContext::Input(int index) const {
-  return scope_->GetVariable(op_->inputs_[index]);
-}
-
-Variable* OpRunContext::Output(int index) const {
-  return scope_->GetVariable(op_->outputs_[index]);
-}
-
 }  // namespace framework
 }  // namespace paddle
\ No newline at end of file
...@@ -14,44 +14,22 @@ limitations under the License. */ ...@@ -14,44 +14,22 @@ limitations under the License. */
#pragma once #pragma once
#include <paddle/framework/attr_checker.h>
#include <paddle/framework/op_desc.pb.h>
#include <paddle/framework/scope.h>
#include <paddle/platform/device_context.h>
#include <paddle/platform/place.h>
#include <paddle/utils/Error.h>
#include <boost/variant.hpp> #include <boost/variant.hpp>
#include <string> #include <string>
#include <unordered_map> #include <unordered_map>
#include <vector> #include <vector>
#include "paddle/framework/attr_checker.h"
#include "paddle/framework/op_desc.pb.h"
#include "paddle/framework/scope.h"
#include "paddle/utils/Error.h"
namespace paddle { namespace paddle {
namespace framework { namespace framework {
class OperatorBase; class OperatorBase;
class DeviceContext {};
/**
* OpRunContext is the only parameter of Operator's Run function.
* Run will get input/output variables, state such as momentum and
* device resource such as CUDA stream, cublas handle, etc. from
* OpRunContext. User should construct it before run the Operator.
*/
class OpRunContext {
public:
OpRunContext(const OperatorBase* op, const std::shared_ptr<Scope> scope,
const DeviceContext* device_context)
: op_(op), scope_(scope), device_context_(device_context) {}
const Variable* Input(int index) const;
Variable* Output(int index) const;
public:
const OperatorBase* op_;
const std::shared_ptr<Scope> scope_;
const DeviceContext* device_context_;
};
/** /**
* OperatorBase has the basic element that Net will call to do computation. * OperatorBase has the basic element that Net will call to do computation.
* Only CreateOperator from OpRegistry will new Operator directly. User * Only CreateOperator from OpRegistry will new Operator directly. User
...@@ -77,7 +55,10 @@ class OperatorBase { ...@@ -77,7 +55,10 @@ class OperatorBase {
/// Net will call this function to Run an op. /// Net will call this function to Run an op.
virtual void Run(const std::shared_ptr<Scope>& scope, virtual void Run(const std::shared_ptr<Scope>& scope,
const DeviceContext* dev_ctx) const = 0; const platform::DeviceContext& dev_ctx) const = 0;
protected:
std::string Type() const { return desc_.type(); }
public: public:
OpDesc desc_; OpDesc desc_;
...@@ -86,22 +67,84 @@ class OperatorBase { ...@@ -86,22 +67,84 @@ class OperatorBase {
AttributeMap attrs_; AttributeMap attrs_;
}; };
class OpKernel {
public:
/**
* KernelContext is the only parameter of Kernel Run function.
* Run will get input/output variables, state such as momentum and
* device resource such as CUDA stream, cublas handle, etc. from
* KernelContext. User should construct it before run the Operator.
*/
class KernelContext {
public:
KernelContext(const OperatorBase* op, const std::shared_ptr<Scope>& scope,
const platform::DeviceContext& device_context)
: op_(*op), scope_(scope), device_context_(device_context) {}
const Variable* Input(int index) const {
return scope_->GetVariable(op_.inputs_[index]);
}
Variable* Output(int index) const {
return scope_->GetVariable(op_.outputs_[index]);
}
const OperatorBase& op_;
const std::shared_ptr<Scope>& scope_;
const platform::DeviceContext& device_context_;
};
virtual void Compute(const KernelContext& context) const = 0;
virtual ~OpKernel() {}
};
class OperatorWithKernel : public OperatorBase { class OperatorWithKernel : public OperatorBase {
public: public:
virtual ~OperatorWithKernel() {} struct OpKernelKey {
platform::Place place_;
virtual void InferShape(const std::shared_ptr<Scope>& scope) const {} OpKernelKey() = default;
OpKernelKey(const platform::DeviceContext& dev_ctx) {
place_ = dev_ctx.GetPlace();
}
bool operator==(const OpKernelKey& o) const { return place_ == o.place_; }
};
struct OpKernelHash {
std::hash<bool> hash_;
size_t operator()(const OpKernelKey& key) const {
return hash_(platform::is_gpu_place(key.place_));
}
};
using OpKernelMap =
std::unordered_map<OpKernelKey, std::unique_ptr<OpKernel>, OpKernelHash>;
void Run(const std::shared_ptr<Scope>& scope, void Run(const std::shared_ptr<Scope>& scope,
const DeviceContext* dev_ctx) const { const platform::DeviceContext& dev_ctx) const final {
OpRunContext op_ctx(this, scope, dev_ctx); auto& opKernel = AllOpKernels().at(Type()).at(OpKernelKey(dev_ctx));
Run(&op_ctx); opKernel->Compute(OpKernel::KernelContext(this, scope, dev_ctx));
} }
/// when implement an Op, your should implement this function. static std::unordered_map<std::string /* op_type */, OpKernelMap>&
/// this function should be moved to OpKernel later AllOpKernels() {
virtual void Run(const OpRunContext* context) const = 0; static std::unordered_map<std::string, OpKernelMap> g_all_op_kernels;
return g_all_op_kernels;
};
}; };
} // namespace framework } // namespace framework
} // namespace paddle } // namespace paddle
#define REGISTER_OP_KERNEL(type, PlaceType, KernelType) \
struct __op_kernel_register__##type##__ { \
__op_kernel_register__##type##__() { \
::paddle::framework::OperatorWithKernel::OpKernelKey key; \
key.place_ = PlaceType(); \
::paddle::framework::OperatorWithKernel::AllOpKernels()[#type][key] \
.reset(new KernelType()); \
} \
}; \
static __op_kernel_register__##type##__ __reg_kernel_##type##__
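The split introduced here is the core of the change: an OperatorWithKernel subclass keeps only shape inference, while the per-device computation moves into an OpKernel registered per place. Run() then dispatches through AllOpKernels(), keyed by op type and by OpKernelKey, whose hash only distinguishes CPU places from GPU places. A minimal sketch of how an op author would use the two macros together (ScaleOp, ScaleOpMaker, and ScaleCPUKernel are illustrative names; operator_test.cc below does the same thing with OpWithKernelTest/CPUKernelTest):

    // Sketch only; assumes we are inside namespace paddle::framework,
    // as the tests are, and that ScaleOpMaker is a maker like the ones above.
    class ScaleOp : public OperatorWithKernel {
     public:
      void InferShape(const std::shared_ptr<Scope>& scope) const override {}
    };

    // Device-specific computation; inputs, outputs, attributes and the
    // device context all arrive through the KernelContext.
    class ScaleCPUKernel : public OpKernel {
     public:
      void Compute(const KernelContext& context) const override {
        float scale = context.op_.GetAttr<float>("scale");
        (void)scale;  // a real kernel would scale Input(0) into Output(0)
      }
    };

    REGISTER_OP(scale, ScaleOp, ScaleOpMaker);
    REGISTER_OP_KERNEL(scale, platform::CPUPlace, ScaleCPUKernel);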
@@ -19,17 +19,15 @@ limitations under the License. */

 namespace paddle {
 namespace framework {

-class OperatorTest : public OperatorWithKernel {
+class OperatorTest : public OperatorBase {
  public:
-  void Run(const OpRunContext* ctx) const override {
-    float scale = ctx->op_->GetAttr<float>("scale");
-    PADDLE_ENFORCE(ctx->Input(0) == nullptr, "Input(0) should not initialized");
-    PADDLE_ENFORCE(ctx->Output(0) == nullptr,
-                   "Output(1) should not initialized");
-    auto output1 = ctx->scope_->CreateVariable("output1");
-    PADDLE_ENFORCE(output1 != nullptr, "should create output1 from scope");
-    printf("get attr %s = %f\n", "scale", scale);
-    printf("%s\n", DebugString().c_str());
+  void InferShape(const std::shared_ptr<Scope>& scope) const override {}
+  void Run(const std::shared_ptr<Scope>& scope,
+           const platform::DeviceContext& dev_ctx) const override {
+    float scale = GetAttr<float>("scale");
+    ASSERT_NEAR(scale, 3.14, 1e-5);
+    ASSERT_EQ(scope->GetVariable(inputs_[0]), nullptr);
+    ASSERT_NE(scope->GetVariable(outputs_[0]), nullptr);
   }
 };

@@ -47,34 +45,79 @@ class OperatorTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
   }
 };

-REGISTER_OP(OperatorTest, OperatorTestProtoAndCheckerMaker, test_operator)
+REGISTER_OP(test_operator, OperatorTest, OperatorTestProtoAndCheckerMaker);

-TEST(OperatorBase, DebugString) {
+TEST(OperatorBase, all) {
   OpDesc op_desc;
   op_desc.set_type("test_operator");
-  std::vector<std::string> inputs = {"IN1", "IN2"};
-  for (auto& input : inputs) {
-    op_desc.add_inputs(input);
-  }
-  std::vector<std::string> outputs = {"OUT1", "OUT2"};
-  for (auto& output : outputs) {
-    op_desc.add_outputs(output);
-  }
+  *op_desc.mutable_inputs()->Add() = "IN1";
+  *op_desc.mutable_outputs()->Add() = "OUT1";
   auto attr = op_desc.mutable_attrs()->Add();
   attr->set_name("scale");
   attr->set_type(paddle::framework::AttrType::FLOAT);
   float scale = 3.14;
   attr->set_f(scale);
-  DeviceContext device_context;
+  platform::CPUDeviceContext device_context;
   auto scope = std::make_shared<Scope>();

   OperatorBase* op = paddle::framework::OpRegistry::CreateOp(op_desc);
-  ASSERT_EQ(op->inputs_, inputs);
-  ASSERT_EQ(op->outputs_, outputs);
   ASSERT_EQ(op->GetAttr<float>("scale"), scale);
-  op->Run(scope, &device_context);
+  scope->CreateVariable("OUT1");
+  op->Run(scope, device_context);
+  std::cout << op->DebugString() << std::endl;
+  delete op;
 }

+class OpKernelTestProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
+ public:
+  OpKernelTestProtoAndCheckerMaker(OpProto* proto, OpAttrChecker* op_checker)
+      : OpProtoAndCheckerMaker(proto, op_checker) {
+    AddInput("input", "input of test op");
+    AddOutput("output", "output of test op");
+    AddAttr<float>("scale", "scale of cosine op")
+        .SetDefault(1.0)
+        .LargerThan(0.0);
+    AddType("test_operator");
+    AddComment("This is test op");
+  }
+};
+
+class OpWithKernelTest : public OperatorWithKernel {
+ public:
+  void InferShape(const std::shared_ptr<Scope>& scope) const override {}
+};
+
+class CPUKernelTest : public OpKernel {
+ public:
+  void Compute(const KernelContext& context) const {
+    float scale = context.op_.GetAttr<float>("scale");
+    ASSERT_NEAR(scale, 3.14, 1e-5);
+    std::cout << "this is cpu kernel" << std::endl;
+    std::cout << context.op_.DebugString() << std::endl;
+  }
+};
+
+REGISTER_OP(op_with_kernel, OpWithKernelTest, OpKernelTestProtoAndCheckerMaker);
+REGISTER_OP_KERNEL(op_with_kernel, platform::CPUPlace, CPUKernelTest);
+
+TEST(OpKernel, all) {
+  OpDesc op_desc;
+  op_desc.set_type("op_with_kernel");
+  *op_desc.mutable_inputs()->Add() = "IN1";
+  *op_desc.mutable_outputs()->Add() = "OUT1";
+  auto attr = op_desc.mutable_attrs()->Add();
+  attr->set_name("scale");
+  attr->set_type(paddle::framework::AttrType::FLOAT);
+  attr->set_f(3.14);
+
+  platform::CPUDeviceContext cpu_device_context;
+  auto scope = std::make_shared<Scope>();
+
+  OperatorBase* op = paddle::framework::OpRegistry::CreateOp(op_desc);
+  op->Run(scope, cpu_device_context);
+
+  delete op;
+}

 }  // namespace framework
 }  // namespace paddle
\ No newline at end of file
----
-Language: Cpp
-BasedOnStyle: Google
-Standard: Cpp11
-...
-#pragma once
-
-#include "paddle/framework/op_registry.h"
-
-using namespace paddle::framework;
-
-namespace paddle {
-namespace operators {
-
-class CosineOp : public OperatorWithKernel {
- public:
-  void Run(const OpRunContext *context) const override {
-    printf("%s\n", DebugString().c_str());
-  }
-};
-
-class CosineOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
- public:
-  CosineOpProtoAndCheckerMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("input", "input of cosine op");
-    AddOutput("output", "output of cosine op");
-    AddAttr<float>("scale", "scale of cosine op")
-        .SetDefault(1.0)
-        .LargerThan(0.0);
-    AddType("cos");
-    AddComment("This is cos op");
-  }
-};
-
-REGISTER_OP(CosineOp, CosineOpProtoAndCheckerMaker, cos_sim)
-
-class MyTestOp : public OperatorWithKernel {
- public:
-  void Run(const OpRunContext *context) const override {
-    printf("%s\n", DebugString().c_str());
-  }
-};
-
-class MyTestOpProtoAndCheckerMaker : public OpProtoAndCheckerMaker {
- public:
-  MyTestOpProtoAndCheckerMaker(OpProto *proto, OpAttrChecker *op_checker)
-      : OpProtoAndCheckerMaker(proto, op_checker) {
-    AddInput("input", "input of cosine op");
-    AddOutput("output", "output of cosine op");
-    auto my_checker = [](int i) {
-      PADDLE_ENFORCE(i % 2 == 0, "'test_attr' must be even!");
-    };
-    AddAttr<int>("test_attr", "a simple test attribute")
-        .AddCustomChecker(my_checker);
-    AddType("my_test_op");
-    AddComment("This is my_test op");
-  }
-};
-
-REGISTER_OP(MyTestOp, MyTestOpProtoAndCheckerMaker, my_test_op)
-
-}  // namespace operators
-}  // namespace paddle
 add_subdirectory(dynload)

-nv_test(cuda_test SRCS cuda_test.cu DEPS dyload_cuda)
+nv_test(cuda_test SRCS cuda_test.cu)

 cc_library(place SRCS place.cc)
 cc_test(place_test SRCS place_test.cc DEPS place glog gflags)

 IF(WITH_GPU)
-    set(GPU_CTX_DEPS dyload_cuda dynamic_loader )
+    set(GPU_CTX_DEPS dynload_cuda dynamic_loader)
 ELSE()
     set(GPU_CTX_DEPS)
 ENDIF()
......
-#include <paddle/platform/device_context.h>
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/platform/device_context.h"

 namespace paddle {
 namespace platform {
-namespace dynload {
-namespace dummy {
-// Make DeviceContext A library.
-int DUMMY_VAR_FOR_DEV_CTX = 0;
-}  // namespace dummy
-}  // namespace dynload
+
+template <>
+Eigen::DefaultDevice* DeviceContext::get_eigen_device<Eigen::DefaultDevice>() {
+  return reinterpret_cast<CPUDeviceContext*>(this)->eigen_device();
+}
+
+#ifndef PADDLE_ONLY_CPU
+template <>
+Eigen::GpuDevice* DeviceContext::get_eigen_device<Eigen::GpuDevice>() {
+  return reinterpret_cast<CUDADeviceContext*>(this)->eigen_device();
+}
+#endif
+
 }  // namespace platform
 }  // namespace paddle
\ No newline at end of file
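These specializations let code that holds only a DeviceContext* recover the concrete Eigen device; note that each reinterpret_cast is only safe when the context's dynamic type matches the requested device. A short sketch of the intended call pattern (mirroring the Device.Init test further down):

    // Assuming a CPU context; the template argument picks the specialization.
    paddle::platform::CPUDeviceContext cpu_ctx;
    paddle::platform::DeviceContext* ctx = &cpu_ctx;
    Eigen::DefaultDevice* dev = ctx->get_eigen_device<Eigen::DefaultDevice>();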
 /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

 Unless required by applicable law or agreed to in writing, software
 distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -22,8 +19,9 @@ limitations under the License. */
 #include "paddle/platform/dynload/curand.h"
 #define EIGEN_USE_GPU
 #endif
-#include "paddle/platform/place.h"
-#include "unsupported/Eigen/CXX11/Tensor"
+#include <paddle/platform/place.h>
+#include <memory>
+#include <unsupported/Eigen/CXX11/Tensor>

 namespace paddle {
 namespace platform {
@@ -31,9 +29,29 @@ namespace platform {
 class DeviceContext {
  public:
   virtual ~DeviceContext() {}
+  virtual Place GetPlace() const = 0;
+
+  template <typename DeviceType>
+  DeviceType* get_eigen_device();
 };

-class CPUDeviceContext : public DeviceContext {};
+class CPUDeviceContext : public DeviceContext {
+ public:
+  Eigen::DefaultDevice* eigen_device() {
+    if (!eigen_device_) {
+      eigen_device_.reset(new Eigen::DefaultDevice());
+    }
+    return eigen_device_.get();
+  }
+
+  Place GetPlace() const override {
+    Place retv = CPUPlace();
+    return retv;
+  }
+
+ private:
+  std::unique_ptr<Eigen::DefaultDevice> eigen_device_;
+};

 #ifndef PADDLE_ONLY_CPU
@@ -57,8 +75,13 @@ class CUDADeviceContext : public DeviceContext {
     GPUPlaceGuard guard(gpu_place_);
     paddle::platform::throw_on_error(cudaStreamCreate(&stream_),
                                      "cudaStreamCreate failed");
-    eigen_stream_ = new Eigen::CudaStreamDevice(&stream_);
-    eigen_device_ = new Eigen::GpuDevice(eigen_stream_);
+    eigen_stream_.reset(new Eigen::CudaStreamDevice(&stream_));
+    eigen_device_.reset(new Eigen::GpuDevice(eigen_stream_.get()));
+  }
+
+  Place GetPlace() const override {
+    Place retv = GPUPlace();
+    return retv;
   }

   void Wait() {
@@ -68,7 +91,7 @@ class CUDADeviceContext : public DeviceContext {
   cudaStream_t stream() { return stream_; }

-  Eigen::GpuDevice eigen_device() { return *eigen_device_; }
+  Eigen::GpuDevice* eigen_device() { return eigen_device_.get(); }

   cublasHandle_t cublas_handle() {
     if (!blas_handle_) {
@@ -133,10 +156,8 @@ class CUDADeviceContext : public DeviceContext {
                             rand_generator_) == CURAND_STATUS_SUCCESS,
                         "curandDestroyGenerator failed");
     }
-
-    delete eigen_stream_;
-    delete eigen_device_;
+    eigen_stream_.reset();
+    eigen_device_.reset();

     paddle::platform::throw_on_error(cudaStreamDestroy(stream_),
                                      "cudaStreamDestroy failed");
   }
@@ -145,8 +166,8 @@ class CUDADeviceContext : public DeviceContext {
   GPUPlace gpu_place_;
   cudaStream_t stream_;

-  Eigen::CudaStreamDevice* eigen_stream_;
-  Eigen::GpuDevice* eigen_device_;
+  std::unique_ptr<Eigen::CudaStreamDevice> eigen_stream_;
+  std::unique_ptr<Eigen::GpuDevice> eigen_device_;

   cublasHandle_t blas_handle_{nullptr};
@@ -155,6 +176,8 @@ class CUDADeviceContext : public DeviceContext {
   int random_seed_;
   curandGenerator_t rand_generator_{nullptr};
 };

 #endif

 }  // namespace platform
 }  // namespace paddle
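GetPlace() is what ties the contexts back to kernel dispatch: OpKernelKey in operator.h is built from dev_ctx.GetPlace(), and OpKernelHash buckets kernels by whether that place is a GPU place. A small illustration (assuming is_gpu_place from paddle/platform/place.h, as used in operator.h above):

    paddle::platform::CPUDeviceContext ctx;
    paddle::platform::Place place = ctx.GetPlace();
    // The kernel registry hashes on exactly this predicate, so kernels
    // are bucketed into CPU vs. GPU.
    bool on_gpu = paddle::platform::is_gpu_place(place);  // false here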
@@ -15,13 +15,26 @@ limitations under the License. */
 #include "paddle/platform/device_context.h"
 #include "gtest/gtest.h"

-TEST(CUDADeviceContext, Init) {
+using DEVICE_GPU = Eigen::GpuDevice;
+
+TEST(Device, Init) {
+  int count = paddle::platform::GetDeviceCount();
+  for (int i = 0; i < count; i++) {
+    paddle::platform::DeviceContext* device_context =
+        new paddle::platform::CUDADeviceContext(i);
+    Eigen::GpuDevice* gpu_device =
+        device_context->template get_eigen_device<DEVICE_GPU>();
+    ASSERT_NE(nullptr, gpu_device);
+    delete device_context;
+  }
+}
+
+TEST(Device, CUDADeviceContext) {
   int count = paddle::platform::GetDeviceCount();
   for (int i = 0; i < count; i++) {
     paddle::platform::CUDADeviceContext* device_context =
         new paddle::platform::CUDADeviceContext(i);
-    Eigen::GpuDevice gpu_device = device_context->eigen_device();
-    ASSERT_NE(nullptr, gpu_device.stream());
+    Eigen::GpuDevice* gpu_device = device_context->eigen_device();
+    ASSERT_NE(nullptr, gpu_device);
     cudnnHandle_t cudnn_handle = device_context->cudnn_handle();
     ASSERT_NE(nullptr, cudnn_handle);
     cublasHandle_t cublas_handle = device_context->cublas_handle();
......
 cc_library(dynamic_loader SRCS dynamic_loader.cc DEPS glog gflags)
-nv_library(dyload_cuda SRCS cublas.cc cudnn.cc curand.cc)
+nv_library(dynload_cuda SRCS cublas.cc cudnn.cc curand.cc)
@@ -12,6 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */

+#include <Python.h>
 #include <paddle/framework/scope.h>
 #include <pybind11/pybind11.h>
@@ -43,4 +44,4 @@ All parameter, weight, gradient are variables in Paddle.
       py::return_value_policy::reference);
   return m.ptr();
-}
\ No newline at end of file
+}