Unverified · Commit cd5fad13 · authored by Yu Yang, committed by GitHub

Merge pull request #7160 from reyoung/feature/expose_activations

Expose some activations
paddle/framework/init.cc
@@ -75,5 +75,10 @@ bool InitDevices(const std::vector<std::string> &devices) {
   return true;
 }
 
+void InitGLOG(const std::string &prog_name) {
+  google::InitGoogleLogging(prog_name.c_str());
+  google::InstallFailureSignalHandler();
+}
+
 }  // namespace framework
 }  // namespace paddle
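The new InitGLOG wires glog into Paddle's startup: InitGoogleLogging names log output after the running program, and InstallFailureSignalHandler makes fatal signals dump a stack trace. A minimal sketch of reaching this hook from Python through the binding added below in pybind.cc (the import path follows the fluid layout of this era):

```python
# Minimal sketch: initialize glog from Python via the new init_glog binding.
import sys
import paddle.v2.fluid.core as core

# The C++ side calls google::InitGoogleLogging(prog_name) and installs a
# failure signal handler, so crashes print a stack trace instead of dying
# silently.
core.init_glog(sys.argv[0])
```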
paddle/framework/init.h
@@ -22,6 +22,8 @@ namespace framework {
 
 void InitGflags(std::vector<std::string> &argv);
 
+void InitGLOG(const std::string &prog_name);
+
 bool InitDevices(const std::vector<std::string> &devices);
 
 }  // namespace framework
paddle/framework/lod_tensor.cc
@@ -177,6 +177,9 @@ void AppendLoD(LoD *lod, const LoD &lod_length) {
       lod->empty() || lod->size() == lod_length.size(),
       "The lod_length should has the same size with the appended lod.");
   if (lod->empty()) {
+    for (size_t i = 0; i < lod_length.size(); ++i) {
+      lod->emplace_back(1, 0);  // size = 1, value = 0;
+    }
     *lod = LoD(lod_length.size(), std::vector<size_t>({0}));
   }
   for (size_t i = 0; i < lod->size(); ++i) {
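This hunk seeds an empty LoD with one zero offset per level before lengths are appended. A rough pure-Python model of the logic (the offset-extension loop after the hunk is my reading of the surrounding code, not part of this diff):

```python
# Illustrative model of AppendLoD: an LoD is a list of levels, each a list
# of offsets starting at 0; appending lengths extends each level's offsets.
def append_lod(lod, lod_length):
    assert not lod or len(lod) == len(lod_length), \
        "The lod_length should have the same size as the appended lod."
    if not lod:
        # Mirror of lod->emplace_back(1, 0): each level starts out holding
        # the single offset 0.
        for _ in range(len(lod_length)):
            lod.append([0])
    for i, lengths in enumerate(lod_length):
        for length in lengths:
            lod[i].append(lod[i][-1] + length)
    return lod

print(append_lod([], [[2, 3]]))  # [[0, 2, 5]]
```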
paddle/operators/activation_op.h
@@ -15,6 +15,7 @@ limitations under the License. */
 #pragma once
 #include "paddle/framework/eigen.h"
 #include "paddle/framework/op_registry.h"
+#include "paddle/operators/detail/safe_ref.h"
 
 namespace paddle {
 namespace operators {
@@ -26,12 +27,16 @@ class ActivationKernel
   using T = typename Functor::ELEMENT_TYPE;
 
   void Compute(const framework::ExecutionContext& context) const override {
-    auto* X = context.Input<framework::Tensor>("X");
-    auto* Out = context.Output<framework::Tensor>("Out");
-    Out->mutable_data<T>(context.GetPlace());
-
-    auto x = framework::EigenVector<T>::Flatten(*X);
-    auto out = framework::EigenVector<T>::Flatten(*Out);
+    auto& X = detail::Ref(context.Input<framework::Tensor>("X"),
+                          "Cannot get input tensor X, variable name = %s",
+                          context.op().Input("X"));
+
+    auto& Out = detail::Ref(context.Output<framework::Tensor>("Out"),
+                            "Cannot get output tensor Out, variable name = %s",
+                            context.op().Output("Out"));
+    Out.mutable_data<T>(context.GetPlace());
+    auto x = framework::EigenVector<T>::Flatten(X);
+    auto out = framework::EigenVector<T>::Flatten(Out);
     auto* place =
         context.template device_context<DeviceContext>().eigen_device();
     Functor functor;
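detail::Ref converts the unchecked pointer dereference into a fail-fast reference with a formatted message, so a misconfigured graph reports which variable is missing instead of crashing on a null pointer. The same guard, sketched as a Python analogue (the ref helper is mine, not a Paddle API):

```python
# Python analogue of detail::Ref: require a value to be present, or fail
# with a printf-style message that names the offending variable.
def ref(value, message, *args):
    if value is None:
        raise RuntimeError(message % args)
    return value

inputs = {}  # imagine an execution context whose input "X" was never set
try:
    x = ref(inputs.get("X"),
            "Cannot get input tensor X, variable name = %s", "x0")
except RuntimeError as e:
    print(e)  # Cannot get input tensor X, variable name = x0
```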
paddle/pybind/pybind.cc
@@ -427,6 +427,7 @@ All parameter, weight, gradient are variables in Paddle.
   m.def("unique_integer", UniqueIntegerGenerator);
 
   m.def("init_gflags", framework::InitGflags);
+  m.def("init_glog", framework::InitGLOG);
   m.def("init_devices", &framework::InitDevices);
 
   m.def("is_compile_gpu", IsCompileGPU);
python/paddle/v2/fluid/__init__.py
@@ -27,7 +27,7 @@ __all__ = framework.__all__ + executor.__all__ + [
 ]
 
-def __read_gflags_from_env__():
+def __bootstrap__():
     """
     Enable reading gflags from environment variables.
@@ -41,6 +41,7 @@ def __read_gflags_from_env__():
         read_env_flags.append('fraction_of_gpu_memory_to_use')
     core.init_gflags([sys.argv[0]] +
                      ["--tryfromenv=" + ",".join(read_env_flags)])
+    core.init_glog(sys.argv[0])
 
     if core.is_compile_gpu():
         core.init_devices(["CPU", "GPU:0"])
@@ -48,4 +49,4 @@ def __read_gflags_from_env__():
         core.init_devices(["CPU"])
 
-__read_gflags_from_env__()
+__bootstrap__()
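The rename reflects that the function now does more than read gflags: it also initializes glog and devices at import time. Since --tryfromenv makes gflags look up FLAGS_&lt;name&gt; environment variables, configuration must happen before the import. A hedged usage sketch (the flag name is taken from the hunk above; availability depends on the build):

```python
# Sketch: configure Paddle through the environment before importing fluid;
# __bootstrap__() runs on import and forwards these names via --tryfromenv.
import os

os.environ['FLAGS_fraction_of_gpu_memory_to_use'] = '0.5'  # GPU builds only

import paddle.v2.fluid as fluid  # triggers __bootstrap__()
```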
python/paddle/v2/fluid/layers/ops.py
 from ..registry import register_layer
-__all__ = [
-    'mean', 'mul', 'dropout', 'reshape', 'sigmoid', 'scale', 'transpose',
-    'sigmoid_cross_entropy_with_logits', 'elementwise_add', 'elementwise_div',
-    'elementwise_sub', 'elementwise_mul', 'clip', 'abs', 'sequence_softmax'
+__activations__ = [
+    'abs', 'tanh', 'sigmoid', 'relu', 'sqrt', 'ceil', 'floor', 'log', 'round'
 ]
+
+__all__ = [
+    'mean',
+    'mul',
+    'dropout',
+    'reshape',
+    'scale',
+    'transpose',
+    'sigmoid_cross_entropy_with_logits',
+    'elementwise_add',
+    'elementwise_div',
+    'elementwise_sub',
+    'elementwise_mul',
+    'clip',
+    'sequence_softmax',
+] + __activations__
 
 for _OP in set(__all__):
     globals()[_OP] = register_layer(_OP)
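This is the hunk the PR title refers to: nine activation ops are grouped in __activations__ and auto-registered as layer functions, so they become callable from fluid.layers without hand-written wrappers. A hedged usage sketch (the x= keyword follows the generated-layer convention of lowercasing the op input name X; treat the exact signature as an assumption):

```python
# Sketch: calling the newly exposed activations as ordinary layers.
import paddle.v2.fluid as fluid

x = fluid.layers.data(name='x', shape=[10], dtype='float32')

h = fluid.layers.tanh(x=x)                      # newly exposed by this PR
y = fluid.layers.sqrt(x=fluid.layers.abs(x=x))  # sqrt of |x|
```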