Commit 05909674 authored by Yang Yu

Add init glog

Parent 90a5a55a
@@ -75,5 +75,9 @@ bool InitDevices(const std::vector<std::string> &devices) {
   return true;
 }
 
+void InitGLOG(const std::string &prog_name) {
+  google::InitGoogleLogging(prog_name.c_str());
+}
+
 }  // namespace framework
 }  // namespace paddle
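For context, google::InitGoogleLogging is glog's one-time initialization entry point: it should run once, with the program name, before the first LOG(...) statement, which is exactly what the new InitGLOG wrapper provides. A minimal standalone sketch (file name and build command are illustrative, not part of this commit):

// glog_demo.cc -- illustrative sketch of the glog call wrapped by InitGLOG.
// Build (assuming glog is installed): g++ glog_demo.cc -lglog -o glog_demo
#include <glog/logging.h>

int main(int argc, char* argv[]) {
  // One-time setup; glog uses the passed program name to label its log files.
  google::InitGoogleLogging(argv[0]);
  LOG(INFO) << "glog initialized for " << argv[0];
  return 0;
}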
@@ -22,6 +22,8 @@ namespace framework {
 void InitGflags(std::vector<std::string> &argv);
 
+void InitGLOG(const std::string &prog_name);
+
 bool InitDevices(const std::vector<std::string> &devices);
 }  // namespace framework
...
@@ -427,6 +427,7 @@ All parameter, weight, gradient are variables in Paddle.
   m.def("unique_integer", UniqueIntegerGenerator);
   m.def("init_gflags", framework::InitGflags);
+  m.def("init_glog", framework::InitGLOG);
   m.def("init_devices", &framework::InitDevices);
   m.def("is_compile_gpu", IsCompileGPU);
...
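The m.def(...) calls above are pybind11 bindings, so once the extension module is rebuilt the new entry is callable from Python as core.init_glog(prog_name). A minimal self-contained sketch of the same binding pattern (module and function names here are made up for illustration; Paddle's actual pybind module is much larger):

// bind_demo.cc -- illustrative pybind11 sketch, not Paddle's real module.
#include <string>

#include <glog/logging.h>
#include <pybind11/pybind11.h>

namespace {
void InitGLOGDemo(const std::string &prog_name) {
  google::InitGoogleLogging(prog_name.c_str());
}
}  // namespace

// Builds a Python extension module named demo_core with one function,
// mirroring how framework::InitGLOG is exposed as core.init_glog.
PYBIND11_MODULE(demo_core, m) {
  m.def("init_glog", &InitGLOGDemo);
}

From Python this would be used as "import demo_core; demo_core.init_glog(sys.argv[0])", the same calling pattern that __bootstrap__ uses against Paddle's core module below.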
 # import all class inside framework into fluid module
-import framework
-from framework import *
-# import all class inside executor into fluid module
-import executor
-from executor import *
-import io
+from core import LoDTensor
+import backward
+import clip
 import evaluator
+# import all class inside executor into fluid module
+import executor
+import framework
 import initializer
+import io
 import layers
 import nets
 import optimizer
-import backward
 import regularizer
-from param_attr import ParamAttr
 from data_feeder import DataFeeder
-from core import LoDTensor, CPUPlace, CUDAPlace
 from distribute_transpiler import DistributeTranspiler
-import clip
+from executor import *
+from framework import *
+from param_attr import ParamAttr
 Tensor = LoDTensor
 __all__ = framework.__all__ + executor.__all__ + [
...
@@ -27,7 +27,7 @@ __all__ = framework.__all__ + executor.__all__ + [
 ]
 
 
-def __read_gflags_from_env__():
+def __bootstrap__():
     """
     Enable reading gflags from environment variables.
...
@@ -41,6 +41,7 @@ def __read_gflags_from_env__():
         read_env_flags.append('fraction_of_gpu_memory_to_use')
     core.init_gflags([sys.argv[0]] +
                      ["--tryfromenv=" + ",".join(read_env_flags)])
+    core.init_glog(sys.argv[0])
     if core.is_compile_gpu():
         core.init_devices(["CPU", "GPU:0"])
...
@@ -48,4 +49,4 @@ __read_gflags_from_env__()
         core.init_devices(["CPU"])
 
-__read_gflags_from_env__()
+__bootstrap__()
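The init_gflags call above relies on gflags' built-in --tryfromenv flag: each name listed after it is looked up as an environment variable FLAGS_<name>, and missing variables are silently skipped (unlike --fromenv). A standalone sketch of that mechanism, assuming the gflags library is installed; the flag definition and its default value here are illustrative, not Paddle's:

// tryfromenv_demo.cc -- illustrative sketch of the --tryfromenv mechanism.
// Build: g++ tryfromenv_demo.cc -lgflags -o tryfromenv_demo
#include <iostream>

#include <gflags/gflags.h>

DEFINE_double(fraction_of_gpu_memory_to_use, 0.5,
              "demo flag; the default here is arbitrary");

int main() {
  // Same spirit as core.init_gflags([sys.argv[0],
  //   "--tryfromenv=fraction_of_gpu_memory_to_use"]) in __bootstrap__:
  // gflags checks the environment variable
  // FLAGS_fraction_of_gpu_memory_to_use and uses its value when set.
  int argc = 2;
  char arg0[] = "tryfromenv_demo";
  char arg1[] = "--tryfromenv=fraction_of_gpu_memory_to_use";
  char *args[] = {arg0, arg1};
  char **argv = args;
  google::ParseCommandLineFlags(&argc, &argv, true);

  std::cout << FLAGS_fraction_of_gpu_memory_to_use << std::endl;
  return 0;
}

Running it as "FLAGS_fraction_of_gpu_memory_to_use=0.9 ./tryfromenv_demo" would print 0.9, while running it with no such environment variable set prints the compiled-in default.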