# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
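'''
PaddlePaddle v2 API package.

Wires the v2 sub-modules (layer, optimizer, trainer, dataset, reader, ...)
together and exposes the top-level helpers init(), infer() and batch().
'''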
import os
import optimizer
import layer
import activation
import parameters
import trainer
import event
import data_type
import topology
import networks
import evaluator
from . import dataset
from . import reader
from . import plot
import attr
import op
import pooling
import inference
import minibatch
import image
import paddle.trainer.config_parser as cp

__all__ = [
    'default_startup_program',
    'default_main_program',
    'optimizer',
    'layer',
    'activation',
    'parameters',
    'init',
    'trainer',
    'event',
    'data_type',
    'attr',
    'pooling',
    'dataset',
    'reader',
    'topology',
    'networks',
    'infer',
    'plot',
    'evaluator',
    'image',
    'master',
]

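# Start the global model-config parser (paddle.trainer.config_parser) that the
# v2 layer/topology modules build their configuration on.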
cp.begin_parse()


def set_omp_mkl_env_vars(trainer_count):
    '''Automatically set the CPU environment if it has not been set already.

       Exports KMP_AFFINITY and OMP_DYNAMIC according to the Hyper-Threading
       status, and OMP_NUM_THREADS and MKL_NUM_THREADS according to
       trainer_count.
    '''
    import platform
    if platform.system() not in ['Linux', 'Darwin']:
        return

    def set_env(key, value):
        '''If the key has not been set in the environment, set it with value.'''
        assert isinstance(key, str)
        assert isinstance(value, str)
        envset = os.environ.get(key)
        if envset is None:
            os.environ[key] = value

    def num_physical_cores():
        '''Get the number of physical cores'''
        if platform.system() == "Linux":
            num_sockets = int(
                os.popen("grep 'physical id' /proc/cpuinfo | sort -u | wc -l")
                .read())
            num_cores_per_socket = int(
                os.popen("grep 'core id' /proc/cpuinfo | sort -u | wc -l")
                .read())
            return num_sockets * num_cores_per_socket
        else:
            cmds = {"Darwin": "sysctl -n hw.physicalcpu"}
            return int(os.popen(cmds.get(platform.system(), "expr 1")).read())

    def num_logical_processors():
        '''Get the number of logical processors'''
        cmds = {
            "Linux": "grep \"processor\" /proc/cpuinfo|sort -u|wc -l",
            "Darwin": "sysctl -n hw.logicalcpu"
        }
        return int(os.popen(cmds.get(platform.system(), "expr 1")).read())

    num_cores = num_physical_cores()
    num_processors = num_logical_processors()
    if num_processors > num_cores:  # Hyper Threading is enabled
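        # With Hyper-Threading on, let the OpenMP runtime resize the thread
        # team dynamically and place threads on distinct physical cores before
        # reusing hyper-thread siblings (compact,1,0).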
        set_env("OMP_DYNAMIC", "true")
        set_env("KMP_AFFINITY", "granularity=fine,compact,1,0")
    else:
        set_env("OMP_DYNAMIC", "false")
        set_env("KMP_AFFINITY", "granularity=fine,compact,0,0")
    threads = num_processors / trainer_count
    threads = '1' if threads < 1 else str(threads)
    set_env("OMP_NUM_THREADS", threads)
    set_env("MKL_NUM_THREADS", threads)


def init(**kwargs):
    import py_paddle.swig_paddle as api
    args = []
    args_dict = {}
    # NOTE: collect arguments exported through PADDLE_INIT_* environment variables
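    # e.g. PADDLE_INIT_USE_GPU=1 in the environment becomes use_gpu=1 here
    # (illustrative; any PADDLE_INIT_<NAME> variable is lowercased this way).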
    for ek, ev in os.environ.iteritems():
        if ek.startswith("PADDLE_INIT_"):
            args_dict[ek.replace("PADDLE_INIT_", "").lower()] = str(ev)

    # NOTE: explicit kwargs overwrite matching arguments from the environment
    args_dict.update(kwargs)
    for key in args_dict.keys():
        args.append('--%s=%s' % (key, str(args_dict[key])))

    set_omp_mkl_env_vars(kwargs.get('trainer_count', 1))

    if 'use_gpu' in kwargs:
        cp.g_command_config_args['use_gpu'] = kwargs['use_gpu']
    if 'use_mkldnn' in kwargs:
        cp.g_command_config_args['use_mkldnn'] = kwargs['use_mkldnn']
    assert 'parallel_nn' not in kwargs, ("currently 'parallel_nn' is not "
                                         "supported in v2 APIs.")

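    # Pass the accumulated '--flag=value' strings to the C++ core through the
    # SWIG binding; init() must be called before training or inference starts.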
    api.initPaddle(*args)


infer = inference.infer
batch = minibatch.batch
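
# A minimal usage sketch (illustrative only; the uci_housing dataset and the
# flag values below are examples, not requirements of this module):
#
#   import paddle.v2 as paddle
#
#   paddle.init(use_gpu=False, trainer_count=1)
#   train_reader = paddle.batch(
#       paddle.dataset.uci_housing.train(), batch_size=32)
#   # ... define layers, build a trainer, then train or call paddle.infer().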