#   Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function
import os
import sys

# The legacy core needs to be removed before "import core", in case users
# installed paddlepaddle without the -U option.
core_suffix = 'so'
if os.name == 'nt':
    core_suffix = 'pyd'

legacy_core = os.path.abspath(os.path.dirname(
    __file__)) + os.sep + 'core.' + core_suffix
if os.path.exists(legacy_core):
    sys.stderr.write('Deleting legacy file ' + legacy_core + '\n')
    os.remove(legacy_core)

# import all classes inside framework into the fluid module
from . import framework
from .framework import *
# import all classes inside executor into the fluid module
from . import executor
from .executor import *

from . import data_feed_desc
from .data_feed_desc import *

from . import dataset
from .dataset import *

from .data import *

from . import trainer_desc

from . import io
from . import evaluator
from . import initializer
from .initializer import set_global_initializer
from . import layers
from . import dygraph
from . import contrib
from . import nets
from . import optimizer
from . import backward
from .backward import gradients
from . import regularizer
from . import average
from . import metrics
from . import transpiler
from . import incubate
from .input import embedding, one_hot
from . import distribute_lookup_table
from .param_attr import ParamAttr, WeightNormParamAttr
from .data_feeder import DataFeeder
from .core import LoDTensor, LoDTensorArray, CPUPlace, XPUPlace, CUDAPlace, CUDAPinnedPlace, Scope, _Scope
from .incubate import fleet
from .incubate import data_generator
from .transpiler import DistributeTranspiler, \
    memory_optimize, release_memory, DistributeTranspilerConfig
from .lod_tensor import create_lod_tensor, create_random_int_lodtensor
from . import clip
from . import profiler
from . import unique_name
from . import parallel_executor
from .parallel_executor import *
from . import compiler
from .compiler import *
from paddle.fluid.layers.math_op_patch import monkey_patch_variable
from . import install_check
from .dygraph.nn import *
from .dygraph.layers import *
from .dygraph.base import enable_dygraph, disable_dygraph
from .io import save, load, load_program_state, set_program_state
from .dygraph.checkpoint import save_dygraph, load_dygraph
from .dygraph.varbase_patch_methods import monkey_patch_varbase
from . import generator
from .core import _cuda_synchronize

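# Tensor is an alias of LoDTensor.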
Tensor = LoDTensor
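# enable_imperative / disable_imperative are aliases of the dygraph toggles.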
enable_imperative = enable_dygraph
disable_imperative = disable_dygraph

__all__ = framework.__all__ + executor.__all__ + \
    trainer_desc.__all__ + transpiler.__all__ + \
    parallel_executor.__all__ + lod_tensor.__all__ + \
    data_feed_desc.__all__ + compiler.__all__ + backward.__all__ + generator.__all__ + [
        'io',
        'initializer',
        'embedding',
        'one_hot',
        'layers',
        'contrib',
        'data',
        'dygraph',
        'enable_dygraph',
        'disable_dygraph',
        'enable_imperative',
        'disable_imperative',
        'transpiler',
        'nets',
        'optimizer',
        'learning_rate_decay',
        'backward',
        'regularizer',
        'LoDTensor',
        'LoDTensorArray',
        'CPUPlace',
        'XPUPlace',
        'CUDAPlace',
        'CUDAPinnedPlace',
        'Tensor',
        'ParamAttr',
        'WeightNormParamAttr',
        'DataFeeder',
        'clip',
        'profiler',
        'unique_name',
        'Scope',
        'install_check',
        'save',
        'load',
        'VarBase',
        '_cuda_synchronize'
    ]


def __bootstrap__():
    """
    Enable reading gflags from environment variables.

    Returns:
        None
    """
    import sys
    import os
    import platform
    from . import core

    # NOTE(zhiqiu): when (1) numpy < 1.19 and (2) python < 3.7, unittest is
    # always imported by numpy (though maybe not in every version), so
    # in_test is True and p2p is not initialized.
    in_test = 'unittest' in sys.modules

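    # Respect OMP_NUM_THREADS if it is set (defaulting to a single thread);
    # values above 1 are kept, but trigger the warning below.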
    try:
        num_threads = int(os.getenv('OMP_NUM_THREADS', '1'))
    except ValueError:
        num_threads = 1

    if num_threads > 1:
        print(
            'WARNING: OMP_NUM_THREADS set to {0}, not 1. The computation '
            'speed will not be optimized if you use data parallel. It will '
            'fail if this PaddlePaddle binary is compiled with OpenBlas since'
            ' OpenBlas does not support multi-threading.'.format(num_threads),
            file=sys.stderr)
        print('PLEASE USE OMP_NUM_THREADS WISELY.', file=sys.stderr)

    os.environ['OMP_NUM_THREADS'] = str(num_threads)
    sysstr = platform.system()
    read_env_flags = [
        'check_nan_inf',
        'fast_check_nan_inf',
        'benchmark',
        'eager_delete_scope',
        'fraction_of_cpu_memory_to_use',
        'initial_cpu_memory_in_mb',
        'init_allocated_mem',
        'paddle_num_threads',
        'dist_threadpool_size',
        'eager_delete_tensor_gb',
        'fast_eager_deletion_mode',
        'memory_fraction_of_eager_deletion',
        'allocator_strategy',
        'reader_queue_speed_test_mode',
        'print_sub_graph_dir',
        'pe_profile_fname',
        'inner_op_parallelism',
        'enable_parallel_graph',
        'fuse_parameter_groups_size',
        'multiple_of_cupti_buffer_size',
        'fuse_parameter_memory_size',
        'tracer_profile_fname',
        'dygraph_debug',
        'use_system_allocator',
        'enable_unused_var_check',
        'free_idle_chunk',
        'free_when_no_cache_hit',
        'call_stack_level',
        'sort_sum_gradient',
        'max_inplace_grad_add',
    ]
    if 'Darwin' not in sysstr:
        read_env_flags.append('use_pinned_memory')

    if os.name != 'nt':
        read_env_flags.append('cpu_deterministic')

    if core.is_compiled_with_mkldnn():
        read_env_flags.append('use_mkldnn')
        read_env_flags.append('tracer_mkldnn_ops_on')
        read_env_flags.append('tracer_mkldnn_ops_off')

    if core.is_compiled_with_dist():
        # env for rpc
        read_env_flags.append('rpc_deadline')
        read_env_flags.append('rpc_retry_times')
        read_env_flags.append('rpc_server_profile_path')
        read_env_flags.append('enable_rpc_profiler')
        read_env_flags.append('rpc_send_thread_num')
        read_env_flags.append('rpc_get_thread_num')
        read_env_flags.append('rpc_prefetch_thread_num')
        read_env_flags.append('rpc_disable_reuse_port')
        read_env_flags.append('rpc_retry_bind_port')

        read_env_flags.append('worker_update_interval_secs')

        if core.is_compiled_with_brpc():
            read_env_flags.append('max_body_size')
            # set brpc max body size
            os.environ['FLAGS_max_body_size'] = "2147483647"

    if core.is_compiled_with_cuda():
        read_env_flags += [
            'fraction_of_gpu_memory_to_use',
            'initial_gpu_memory_in_mb',
            'reallocate_gpu_memory_in_mb',
            'cudnn_deterministic',
            'enable_cublas_tensor_op_math',
            'conv_workspace_size_limit',
            'cudnn_exhaustive_search',
            'selected_gpus',
            'sync_nccl_allreduce',
            'cudnn_batchnorm_spatial_persistent',
            'gpu_allocator_retry_time',
            'local_exe_sub_scope_limit',
            'gpu_memory_limit_mb',
        ]
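    # Every flag in read_env_flags can be overridden from the environment
    # through gflags' --tryfromenv using the FLAGS_ prefix, e.g.
    # `export FLAGS_eager_delete_tensor_gb=0`.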
    core.init_gflags(["--tryfromenv=" + ",".join(read_env_flags)])
    core.init_glog(sys.argv[0])
    # don't init_p2p when in unittest to save time.
    core.init_devices()


# TODO(panyx0718): Avoid doing complex initialization logic in __init__.py.
# Consider paddle.init(args) or paddle.main(args)
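# monkey_patch_variable attaches the Python arithmetic operators (+, -, *, ...)
# to static-graph Variable objects (see paddle.fluid.layers.math_op_patch).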
monkey_patch_variable()
__bootstrap__()
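# monkey_patch_varbase does the same for imperative-mode VarBase objects.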
monkey_patch_varbase()