未验证 提交 0d8f40d2 编写于 作者: Z Zhang Ting 提交者: GitHub

remove init_on_cpu and force_init_on_cpu APIs, test=develop (#22202)

上级 abee05a8
......@@ -24,56 +24,11 @@ from . import unique_name
__all__ = [
'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear',
'MSRA', 'force_init_on_cpu', 'init_on_cpu', 'ConstantInitializer',
'UniformInitializer', 'NormalInitializer', 'TruncatedNormalInitializer',
'XavierInitializer', 'BilinearInitializer', 'MSRAInitializer',
'NumpyArrayInitializer'
'MSRA', 'ConstantInitializer', 'UniformInitializer', 'NormalInitializer',
'TruncatedNormalInitializer', 'XavierInitializer', 'BilinearInitializer',
'MSRAInitializer', 'NumpyArrayInitializer'
]
# Module-level switch: when True, variable initialization ops are pinned to CPU.
_force_init_on_cpu_ = False


def force_init_on_cpu():
    """
    Report whether variable initialization is currently forced onto the CPU.

    Returns:
        bool: the current state of the force-init-on-CPU flag.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            if fluid.initializer.force_init_on_cpu():
                step = fluid.layers.create_global_var(
                    shape=[2,3], value=1.0, dtype='float32')
    """
    # Simply expose the module-level flag; it is toggled by init_on_cpu().
    return _force_init_on_cpu_
@signature_safe_contextmanager
def init_on_cpu():
    """
    Context manager that forces variables to be initialized on CPU.

    On entry it saves the current force-init flag and sets it to True; on
    exit it restores the saved value.

    Examples:
        .. code-block:: python

            import paddle.fluid as fluid
            with fluid.initializer.init_on_cpu():
                step = fluid.layers.create_global_var(
                    shape=[2,3], value=1.0, dtype='float32')
    """
    global _force_init_on_cpu_
    pre_state = force_init_on_cpu()
    _force_init_on_cpu_ = True
    try:
        yield
    finally:
        # Restore the previous flag even if the managed block raises;
        # without the try/finally an exception inside the `with` body would
        # leave _force_init_on_cpu_ stuck at True for the rest of the process.
        _force_init_on_cpu_ = pre_state
class Initializer(object):
"""Base class for variable initializers
......@@ -186,7 +141,7 @@ class ConstantInitializer(Initializer):
"shape": var.shape,
"dtype": int(out_dtype),
"value": float(self._value),
'force_cpu': self._force_cpu or force_init_on_cpu()
'force_cpu': self._force_cpu
},
stop_gradient=True)
......
......@@ -29,7 +29,6 @@ from . import control_flow
from . import nn
from . import ops
from . import tensor
from ..initializer import init_on_cpu
from ..framework import default_main_program, Parameter, unique_name, name_scope
from ..framework import Variable
from ..framework import in_dygraph_mode
......
......@@ -17,7 +17,6 @@ from __future__ import print_function
from .. import core
from ..framework import Variable, unique_name
from .layer_function_generator import OpProtoHolder
from ..initializer import force_init_on_cpu
_supported_int_dtype_ = [
core.VarDesc.VarType.UINT8,
......@@ -58,7 +57,7 @@ def monkey_patch_variable():
'dtype': var.dtype,
'shape': shape,
'value': value,
'force_cpu': force_init_on_cpu()
'force_cpu': False
},
stop_gradient=True)
var.stop_gradient = True
......
......@@ -18,7 +18,7 @@ from ..layer_helper import LayerHelper
from ..param_attr import ParamAttr
from ..framework import convert_np_dtype_to_dtype_, in_dygraph_mode, _varbase_creator
from ..framework import Variable
from ..initializer import Constant, force_init_on_cpu
from ..initializer import Constant
from ..core import VarDesc
from .. import core
from .layer_function_generator import templatedoc
......@@ -569,10 +569,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
shape = fluid.layers.fill_constant([1,2], "int32", 2) # shape=[2,2]
data4 = fluid.layers.fill_constant(shape=shape, dtype='bool', value=True) # data4=[[True,True],[True,True]]
"""
attrs = {
'value': float(value),
'force_cpu': force_cpu or force_init_on_cpu()
}
attrs = {'value': float(value), 'force_cpu': force_cpu}
if convert_dtype(dtype) in ['int64', 'int32']:
attrs['str_value'] = str(int(value))
......@@ -601,10 +598,7 @@ def fill_constant(shape, dtype, value, force_cpu=False, out=None):
'fill_constant')
check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
inputs = {}
attrs = {
'value': float(value),
'force_cpu': force_cpu or force_init_on_cpu()
}
attrs = {'value': float(value), 'force_cpu': force_cpu}
if convert_dtype(dtype) in ['int64', 'int32']:
attrs['str_value'] = str(int(value))
......@@ -722,7 +716,7 @@ def fill_constant_batch_size_like(input,
'value': float(value),
'input_dim_idx': input_dim_idx,
'output_dim_idx': output_dim_idx,
'force_cpu': force_cpu or force_init_on_cpu()
'force_cpu': force_cpu
}
if convert_dtype(dtype) in ['int64', 'int32']:
attrs['str_value'] = str(int(value))
......
......@@ -17,7 +17,6 @@ import paddle.fluid as fluid
fluid.core._set_eager_deletion_mode(-1, -1, False)
import paddle.fluid.layers.ops as ops
from paddle.fluid.initializer import init_on_cpu
from paddle.fluid.layers.learning_rate_scheduler import cosine_decay
from simple_nets import init_data
import math
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册