Unverified commit de605cc0, authored by lujun, committed by GitHub

Merge pull request #16523 from junjun315/tensor_api

move imperative to dygraph
@@ -34,7 +34,7 @@ from . import io
from . import evaluator
from . import initializer
from . import layers
-from . import imperative
+from . import dygraph
from . import contrib
from . import nets
from . import optimizer
@@ -71,7 +71,7 @@ __all__ = framework.__all__ + executor.__all__ + \
'initializer',
'layers',
'contrib',
-'imperative',
+'dygraph',
'transpiler',
'nets',
'optimizer',
...
@@ -22,7 +22,7 @@ __all__ = ['enabled', 'guard', 'to_variable']
def enabled():
-return framework._in_imperative_mode()
+return framework._in_dygraph_mode()
@signature_safe_contextmanager
@@ -39,14 +39,14 @@ def guard(place=None):
with framework.program_guard(train, startup):
with framework.unique_name.guard():
-with framework._imperative_guard(tracer):
+with framework._dygraph_guard(tracer):
-with framework._imperative_place_guard(place):
+with framework._dygraph_place_guard(place):
yield
def to_variable(value, block=None, name=None):
if isinstance(value, np.ndarray):
-assert enabled(), "to_variable could only be called in imperative mode"
+assert enabled(), "to_variable could only be called in dygraph mode"
if not block:
block = framework.default_main_program().current_block()
...
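A minimal sketch of the renamed entry points above (fluid.dygraph.guard plus to_variable), assuming the package layout this commit introduces; it mirrors the test_sum_op case further down in this diff:

    import numpy as np
    import paddle.fluid as fluid

    x = np.ones([2, 2], np.float32)
    with fluid.dygraph.guard():                  # replaces fluid.imperative.guard()
        var = fluid.dygraph.base.to_variable(x)  # numpy array -> dygraph Variable
        loss = fluid.layers.reduce_sum(var)
        loss._backward()                         # eager backward, as in the tests below
        print(loss._numpy(), var._gradient())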
@@ -68,7 +68,7 @@ def save_persistables(vardict, dirname, filename=None):
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
param_path = "./my_paddle_model"
-fluid.imperative.checkpoint.save_persistables(ptb_model.state_dict(), dirname=param_path,
+fluid.dygraph.save_persistables(ptb_model.state_dict(), dirname=param_path,
layer=ptb_model)
"""
if isinstance(vardict, collections.OrderedDict):
@@ -97,17 +97,17 @@ def load_persistables(vardict, dirname, filename=None):
Examples:
.. code-block:: python
-my_layer = layer(fluid.imperative.Layer)
+my_layer = layer(fluid.dygraph.Layer)
param_path = "./my_paddle_model"
-param_dict = fluid.imperative.checkpoint.load_persistables(my_layer.parameters(), param_path)
+param_dict = fluid.dygraph.load_persistables(my_layer.parameters(), param_path)
param_1 = param_dict['PtbModel_0.w_1']
or:
-my_layer = layer(fluid.imperative.Layer)
+my_layer = layer(fluid.dygraph.Layer)
param_path = "./my_paddle_model"
filename = "model.file"
-param_dict = fluid.imperative.checkpoint.load_persistables(my_layer.state_dict(), param_path,
+param_dict = fluid.dygraph.load_persistables(my_layer.state_dict(), param_path,
filename=filename)
param_1 = param_dict['PtbModel_0.w_1']
...
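A hedged round-trip sketch of the renamed checkpoint helpers, modelled on the call pattern in the TestDygraphCheckpoint case later in this diff; FC here just stands in for any fluid.dygraph.Layer whose parameters are worth saving:

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph.nn import FC

    with fluid.dygraph.guard():
        fc = FC("fc", 10)
        # run the layer once so its parameters are created
        out = fc(fluid.dygraph.base.to_variable(np.ones([2, 2], np.float32)))
        fluid.dygraph.save_persistables(fc, "save_dir")            # as in the MNIST test below
        fc.load_dict(fluid.dygraph.load_persistables(fc, "save_dir"))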
@@ -16,7 +16,7 @@ from __future__ import print_function
import copy
import six
-from ..framework import Parameter, _in_imperative_mode
+from ..framework import Parameter, _in_dygraph_mode
from ..param_attr import ParamAttr
from .. import core
from six.moves import zip
...
@@ -283,7 +283,7 @@ class PyLayer(core.PyLayer):
@classmethod
def __call__(cls, *inputs):
-tracer = framework._imperative_tracer()
+tracer = framework._dygraph_tracer()
block = framework.default_main_program().current_block()
ivar_inputs = [x._ivar for x in inputs]
...
@@ -133,7 +133,7 @@ class Conv2D(layers.Layer):
outputs={'Out': [pre_act]},
attrs={'axis': 1})
-# Currently, we don't support inplace in imperative mode
+# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(pre_act, act=self._act)
@@ -265,7 +265,7 @@ class FC(layers.Layer):
attrs={'axis': self._num_flatten_dims})
else:
pre_activation = pre_bias
-# Currently, we don't support inplace in imperative mode
+# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(pre_activation, act=self._act)
@@ -387,7 +387,7 @@ class BatchNorm(layers.Layer):
"use_global_stats": self._use_global_stats
})
-# Currently, we don't support inplace in imperative mode
+# Currently, we don't support inplace in dygraph mode
return self._helper.append_activation(batch_norm_out, self._act)
@@ -426,7 +426,7 @@ class Embedding(layers.Layer):
dict_size = len(dataset.ids)
input = fluid.layers.data(name='ids', shape=[32, 32], dtype='float32')
-embedding = fluid.imperative.Embedding(size=[dict_size, 16])
+embedding = fluid.dygraph.Embedding(size=[dict_size, 16])
fc = embedding(input)
"""
...
@@ -24,12 +24,12 @@ __all__ = ['Tracer']
def release_op(op):
-del framework._imperative_tracer()._ops[op._trace_id]
+del framework._dygraph_tracer()._ops[op._trace_id]
class Tracer(core.Tracer):
"""
-Python wrapper of imperative tracer
+Python wrapper of dygraph tracer
"""
def __init__(self, block):
...
@@ -75,20 +75,20 @@ GRAD_VAR_SUFFIX = core.kGradVarSuffix()
ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
-_imperative_tracer_ = None
+_dygraph_tracer_ = None
-_imperative_current_expected_place_ = None
+_dygraph_current_expected_place_ = None
-def _in_imperative_mode():
+def _in_dygraph_mode():
-return _imperative_tracer_ is not None
+return _dygraph_tracer_ is not None
-def _imperative_tracer():
+def _dygraph_tracer():
-return _imperative_tracer_
+return _dygraph_tracer_
def _current_expected_place():
-return _imperative_current_expected_place_
+return _dygraph_current_expected_place_
def _cpu_num():
@@ -396,7 +396,7 @@ class Variable(object):
if not isinstance(dtype, core.VarDesc.VarType):
dtype = convert_np_dtype_to_dtype_(dtype)
-if _in_imperative_mode():
+if _in_dygraph_mode():
# record vars in tracer rather than blocks
self._ivar = kwargs.get("ivar", None)
if not self._ivar:
@@ -406,7 +406,7 @@ class Variable(object):
_current_expected_place(), stop_gradient, True
if persistable else False)
if persistable:
-_imperative_tracer().trace_var(name, self)
+_dygraph_tracer().trace_var(name, self)
else:
self.error_clip = error_clip
@@ -515,8 +515,8 @@ class Variable(object):
Returns:
str: The debug string.
"""
-if _in_imperative_mode():
+if _in_dygraph_mode():
-# TODO(panyx0718): add more imperative debug info.
+# TODO(panyx0718): add more dygraph debug info.
return 'name %s, dtype: %s shape: %s' % (self.name, self.dtype,
self.shape)
@@ -548,42 +548,42 @@ class Variable(object):
@property
def _stop_gradient(self):
-if _in_imperative_mode():
+if _in_dygraph_mode():
return self._ivar.stop_gradient
else:
return self.stop_gradient
@_stop_gradient.setter
def _stop_gradient(self, s):
-if _in_imperative_mode():
+if _in_dygraph_mode():
self._ivar.stop_gradient = s
else:
self.stop_gradient = s
@property
def persistable(self):
-if _in_imperative_mode():
+if _in_dygraph_mode():
return self._ivar.persistable
else:
return self.desc.persistable()
@persistable.setter
def persistable(self, p):
-if _in_imperative_mode():
+if _in_dygraph_mode():
return self._ivar.persistable
else:
self.desc.set_persistable(p)
@property
def name(self):
-if _in_imperative_mode():
+if _in_dygraph_mode():
return self._ivar.name
else:
return cpt.to_text(self.desc.name())
@name.setter
def name(self, new_name):
-if _in_imperative_mode():
+if _in_dygraph_mode():
self._ivar.name = new_name
else:
self.desc.set_name(new_name)
@@ -591,26 +591,26 @@ class Variable(object):
@property
def shape(self):
# convert to tuple, make it as same as numpy API.
-if _in_imperative_mode():
+if _in_dygraph_mode():
return self._ivar.shape
else:
return tuple(self.desc.shape())
@property
def dtype(self):
-if _in_imperative_mode():
+if _in_dygraph_mode():
return self._ivar.dtype
else:
return self.desc.dtype()
@property
def lod_level(self):
-# TODO(minqiyang): Support lod_level in imperative mode
+# TODO(minqiyang): Support lod_level in dygraph mode
return self.desc.lod_level()
@property
def type(self):
-if _in_imperative_mode():
+if _in_dygraph_mode():
return self._ivar.dtype
else:
return self.desc.type()
@@ -918,7 +918,7 @@ class Operator(object):
inputs=None,
outputs=None,
attrs=None):
-if _in_imperative_mode():
+if _in_dygraph_mode():
if type is None:
raise ValueError(
"`type` to initialized an Operator can not be None.")
@@ -1037,7 +1037,7 @@ class Operator(object):
for arg in out_args:
out_arg_names.append(cpt.to_text(arg.name))
# TODO(minqiyang): could we remove variable's op in static mode?
-if not _in_imperative_mode():
+if not _in_dygraph_mode():
arg.op = self
self.desc.set_output(out_proto.name, out_arg_names)
@@ -1083,7 +1083,7 @@ class Operator(object):
@property
def type(self):
-if _in_imperative_mode():
+if _in_dygraph_mode():
return self.iop.type
else:
return self.desc.type()
@@ -1626,7 +1626,7 @@ class Block(object):
Returns:
Operator: the append Operator.
"""
-if _in_imperative_mode():
+if _in_dygraph_mode():
op = Operator(
block=self,
desc=None,
@@ -1638,9 +1638,8 @@ class Block(object):
# record ops in tracer rather than blocks
#
# TODO(minqiyang): add op stop_gradient support in static mode too.
-# currently, we only support stop_gradient in imperative mode.
+# currently, we only support stop_gradient in dygraph mode.
-_imperative_tracer().trace_op(op,
-kwargs.get("stop_gradient", False))
+_dygraph_tracer().trace_op(op, kwargs.get("stop_gradient", False))
else:
op_desc = self.desc.append_op()
op = Operator(
@@ -1699,7 +1698,7 @@ class Block(object):
return self.ops[start:end]
def _prepend_op(self, *args, **kwargs):
-if _in_imperative_mode():
+if _in_dygraph_mode():
op = Operator(
self,
None,
@@ -1707,8 +1706,7 @@ class Block(object):
inputs=kwargs.get("inputs", None),
outputs=kwargs.get("outputs", None),
attrs=kwargs.get("attrs", None))
-_imperative_tracer().trace_op(op,
-kwargs.get("stop_gradient", False))
+_dygraph_tracer().trace_op(op, kwargs.get("stop_gradient", False))
else:
op_desc = self.desc._prepend_op()
op = Operator(
@@ -3511,22 +3509,22 @@ def _get_var(name, program=None):
@signature_safe_contextmanager
-def _imperative_guard(tracer):
+def _dygraph_guard(tracer):
-global _imperative_tracer_
+global _dygraph_tracer_
-tmp_trace = _imperative_tracer_
+tmp_trace = _dygraph_tracer_
-_imperative_tracer_ = tracer
+_dygraph_tracer_ = tracer
yield
-_imperative_tracer_ = tmp_trace
+_dygraph_tracer_ = tmp_trace
@signature_safe_contextmanager
-def _imperative_place_guard(place):
+def _dygraph_place_guard(place):
-global _imperative_current_expected_place_
+global _dygraph_current_expected_place_
-tmp_place = _imperative_current_expected_place_
+tmp_place = _dygraph_current_expected_place_
-_imperative_current_expected_place_ = place
+_dygraph_current_expected_place_ = place
yield
-_imperative_current_expected_place_ = tmp_place
+_dygraph_current_expected_place_ = tmp_place
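As the renamed globals above suggest, "dygraph mode" simply means a tracer has been installed; fluid.dygraph.guard() installs one through _dygraph_guard, so code can branch on the new helper. A small sketch, assuming the guard behaves as in the base.py hunk earlier in this diff:

    import paddle.fluid as fluid
    from paddle.fluid import framework

    print(framework._in_dygraph_mode())      # False: no tracer installed yet
    with fluid.dygraph.guard():
        print(framework._in_dygraph_mode())  # True while the guard's tracer is active
    print(framework._in_dygraph_mode())      # False again after the guard exits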
@@ -165,7 +165,7 @@ class ConstantInitializer(Initializer):
'force_cpu': self._force_cpu or force_init_on_cpu()
},
stop_gradient=True)
-if not framework._in_imperative_mode():
+if not framework._in_dygraph_mode():
var.op = op
return op
@@ -245,7 +245,7 @@ class UniformInitializer(Initializer):
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
-if not framework._in_imperative_mode():
+if not framework._in_dygraph_mode():
var.op = op
return op
@@ -324,7 +324,7 @@ class NormalInitializer(Initializer):
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
-if not framework._in_imperative_mode():
+if not framework._in_dygraph_mode():
var.op = op
return op
@@ -403,7 +403,7 @@ class TruncatedNormalInitializer(Initializer):
outputs={"Out": var},
attrs={"in_dtype": out_var.dtype,
"out_dtype": var.dtype})
-if not framework._in_imperative_mode():
+if not framework._in_dygraph_mode():
var.op = op
return op
@@ -509,7 +509,7 @@ class XavierInitializer(Initializer):
"seed": self._seed
},
stop_gradient=True)
-if not framework._in_imperative_mode():
+if not framework._in_dygraph_mode():
var.op = op
return op
@@ -610,7 +610,7 @@ class MSRAInitializer(Initializer):
"seed": self._seed
},
stop_gradient=True)
-if not framework._in_imperative_mode():
+if not framework._in_dygraph_mode():
var.op = op
return op
@@ -709,7 +709,7 @@ class BilinearInitializer(Initializer):
'shape': list(shape),
value_name: values
})
-if not framework._in_imperative_mode():
+if not framework._in_dygraph_mode():
var.op = op
return op
@@ -768,7 +768,7 @@ class NumpyArrayInitializer(Initializer):
value_name: values
},
stop_gradient=True)
-if not framework._in_imperative_mode():
+if not framework._in_dygraph_mode():
var.op = op
return op
...
@@ -17,7 +17,7 @@ from .param_attr import ParamAttr
from .initializer import Constant
from . import layers
from . import backward
-from .imperative import Layer, nn
+from .dygraph import Layer, nn
from . import executor
from . import core
...
@@ -17,7 +17,7 @@ from __future__ import print_function
import copy
import six
-from .framework import Parameter, dtype_is_floating, _in_imperative_mode
+from .framework import Parameter, dtype_is_floating, _in_dygraph_mode
from . import unique_name
from paddle.fluid.initializer import Constant, Xavier
from .param_attr import ParamAttr
@@ -30,9 +30,9 @@ class LayerHelper(LayerHelperBase):
def __init__(self, layer_type, **kwargs):
self.kwargs = kwargs
name = self.kwargs.get('name', None)
-# TODO(panyx0718, minqiyang): imperative mode
+# TODO(panyx0718, minqiyang): dygraph mode
# can not use both `layer_type` and `name`. Deprecate LayerHelper
-# and write a Helper for imperative mode.
+# and write a Helper for dygraph mode.
if name is None:
self.kwargs['name'] = unique_name.generate(layer_type)
...
@@ -17,7 +17,7 @@ from __future__ import print_function
import copy
import numpy as np
-from .framework import Variable, default_main_program, default_startup_program, _in_imperative_mode, _current_expected_place
+from .framework import Variable, default_main_program, default_startup_program, _in_dygraph_mode, _current_expected_place
from . import unique_name
from .param_attr import ParamAttr, WeightNormParamAttr
from . import core
@@ -54,8 +54,8 @@ class LayerHelperBase(object):
Return Variable construct from value
"""
if isinstance(value, np.ndarray):
-assert _in_imperative_mode(
-), "to_variable could only be called in imperative mode"
+assert _in_dygraph_mode(
+), "to_variable could only be called in dygraph mode"
if not block:
block = default_main_program().current_block()
@@ -302,8 +302,8 @@ class LayerHelperBase(object):
param = self._create_weight_normalize(attr, shape, dtype)
WeightNormParamAttr.params_with_weight_norm.append(param)
return param
-if _in_imperative_mode():
+if _in_dygraph_mode():
-# In imperative mode, we want the returned parameter to be
+# In dygraph mode, we want the returned parameter to be
# initialized so that it can be used imperatively.
return self.main_program.global_block().create_parameter(
dtype=dtype,
@@ -370,7 +370,7 @@ class LayerHelperBase(object):
initializer: initializer to use
"""
assert isinstance(var, Variable)
-if _in_imperative_mode():
+if _in_dygraph_mode():
initializer(var, var.block)
else:
self.startup_program.global_block().create_var(
...
@@ -23,8 +23,8 @@ import os
import inspect
from ..layer_helper import LayerHelper
from ..initializer import Normal, Constant, NumpyArrayInitializer
-from ..framework import Variable, OpProtoHolder, _in_imperative_mode
+from ..framework import Variable, OpProtoHolder, _in_dygraph_mode
-from ..imperative import base
+from ..dygraph import base
from ..param_attr import ParamAttr
from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
from .tensor import concat, assign
@@ -32,7 +32,7 @@ from . import utils
from .. import unique_name
from functools import reduce
from .. import core
-from ..imperative import layers
+from ..dygraph import layers
__all__ = [
'fc',
@@ -296,7 +296,6 @@ def fc(input,
data_2 = fluid.layers.data(name="data_2", shape=[24, 36], dtype="float32")
fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh")
"""
helper = LayerHelper("fc", **locals())
dtype = helper.input_dtype()
@@ -3279,6 +3278,8 @@ def layer_norm(input,
>>> dtype='float32')
>>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
"""
+assert _in_dygraph_mode(
+) is not True, "please use FC instead of fc in dygraph mode!"
helper = LayerHelper('layer_norm', **locals())
dtype = helper.input_dtype()
@@ -6405,8 +6406,8 @@ def squeeze(input, axes, name=None):
x = layers.data(name='x', shape=[5, 1, 10])
y = layers.sequeeze(input=x, axes=[1])
"""
-assert not _in_imperative_mode(), (
+assert not _in_dygraph_mode(), (
-"squeeze layer is not supported in imperative mode yet.")
+"squeeze layer is not supported in dygraph mode yet.")
helper = LayerHelper("squeeze", **locals())
out = helper.create_variable_for_type_inference(dtype=input.dtype)
x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
@@ -9144,7 +9145,7 @@ def _elementwise_op(helper):
op_type = helper.layer_type
x = helper.kwargs.get('x', None)
y = helper.kwargs.get('y', None)
-if _in_imperative_mode():
+if _in_dygraph_mode():
x = base.to_variable(x)
y = base.to_variable(y)
...
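The assertion added above (its message refers to fc) blocks the functional layer API inside dygraph mode and points users at the FC layer class instead. A hedged sketch of that substitution, using the fluid.dygraph.nn.FC import that the tests further down rely on:

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph.nn import FC

    with fluid.dygraph.guard():
        fc = FC("fc", 64, act="relu")            # layer object replaces fluid.layers.fc(...)
        x = fluid.dygraph.base.to_variable(np.ones([4, 8], np.float32))
        y = fc(x)                                # parameters are created on the first call
        print(y.shape)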
@@ -20,7 +20,6 @@ from ..framework import convert_np_dtype_to_dtype_
from ..framework import Variable
from ..initializer import Constant, force_init_on_cpu
from ..core import VarDesc
-from ..imperative import base as imperative_base
from .layer_function_generator import templatedoc
import numpy
...
@@ -30,7 +30,6 @@ from .initializer import Constant
from .layer_helper import LayerHelper
from .layers import ops
from .regularizer import append_regularization_ops
-from .imperative import base as imperative_base
from paddle.fluid import core
from paddle.fluid.layers import tensor
from functools import reduce
@@ -169,7 +168,7 @@ class Optimizer(object):
name = self._name + "_" + name
if (name in self._accumulators and
param.name in self._accumulators[name]):
-if framework._in_imperative_mode():
+if framework._in_dygraph_mode():
return self._accumulators[name][param.name]
raise Exception("Accumulator {} already exists for parameter {}".
format(name, param.name))
@@ -396,11 +395,11 @@ class Optimizer(object):
"""
self._dtype = loss.dtype
optimize_ops = []
-if framework._in_imperative_mode():
+if framework._in_dygraph_mode():
if parameter_list is not None:
parameters = parameter_list
else:
-parameters = framework._imperative_tracer().all_parameters()
+parameters = framework._dygraph_tracer().all_parameters()
params_grads = []
for param in parameters:
...
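A hedged sketch of the dygraph branch added to the optimizer above: when no parameter_list is given, parameters are pulled from the installed tracer, and gradients come from an eager loss._backward() as in the MNIST checkpoint test further down; the FC layer and learning rate here are illustrative assumptions, not part of this commit:

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid.dygraph.nn import FC
    from paddle.fluid.optimizer import SGDOptimizer

    with fluid.dygraph.guard():
        fc = FC("fc", 1)
        sgd = SGDOptimizer(learning_rate=1e-3)
        x = fluid.dygraph.base.to_variable(np.ones([4, 8], np.float32))
        loss = fluid.layers.reduce_mean(fc(x))
        loss._backward()          # eager backward pass
        sgd.minimize(loss)        # parameters are collected from the dygraph tracer
        fc.clear_gradients()      # mirrors mnist.clear_gradients() in the test below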
@@ -262,14 +262,14 @@ class OpTest(unittest.TestCase):
if isinstance(value, tuple):
data = value[0]
lod = value[1]
-v = fluid.imperative.base.to_variable(value=data)
+v = fluid.dygraph.base.to_variable(value=data)
v._ivar.value().get_tensor().set_recursive_sequence_lengths(lod)
return v
else:
-return fluid.imperative.base.to_variable(value)
+return fluid.dygraph.base.to_variable(value)
-def _calc_imperative_output(self, place, parallel=False, no_check_set=None):
+def _calc_dygraph_output(self, place, parallel=False, no_check_set=None):
-with fluid.imperative.base.guard(place=place):
+with fluid.dygraph.base.guard(place=place):
block = fluid.default_main_program().global_block()
# prepare input variable
@@ -316,7 +316,7 @@ class OpTest(unittest.TestCase):
return outputs
-def _calc_output(self, place, parallel=False, no_check_set=None):
+def _calc_output(self, place, parallel=False, no_check_set=None, loss=None):
program = Program()
block = program.global_block()
self._append_ops(block)
@@ -329,8 +329,14 @@ class OpTest(unittest.TestCase):
use_cuda = False
if isinstance(place, fluid.CUDAPlace(0)):
use_cuda = True
-executor = fluid.ParallelExecutor(
-use_cuda=use_cuda, loss_name=loss.name, main_program=program)
+if loss:
+    executor = fluid.ParallelExecutor(
+        use_cuda=use_cuda,
+        loss_name=loss.name,
+        main_program=program)
+else:
+    executor = fluid.ParallelExecutor(
+        use_cuda=use_cuda, main_program=program)
else:
executor = Executor(place)
@@ -364,9 +370,9 @@ class OpTest(unittest.TestCase):
atol,
no_check_set=None,
equal_nan=False,
-check_imperative=False):
+check_dygraph=False):
-if check_imperative:
+if check_dygraph:
-imperative_outs = self._calc_imperative_output(
+dygraph_outs = self._calc_dygraph_output(
place, no_check_set=no_check_set)
outs, fetch_list = self._calc_output(place, no_check_set=no_check_set)
@@ -393,8 +399,8 @@ class OpTest(unittest.TestCase):
type(sub_out))
for item in sub_out:
sub_out_name, expect = item[0], item[1]
-if check_imperative:
+if check_dygraph:
-imperative_actual = imperative_outs[sub_out_name][0]
+imperative_actual = dygraph_outs[sub_out_name][0]
imperative_actual_t = np.array(
imperative_actual._ivar.value().get_tensor())
idx = find_actual(sub_out_name, fetch_list)
@@ -407,7 +413,7 @@ class OpTest(unittest.TestCase):
actual_t, expect_t, atol=atol, equal_nan=equal_nan),
"Output (" + sub_out_name + ") has diff at " +
str(place))
-if check_imperative:
+if check_dygraph:
self.assertTrue(
np.allclose(
imperative_actual_t,
@@ -415,21 +421,21 @@ class OpTest(unittest.TestCase):
atol=atol,
equal_nan=equal_nan),
"Output (" + sub_out_name + ") has diff at " +
-str(place) + " in imperative mode")
+str(place) + " in dygraph mode")
if isinstance(expect, tuple):
self.assertListEqual(
actual.recursive_sequence_lengths(), expect[1],
"Output (" + sub_out_name +
") has different lod at " + str(place))
-if check_imperative:
+if check_dygraph:
self.assertListEqual(
imperative_actual._ivar.value().get_tensor()
.recursive_sequence_lengths(), expect[1],
"Output (" + out_name + ") has different lod at " +
-str(place) + " in imperative mode")
+str(place) + " in dygraph mode")
else:
-if check_imperative:
+if check_dygraph:
-imperative_actual = imperative_outs[out_name][0]
+imperative_actual = dygraph_outs[out_name][0]
imperative_actual_t = np.array(
imperative_actual._ivar.value().get_tensor())
idx = find_actual(out_name, fetch_list)
@@ -443,7 +449,7 @@ class OpTest(unittest.TestCase):
"Output (" + out_name + ") has diff at " + str(place) +
"\nExpect " + str(expect_t) + "\n" + "But Got" +
str(actual_t) + " in class " + self.__class__.__name__)
-if check_imperative:
+if check_dygraph:
self.assertTrue(
np.allclose(
imperative_actual_t,
@@ -458,12 +464,12 @@ class OpTest(unittest.TestCase):
self.assertListEqual(actual.recursive_sequence_lengths(),
expect[1], "Output (" + out_name +
") has different lod at " + str(place))
-if check_imperative:
+if check_dygraph:
self.assertListEqual(
imperative_actual._ivar.value().get_tensor()
.recursive_sequence_lengths(), expect[1],
"Output (" + out_name + ") has different lod at " +
-str(place) + " in imperative mode")
+str(place) + " in dygraph mode")
def _get_places(self):
if self.dtype == np.float16:
@@ -490,11 +496,11 @@ class OpTest(unittest.TestCase):
atol=1e-5,
no_check_set=None,
equal_nan=False,
-check_imperative=False):
+check_dygraph=False):
places = self._get_places()
for place in places:
self.check_output_with_place(place, atol, no_check_set, equal_nan,
-check_imperative)
+check_dygraph)
def check_output_customized(self, checker):
places = self._get_places()
...
@@ -18,7 +18,7 @@ import numpy as np
import paddle.fluid as fluid
-class L1(fluid.imperative.Layer):
+class L1(fluid.dygraph.Layer):
def __init__(self, prefix):
super(L1, self).__init__(prefix)
self._param_attr = fluid.ParamAttr(
@@ -32,7 +32,7 @@ class L1(fluid.imperative.Layer):
return self.w1 + self.w2
-class L2(fluid.imperative.Layer):
+class L2(fluid.dygraph.Layer):
def __init__(self, prefix):
super(L2, self).__init__(prefix)
self.layer1 = L1(self.full_name())
@@ -42,7 +42,7 @@ class L2(fluid.imperative.Layer):
return self.layer1() + self.layer2()
-class L3(fluid.imperative.Layer):
+class L3(fluid.dygraph.Layer):
def __init__(self, prefix):
super(L3, self).__init__(prefix)
self.layer1 = L2(self.full_name())
@@ -54,7 +54,7 @@ class L3(fluid.imperative.Layer):
class TestBaseLayer(unittest.TestCase):
def test_one_level(self):
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
l = L1('test_one_level')
ret = l()
self.assertEqual(l.w1.name, "test_one_level/L1_0.w_0")
@@ -62,7 +62,7 @@ class TestBaseLayer(unittest.TestCase):
self.assertTrue(np.allclose(ret._numpy(), 0.2 * np.ones([2, 2])))
def test_three_level(self):
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
l = L3('test_three_level')
names = [p.name for p in l.parameters()]
ret = l()
...
@@ -156,7 +156,7 @@ class TestGRUOp(OpTest):
}
def test_check_output(self):
-self.check_output(atol=1e-8, check_imperative=True)
+self.check_output(atol=1e-8, check_dygraph=True)
def test_check_grad(self):
self.check_grad(['Input', 'H0', 'Weight', 'Bias'], ['Hidden'])
...
@@ -18,11 +18,11 @@ import numpy as np
import paddle.fluid as fluid
from paddle.fluid import core
-from paddle.fluid.imperative.nn import FC
+from paddle.fluid.dygraph.nn import FC
from test_imperative_base import new_program_scope
-class MyLayer(fluid.imperative.Layer):
+class MyLayer(fluid.dygraph.Layer):
def __init__(self, name_scope):
super(MyLayer, self).__init__(name_scope)
@@ -34,7 +34,7 @@ class MyLayer(fluid.imperative.Layer):
return [x]
-class MyPyLayer(fluid.imperative.PyLayer):
+class MyPyLayer(fluid.dygraph.PyLayer):
def __init__(self):
super(MyPyLayer, self).__init__()
@@ -48,7 +48,7 @@ class MyPyLayer(fluid.imperative.PyLayer):
return np.array(dout) * (1 - np.square(np.array(out)))
-class MLP(fluid.imperative.Layer):
+class MLP(fluid.dygraph.Layer):
def __init__(self, name_scope):
super(MLP, self).__init__(name_scope)
self._fc1 = FC(self.full_name(),
@@ -71,7 +71,7 @@ class MLP(fluid.imperative.Layer):
return x
-class SimpleRNNCell(fluid.imperative.Layer):
+class SimpleRNNCell(fluid.dygraph.Layer):
def __init__(self, name_scope, step_input_size, hidden_size, output_size,
param_attr):
super(SimpleRNNCell, self).__init__(name_scope)
@@ -159,7 +159,7 @@ class SimpleRNNCell(fluid.imperative.Layer):
return reduce_out, hidden
-class SimpleRNN(fluid.imperative.Layer):
+class SimpleRNN(fluid.dygraph.Layer):
def __init__(self, name_scope):
super(SimpleRNN, self).__init__(name_scope)
self.seq_len = 4
@@ -194,10 +194,10 @@ class SimpleRNN(fluid.imperative.Layer):
class TestImperative(unittest.TestCase):
def test_sum_op(self):
x = np.ones([2, 2], np.float32)
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
inputs = []
for _ in range(10):
-inputs.append(fluid.imperative.base.to_variable(x))
+inputs.append(fluid.dygraph.base.to_variable(x))
ret = fluid.layers.sums(inputs)
loss = fluid.layers.reduce_sum(ret)
loss._backward()
@@ -205,17 +205,17 @@ class TestImperative(unittest.TestCase):
self.assertTrue(np.allclose(inputs[0]._gradient(), x))
def test_layer(self):
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
cl = core.Layer()
cl.forward([])
-l = fluid.imperative.Layer("l")
+l = fluid.dygraph.Layer("l")
self.assertRaises(NotImplementedError, l.forward, [])
def test_pylayer_func_id(self):
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
-class PyLayer1(fluid.imperative.PyLayer):
+class PyLayer1(fluid.dygraph.PyLayer):
def __init__(self):
super(PyLayer1, self).__init__()
@@ -227,7 +227,7 @@ class TestImperative(unittest.TestCase):
def backward(input):
return input
-class PyLayer2(fluid.imperative.PyLayer):
+class PyLayer2(fluid.dygraph.PyLayer):
def __init__(self):
super(PyLayer2, self).__init__()
@@ -241,21 +241,21 @@ class TestImperative(unittest.TestCase):
py_layer_1 = PyLayer1()
py_layer_2 = PyLayer2()
-py_layer_1(fluid.imperative.base.to_variable(np.ones([2, 2])))
+py_layer_1(fluid.dygraph.base.to_variable(np.ones([2, 2])))
-py_layer_2(fluid.imperative.base.to_variable(np.ones([2, 2])))
+py_layer_2(fluid.dygraph.base.to_variable(np.ones([2, 2])))
id = py_layer_1.forward_id
self.assertGreater(id, 0)
self.assertEqual(py_layer_1.backward_id, id + 1)
self.assertEqual(py_layer_2.forward_id, id + 2)
self.assertEqual(py_layer_2.backward_id, id + 3)
-py_layer_1(fluid.imperative.base.to_variable(np.ones([2, 2])))
+py_layer_1(fluid.dygraph.base.to_variable(np.ones([2, 2])))
self.assertEqual(py_layer_1.forward_id, id)
def test_pylayer(self):
np_inp = np.ones([2, 2], np.float32)
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
my_py_layer = MyPyLayer()
-var_inp = fluid.imperative.base.to_variable(np_inp)
+var_inp = fluid.dygraph.base.to_variable(np_inp)
outs = my_py_layer(var_inp)
dy_out = np.sum(outs[0]._numpy())
outs[0]._backward()
@@ -282,8 +282,8 @@ class TestImperative(unittest.TestCase):
def test_layer_in_out(self):
np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32)
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
-var_inp = fluid.imperative.base.to_variable(np_inp)
+var_inp = fluid.dygraph.base.to_variable(np_inp)
l = MyLayer("my_layer")
x = l(var_inp)[0]
self.assertIsNotNone(x)
@@ -310,8 +310,8 @@ class TestImperative(unittest.TestCase):
def test_mlp(self):
np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
-var_inp = fluid.imperative.base.to_variable(np_inp)
+var_inp = fluid.dygraph.base.to_variable(np_inp)
mlp = MLP("mlp")
out = mlp(var_inp)
dy_out = out._numpy()
@@ -353,8 +353,8 @@ class TestImperative(unittest.TestCase):
[10.0, 11.0, 12.0]])
np_inp = np_inp.reshape((1, 4, 3))
np_inp = np_inp.astype(np.float32)
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
-var_inp = fluid.imperative.base.to_variable(np_inp)
+var_inp = fluid.dygraph.base.to_variable(np_inp)
var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3])
simple_rnn = SimpleRNN("simple_rnn")
outs, pre_hiddens = simple_rnn.forward(var_inp)
...
@@ -18,11 +18,11 @@ import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.optimizer import SGDOptimizer
-from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
+from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC
-from paddle.fluid.imperative.base import to_variable
+from paddle.fluid.dygraph.base import to_variable
-class SimpleImgConvPool(fluid.imperative.Layer):
+class SimpleImgConvPool(fluid.dygraph.Layer):
def __init__(self,
name_scope,
num_channels,
@@ -71,7 +71,7 @@ class SimpleImgConvPool(fluid.imperative.Layer):
return x
-class MNIST(fluid.imperative.Layer):
+class MNIST(fluid.dygraph.Layer):
def __init__(self, name_scope):
super(MNIST, self).__init__(name_scope)
@@ -98,12 +98,12 @@ class MNIST(fluid.imperative.Layer):
return x
-class TestImperativeCheckpoint(unittest.TestCase):
+class TestDygraphCheckpoint(unittest.TestCase):
def save_load_persistables(self):
seed = 90
epoch_num = 1
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
@@ -135,14 +135,14 @@ class TestImperativeCheckpoint(unittest.TestCase):
avg_loss._backward()
sgd.minimize(avg_loss)
-fluid.imperative.save_persistables(mnist, "save_dir")
+fluid.dygraph.save_persistables(mnist, "save_dir")
mnist.clear_gradients()
for param in mnist.parameters():
dy_param_init_value[param.name] = param._numpy()
mnist.load_dict(
-fluid.imperative.load_persistables(mnist, "save_dir"))
+fluid.dygraph.load_persistables(mnist, "save_dir"))
restore = mnist.parameters()
...
@@ -22,7 +22,7 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from test_imperative_base import new_program_scope
-from paddle.fluid.imperative.base import to_variable
+from paddle.fluid.dygraph.base import to_variable
# Can use Amusic dataset as the DeepCF describes.
DATA_PATH = os.environ.get('DATA_PATH', '')
@@ -32,11 +32,11 @@ NUM_BATCHES = int(os.environ.get('NUM_BATCHES', 5))
NUM_EPOCHES = int(os.environ.get('NUM_EPOCHES', 1))
-class DMF(fluid.imperative.Layer):
+class DMF(fluid.dygraph.Layer):
def __init__(self, name_scope):
super(DMF, self).__init__(name_scope)
-self._user_latent = fluid.imperative.FC(self.full_name(), 256)
+self._user_latent = fluid.dygraph.FC(self.full_name(), 256)
-self._item_latent = fluid.imperative.FC(self.full_name(), 256)
+self._item_latent = fluid.dygraph.FC(self.full_name(), 256)
self._user_layers = []
self._item_layers = []
@@ -45,12 +45,12 @@ class DMF(fluid.imperative.Layer):
self._user_layers.append(
self.add_sublayer(
'user_layer_%d' % i,
-fluid.imperative.FC(
+fluid.dygraph.FC(
self.full_name(), self._hid_sizes[i], act='relu')))
self._item_layers.append(
self.add_sublayer(
'item_layer_%d' % i,
-fluid.imperative.FC(
+fluid.dygraph.FC(
self.full_name(), self._hid_sizes[i], act='relu')))
def forward(self, users, items):
@@ -63,18 +63,18 @@ class DMF(fluid.imperative.Layer):
return fluid.layers.elementwise_mul(users, items)
-class MLP(fluid.imperative.Layer):
+class MLP(fluid.dygraph.Layer):
def __init__(self, name_scope):
super(MLP, self).__init__(name_scope)
-self._user_latent = fluid.imperative.FC(self.full_name(), 256)
+self._user_latent = fluid.dygraph.FC(self.full_name(), 256)
-self._item_latent = fluid.imperative.FC(self.full_name(), 256)
+self._item_latent = fluid.dygraph.FC(self.full_name(), 256)
self._match_layers = []
self._hid_sizes = [128, 64]
for i in range(len(self._hid_sizes)):
self._match_layers.append(
self.add_sublayer(
'match_layer_%d' % i,
-fluid.imperative.FC(
+fluid.dygraph.FC(
self.full_name(), self._hid_sizes[i], act='relu')))
self._mat
@@ -88,7 +88,7 @@ class MLP(fluid.imperative.Layer):
return match_vec
-class DeepCF(fluid.imperative.Layer):
+class DeepCF(fluid.dygraph.Layer):
def __init__(self, name_scope, num_users, num_items, matrix):
super(DeepCF, self).__init__(name_scope)
self._num_users = num_users
@@ -103,7 +103,7 @@ class DeepCF(fluid.imperative.Layer):
self._mlp = MLP(self.full_name())
self._dmf = DMF(self.full_name())
-self._match_fc = fluid.imperative.FC(self.full_name(), 1, act='sigmoid')
+self._match_fc = fluid.dygraph.FC(self.full_name(), 1, act='sigmoid')
def forward(self, users, items):
# users_emb = self._user_emb(users)
@@ -191,7 +191,7 @@ def load_data(DATA_PATH):
np.expand_dims(labels_np, -1), num_users, num_items, matrix
-class TestImperativeDeepCF(unittest.TestCase):
+class TestDygraphDeepCF(unittest.TestCase):
def test_deefcf(self):
seed = 90
if DATA_PATH:
@@ -237,7 +237,7 @@ class TestImperativeDeepCF(unittest.TestCase):
fetch_list=[loss])[0]
sys.stderr.write('static loss %s\n' % static_loss)
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
...
@@ -22,12 +22,12 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.optimizer import SGDOptimizer
-from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
+from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC
from test_imperative_base import new_program_scope
-from paddle.fluid.imperative.base import to_variable
+from paddle.fluid.dygraph.base import to_variable
-class Discriminator(fluid.imperative.Layer):
+class Discriminator(fluid.dygraph.Layer):
def __init__(self, name_scope):
super(Discriminator, self).__init__(name_scope)
self._fc1 = FC(self.full_name(), size=32, act='elu')
@@ -38,7 +38,7 @@ class Discriminator(fluid.imperative.Layer):
return self._fc2(x)
-class Generator(fluid.imperative.Layer):
+class Generator(fluid.dygraph.Layer):
def __init__(self, name_scope):
super(Generator, self).__init__(name_scope)
self._fc1 = FC(self.full_name(), size=64, act='elu')
@@ -51,7 +51,7 @@ class Generator(fluid.imperative.Layer):
return self._fc3(x)
-class TestImperativeGAN(unittest.TestCase):
+class TestDygraphGAN(unittest.TestCase):
def test_gan_float32(self):
seed = 90
@@ -130,7 +130,7 @@ class TestImperativeGAN(unittest.TestCase):
scope.find_var(param.name).get_tensor())
dy_params = dict()
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
...
@@ -22,16 +22,16 @@ import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.optimizer import AdamOptimizer
-from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
+from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC
from test_imperative_base import new_program_scope
-from paddle.fluid.imperative.base import to_variable
+from paddle.fluid.dygraph.base import to_variable
def gen_data():
pass
-class GraphConv(fluid.imperative.Layer):
+class GraphConv(fluid.dygraph.Layer):
def __init__(self, name_scope, in_features, out_features):
super(GraphConv, self).__init__(name_scope)
@@ -50,7 +50,7 @@ class GraphConv(fluid.imperative.Layer):
return fluid.layers.matmul(adj, support) + self.bias
-class GCN(fluid.imperative.Layer):
+class GCN(fluid.dygraph.Layer):
def __init__(self, name_scope, num_hidden):
super(GCN, self).__init__(name_scope)
self.gc = GraphConv(self.full_name(), num_hidden, 32)
@@ -61,7 +61,7 @@ class GCN(fluid.imperative.Layer):
return self.gc2(x, adj)
-class TestImperativeGNN(unittest.TestCase):
+class TestDygraphGNN(unittest.TestCase):
def test_gnn_float32(self):
seed = 90
@@ -115,7 +115,7 @@ class TestImperativeGNN(unittest.TestCase):
static_weight = np.array(
scope.find_var(model.gc.weight.name).get_tensor())
-with fluid.imperative.guard():
+with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
...
...@@ -23,12 +23,12 @@ import paddle ...@@ -23,12 +23,12 @@ import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.optimizer import SGDOptimizer from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC
from paddle.fluid.imperative.base import to_variable from paddle.fluid.dygraph.base import to_variable
from test_imperative_base import new_program_scope from test_imperative_base import new_program_scope
class SimpleImgConvPool(fluid.imperative.Layer): class SimpleImgConvPool(fluid.dygraph.Layer):
def __init__(self, def __init__(self,
name_scope, name_scope,
num_channels, num_channels,
...@@ -77,7 +77,7 @@ class SimpleImgConvPool(fluid.imperative.Layer): ...@@ -77,7 +77,7 @@ class SimpleImgConvPool(fluid.imperative.Layer):
return x return x
class MNIST(fluid.imperative.Layer): class MNIST(fluid.dygraph.Layer):
def __init__(self, name_scope): def __init__(self, name_scope):
super(MNIST, self).__init__(name_scope) super(MNIST, self).__init__(name_scope)
...@@ -104,11 +104,11 @@ class MNIST(fluid.imperative.Layer): ...@@ -104,11 +104,11 @@ class MNIST(fluid.imperative.Layer):
return x return x
class TestImperativeMnist(unittest.TestCase): class TestDygraphMnist(unittest.TestCase):
def test_mnist_float32(self): def test_mnist_float32(self):
seed = 90 seed = 90
epoch_num = 1 epoch_num = 1
with fluid.imperative.guard(): with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed fluid.default_main_program().random_seed = seed
......
...@@ -16,17 +16,17 @@ from __future__ import print_function ...@@ -16,17 +16,17 @@ from __future__ import print_function
import unittest import unittest
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.imperative.nn import Embedding from paddle.fluid.dygraph.nn import Embedding
import paddle.fluid.framework as framework import paddle.fluid.framework as framework
from paddle.fluid.optimizer import SGDOptimizer from paddle.fluid.optimizer import SGDOptimizer
from paddle.fluid.imperative.base import to_variable from paddle.fluid.dygraph.base import to_variable
from test_imperative_base import new_program_scope from test_imperative_base import new_program_scope
import numpy as np import numpy as np
import six import six
from paddle.fluid.backward import append_backward from paddle.fluid.backward import append_backward
class SimpleLSTMRNN(fluid.imperative.Layer): class SimpleLSTMRNN(fluid.dygraph.Layer):
def __init__(self, def __init__(self,
name_scope, name_scope,
hidden_size, hidden_size,
...@@ -131,7 +131,7 @@ class SimpleLSTMRNN(fluid.imperative.Layer): ...@@ -131,7 +131,7 @@ class SimpleLSTMRNN(fluid.imperative.Layer):
return real_res, last_hidden, last_cell return real_res, last_hidden, last_cell
class PtbModel(fluid.imperative.Layer): class PtbModel(fluid.dygraph.Layer):
def __init__(self, def __init__(self,
name_scope, name_scope,
hidden_size, hidden_size,
...@@ -214,7 +214,7 @@ class PtbModel(fluid.imperative.Layer): ...@@ -214,7 +214,7 @@ class PtbModel(fluid.imperative.Layer):
return loss, last_hidden, last_cell return loss, last_hidden, last_cell
class TestImperativePtbRnn(unittest.TestCase): class TestDygraphPtbRnn(unittest.TestCase):
def test_ptb_rnn_cpu_float32(self): def test_ptb_rnn_cpu_float32(self):
seed = 90 seed = 90
hidden_size = 10 hidden_size = 10
...@@ -224,7 +224,7 @@ class TestImperativePtbRnn(unittest.TestCase): ...@@ -224,7 +224,7 @@ class TestImperativePtbRnn(unittest.TestCase):
init_scale = 0.1 init_scale = 0.1
batch_size = 4 batch_size = 4
with fluid.imperative.guard(): with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed fluid.default_main_program().random_seed = seed
# TODO: marsyang1993 Change seed to # TODO: marsyang1993 Change seed to
......
...@@ -21,8 +21,8 @@ import paddle ...@@ -21,8 +21,8 @@ import paddle
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid import core from paddle.fluid import core
from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.imperative.nn import Conv2D, Pool2D, BatchNorm, FC from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, FC
from paddle.fluid.imperative.base import to_variable from paddle.fluid.dygraph.base import to_variable
from test_imperative_base import new_program_scope from test_imperative_base import new_program_scope
batch_size = 8 batch_size = 8
...@@ -57,7 +57,7 @@ def optimizer_setting(params): ...@@ -57,7 +57,7 @@ def optimizer_setting(params):
lr = [] lr = []
lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
optimizer = fluid.optimizer.SGD(learning_rate=0.01) optimizer = fluid.optimizer.SGD(learning_rate=0.01)
# TODO(minqiyang): Add learning rate scheduler support to imperative mode # TODO(minqiyang): Add learning rate scheduler support to dygraph mode
# optimizer = fluid.optimizer.Momentum( # optimizer = fluid.optimizer.Momentum(
# learning_rate=params["lr"], # learning_rate=params["lr"],
# learning_rate=fluid.layers.piecewise_decay( # learning_rate=fluid.layers.piecewise_decay(
...@@ -68,7 +68,7 @@ def optimizer_setting(params): ...@@ -68,7 +68,7 @@ def optimizer_setting(params):
return optimizer return optimizer
class ConvBNLayer(fluid.imperative.Layer): class ConvBNLayer(fluid.dygraph.Layer):
def __init__(self, def __init__(self,
name_scope, name_scope,
num_channels, num_channels,
...@@ -99,7 +99,7 @@ class ConvBNLayer(fluid.imperative.Layer): ...@@ -99,7 +99,7 @@ class ConvBNLayer(fluid.imperative.Layer):
return y return y
class BottleneckBlock(fluid.imperative.Layer): class BottleneckBlock(fluid.dygraph.Layer):
def __init__(self, def __init__(self,
name_scope, name_scope,
num_channels, num_channels,
...@@ -156,7 +156,7 @@ class BottleneckBlock(fluid.imperative.Layer): ...@@ -156,7 +156,7 @@ class BottleneckBlock(fluid.imperative.Layer):
return layer_helper.append_activation(y) return layer_helper.append_activation(y)
class ResNet(fluid.imperative.Layer): class ResNet(fluid.dygraph.Layer):
def __init__(self, name_scope, layers=50, class_dim=102): def __init__(self, name_scope, layers=50, class_dim=102):
super(ResNet, self).__init__(name_scope) super(ResNet, self).__init__(name_scope)
...@@ -226,13 +226,13 @@ class ResNet(fluid.imperative.Layer): ...@@ -226,13 +226,13 @@ class ResNet(fluid.imperative.Layer):
return y return y
class TestImperativeResnet(unittest.TestCase): class TestDygraphResnet(unittest.TestCase):
def test_resnet_float32(self): def test_resnet_float32(self):
seed = 90 seed = 90
batch_size = train_parameters["batch_size"] batch_size = train_parameters["batch_size"]
batch_num = 20 batch_num = 20
with fluid.imperative.guard(): with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed fluid.default_main_program().random_seed = seed
......
...@@ -16,7 +16,7 @@ from __future__ import print_function ...@@ -16,7 +16,7 @@ from __future__ import print_function
import unittest import unittest
import paddle.fluid as fluid import paddle.fluid as fluid
from paddle.fluid.imperative import Embedding, LayerNorm, FC, to_variable, Layer, guard from paddle.fluid.dygraph import Embedding, LayerNorm, FC, to_variable, Layer, guard
from test_imperative_base import new_program_scope from test_imperative_base import new_program_scope
from paddle.fluid import core from paddle.fluid import core
import numpy as np import numpy as np
...@@ -623,7 +623,7 @@ class PrepareEncoderDecoderLayer(Layer): ...@@ -623,7 +623,7 @@ class PrepareEncoderDecoderLayer(Layer):
initializer=fluid.initializer.NumpyArrayInitializer(pos_inp), initializer=fluid.initializer.NumpyArrayInitializer(pos_inp),
trainable=False)) trainable=False))
# use in imperative_mode to fit different length batch # use in dygraph_mode to fit different length batch
# self._pos_emb._w = to_variable( # self._pos_emb._w = to_variable(
# position_encoding_init(self._src_max_len, self._src_emb_dim)) # position_encoding_init(self._src_max_len, self._src_emb_dim))
...@@ -946,7 +946,7 @@ class TransFormer(Layer): ...@@ -946,7 +946,7 @@ class TransFormer(Layer):
return sum_cost, avg_cost, predict, token_num return sum_cost, avg_cost, predict, token_num
class TestImperativeTransformer(unittest.TestCase): class TestDygraphTransformer(unittest.TestCase):
def test_transformer_float32(self): def test_transformer_float32(self):
seed = 90 seed = 90
with guard(): with guard():
......
...@@ -29,8 +29,8 @@ from paddle.fluid import core ...@@ -29,8 +29,8 @@ from paddle.fluid import core
from paddle.fluid.initializer import Constant from paddle.fluid.initializer import Constant
import paddle.fluid.layers as layers import paddle.fluid.layers as layers
from test_imperative_base import new_program_scope from test_imperative_base import new_program_scope
from paddle.fluid.imperative import nn from paddle.fluid.dygraph import nn
from paddle.fluid.imperative import base from paddle.fluid.dygraph import base
class LayerTest(unittest.TestCase): class LayerTest(unittest.TestCase):
...@@ -68,7 +68,7 @@ class LayerTest(unittest.TestCase): ...@@ -68,7 +68,7 @@ class LayerTest(unittest.TestCase):
@contextlib.contextmanager @contextlib.contextmanager
def dynamic_graph(self, force_to_use_cpu=False): def dynamic_graph(self, force_to_use_cpu=False):
with fluid.imperative.guard( with fluid.dygraph.guard(
self._get_place(force_to_use_cpu=force_to_use_cpu)): self._get_place(force_to_use_cpu=force_to_use_cpu)):
fluid.default_startup_program().random_seed = self.seed fluid.default_startup_program().random_seed = self.seed
fluid.default_main_program().random_seed = self.seed fluid.default_main_program().random_seed = self.seed
......
...@@ -19,7 +19,6 @@ from paddle.fluid.framework import default_main_program, Program, convert_np_dty ...@@ -19,7 +19,6 @@ from paddle.fluid.framework import default_main_program, Program, convert_np_dty
import paddle.fluid as fluid import paddle.fluid as fluid
import paddle.fluid.core as core import paddle.fluid.core as core
import numpy as np import numpy as np
from test_imperative_base import new_program_scope
class TestVariable(unittest.TestCase): class TestVariable(unittest.TestCase):
...@@ -153,7 +152,7 @@ class TestVariableImperative(unittest.TestCase): ...@@ -153,7 +152,7 @@ class TestVariableImperative(unittest.TestCase):
self.assertEqual([1, 1, 100], nw.shape) self.assertEqual([1, 1, 100], nw.shape)
def test_slice(self): def test_slice(self):
with fluid.imperative.guard(): with fluid.dygraph.guard():
self._test_slice() self._test_slice()
......
...@@ -102,7 +102,7 @@ packages=['paddle', ...@@ -102,7 +102,7 @@ packages=['paddle',
'paddle.reader', 'paddle.reader',
'paddle.distributed', 'paddle.distributed',
'paddle.fluid', 'paddle.fluid',
'paddle.fluid.imperative', 'paddle.fluid.dygraph',
'paddle.fluid.proto', 'paddle.fluid.proto',
'paddle.fluid.proto.profiler', 'paddle.fluid.proto.profiler',
'paddle.fluid.distributed', 'paddle.fluid.distributed',
......
...@@ -28,7 +28,7 @@ import hashlib ...@@ -28,7 +28,7 @@ import hashlib
member_dict = collections.OrderedDict() member_dict = collections.OrderedDict()
experimental_namespace = {"paddle.fluid.imperative"} experimental_namespace = {"paddle.fluid.dygraph"}
def md5(doc): def md5(doc):
......
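Taken together, these hunks mechanically rename the user-facing namespace from `fluid.imperative` to `fluid.dygraph`: the guard context manager, `to_variable`, the `Layer` base class, and the layers under `paddle.fluid.dygraph.nn` (e.g. `Conv2D`, `Pool2D`, `FC`). Below is a minimal sketch of what user code looks like after this rename, assuming only the post-rename API shown in the diffs above; the `SimpleNet` class, its sizes, and the input shapes are illustrative and not part of this PR.

```python
import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph.nn import FC      # previously paddle.fluid.imperative.nn
from paddle.fluid.dygraph.base import to_variable


class SimpleNet(fluid.dygraph.Layer):       # previously fluid.imperative.Layer
    def __init__(self, name_scope):
        super(SimpleNet, self).__init__(name_scope)
        # FC keeps the same (name_scope, size, act) signature, only the module path changed.
        self._fc = FC(self.full_name(), size=10)

    def forward(self, x):
        return self._fc(x)


# fluid.dygraph.guard() replaces fluid.imperative.guard(); to_variable may
# only be called inside this guard (i.e. in dygraph mode).
with fluid.dygraph.guard():
    data = to_variable(np.random.rand(4, 8).astype('float32'))
    net = SimpleNet("simple_net")
    out = net(data)
```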