diff --git a/python/paddle/fluid/__init__.py b/python/paddle/fluid/__init__.py
index 18f01ca1374a24cec3bf882d347596dd38f4fd21..24c8a6934fe355b2de388e7f90b6e40d4871f0d8 100644
--- a/python/paddle/fluid/__init__.py
+++ b/python/paddle/fluid/__init__.py
@@ -34,7 +34,7 @@ from . import io
 from . import evaluator
 from . import initializer
 from . import layers
-from . import imperative
+from . import dygraph
 from . import contrib
 from . import nets
 from . import optimizer
@@ -71,7 +71,7 @@ __all__ = framework.__all__ + executor.__all__ + \
     'initializer',
     'layers',
     'contrib',
-    'imperative',
+    'dygraph',
     'transpiler',
     'nets',
     'optimizer',
diff --git a/python/paddle/fluid/imperative/__init__.py b/python/paddle/fluid/dygraph/__init__.py
similarity index 100%
rename from python/paddle/fluid/imperative/__init__.py
rename to python/paddle/fluid/dygraph/__init__.py
diff --git a/python/paddle/fluid/imperative/base.py b/python/paddle/fluid/dygraph/base.py
similarity index 88%
rename from python/paddle/fluid/imperative/base.py
rename to python/paddle/fluid/dygraph/base.py
index 097cd2be35b01aced30486b874f202381c4d9962..d55dbbb9c72cb887e169849c3a3e32a13c202a7b 100644
--- a/python/paddle/fluid/imperative/base.py
+++ b/python/paddle/fluid/dygraph/base.py
@@ -22,7 +22,7 @@ __all__ = ['enabled', 'guard', 'to_variable']
 
 
 def enabled():
-    return framework._in_imperative_mode()
+    return framework._in_dygraph_mode()
 
 
 @signature_safe_contextmanager
@@ -39,14 +39,14 @@ def guard(place=None):
 
     with framework.program_guard(train, startup):
         with framework.unique_name.guard():
-            with framework._imperative_guard(tracer):
-                with framework._imperative_place_guard(place):
+            with framework._dygraph_guard(tracer):
+                with framework._dygraph_place_guard(place):
                     yield
 
 
 def to_variable(value, block=None, name=None):
     if isinstance(value, np.ndarray):
-        assert enabled(), "to_variable could only be called in imperative mode"
+        assert enabled(), "to_variable could only be called in dygraph mode"
 
         if not block:
            block = framework.default_main_program().current_block()
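
After this rename the public entry points are `fluid.dygraph.guard()` and `fluid.dygraph.to_variable()`. A minimal sketch of the new spelling (the tiny computation is illustrative only):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    x = np.ones([2, 2], np.float32)
    with fluid.dygraph.guard():               # was fluid.imperative.guard()
        var = fluid.dygraph.to_variable(x)    # eager numpy -> Variable
        loss = fluid.layers.reduce_sum(var)
        loss._backward()                      # as exercised in the tests below
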
diff --git a/python/paddle/fluid/imperative/checkpoint.py b/python/paddle/fluid/dygraph/checkpoint.py
similarity index 93%
rename from python/paddle/fluid/imperative/checkpoint.py
rename to python/paddle/fluid/dygraph/checkpoint.py
index 37c43f29d2ae9214058238e4f834dbbcd9e42df1..f992ae0576c81ed98a3e9f7a446b0c2e808622ea 100644
--- a/python/paddle/fluid/imperative/checkpoint.py
+++ b/python/paddle/fluid/dygraph/checkpoint.py
@@ -68,7 +68,7 @@ def save_persistables(vardict, dirname, filename=None):
             dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden, init_cell)
             param_path = "./my_paddle_model"
-            fluid.imperative.checkpoint.save_persistables(ptb_model.state_dict(), dirname=param_path,
+            fluid.dygraph.save_persistables(ptb_model.state_dict(), dirname=param_path,
                                        layer=ptb_model)
     """
     if isinstance(vardict, collections.OrderedDict):
@@ -97,17 +97,17 @@ def load_persistables(vardict, dirname, filename=None):
     Examples:
         .. code-block:: python
 
-            my_layer = layer(fluid.imperative.Layer)
+            my_layer = layer(fluid.dygraph.Layer)
             param_path = "./my_paddle_model"
-            param_dict = fluid.imperative.checkpoint.load_persistables(my_layer.parameters(), param_path)
+            param_dict = fluid.dygraph.load_persistables(my_layer.parameters(), param_path)
             param_1 = param_dict['PtbModel_0.w_1']
 
         or:
-            my_layer = layer(fluid.imperative.Layer)
+            my_layer = layer(fluid.dygraph.Layer)
             param_path = "./my_paddle_model"
             filename = "model.file"
-            param_dict = fluid.imperative.checkpoint.load_persistables(my_layer.state_dict(), param_path,
+            param_dict = fluid.dygraph.load_persistables(my_layer.state_dict(), param_path,
                                                                        filename=filename)
             param_1 = param_dict['PtbModel_0.w_1']
diff --git a/python/paddle/fluid/imperative/layer_object_helper.py b/python/paddle/fluid/dygraph/layer_object_helper.py
similarity index 99%
rename from python/paddle/fluid/imperative/layer_object_helper.py
rename to python/paddle/fluid/dygraph/layer_object_helper.py
index 3d4426e8cdfe79a6fa2d6452e7cb3ab0a458c0bc..c56652e103ce93bf5459b30b66c7b1f04e7c14d0 100644
--- a/python/paddle/fluid/imperative/layer_object_helper.py
+++ b/python/paddle/fluid/dygraph/layer_object_helper.py
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import copy
 import six
-from ..framework import Parameter, _in_imperative_mode
+from ..framework import Parameter, _in_dygraph_mode
 from ..param_attr import ParamAttr
 from .. import core
 from six.moves import zip
diff --git a/python/paddle/fluid/imperative/layers.py b/python/paddle/fluid/dygraph/layers.py
similarity index 99%
rename from python/paddle/fluid/imperative/layers.py
rename to python/paddle/fluid/dygraph/layers.py
index e64667f7f467d0d0a3c07d14ce22c3f231e82eb6..014ee41f4c5aa280fb5b366d8f1704290cc067d4 100644
--- a/python/paddle/fluid/imperative/layers.py
+++ b/python/paddle/fluid/dygraph/layers.py
@@ -283,7 +283,7 @@ class PyLayer(core.PyLayer):
 
     @classmethod
     def __call__(cls, *inputs):
-        tracer = framework._imperative_tracer()
+        tracer = framework._dygraph_tracer()
         block = framework.default_main_program().current_block()
         ivar_inputs = [x._ivar for x in inputs]
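
The checkpoint helpers are now reachable directly as `fluid.dygraph.save_persistables` and `fluid.dygraph.load_persistables`, as the updated docstrings above show. A round-trip sketch (assuming `MNIST` is any `fluid.dygraph.Layer` subclass, like the one in the checkpoint test further down):

.. code-block:: python

    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        mnist = MNIST("mnist")  # any fluid.dygraph.Layer subclass
        # ... train a few steps ...
        fluid.dygraph.save_persistables(mnist, "save_dir")
        mnist.load_dict(fluid.dygraph.load_persistables(mnist, "save_dir"))
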
diff --git a/python/paddle/fluid/imperative/nn.py b/python/paddle/fluid/dygraph/nn.py
similarity index 99%
rename from python/paddle/fluid/imperative/nn.py
rename to python/paddle/fluid/dygraph/nn.py
index 9856276b20b7affb548847d359463451bb238518..8925381119272d7462562c0952d3e157f78f25af 100644
--- a/python/paddle/fluid/imperative/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -133,7 +133,7 @@ class Conv2D(layers.Layer):
             outputs={'Out': [pre_act]},
             attrs={'axis': 1})
 
-        # Currently, we don't support inplace in imperative mode
+        # Currently, we don't support inplace in dygraph mode
         return self._helper.append_activation(pre_act, act=self._act)
 
 
@@ -265,7 +265,7 @@ class FC(layers.Layer):
                 attrs={'axis': self._num_flatten_dims})
         else:
             pre_activation = pre_bias
-        # Currently, we don't support inplace in imperative mode
+        # Currently, we don't support inplace in dygraph mode
         return self._helper.append_activation(pre_activation, act=self._act)
 
 
@@ -387,7 +387,7 @@ class BatchNorm(layers.Layer):
                 "use_global_stats": self._use_global_stats
             })
 
-        # Currently, we don't support inplace in imperative mode
+        # Currently, we don't support inplace in dygraph mode
        return self._helper.append_activation(batch_norm_out, self._act)
 
 
@@ -426,7 +426,7 @@ class Embedding(layers.Layer):
           dict_size = len(dataset.ids)
           input = fluid.layers.data(name='ids', shape=[32, 32], dtype='float32')
-          embedding = fluid.imperative.Embedding(size=[dict_size, 16])
+          embedding = fluid.dygraph.Embedding(size=[dict_size, 16])
           fc = embedding(input)
     """
diff --git a/python/paddle/fluid/imperative/profiler.py b/python/paddle/fluid/dygraph/profiler.py
similarity index 100%
rename from python/paddle/fluid/imperative/profiler.py
rename to python/paddle/fluid/dygraph/profiler.py
diff --git a/python/paddle/fluid/imperative/tracer.py b/python/paddle/fluid/dygraph/tracer.py
similarity index 95%
rename from python/paddle/fluid/imperative/tracer.py
rename to python/paddle/fluid/dygraph/tracer.py
index 28c8586813410f7349da7943a966eaa9cc3816d2..94e212b139b2b375aa9f5252d396e90235ba33c1 100644
--- a/python/paddle/fluid/imperative/tracer.py
+++ b/python/paddle/fluid/dygraph/tracer.py
@@ -24,12 +24,12 @@ __all__ = ['Tracer']
 
 
 def release_op(op):
-    del framework._imperative_tracer()._ops[op._trace_id]
+    del framework._dygraph_tracer()._ops[op._trace_id]
 
 
 class Tracer(core.Tracer):
     """
-    Python wrapper of imperative tracer
+    Python wrapper of dygraph tracer
     """
 
     def __init__(self, block):
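
The layer classes keep their names and only move from `fluid.imperative.*` to `fluid.dygraph.*`. A user-defined layer under the new paths (a sketch mirroring the test layers below):

.. code-block:: python

    import paddle.fluid as fluid
    from paddle.fluid.dygraph.nn import FC  # was paddle.fluid.imperative.nn

    class MyLayer(fluid.dygraph.Layer):     # was fluid.imperative.Layer
        def __init__(self, name_scope):
            super(MyLayer, self).__init__(name_scope)
            self._fc = FC(self.full_name(), 4)

        def forward(self, inputs):
            return self._fc(inputs)
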
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py
index be8008cabd08de0cf957d93ca71ac38fc592af79..a49fafa97da45adc25ba7de6d2e5ff19f1a87fc4 100644
--- a/python/paddle/fluid/framework.py
+++ b/python/paddle/fluid/framework.py
@@ -75,20 +75,20 @@ GRAD_VAR_SUFFIX = core.kGradVarSuffix()
 ZERO_VAR_SUFFIX = core.kZeroVarSuffix()
 CONTROL_DEP_VAR_PREFIX = core.kControlDepVarName()
 
-_imperative_tracer_ = None
-_imperative_current_expected_place_ = None
+_dygraph_tracer_ = None
+_dygraph_current_expected_place_ = None
 
 
-def _in_imperative_mode():
-    return _imperative_tracer_ is not None
+def _in_dygraph_mode():
+    return _dygraph_tracer_ is not None
 
 
-def _imperative_tracer():
-    return _imperative_tracer_
+def _dygraph_tracer():
+    return _dygraph_tracer_
 
 
 def _current_expected_place():
-    return _imperative_current_expected_place_
+    return _dygraph_current_expected_place_
 
 
 def _cpu_num():
@@ -396,7 +396,7 @@ class Variable(object):
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)
 
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             # record vars in tracer rather than blocks
             self._ivar = kwargs.get("ivar", None)
             if not self._ivar:
@@ -406,7 +406,7 @@
                     _current_expected_place(), stop_gradient, True
                     if persistable else False)
             if persistable:
-                _imperative_tracer().trace_var(name, self)
+                _dygraph_tracer().trace_var(name, self)
         else:
             self.error_clip = error_clip
 
@@ -515,8 +515,8 @@
         Returns:
             str: The debug string.
         """
-        if _in_imperative_mode():
-            # TODO(panyx0718): add more imperative debug info.
+        if _in_dygraph_mode():
+            # TODO(panyx0718): add more dygraph debug info.
             return 'name %s, dtype: %s shape: %s' % (self.name, self.dtype,
                                                      self.shape)
 
@@ -548,42 +548,42 @@
     @property
     def _stop_gradient(self):
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             return self._ivar.stop_gradient
         else:
             return self.stop_gradient
 
     @_stop_gradient.setter
     def _stop_gradient(self, s):
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             self._ivar.stop_gradient = s
         else:
             self.stop_gradient = s
 
     @property
     def persistable(self):
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             return self._ivar.persistable
         else:
             return self.desc.persistable()
 
     @persistable.setter
     def persistable(self, p):
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             return self._ivar.persistable
         else:
             self.desc.set_persistable(p)
 
     @property
     def name(self):
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             return self._ivar.name
         else:
             return cpt.to_text(self.desc.name())
 
     @name.setter
     def name(self, new_name):
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             self._ivar.name = new_name
         else:
             self.desc.set_name(new_name)
@@ -591,26 +591,26 @@
     @property
     def shape(self):
         # convert to tuple, make it as same as numpy API.
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             return self._ivar.shape
         else:
             return tuple(self.desc.shape())
 
     @property
     def dtype(self):
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             return self._ivar.dtype
         else:
             return self.desc.dtype()
 
     @property
     def lod_level(self):
-        # TODO(minqiyang): Support lod_level in imperative mode
+        # TODO(minqiyang): Support lod_level in dygraph mode
         return self.desc.lod_level()
 
     @property
     def type(self):
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             return self._ivar.dtype
         else:
             return self.desc.type()
@@ -918,7 +918,7 @@ class Operator(object):
                  inputs=None,
                  outputs=None,
                  attrs=None):
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             if type is None:
                 raise ValueError(
                     "`type` to initialized an Operator can not be None.")
@@ -1037,7 +1037,7 @@
                 for arg in out_args:
                     out_arg_names.append(cpt.to_text(arg.name))
                     # TODO(minqiyang): could we remove variable's op in static mode?
-                    if not _in_imperative_mode():
+                    if not _in_dygraph_mode():
                         arg.op = self
                 self.desc.set_output(out_proto.name, out_arg_names)
 
@@ -1083,7 +1083,7 @@
     @property
     def type(self):
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             return self.iop.type
         else:
             return self.desc.type()
@@ -1626,7 +1626,7 @@
         Returns:
             Operator: the append Operator.
         """
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             op = Operator(
                 block=self,
                 desc=None,
@@ -1638,9 +1638,8 @@
             # record ops in tracer rather than blocks
             #
             # TODO(minqiyang): add op stop_gradient support in static mode too.
-            # currently, we only support stop_gradient in imperative mode.
-            _imperative_tracer().trace_op(op,
-                                          kwargs.get("stop_gradient", False))
+            # currently, we only support stop_gradient in dygraph mode.
+            _dygraph_tracer().trace_op(op, kwargs.get("stop_gradient", False))
         else:
             op_desc = self.desc.append_op()
             op = Operator(
@@ -1699,7 +1698,7 @@
         return self.ops[start:end]
 
     def _prepend_op(self, *args, **kwargs):
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             op = Operator(
                 self,
                 None,
@@ -1707,8 +1706,7 @@
                 inputs=kwargs.get("inputs", None),
                 outputs=kwargs.get("outputs", None),
                 attrs=kwargs.get("attrs", None))
-            _imperative_tracer().trace_op(op,
-                                          kwargs.get("stop_gradient", False))
+            _dygraph_tracer().trace_op(op, kwargs.get("stop_gradient", False))
         else:
             op_desc = self.desc._prepend_op()
             op = Operator(
@@ -3511,22 +3509,22 @@ def _get_var(name, program=None):
 
 
 @signature_safe_contextmanager
-def _imperative_guard(tracer):
-    global _imperative_tracer_
-    tmp_trace = _imperative_tracer_
-    _imperative_tracer_ = tracer
+def _dygraph_guard(tracer):
+    global _dygraph_tracer_
+    tmp_trace = _dygraph_tracer_
+    _dygraph_tracer_ = tracer
 
     yield
 
-    _imperative_tracer_ = tmp_trace
+    _dygraph_tracer_ = tmp_trace
 
 
 @signature_safe_contextmanager
-def _imperative_place_guard(place):
-    global _imperative_current_expected_place_
-    tmp_place = _imperative_current_expected_place_
-    _imperative_current_expected_place_ = place
+def _dygraph_place_guard(place):
+    global _dygraph_current_expected_place_
+    tmp_place = _dygraph_current_expected_place_
+    _dygraph_current_expected_place_ = place
 
     yield
 
-    _imperative_current_expected_place_ = tmp_place
+    _dygraph_current_expected_place_ = tmp_place
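
The mode switch itself is unchanged by the rename: `_in_dygraph_mode()` simply means "a tracer is installed", and the two guards save and restore module-level globals. The same save-swap-restore pattern in isolation (a sketch with illustrative names, not Paddle's API):

.. code-block:: python

    from contextlib import contextmanager

    _tracer = None  # stands in for framework._dygraph_tracer_

    def in_dygraph_mode():
        return _tracer is not None

    @contextmanager
    def tracer_guard(tracer):
        # save, swap, and restore the global, as _dygraph_guard() does
        global _tracer
        tmp, _tracer = _tracer, tracer
        yield
        _tracer = tmp
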
diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py
index 8358bb1aba98d8f5699cbda27e657ba6c470d333..6aff93dceaf5cfd299bdc9f68246ed579f248f3c 100644
--- a/python/paddle/fluid/initializer.py
+++ b/python/paddle/fluid/initializer.py
@@ -165,7 +165,7 @@ class ConstantInitializer(Initializer):
                 'force_cpu': self._force_cpu or force_init_on_cpu()
             },
             stop_gradient=True)
-        if not framework._in_imperative_mode():
+        if not framework._in_dygraph_mode():
             var.op = op
         return op
 
@@ -245,7 +245,7 @@ class UniformInitializer(Initializer):
                 attrs={"in_dtype": out_var.dtype,
                        "out_dtype": var.dtype})
 
-        if not framework._in_imperative_mode():
+        if not framework._in_dygraph_mode():
             var.op = op
         return op
 
@@ -324,7 +324,7 @@ class NormalInitializer(Initializer):
                 outputs={"Out": var},
                 attrs={"in_dtype": out_var.dtype,
                        "out_dtype": var.dtype})
-        if not framework._in_imperative_mode():
+        if not framework._in_dygraph_mode():
             var.op = op
         return op
 
@@ -403,7 +403,7 @@ class TruncatedNormalInitializer(Initializer):
                 outputs={"Out": var},
                 attrs={"in_dtype": out_var.dtype,
                        "out_dtype": var.dtype})
-        if not framework._in_imperative_mode():
+        if not framework._in_dygraph_mode():
             var.op = op
         return op
 
@@ -509,7 +509,7 @@ class XavierInitializer(Initializer):
                     "seed": self._seed
                 },
                 stop_gradient=True)
-        if not framework._in_imperative_mode():
+        if not framework._in_dygraph_mode():
             var.op = op
         return op
 
@@ -610,7 +610,7 @@ class MSRAInitializer(Initializer):
                     "seed": self._seed
                 },
                 stop_gradient=True)
-        if not framework._in_imperative_mode():
+        if not framework._in_dygraph_mode():
             var.op = op
         return op
 
@@ -709,7 +709,7 @@ class BilinearInitializer(Initializer):
                 'shape': list(shape),
                 value_name: values
             })
-        if not framework._in_imperative_mode():
+        if not framework._in_dygraph_mode():
             var.op = op
         return op
 
@@ -768,7 +768,7 @@ class NumpyArrayInitializer(Initializer):
                 value_name: values
             },
             stop_gradient=True)
-        if not framework._in_imperative_mode():
+        if not framework._in_dygraph_mode():
             var.op = op
         return op
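
In dygraph mode the initializer op runs eagerly and the created parameter no longer records `var.op`, which is why every initializer above now guards that assignment. Parameters are still configured the same way, e.g. (a sketch using the FC layer exercised in the tests):

.. code-block:: python

    import paddle.fluid as fluid
    from paddle.fluid.dygraph.nn import FC

    with fluid.dygraph.guard():
        fc = FC("fc", 4,
                param_attr=fluid.ParamAttr(
                    initializer=fluid.initializer.Constant(value=0.1)))
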
diff --git a/python/paddle/fluid/install_check.py b/python/paddle/fluid/install_check.py
index 3569a8bc357daf9408e8ae3eb53ad9d2942cfeaa..3cdd05533f703ac27333daab7ada0c26392a24f5 100644
--- a/python/paddle/fluid/install_check.py
+++ b/python/paddle/fluid/install_check.py
@@ -17,7 +17,7 @@ from .param_attr import ParamAttr
 from .initializer import Constant
 from . import layers
 from . import backward
-from .imperative import Layer, nn
+from .dygraph import Layer, nn
 from . import executor
 from . import core
diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py
index a85ef3c13f845959200d26391f6c95923a11c6ed..7eb912645e5077d35a2d11d7d09a033d28345e15 100644
--- a/python/paddle/fluid/layer_helper.py
+++ b/python/paddle/fluid/layer_helper.py
@@ -17,7 +17,7 @@ from __future__ import print_function
 import copy
 import six
 
-from .framework import Parameter, dtype_is_floating, _in_imperative_mode
+from .framework import Parameter, dtype_is_floating, _in_dygraph_mode
 from . import unique_name
 from paddle.fluid.initializer import Constant, Xavier
 from .param_attr import ParamAttr
@@ -30,9 +30,9 @@ class LayerHelper(LayerHelperBase):
     def __init__(self, layer_type, **kwargs):
         self.kwargs = kwargs
         name = self.kwargs.get('name', None)
-        # TODO(panyx0718, minqiyang): imperative mode
+        # TODO(panyx0718, minqiyang): dygraph mode
         # can not use both `layer_type` and `name`. Deprecate LayerHelper
-        # and write a Helper for imperative mode.
+        # and write a Helper for dygraph mode.
         if name is None:
             self.kwargs['name'] = unique_name.generate(layer_type)
diff --git a/python/paddle/fluid/layer_helper_base.py b/python/paddle/fluid/layer_helper_base.py
index a68160d797bcaca8cff849c560960d6a8823de53..869a5f54e9cdf5740c5e216917d92880d7d61e2d 100644
--- a/python/paddle/fluid/layer_helper_base.py
+++ b/python/paddle/fluid/layer_helper_base.py
@@ -17,7 +17,7 @@ from __future__ import print_function
 import copy
 import numpy as np
 
-from .framework import Variable, default_main_program, default_startup_program, _in_imperative_mode, _current_expected_place
+from .framework import Variable, default_main_program, default_startup_program, _in_dygraph_mode, _current_expected_place
 from . import unique_name
 from .param_attr import ParamAttr, WeightNormParamAttr
 from . import core
@@ -54,8 +54,8 @@ class LayerHelperBase(object):
             Return Variable construct from value
         """
         if isinstance(value, np.ndarray):
-            assert _in_imperative_mode(
-            ), "to_variable could only be called in imperative mode"
+            assert _in_dygraph_mode(
+            ), "to_variable could only be called in dygraph mode"
 
             if not block:
                 block = default_main_program().current_block()
@@ -302,8 +302,8 @@ class LayerHelperBase(object):
                 param = self._create_weight_normalize(attr, shape, dtype)
                 WeightNormParamAttr.params_with_weight_norm.append(param)
                 return param
-        if _in_imperative_mode():
-            # In imperative mode, we want the returned parameter to be
+        if _in_dygraph_mode():
+            # In dygraph mode, we want the returned parameter to be
             # initialized so that it can be used imperatively.
             return self.main_program.global_block().create_parameter(
                 dtype=dtype,
@@ -370,7 +370,7 @@ class LayerHelperBase(object):
             initializer: initializer to use
         """
         assert isinstance(var, Variable)
-        if _in_imperative_mode():
+        if _in_dygraph_mode():
             initializer(var, var.block)
         else:
             self.startup_program.global_block().create_var(
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index f2413f603304f8262476ca3ae2b820c89d009c3d..fc02aa3c66ccc6954f199a01790fb1534927f4c0 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -23,8 +23,8 @@ import os
 import inspect
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant, NumpyArrayInitializer
-from ..framework import Variable, OpProtoHolder, _in_imperative_mode
-from ..imperative import base
+from ..framework import Variable, OpProtoHolder, _in_dygraph_mode
+from ..dygraph import base
 from ..param_attr import ParamAttr
 from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
 from .tensor import concat, assign
@@ -32,7 +32,7 @@ from . import utils
 from .. import unique_name
 from functools import reduce
 from .. import core
-from ..imperative import layers
+from ..dygraph import layers
 
 __all__ = [
     'fc',
@@ -296,7 +296,6 @@ def fc(input,
         data_2 = fluid.layers.data(name="data_2", shape=[24, 36], dtype="float32")
         fc = fluid.layers.fc(input=[data_1, data_2], size=1000, act="tanh")
     """
-
     helper = LayerHelper("fc", **locals())
     dtype = helper.input_dtype()
@@ -3279,6 +3278,8 @@ def layer_norm(input,
         >>>                          dtype='float32')
         >>> x = fluid.layers.layer_norm(input=data, begin_norm_axis=1)
     """
+    assert _in_dygraph_mode(
+    ) is not True, "please use FC instead of fc in dygraph mode!"
     helper = LayerHelper('layer_norm', **locals())
     dtype = helper.input_dtype()
@@ -6405,8 +6406,8 @@ def squeeze(input, axes, name=None):
             x = layers.data(name='x', shape=[5, 1, 10])
             y = layers.sequeeze(input=x, axes=[1])
     """
-    assert not _in_imperative_mode(), (
-        "squeeze layer is not supported in imperative mode yet.")
+    assert not _in_dygraph_mode(), (
+        "squeeze layer is not supported in dygraph mode yet.")
     helper = LayerHelper("squeeze", **locals())
     out = helper.create_variable_for_type_inference(dtype=input.dtype)
     x_shape = helper.create_variable_for_type_inference(dtype=input.dtype)
@@ -9144,7 +9145,7 @@ def _elementwise_op(helper):
     op_type = helper.layer_type
     x = helper.kwargs.get('x', None)
     y = helper.kwargs.get('y', None)
-    if _in_imperative_mode():
+    if _in_dygraph_mode():
         x = base.to_variable(x)
         y = base.to_variable(y)
diff --git a/python/paddle/fluid/layers/tensor.py b/python/paddle/fluid/layers/tensor.py
index ef90638c721810e618ce4760e83e1a63b86c2325..80450119f44e93aae4b483983484ea18be5b2035 100644
--- a/python/paddle/fluid/layers/tensor.py
+++ b/python/paddle/fluid/layers/tensor.py
@@ -20,7 +20,6 @@ from ..framework import convert_np_dtype_to_dtype_
 from ..framework import Variable
 from ..initializer import Constant, force_init_on_cpu
 from ..core import VarDesc
-from ..imperative import base as imperative_base
 from .layer_function_generator import templatedoc
 import numpy
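
Graph-only layers now fail fast under the renamed mode check; e.g. `squeeze` raises immediately inside a dygraph guard (a sketch):

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        x = fluid.dygraph.to_variable(np.ones([5, 1, 10], np.float32))
        try:
            fluid.layers.squeeze(input=x, axes=[1])
        except AssertionError as e:
            print(e)  # squeeze layer is not supported in dygraph mode yet.
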
diff --git a/python/paddle/fluid/optimizer.py b/python/paddle/fluid/optimizer.py
index e21f303a3e07fe176920cd0650fb96f600dd4743..479c0b0a4abef23b9aed646eb34a476e443016d5 100644
--- a/python/paddle/fluid/optimizer.py
+++ b/python/paddle/fluid/optimizer.py
@@ -30,7 +30,6 @@ from .initializer import Constant
 from .layer_helper import LayerHelper
 from .layers import ops
 from .regularizer import append_regularization_ops
-from .imperative import base as imperative_base
 from paddle.fluid import core
 from paddle.fluid.layers import tensor
 from functools import reduce
@@ -169,7 +168,7 @@ class Optimizer(object):
             name = self._name + "_" + name
         if (name in self._accumulators and
                 param.name in self._accumulators[name]):
-            if framework._in_imperative_mode():
+            if framework._in_dygraph_mode():
                 return self._accumulators[name][param.name]
             raise Exception("Accumulator {} already exists for parameter {}".
                             format(name, param.name))
@@ -396,11 +395,11 @@ class Optimizer(object):
         """
         self._dtype = loss.dtype
         optimize_ops = []
-        if framework._in_imperative_mode():
+        if framework._in_dygraph_mode():
             if parameter_list is not None:
                 parameters = parameter_list
             else:
-                parameters = framework._imperative_tracer().all_parameters()
+                parameters = framework._dygraph_tracer().all_parameters()
 
             params_grads = []
             for param in parameters:
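
With no explicit parameter list, `minimize` now pulls trainable parameters from the dygraph tracer. The typical training-step shape (a sketch; `MNIST` is any dygraph Layer, as in the checkpoint test below, and `avg_loss` its scalar loss):

.. code-block:: python

    import paddle.fluid as fluid
    from paddle.fluid.optimizer import SGDOptimizer

    with fluid.dygraph.guard():
        model = MNIST("mnist")                 # any fluid.dygraph.Layer
        sgd = SGDOptimizer(learning_rate=1e-3)
        # ... forward pass producing avg_loss ...
        avg_loss._backward()
        sgd.minimize(avg_loss)  # parameters come from _dygraph_tracer()
        model.clear_gradients()
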
diff --git a/python/paddle/fluid/tests/unittests/op_test.py b/python/paddle/fluid/tests/unittests/op_test.py
index b84ce2b3aeab7963f8de85eb09ff6e085e52c198..6b8622b6f26f6102e5ee02716f30a847ed9a2fed 100644
--- a/python/paddle/fluid/tests/unittests/op_test.py
+++ b/python/paddle/fluid/tests/unittests/op_test.py
@@ -262,14 +262,14 @@ class OpTest(unittest.TestCase):
             if isinstance(value, tuple):
                 data = value[0]
                 lod = value[1]
-                v = fluid.imperative.base.to_variable(value=data)
+                v = fluid.dygraph.base.to_variable(value=data)
                 v._ivar.value().get_tensor().set_recursive_sequence_lengths(lod)
                 return v
             else:
-                return fluid.imperative.base.to_variable(value)
+                return fluid.dygraph.base.to_variable(value)
 
-    def _calc_imperative_output(self, place, parallel=False, no_check_set=None):
-        with fluid.imperative.base.guard(place=place):
+    def _calc_dygraph_output(self, place, parallel=False, no_check_set=None):
+        with fluid.dygraph.base.guard(place=place):
             block = fluid.default_main_program().global_block()
 
             # prepare input variable
@@ -316,7 +316,7 @@
             return outputs
 
-    def _calc_output(self, place, parallel=False, no_check_set=None):
+    def _calc_output(self, place, parallel=False, no_check_set=None, loss=None):
         program = Program()
         block = program.global_block()
         self._append_ops(block)
@@ -329,8 +329,14 @@
             use_cuda = False
             if isinstance(place, fluid.CUDAPlace(0)):
                 use_cuda = True
-            executor = fluid.ParallelExecutor(
-                use_cuda=use_cuda, loss_name=loss.name, main_program=program)
+            if loss:
+                executor = fluid.ParallelExecutor(
+                    use_cuda=use_cuda,
+                    loss_name=loss.name,
+                    main_program=program)
+            else:
+                executor = fluid.ParallelExecutor(
+                    use_cuda=use_cuda, main_program=program)
         else:
             executor = Executor(place)
 
@@ -364,9 +370,9 @@
                                 atol,
                                 no_check_set=None,
                                 equal_nan=False,
-                                check_imperative=False):
-        if check_imperative:
-            imperative_outs = self._calc_imperative_output(
+                                check_dygraph=False):
+        if check_dygraph:
+            dygraph_outs = self._calc_dygraph_output(
                 place, no_check_set=no_check_set)
         outs, fetch_list = self._calc_output(place, no_check_set=no_check_set)
 
@@ -393,8 +399,8 @@
                                   type(sub_out))
                 for item in sub_out:
                     sub_out_name, expect = item[0], item[1]
-                    if check_imperative:
-                        imperative_actual = imperative_outs[sub_out_name][0]
+                    if check_dygraph:
+                        imperative_actual = dygraph_outs[sub_out_name][0]
                         imperative_actual_t = np.array(
                             imperative_actual._ivar.value().get_tensor())
                     idx = find_actual(sub_out_name, fetch_list)
@@ -407,7 +413,7 @@ class OpTest(unittest.TestCase):
                             actual_t, expect_t, atol=atol, equal_nan=equal_nan),
                         "Output (" + sub_out_name + ") has diff at " +
                         str(place))
-                    if check_imperative:
+                    if check_dygraph:
                         self.assertTrue(
                             np.allclose(
                                 imperative_actual_t,
@@ -415,21 +421,21 @@
                                 atol=atol,
                                 equal_nan=equal_nan),
                             "Output (" + sub_out_name + ") has diff at " +
-                            str(place) + " in imperative mode")
+                            str(place) + " in dygraph mode")
                     if isinstance(expect, tuple):
                         self.assertListEqual(
                             actual.recursive_sequence_lengths(), expect[1],
                             "Output (" + sub_out_name +
                             ") has different lod at " + str(place))
-                    if check_imperative:
+                    if check_dygraph:
                         self.assertListEqual(
                             imperative_actual._ivar.value().get_tensor()
                             .recursive_sequence_lengths(), expect[1],
                             "Output (" + out_name + ") has different lod at " +
-                            str(place) + " in imperative mode")
+                            str(place) + " in dygraph mode")
             else:
-                if check_imperative:
-                    imperative_actual = imperative_outs[out_name][0]
+                if check_dygraph:
+                    imperative_actual = dygraph_outs[out_name][0]
                     imperative_actual_t = np.array(
                         imperative_actual._ivar.value().get_tensor())
                 idx = find_actual(out_name, fetch_list)
@@ -443,7 +449,7 @@
                     "Output (" + out_name + ") has diff at " + str(place) +
                     "\nExpect " + str(expect_t) + "\n" + "But Got" +
                     str(actual_t) + " in class " + self.__class__.__name__)
-                if check_imperative:
+                if check_dygraph:
                     self.assertTrue(
                         np.allclose(
                             imperative_actual_t,
@@ -458,12 +464,12 @@
                     self.assertListEqual(actual.recursive_sequence_lengths(),
                                          expect[1], "Output (" + out_name +
                                          ") has different lod at " + str(place))
-                    if check_imperative:
+                    if check_dygraph:
                         self.assertListEqual(
                             imperative_actual._ivar.value().get_tensor()
                             .recursive_sequence_lengths(), expect[1],
                             "Output (" + out_name + ") has different lod at " +
-                            str(place) + " in imperative mode")
+                            str(place) + " in dygraph mode")
 
     def _get_places(self):
         if self.dtype == np.float16:
@@ -490,11 +496,11 @@
                      atol=1e-5,
                      no_check_set=None,
                      equal_nan=False,
-                     check_imperative=False):
+                     check_dygraph=False):
         places = self._get_places()
         for place in places:
             self.check_output_with_place(place, atol, no_check_set, equal_nan,
-                                         check_imperative)
+                                         check_dygraph)
 
     def check_output_customized(self, checker):
         places = self._get_places()
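
Operator tests opt into the eager comparison through the renamed flag, as the GRU test below does; the general shape is (hypothetical test case for illustration):

.. code-block:: python

    class TestMyOp(OpTest):  # hypothetical op test
        def test_check_output(self):
            # run the op in static and dygraph mode and compare outputs
            self.check_output(atol=1e-8, check_dygraph=True)
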
diff --git a/python/paddle/fluid/tests/unittests/test_base_layer.py b/python/paddle/fluid/tests/unittests/test_base_layer.py
index b12aaea3219cb81e8fa0e7584120db510fb7b62c..9cb88d4a8553f3b750f6cf3b24115b4d188ed1d6 100644
--- a/python/paddle/fluid/tests/unittests/test_base_layer.py
+++ b/python/paddle/fluid/tests/unittests/test_base_layer.py
@@ -18,7 +18,7 @@ import numpy as np
 import paddle.fluid as fluid
 
 
-class L1(fluid.imperative.Layer):
+class L1(fluid.dygraph.Layer):
     def __init__(self, prefix):
         super(L1, self).__init__(prefix)
         self._param_attr = fluid.ParamAttr(
@@ -32,7 +32,7 @@ class L1(fluid.imperative.Layer):
         return self.w1 + self.w2
 
 
-class L2(fluid.imperative.Layer):
+class L2(fluid.dygraph.Layer):
     def __init__(self, prefix):
         super(L2, self).__init__(prefix)
         self.layer1 = L1(self.full_name())
@@ -42,7 +42,7 @@ class L2(fluid.imperative.Layer):
         return self.layer1() + self.layer2()
 
 
-class L3(fluid.imperative.Layer):
+class L3(fluid.dygraph.Layer):
     def __init__(self, prefix):
         super(L3, self).__init__(prefix)
         self.layer1 = L2(self.full_name())
@@ -54,7 +54,7 @@ class L3(fluid.imperative.Layer):
 
 class TestBaseLayer(unittest.TestCase):
     def test_one_level(self):
-        with fluid.imperative.guard():
+        with fluid.dygraph.guard():
             l = L1('test_one_level')
             ret = l()
             self.assertEqual(l.w1.name, "test_one_level/L1_0.w_0")
@@ -62,7 +62,7 @@ class TestBaseLayer(unittest.TestCase):
             self.assertTrue(np.allclose(ret._numpy(), 0.2 * np.ones([2, 2])))
 
     def test_three_level(self):
-        with fluid.imperative.guard():
+        with fluid.dygraph.guard():
             l = L3('test_three_level')
             names = [p.name for p in l.parameters()]
             ret = l()
diff --git a/python/paddle/fluid/tests/unittests/test_gru_op.py b/python/paddle/fluid/tests/unittests/test_gru_op.py
index 848c9a4952aebcf93fd7bf12f7bc4cd15c7a8b28..c66d59aceb05dfbf9beac809ff13841a77953695 100644
--- a/python/paddle/fluid/tests/unittests/test_gru_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gru_op.py
@@ -156,7 +156,7 @@ class TestGRUOp(OpTest):
         }
 
     def test_check_output(self):
-        self.check_output(atol=1e-8, check_imperative=True)
+        self.check_output(atol=1e-8, check_dygraph=True)
 
     def test_check_grad(self):
         self.check_grad(['Input', 'H0', 'Weight', 'Bias'], ['Hidden'])
fluid.dygraph.Layer("l") self.assertRaises(NotImplementedError, l.forward, []) def test_pylayer_func_id(self): - with fluid.imperative.guard(): + with fluid.dygraph.guard(): - class PyLayer1(fluid.imperative.PyLayer): + class PyLayer1(fluid.dygraph.PyLayer): def __init__(self): super(PyLayer1, self).__init__() @@ -227,7 +227,7 @@ class TestImperative(unittest.TestCase): def backward(input): return input - class PyLayer2(fluid.imperative.PyLayer): + class PyLayer2(fluid.dygraph.PyLayer): def __init__(self): super(PyLayer2, self).__init__() @@ -241,21 +241,21 @@ class TestImperative(unittest.TestCase): py_layer_1 = PyLayer1() py_layer_2 = PyLayer2() - py_layer_1(fluid.imperative.base.to_variable(np.ones([2, 2]))) - py_layer_2(fluid.imperative.base.to_variable(np.ones([2, 2]))) + py_layer_1(fluid.dygraph.base.to_variable(np.ones([2, 2]))) + py_layer_2(fluid.dygraph.base.to_variable(np.ones([2, 2]))) id = py_layer_1.forward_id self.assertGreater(id, 0) self.assertEqual(py_layer_1.backward_id, id + 1) self.assertEqual(py_layer_2.forward_id, id + 2) self.assertEqual(py_layer_2.backward_id, id + 3) - py_layer_1(fluid.imperative.base.to_variable(np.ones([2, 2]))) + py_layer_1(fluid.dygraph.base.to_variable(np.ones([2, 2]))) self.assertEqual(py_layer_1.forward_id, id) def test_pylayer(self): np_inp = np.ones([2, 2], np.float32) - with fluid.imperative.guard(): + with fluid.dygraph.guard(): my_py_layer = MyPyLayer() - var_inp = fluid.imperative.base.to_variable(np_inp) + var_inp = fluid.dygraph.base.to_variable(np_inp) outs = my_py_layer(var_inp) dy_out = np.sum(outs[0]._numpy()) outs[0]._backward() @@ -282,8 +282,8 @@ class TestImperative(unittest.TestCase): def test_layer_in_out(self): np_inp = np.array([1.0, 2.0, -1.0], dtype=np.float32) - with fluid.imperative.guard(): - var_inp = fluid.imperative.base.to_variable(np_inp) + with fluid.dygraph.guard(): + var_inp = fluid.dygraph.base.to_variable(np_inp) l = MyLayer("my_layer") x = l(var_inp)[0] self.assertIsNotNone(x) @@ -310,8 +310,8 @@ class TestImperative(unittest.TestCase): def test_mlp(self): np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32) - with fluid.imperative.guard(): - var_inp = fluid.imperative.base.to_variable(np_inp) + with fluid.dygraph.guard(): + var_inp = fluid.dygraph.base.to_variable(np_inp) mlp = MLP("mlp") out = mlp(var_inp) dy_out = out._numpy() @@ -353,8 +353,8 @@ class TestImperative(unittest.TestCase): [10.0, 11.0, 12.0]]) np_inp = np_inp.reshape((1, 4, 3)) np_inp = np_inp.astype(np.float32) - with fluid.imperative.guard(): - var_inp = fluid.imperative.base.to_variable(np_inp) + with fluid.dygraph.guard(): + var_inp = fluid.dygraph.base.to_variable(np_inp) var_inp = fluid.layers.reshape(var_inp, shape=[1, 4, 3]) simple_rnn = SimpleRNN("simple_rnn") outs, pre_hiddens = simple_rnn.forward(var_inp) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py b/python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py index 62c25f734598e35b7c668d1ec1b89b5c57449f73..a92b7d62fa598a3ec9b53bade2805cc033f4b9d9 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py @@ -18,11 +18,11 @@ import numpy as np import paddle import paddle.fluid as fluid from paddle.fluid.optimizer import SGDOptimizer -from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC -from paddle.fluid.imperative.base import to_variable +from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC +from paddle.fluid.dygraph.base import 
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py b/python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py
index 62c25f734598e35b7c668d1ec1b89b5c57449f73..a92b7d62fa598a3ec9b53bade2805cc033f4b9d9 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_checkpoint.py
@@ -18,11 +18,11 @@ import numpy as np
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid.optimizer import SGDOptimizer
-from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
-from paddle.fluid.imperative.base import to_variable
+from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC
+from paddle.fluid.dygraph.base import to_variable
 
 
-class SimpleImgConvPool(fluid.imperative.Layer):
+class SimpleImgConvPool(fluid.dygraph.Layer):
     def __init__(self,
                  name_scope,
                  num_channels,
@@ -71,7 +71,7 @@ class SimpleImgConvPool(fluid.imperative.Layer):
         return x
 
 
-class MNIST(fluid.imperative.Layer):
+class MNIST(fluid.dygraph.Layer):
     def __init__(self, name_scope):
         super(MNIST, self).__init__(name_scope)
 
@@ -98,12 +98,12 @@
         return x
 
 
-class TestImperativeCheckpoint(unittest.TestCase):
+class TestDygraphCheckpoint(unittest.TestCase):
     def save_load_persistables(self):
         seed = 90
         epoch_num = 1
 
-        with fluid.imperative.guard():
+        with fluid.dygraph.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
 
@@ -135,14 +135,14 @@
                     avg_loss._backward()
                     sgd.minimize(avg_loss)
-                    fluid.imperative.save_persistables(mnist, "save_dir")
+                    fluid.dygraph.save_persistables(mnist, "save_dir")
                     mnist.clear_gradients()
 
                     for param in mnist.parameters():
                         dy_param_init_value[param.name] = param._numpy()
 
                     mnist.load_dict(
-                        fluid.imperative.load_persistables(mnist, "save_dir"))
+                        fluid.dygraph.load_persistables(mnist, "save_dir"))
 
                     restore = mnist.parameters()
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py
index ac123ee8db26ac23bbf9454e399a592a28c91c32..ccebd4a54727f383bd4e46ff57bfdc9381577d05 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_deepcf.py
@@ -22,7 +22,7 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from test_imperative_base import new_program_scope
-from paddle.fluid.imperative.base import to_variable
+from paddle.fluid.dygraph.base import to_variable
 
 # Can use Amusic dataset as the DeepCF describes.
 DATA_PATH = os.environ.get('DATA_PATH', '')
@@ -32,11 +32,11 @@ NUM_BATCHES = int(os.environ.get('NUM_BATCHES', 5))
 NUM_EPOCHES = int(os.environ.get('NUM_EPOCHES', 1))
 
 
-class DMF(fluid.imperative.Layer):
+class DMF(fluid.dygraph.Layer):
     def __init__(self, name_scope):
         super(DMF, self).__init__(name_scope)
-        self._user_latent = fluid.imperative.FC(self.full_name(), 256)
-        self._item_latent = fluid.imperative.FC(self.full_name(), 256)
+        self._user_latent = fluid.dygraph.FC(self.full_name(), 256)
+        self._item_latent = fluid.dygraph.FC(self.full_name(), 256)
 
         self._user_layers = []
         self._item_layers = []
@@ -45,12 +45,12 @@
             self._user_layers.append(
                 self.add_sublayer(
                     'user_layer_%d' % i,
-                    fluid.imperative.FC(
+                    fluid.dygraph.FC(
                         self.full_name(), self._hid_sizes[i], act='relu')))
             self._item_layers.append(
                 self.add_sublayer(
                     'item_layer_%d' % i,
-                    fluid.imperative.FC(
+                    fluid.dygraph.FC(
                         self.full_name(), self._hid_sizes[i], act='relu')))
 
     def forward(self, users, items):
@@ -63,18 +63,18 @@
         return fluid.layers.elementwise_mul(users, items)
 
 
-class MLP(fluid.imperative.Layer):
+class MLP(fluid.dygraph.Layer):
     def __init__(self, name_scope):
         super(MLP, self).__init__(name_scope)
-        self._user_latent = fluid.imperative.FC(self.full_name(), 256)
-        self._item_latent = fluid.imperative.FC(self.full_name(), 256)
+        self._user_latent = fluid.dygraph.FC(self.full_name(), 256)
+        self._item_latent = fluid.dygraph.FC(self.full_name(), 256)
         self._match_layers = []
         self._hid_sizes = [128, 64]
         for i in range(len(self._hid_sizes)):
             self._match_layers.append(
                 self.add_sublayer(
                     'match_layer_%d' % i,
-                    fluid.imperative.FC(
+                    fluid.dygraph.FC(
                         self.full_name(), self._hid_sizes[i], act='relu')))
         self._mat
@@ -88,7 +88,7 @@
         return match_vec
 
 
-class DeepCF(fluid.imperative.Layer):
+class DeepCF(fluid.dygraph.Layer):
     def __init__(self, name_scope, num_users, num_items, matrix):
         super(DeepCF, self).__init__(name_scope)
         self._num_users = num_users
@@ -103,7 +103,7 @@
         self._mlp = MLP(self.full_name())
         self._dmf = DMF(self.full_name())
-        self._match_fc = fluid.imperative.FC(self.full_name(), 1, act='sigmoid')
+        self._match_fc = fluid.dygraph.FC(self.full_name(), 1, act='sigmoid')
 
     def forward(self, users, items):
         # users_emb = self._user_emb(users)
@@ -191,7 +191,7 @@ def load_data(DATA_PATH):
            np.expand_dims(labels_np, -1), num_users, num_items, matrix
 
 
-class TestImperativeDeepCF(unittest.TestCase):
+class TestDygraphDeepCF(unittest.TestCase):
     def test_deefcf(self):
         seed = 90
         if DATA_PATH:
@@ -237,7 +237,7 @@
                     fetch_list=[loss])[0]
                 sys.stderr.write('static loss %s\n' % static_loss)
 
-            with fluid.imperative.guard():
+            with fluid.dygraph.guard():
                 fluid.default_startup_program().random_seed = seed
                 fluid.default_main_program().random_seed = seed
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gan.py b/python/paddle/fluid/tests/unittests/test_imperative_gan.py
index 6024fb5f816d10cedad36272e353704797526676..58faa1cb85af9cedb70f3a12244cfeb44e0f4f52 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_gan.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_gan.py
@@ -22,12 +22,12 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.optimizer import SGDOptimizer
-from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
+from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC
 from test_imperative_base import new_program_scope
-from paddle.fluid.imperative.base import to_variable
+from paddle.fluid.dygraph.base import to_variable
 
 
-class Discriminator(fluid.imperative.Layer):
+class Discriminator(fluid.dygraph.Layer):
     def __init__(self, name_scope):
         super(Discriminator, self).__init__(name_scope)
         self._fc1 = FC(self.full_name(), size=32, act='elu')
@@ -38,7 +38,7 @@ class Discriminator(fluid.imperative.Layer):
         return self._fc2(x)
 
 
-class Generator(fluid.imperative.Layer):
+class Generator(fluid.dygraph.Layer):
     def __init__(self, name_scope):
         super(Generator, self).__init__(name_scope)
         self._fc1 = FC(self.full_name(), size=64, act='elu')
@@ -51,7 +51,7 @@ class Generator(fluid.imperative.Layer):
         return self._fc3(x)
 
 
-class TestImperativeGAN(unittest.TestCase):
+class TestDygraphGAN(unittest.TestCase):
     def test_gan_float32(self):
         seed = 90
 
@@ -130,7 +130,7 @@ class TestImperativeGAN(unittest.TestCase):
                     scope.find_var(param.name).get_tensor())
 
             dy_params = dict()
-            with fluid.imperative.guard():
+            with fluid.dygraph.guard():
                 fluid.default_startup_program().random_seed = seed
                 fluid.default_main_program().random_seed = seed
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py
index 2086fab5c81e241d1a49386d8285289b14364dc8..a8fb9ecfe4be16b73ac2144259f25ed3859ece7e 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_gnn.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_gnn.py
@@ -22,16 +22,16 @@ import paddle
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid.optimizer import AdamOptimizer
-from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
+from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC
 from test_imperative_base import new_program_scope
-from paddle.fluid.imperative.base import to_variable
+from paddle.fluid.dygraph.base import to_variable
 
 
 def gen_data():
     pass
 
 
-class GraphConv(fluid.imperative.Layer):
+class GraphConv(fluid.dygraph.Layer):
     def __init__(self, name_scope, in_features, out_features):
         super(GraphConv, self).__init__(name_scope)
 
@@ -50,7 +50,7 @@ class GraphConv(fluid.imperative.Layer):
         return fluid.layers.matmul(adj, support) + self.bias
 
 
-class GCN(fluid.imperative.Layer):
+class GCN(fluid.dygraph.Layer):
     def __init__(self, name_scope, num_hidden):
         super(GCN, self).__init__(name_scope)
         self.gc = GraphConv(self.full_name(), num_hidden, 32)
@@ -61,7 +61,7 @@ class GCN(fluid.imperative.Layer):
         return self.gc2(x, adj)
 
 
-class TestImperativeGNN(unittest.TestCase):
+class TestDygraphGNN(unittest.TestCase):
     def test_gnn_float32(self):
         seed = 90
 
@@ -115,7 +115,7 @@
             static_weight = np.array(
                 scope.find_var(model.gc.weight.name).get_tensor())
 
-        with fluid.imperative.guard():
+        with fluid.dygraph.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
index 5b3c250501386a7854313218f5ea338281824252..829274afc7e17fb0b5f4d8200c5e1f7bbbe02393 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_optimizer.py
@@ -23,12 +23,12 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.optimizer import SGDOptimizer
-from paddle.fluid.imperative.nn import Conv2D, Pool2D, FC
-from paddle.fluid.imperative.base import to_variable
+from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC
+from paddle.fluid.dygraph.base import to_variable
 from test_imperative_base import new_program_scope
 
 
-class SimpleImgConvPool(fluid.imperative.Layer):
+class SimpleImgConvPool(fluid.dygraph.Layer):
     def __init__(self,
                  name_scope,
                  num_channels,
@@ -77,7 +77,7 @@
         return x
 
 
-class MNIST(fluid.imperative.Layer):
+class MNIST(fluid.dygraph.Layer):
     def __init__(self, name_scope):
         super(MNIST, self).__init__(name_scope)
 
@@ -104,11 +104,11 @@
         return x
 
 
-class TestImperativeMnist(unittest.TestCase):
+class TestDygraphMnist(unittest.TestCase):
     def test_mnist_float32(self):
         seed = 90
         epoch_num = 1
-        with fluid.imperative.guard():
+        with fluid.dygraph.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
index 460ba65a48c863315cda4847aee1b4e2366bba96..998c675815ece9236c819bffc4a4b74d44ff790e 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_ptb_rnn.py
@@ -16,17 +16,17 @@ from __future__ import print_function
 
 import unittest
 import paddle.fluid as fluid
-from paddle.fluid.imperative.nn import Embedding
+from paddle.fluid.dygraph.nn import Embedding
 import paddle.fluid.framework as framework
 from paddle.fluid.optimizer import SGDOptimizer
-from paddle.fluid.imperative.base import to_variable
+from paddle.fluid.dygraph.base import to_variable
 from test_imperative_base import new_program_scope
 import numpy as np
 import six
 from paddle.fluid.backward import append_backward
 
 
-class SimpleLSTMRNN(fluid.imperative.Layer):
+class SimpleLSTMRNN(fluid.dygraph.Layer):
     def __init__(self,
                  name_scope,
                  hidden_size,
@@ -131,7 +131,7 @@ class SimpleLSTMRNN(fluid.imperative.Layer):
         return real_res, last_hidden, last_cell
 
 
-class PtbModel(fluid.imperative.Layer):
+class PtbModel(fluid.dygraph.Layer):
     def __init__(self,
                  name_scope,
                  hidden_size,
@@ -214,7 +214,7 @@ class PtbModel(fluid.imperative.Layer):
         return loss, last_hidden, last_cell
 
 
-class TestImperativePtbRnn(unittest.TestCase):
+class TestDygraphPtbRnn(unittest.TestCase):
     def test_ptb_rnn_cpu_float32(self):
         seed = 90
         hidden_size = 10
@@ -224,7 +224,7 @@
         init_scale = 0.1
         batch_size = 4
 
-        with fluid.imperative.guard():
+        with fluid.dygraph.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
             # TODO: marsyang1993 Change seed to
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
index ab9298890bf69774fd842ec202d833be0a57f7ad..1d786d584632769e4318bcdeb24ef7ef8ea18597 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py
@@ -21,8 +21,8 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.imperative.nn import Conv2D, Pool2D, BatchNorm, FC
-from paddle.fluid.imperative.base import to_variable
+from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, FC
+from paddle.fluid.dygraph.base import to_variable
 from test_imperative_base import new_program_scope
 
 batch_size = 8
@@ -57,7 +57,7 @@ def optimizer_setting(params):
             lr = []
             lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
         optimizer = fluid.optimizer.SGD(learning_rate=0.01)
-        # TODO(minqiyang): Add learning rate scheduler support to imperative mode
+        # TODO(minqiyang): Add learning rate scheduler support to dygraph mode
         #  optimizer = fluid.optimizer.Momentum(
         #  learning_rate=params["lr"],
         #  learning_rate=fluid.layers.piecewise_decay(
@@ -68,7 +68,7 @@
     return optimizer
 
 
-class ConvBNLayer(fluid.imperative.Layer):
+class ConvBNLayer(fluid.dygraph.Layer):
     def __init__(self,
                  name_scope,
                  num_channels,
@@ -99,7 +99,7 @@
         return y
 
 
-class BottleneckBlock(fluid.imperative.Layer):
+class BottleneckBlock(fluid.dygraph.Layer):
     def __init__(self,
                  name_scope,
                  num_channels,
@@ -156,7 +156,7 @@
         return layer_helper.append_activation(y)
 
 
-class ResNet(fluid.imperative.Layer):
+class ResNet(fluid.dygraph.Layer):
     def __init__(self, name_scope, layers=50, class_dim=102):
         super(ResNet, self).__init__(name_scope)
 
@@ -226,13 +226,13 @@
         return y
 
 
-class TestImperativeResnet(unittest.TestCase):
+class TestDygraphResnet(unittest.TestCase):
     def test_resnet_float32(self):
         seed = 90
 
         batch_size = train_parameters["batch_size"]
         batch_num = 20
-        with fluid.imperative.guard():
+        with fluid.dygraph.guard():
             fluid.default_startup_program().random_seed = seed
             fluid.default_main_program().random_seed = seed
diff --git a/python/paddle/fluid/tests/unittests/test_imperative_transformer.py b/python/paddle/fluid/tests/unittests/test_imperative_transformer.py
index b06d3e8894072943b06456340f928cda260763c3..3bdf3349730b0c9916449cfe0658d5a3c88834ed 100644
--- a/python/paddle/fluid/tests/unittests/test_imperative_transformer.py
+++ b/python/paddle/fluid/tests/unittests/test_imperative_transformer.py
@@ -16,7 +16,7 @@ from __future__ import print_function
 
 import unittest
 import paddle.fluid as fluid
-from paddle.fluid.imperative import Embedding, LayerNorm, FC, to_variable, Layer, guard
+from paddle.fluid.dygraph import Embedding, LayerNorm, FC, to_variable, Layer, guard
 from test_imperative_base import new_program_scope
 from paddle.fluid import core
 import numpy as np
@@ -623,7 +623,7 @@ class PrepareEncoderDecoderLayer(Layer):
                     initializer=fluid.initializer.NumpyArrayInitializer(pos_inp),
                     trainable=False))
 
-        # use in imperative_mode to fit different length batch
+        # use in dygraph_mode to fit different length batch
         # self._pos_emb._w = to_variable(
         #     position_encoding_init(self._src_max_len, self._src_emb_dim))
 
@@ -946,7 +946,7 @@ class TransFormer(Layer):
         return sum_cost, avg_cost, predict, token_num
 
 
-class TestImperativeTransformer(unittest.TestCase):
+class TestDygraphTransformer(unittest.TestCase):
     def test_transformer_float32(self):
         seed = 90
         with guard():
diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py
index 7fd9617cc7687a5a553ed22cfed560aef8058496..90487d4ef22cd47c5e503bebf40c7ac8adfd83e1 100644
--- a/python/paddle/fluid/tests/unittests/test_layers.py
+++ b/python/paddle/fluid/tests/unittests/test_layers.py
@@ -29,8 +29,8 @@ from paddle.fluid import core
 from paddle.fluid.initializer import Constant
 import paddle.fluid.layers as layers
 from test_imperative_base import new_program_scope
-from paddle.fluid.imperative import nn
-from paddle.fluid.imperative import base
+from paddle.fluid.dygraph import nn
+from paddle.fluid.dygraph import base
 
 
 class LayerTest(unittest.TestCase):
@@ -68,7 +68,7 @@ class LayerTest(unittest.TestCase):
 
     @contextlib.contextmanager
     def dynamic_graph(self, force_to_use_cpu=False):
-        with fluid.imperative.guard(
+        with fluid.dygraph.guard(
                 self._get_place(force_to_use_cpu=force_to_use_cpu)):
            fluid.default_startup_program().random_seed = self.seed
            fluid.default_main_program().random_seed = self.seed
diff --git a/python/paddle/fluid/tests/unittests/test_variable.py b/python/paddle/fluid/tests/unittests/test_variable.py
index 076ee3baf96ab3c16f3ed9a3b9a15e2eb2aaed77..601da5839015efd81ea302e1cae65ba3c7bb22fc 100644
--- a/python/paddle/fluid/tests/unittests/test_variable.py
+++ b/python/paddle/fluid/tests/unittests/test_variable.py
@@ -19,7 +19,6 @@ from paddle.fluid.framework import default_main_program, Program, convert_np_dty
 import paddle.fluid as fluid
 import paddle.fluid.core as core
 import numpy as np
-from test_imperative_base import new_program_scope
 
 
 class TestVariable(unittest.TestCase):
@@ -153,7 +152,7 @@ class TestVariableImperative(unittest.TestCase):
         self.assertEqual([1, 1, 100], nw.shape)
 
     def test_slice(self):
-        with fluid.imperative.guard():
+        with fluid.dygraph.guard():
             self._test_slice()
diff --git a/python/setup.py.in b/python/setup.py.in
index 9f87f5644fc969f3f55fd08689f3e2bbaf36dc39..68f96273a23c725d1643e8e7397bc970411dd191 100644
--- a/python/setup.py.in
+++ b/python/setup.py.in
@@ -102,7 +102,7 @@ packages=['paddle',
           'paddle.reader',
           'paddle.distributed',
           'paddle.fluid',
-          'paddle.fluid.imperative',
+          'paddle.fluid.dygraph',
           'paddle.fluid.proto',
           'paddle.fluid.proto.profiler',
           'paddle.fluid.distributed',
diff --git a/tools/print_signatures.py b/tools/print_signatures.py
index d32b247342cc0c37b7bcff7b676cb47a4f429dfd..6a262529b5cac7e596e65d23de6cc4b5d720cacb 100644
--- a/tools/print_signatures.py
+++ b/tools/print_signatures.py
@@ -28,7 +28,7 @@ import hashlib
 
 member_dict = collections.OrderedDict()
 
-experimental_namespace = {"paddle.fluid.imperative"}
+experimental_namespace = {"paddle.fluid.dygraph"}
 
 
 def md5(doc):