diff --git a/python/paddle/fluid/dygraph/nn.py b/python/paddle/fluid/dygraph/nn.py
index 5c99900465efae35af4d3daf6e882bc70b677d71..3403e8470b65dcb1363fb9baa5a64ad8664df931 100644
--- a/python/paddle/fluid/dygraph/nn.py
+++ b/python/paddle/fluid/dygraph/nn.py
@@ -17,7 +17,8 @@ from __future__ import print_function
 from six.moves import reduce
 from .. import core
 from ..layers import utils
-from ..dygraph import dygraph_utils
+from ..layers import nn
+from .. import dygraph_utils
 from . import layers
 from ..framework import Variable, in_dygraph_mode, OpProtoHolder, Parameter, _dygraph_tracer, _varbase_creator
 from ..param_attr import ParamAttr
diff --git a/python/paddle/fluid/dygraph/dygraph_utils.py b/python/paddle/fluid/dygraph_utils.py
similarity index 96%
rename from python/paddle/fluid/dygraph/dygraph_utils.py
rename to python/paddle/fluid/dygraph_utils.py
index cb274789da0aaea38847d2ebad19b965f0bf2d11..baa6e05289f6dfe79003b99877dabb26278cc06b 100644
--- a/python/paddle/fluid/dygraph/dygraph_utils.py
+++ b/python/paddle/fluid/dygraph_utils.py
@@ -12,8 +12,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .. import core
-from ..framework import dygraph_only
+from . import core
+from .framework import dygraph_only
 
 
 @dygraph_only
diff --git a/python/paddle/fluid/layers/learning_rate_scheduler.py b/python/paddle/fluid/layers/learning_rate_scheduler.py
index 65d837baa1a8b32e8a24b04de677bcc6505c3610..138ee824d7e7b2c7d14ebfc617cbe5a743972729 100644
--- a/python/paddle/fluid/layers/learning_rate_scheduler.py
+++ b/python/paddle/fluid/layers/learning_rate_scheduler.py
@@ -32,7 +32,7 @@ from . import tensor
 from ..initializer import init_on_cpu
 from ..framework import default_main_program, Parameter, unique_name, name_scope
 from ..framework import Variable
-from ..dygraph import base as imperative_base
+from ..framework import in_dygraph_mode
 from ..dygraph import learning_rate_scheduler as imperate_lr
 
 __all__ = [
@@ -88,7 +88,7 @@ def noam_decay(d_model, warmup_steps):
                                warmup_steps)
     """
     with default_main_program()._lr_schedule_guard():
-        if imperative_base.enabled():
+        if in_dygraph_mode():
             decay = imperate_lr.NoamDecay(d_model, warmup_steps)
             return decay
         else:
@@ -143,7 +143,7 @@ def exponential_decay(learning_rate, decay_steps, decay_rate, staircase=False):
 
     """
     with default_main_program()._lr_schedule_guard():
-        if imperative_base.enabled():
+        if in_dygraph_mode():
             decay = imperate_lr.ExponentialDecay(learning_rate, decay_steps,
                                                  decay_rate, staircase)
             return decay
@@ -199,7 +199,7 @@ def natural_exp_decay(learning_rate, decay_steps, decay_rate, staircase=False):
 
     """
     with default_main_program()._lr_schedule_guard():
-        if imperative_base.enabled():
+        if in_dygraph_mode():
             decay = imperate_lr.NaturalExpDecay(learning_rate, decay_steps,
                                                 decay_rate, staircase)
             return decay
@@ -255,7 +255,7 @@ def inverse_time_decay(learning_rate, decay_steps, decay_rate, staircase=False):
                   staircase=True))
     """
     with default_main_program()._lr_schedule_guard():
-        if imperative_base.enabled():
+        if in_dygraph_mode():
             decay = imperate_lr.InverseTimeDecay(learning_rate, decay_steps,
                                                  decay_rate, staircase)
             return decay
@@ -311,7 +311,7 @@ def polynomial_decay(learning_rate,
 
     """
     with default_main_program()._lr_schedule_guard():
-        if imperative_base.enabled():
+        if in_dygraph_mode():
             decay = imperate_lr.PolynomialDecay(learning_rate, decay_steps,
                                                 end_learning_rate, power, cycle)
             return decay
@@ -380,7 +380,7 @@ def piecewise_decay(boundaries, values):
         if len(values) - len(boundaries) != 1:
             raise ValueError("len(values) - len(boundaries) should be 1")
 
-        if imperative_base.enabled():
+        if in_dygraph_mode():
             decay = imperate_lr.PiecewiseDecay(boundaries, values, 0)
             return decay
         else:
@@ -444,7 +444,7 @@ def cosine_decay(learning_rate, step_each_epoch, epochs):
 
     """
     with default_main_program()._lr_schedule_guard():
-        if imperative_base.enabled():
+        if in_dygraph_mode():
             decay = imperate_lr.CosineDecay(learning_rate, step_each_epoch,
                                             epochs)
             return decay
@@ -520,7 +520,7 @@ def linear_lr_warmup(learning_rate, warmup_steps, start_lr, end_lr):
     linear_step = float(end_lr) - float(start_lr)
     with default_main_program()._lr_schedule_guard():
 
-        if imperative_base.enabled():
+        if in_dygraph_mode():
             lr = imperate_lr.LinearLrWarmup(learning_rate, warmup_steps,
                                             start_lr, end_lr)
             return lr
diff --git a/python/paddle/fluid/layers/nn.py b/python/paddle/fluid/layers/nn.py
index cc28c4243203c0142e0bf8322ecf76569189c021..9c83f83d1ce1b5f783844dd26686ad4a4b926066 100644
--- a/python/paddle/fluid/layers/nn.py
+++ b/python/paddle/fluid/layers/nn.py
@@ -25,8 +25,7 @@ import inspect
 from ..layer_helper import LayerHelper
 from ..initializer import Normal, Constant, NumpyArrayInitializer
 from ..framework import Variable, OpProtoHolder, in_dygraph_mode, dygraph_only, _dygraph_tracer, default_main_program
-from ..dygraph import base
-from ..dygraph import dygraph_utils
+from .. import dygraph_utils
 from ..param_attr import ParamAttr
 from .layer_function_generator import autodoc, templatedoc, _generate_doc_string_
 from .tensor import concat, assign, fill_constant, zeros
@@ -34,7 +33,6 @@ from . import utils
 from .. import unique_name
 from functools import reduce
 from .. import core
-from ..dygraph import layers
 from ..data_feeder import convert_dtype, check_type_and_dtype, check_type, check_dtype
 
 __all__ = [
@@ -10276,9 +10274,6 @@ def _elementwise_op(helper):
     op_type = helper.layer_type
     x = helper.kwargs.get('x', None)
     y = helper.kwargs.get('y', None)
-    if in_dygraph_mode():
-        x = base.to_variable(x)
-        y = base.to_variable(y)
     assert x is not None, 'x cannot be None in {}'.format(op_type)
     assert y is not None, 'y cannot be None in {}'.format(op_type)
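
Reviewer note (not part of the diff): moving dygraph_utils up to python/paddle/fluid/ and switching the LR schedulers from imperative_base.enabled() to framework.in_dygraph_mode() removes the fluid.layers -> fluid.dygraph import edges, which appears to be aimed at breaking the circular dependency between those two packages. A minimal sketch of the resulting call-site pattern, assuming Paddle 1.x-era APIs (the _select_lr_impl wrapper below is hypothetical, not code from this PR):

    from paddle.fluid.framework import in_dygraph_mode

    def _select_lr_impl(build_dygraph_decay, build_static_decay):
        # Same semantics as the old `imperative_base.enabled()` check, but
        # fluid.layers no longer has to import fluid.dygraph to ask it.
        if in_dygraph_mode():
            return build_dygraph_decay()   # e.g. imperate_lr.NoamDecay(...)
        return build_static_decay()        # static-graph LR schedule ops

The _elementwise_op hunk also drops the implicit base.to_variable() wrapping of x and y, so if I read it correctly, dygraph callers of the elementwise layers must now convert raw numpy inputs themselves, along these lines (illustrative only):

    import numpy as np
    import paddle.fluid as fluid

    with fluid.dygraph.guard():
        # Convert explicitly; _elementwise_op no longer wraps raw arrays.
        x = fluid.dygraph.to_variable(np.ones([2, 2], dtype='float32'))
        y = fluid.dygraph.to_variable(np.ones([2, 2], dtype='float32'))
        out = fluid.layers.elementwise_add(x, y)  # inputs already Variables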