From 3794d171f36d0cfc8db518bce4bf3718a5bda31e Mon Sep 17 00:00:00 2001
From: Meteor Liu
Date: Mon, 22 May 2023 20:56:38 +0800
Subject: [PATCH] [dygraph]unify _non_static_mode() in_dygraph_mode() and
 in_dynamic_mode() (#53856)

* [dygraph]unify _non_static_mode() in_dygraph_mode() and in_dynamic_mode()
* [dygraph]unify _non_static_mode() in_dygraph_mode() and in_dynamic_mode()
* [dygraph]unify _non_static_mode() in_dygraph_mode() and in_dynamic_mode()
* [dygraph]unify _non_static_mode() in_dygraph_mode() and in_dynamic_mode()
* [dygraph]unify _non_static_mode() in_dygraph_mode() and in_dynamic_mode()
* [dygraph]unify _non_static_mode() in_dygraph_mode() and in_dynamic_mode()
* fixed cyclic reference that caused partial import
* fixed bad change
* fix bad import
* fix bad import
* fix bad import
* fix ut failed caused by change in_dynamic_mode
* fix ut failed caused by change in_dynamic_mode
* fixed usage of in_dynamic_mode() or in_dygraph_mode()
* revert python3 to python in .pre-commit-config.yaml
* fix merge conflicts
---
 python/paddle/amp/grad_scaler.py | 5 +-
 python/paddle/autograd/autograd.py | 2 +-
 python/paddle/common_ops_import.py | 1 -
 .../distributed/auto_parallel/engine.py | 8 +-
 .../auto_parallel/process_group.py | 4 +-
 python/paddle/distributed/collective.py | 8 +-
 .../distributed/communication/all_gather.py | 2 +-
 .../communication/batch_isend_irecv.py | 2 +-
 .../distributed/communication/broadcast.py | 2 +-
 .../distributed/communication/gather.py | 2 +-
 .../paddle/distributed/communication/group.py | 8 +-
 .../distributed/communication/reduce.py | 2 +-
 .../distributed/communication/scatter.py | 2 +-
 .../communication/stream/all_gather.py | 2 +-
 .../communication/stream/all_reduce.py | 2 +-
 .../communication/stream/all_to_all.py | 4 +-
 .../communication/stream/broadcast.py | 2 +-
 .../communication/stream/gather.py | 2 +-
 .../distributed/communication/stream/recv.py | 2 +-
 .../communication/stream/reduce.py | 2 +-
 .../communication/stream/reduce_scatter.py | 4 +-
 .../communication/stream/scatter.py | 2 +-
 .../distributed/communication/stream/send.py | 2 +-
 .../distributed/fleet/base/role_maker.py | 2 +-
 python/paddle/distributed/fleet/fleet.py | 9 +-
 .../distributed/fleet/layers/mpu/mp_ops.py | 18 +-
 .../distributed/fleet/layers/mpu/random.py | 5 +-
 .../fleet/meta_optimizers/dgc_optimizer.py | 4 +-
 .../parallel_layers/pp_layers.py | 2 +-
 .../fleet/meta_parallel/pipeline_parallel.py | 2 +-
 .../pp_utils/p2p_communication.py | 6 +-
 .../sharding/group_sharded_stage3.py | 2 +-
 python/paddle/distributed/fleet/optimizer.py | 4 +-
 .../distributed/fleet/recompute/recompute.py | 4 +-
 .../fleet/recompute/recompute_hybrid.py | 2 +-
 .../fleet/utils/hybrid_parallel_inference.py | 5 +-
 .../fleet/utils/hybrid_parallel_util.py | 10 +-
 .../fleet/utils/mix_precision_utils.py | 4 +-
 python/paddle/distributed/models/moe/utils.py | 12 +-
 python/paddle/distributed/parallel.py | 16 +-
 python/paddle/distributed/utils/moe_utils.py | 6 +-
 python/paddle/distribution/bernoulli.py | 18 +-
 python/paddle/distribution/categorical.py | 8 +-
 python/paddle/distribution/dirichlet.py | 4 +-
 python/paddle/distribution/distribution.py | 4 +-
 .../paddle/distribution/exponential_family.py | 4 +-
 python/paddle/distribution/kl.py | 4 +-
 python/paddle/distribution/normal.py | 8 +-
 python/paddle/distribution/uniform.py | 10 +-
 python/paddle/fft.py | 14 +-
 python/paddle/fluid/contrib/optimizer.py | 2 +-
 python/paddle/fluid/data_feeder.py | 8 +-
 python/paddle/fluid/dygraph/base.py | 2 +-
.../fluid/dygraph/tensor_patch_methods.py | 25 +- python/paddle/fluid/framework.py | 49 +--- .../incubate/checkpoint/auto_checkpoint.py | 4 +- python/paddle/fluid/layer_helper.py | 4 +- python/paddle/fluid/layer_helper_base.py | 6 +- python/paddle/fluid/lazy_init.py | 2 +- python/paddle/fluid/reader.py | 6 +- .../fleet/hybrid_parallel_pp_recompute.py | 8 +- .../fleet/parallel_dygraph_se_resnext.py | 2 +- .../tests/unittests/test_context_manager.py | 4 +- .../tests/unittests/test_dropout_nd_op.py | 4 +- .../unittests/test_imperative_decorator.py | 2 +- .../unittests/test_imperative_double_grad.py | 2 +- .../test_imperative_ocr_attention_model.py | 2 +- .../tests/unittests/test_imperative_resnet.py | 2 +- .../test_imperative_resnet_sorted_gradient.py | 2 +- .../unittests/test_imperative_se_resnext.py | 2 +- ...perative_star_gan_with_gradient_penalty.py | 6 +- .../test_imperative_tensor_clear_gradient.py | 2 +- .../unittests/test_imperative_triple_grad.py | 2 +- .../fluid/tests/unittests/test_logical_op.py | 6 +- .../tests/unittests/test_multiclass_nms_op.py | 30 +-- .../unittests/test_squared_l2_norm_op.py | 4 +- python/paddle/fluid/unique_name.py | 4 +- python/paddle/fluid/variable_index.py | 11 +- python/paddle/framework/__init__.py | 6 +- python/paddle/framework/io.py | 12 +- python/paddle/geometric/math.py | 10 +- .../geometric/message_passing/send_recv.py | 9 +- python/paddle/geometric/reindex.py | 7 +- python/paddle/geometric/sampling/neighbors.py | 6 +- python/paddle/hapi/model.py | 25 +- python/paddle/incubate/autograd/functional.py | 10 +- .../distributed/models/moe/moe_layer.py | 4 +- .../incubate/distributed/models/moe/utils.py | 4 +- python/paddle/incubate/layers/nn.py | 6 +- .../nn/functional/fused_dropout_add.py | 5 +- .../nn/functional/fused_gate_attention.py | 4 +- .../nn/functional/fused_matmul_bias.py | 4 +- .../nn/functional/fused_transformer.py | 11 +- .../incubate/nn/layer/fused_dropout_nd.py | 4 +- .../incubate/nn/layer/fused_transformer.py | 5 +- python/paddle/incubate/nn/loss.py | 4 +- .../incubate/nn/memory_efficient_attention.py | 4 +- .../incubate/operators/graph_khop_sampler.py | 4 +- .../incubate/operators/graph_reindex.py | 4 +- .../operators/graph_sample_neighbors.py | 4 +- .../incubate/operators/graph_send_recv.py | 5 +- .../incubate/operators/softmax_mask_fuse.py | 4 +- .../softmax_mask_fuse_upper_triangle.py | 4 +- .../optimizer/distributed_fused_lamb.py | 4 +- .../paddle/incubate/optimizer/modelaverage.py | 38 +-- python/paddle/incubate/tensor/manipulation.py | 4 +- python/paddle/incubate/tensor/math.py | 10 +- python/paddle/incubate/xpu/resnet_block.py | 2 +- .../paddle/io/dataloader/dataloader_iter.py | 8 +- python/paddle/io/dataloader/dataset.py | 2 +- python/paddle/io/reader.py | 7 +- python/paddle/jit/api.py | 7 +- .../jit/dy2static/program_translator.py | 5 +- python/paddle/jit/translated_layer.py | 5 +- python/paddle/metric/metrics.py | 5 +- python/paddle/nn/clip.py | 19 +- python/paddle/nn/decode.py | 4 +- python/paddle/nn/functional/activation.py | 65 +++--- python/paddle/nn/functional/common.py | 217 +++++++----------- python/paddle/nn/functional/conv.py | 18 +- python/paddle/nn/functional/distance.py | 4 +- python/paddle/nn/functional/extension.py | 13 +- python/paddle/nn/functional/input.py | 6 +- python/paddle/nn/functional/loss.py | 54 ++--- python/paddle/nn/layer/norm.py | 42 +--- python/paddle/nn/layer/rnn.py | 3 +- python/paddle/nn/quant/format.py | 6 +- python/paddle/nn/utils/weight_norm_hook.py | 4 +- python/paddle/optimizer/adadelta.py | 
4 +- python/paddle/optimizer/adamw.py | 2 +- python/paddle/optimizer/momentum.py | 8 +- python/paddle/optimizer/optimizer.py | 18 +- .../paddle/quantization/quanters/abs_max.py | 2 +- python/paddle/regularizer.py | 42 ++-- python/paddle/signal.py | 21 +- python/paddle/sparse/unary.py | 4 +- python/paddle/static/nn/common.py | 10 +- python/paddle/static/nn/control_flow.py | 6 +- python/paddle/static/nn/metric.py | 4 +- python/paddle/static/nn/sequence_lod.py | 28 +-- python/paddle/tensor/array.py | 10 +- python/paddle/tensor/creation.py | 50 ++-- .../paddle/tensor/layer_function_generator.py | 6 +- python/paddle/tensor/linalg.py | 76 +++--- python/paddle/tensor/logic.py | 44 ++-- python/paddle/tensor/manipulation.py | 86 +++---- python/paddle/tensor/math.py | 154 ++++++------- python/paddle/tensor/ops.py | 47 ++-- python/paddle/tensor/random.py | 25 +- python/paddle/tensor/search.py | 28 +-- python/paddle/tensor/stat.py | 14 +- python/paddle/text/viterbi_decode.py | 12 +- .../utils/cpp_extension/extension_utils.py | 4 +- python/paddle/utils/dlpack.py | 6 +- python/paddle/utils/inplace_utils.py | 4 +- python/paddle/utils/layers_utils.py | 6 +- test/amp/amp_base_models.py | 8 +- test/auto_parallel/test_to_static.py | 6 +- test/dygraph_to_static/test_convert_call.py | 4 +- test/dygraph_to_static/test_lac.py | 8 +- .../inference/test_trt_multiclass_nms3_op.py | 4 +- test/legacy_test/test_dlpack.py | 2 +- .../test_composite_layer_norm.py | 4 +- test/tokenizer/test_faster_tokenizer_op.py | 5 +- 164 files changed, 869 insertions(+), 1072 deletions(-) diff --git a/python/paddle/amp/grad_scaler.py b/python/paddle/amp/grad_scaler.py index 2cade3482e9..e7774a35c1c 100644 --- a/python/paddle/amp/grad_scaler.py +++ b/python/paddle/amp/grad_scaler.py @@ -19,10 +19,11 @@ from enum import Enum import numpy as np from paddle import _C_ops, _legacy_C_ops -from paddle.fluid import core, in_dygraph_mode +from paddle.fluid import core from paddle.fluid.data_feeder import check_type from paddle.fluid.dygraph import to_variable from paddle.fluid.framework import _dygraph_tracer, dygraph_only +from paddle.framework import in_dynamic_mode from .auto_cast import amp_global_state @@ -307,7 +308,7 @@ class AmpScaler: else: param_grads_fp32.append(param._grad_ivar()) else: - if in_dygraph_mode(): + if in_dynamic_mode(): # It is very time-consuming to call c++ functions in a loop on the python side. # We put this part of the code on the c++ side to improve the speed in eager mode. ( diff --git a/python/paddle/autograd/autograd.py b/python/paddle/autograd/autograd.py index e61a7ff0092..96f8ca1bbfa 100644 --- a/python/paddle/autograd/autograd.py +++ b/python/paddle/autograd/autograd.py @@ -689,7 +689,7 @@ def _grad_for_jacobian(ys, xs, v=None): Tensor is the sum of gradients of outputs with respect to the i-th inputs. """ - if paddle.fluid._non_static_mode(): + if paddle.in_dynamic_mode(): # paddle.grad returns a list though the inputs is a signle Tensor. The # follow code snippet fixes the problem by return the first element of # xs_grad when the xs is a signle Tensor. 
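The two hunks above show the substitution this patch applies across the tree: the dynamic-graph branch is now gated on paddle.framework.in_dynamic_mode() (or paddle.in_dynamic_mode()) instead of paddle.fluid's in_dygraph_mode()/_non_static_mode(). Below is a minimal sketch of that dispatch pattern, not part of the patch; the operator name my_op is hypothetical.

from paddle import _legacy_C_ops
from paddle.framework import LayerHelper, in_dynamic_mode

def my_op(x):
    if in_dynamic_mode():
        # eager path: call the bound C++ kernel directly on the Tensor
        return _legacy_C_ops.my_op(x)
    # static path: append an operator to the current Program via LayerHelper
    helper = LayerHelper('my_op', **locals())
    out = helper.create_variable_for_type_inference(dtype=x.dtype)
    helper.append_op(type='my_op', inputs={'X': x}, outputs={'Out': out})
    return out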
diff --git a/python/paddle/common_ops_import.py b/python/paddle/common_ops_import.py index 62a55884f02..042934d66f7 100644 --- a/python/paddle/common_ops_import.py +++ b/python/paddle/common_ops_import.py @@ -25,7 +25,6 @@ from paddle.fluid.framework import ( # noqa: F401 Variable, _create_tensor, _dygraph_tracer, - _non_static_mode, convert_np_dtype_to_dtype_, default_main_program, device_guard, diff --git a/python/paddle/distributed/auto_parallel/engine.py b/python/paddle/distributed/auto_parallel/engine.py index cb735fe11b8..728a60e18a4 100644 --- a/python/paddle/distributed/auto_parallel/engine.py +++ b/python/paddle/distributed/auto_parallel/engine.py @@ -28,7 +28,7 @@ from paddle.distributed import fleet from paddle.fluid.executor import _to_name_str from paddle.framework import IrGraph from paddle.framework import _current_expected_place as _get_device -from paddle.framework import core, in_dygraph_mode +from paddle.framework import core, in_dynamic_mode from paddle.metric import Metric from paddle.static import InputSpec, Operator, Variable, global_scope @@ -312,7 +312,7 @@ class Engine: return inputs_spec, labels_spec def _prepare_data_tensor(self, inputs_spec, labels_spec, inputs, labels): - if in_dygraph_mode() or self._dygraph_mode: + if in_dynamic_mode() or self._dygraph_mode: raise ValueError("Only support static graph mode.") if inputs_spec: @@ -561,7 +561,7 @@ class Engine: self._has_prepared[mode] = True def _build(self, mode): - if in_dygraph_mode() or self._dygraph_mode: + if in_dynamic_mode() or self._dygraph_mode: paddle.disable_static() self._dygraph_mode = True self._logger.info("Building model with 'to_static' method.") @@ -1789,7 +1789,7 @@ class Engine: self._build(mode) self._plan(mode) else: - if in_dygraph_mode() or self._dygraph_mode: + if in_dynamic_mode() or self._dygraph_mode: raise ValueError( "Please call `prepare()` or `fit()` or `evaluate()` or `predict()` before calling `cost()`." ) diff --git a/python/paddle/distributed/auto_parallel/process_group.py b/python/paddle/distributed/auto_parallel/process_group.py index b5652353311..b5669b850b6 100644 --- a/python/paddle/distributed/auto_parallel/process_group.py +++ b/python/paddle/distributed/auto_parallel/process_group.py @@ -16,7 +16,7 @@ from collections import OrderedDict import paddle from paddle import _legacy_C_ops -from paddle.framework import core, in_dygraph_mode +from paddle.framework import core, in_dynamic_mode from paddle.tensor import fill_constant from ..collective import _get_global_env, _new_ring_id @@ -177,7 +177,7 @@ class ProcessGroup: ) tmp = ( paddle.to_tensor([1], dtype="int32") - if in_dygraph_mode() + if in_dynamic_mode() else fill_constant([0], dtype="int32", value="1") ) # use legacy ops diff --git a/python/paddle/distributed/collective.py b/python/paddle/distributed/collective.py index 210dff45de2..2011fe19811 100644 --- a/python/paddle/distributed/collective.py +++ b/python/paddle/distributed/collective.py @@ -19,7 +19,7 @@ import paddle # (TODO: GhostScreaming) It will be removed later. from paddle.fluid import core -from paddle.framework import in_dygraph_mode +from paddle.framework import in_dynamic_mode from .communication.group import Group, _add_new_group, is_initialized from .fleet.layers.mpu.mp_ops import _c_concat # noqa: F401 @@ -128,7 +128,7 @@ def _set_group_map_backend(group, backend): def _new_ring_id(): # NOTE(liyurui): For compatible reason, auto parallel and eager mode relay on previous syntax. 
- if in_dygraph_mode(): + if in_dynamic_mode(): global _start_ring_id _start_ring_id += 1 return _start_ring_id + max(_get_global_env().nrings, 9) @@ -198,7 +198,7 @@ def new_group(ranks=None, backend=None, timeout=_default_timeout): """ global _custom_gid global _group_map - if in_dygraph_mode(): + if in_dynamic_mode(): global _default_group_name gid = _custom_gid if _custom_gid else _new_ring_id() group_name = _default_group_name + str(gid) @@ -292,7 +292,7 @@ def new_group(ranks=None, backend=None, timeout=_default_timeout): # hang caused by cross-creation of new_group tmp = ( paddle.to_tensor([1], dtype="int32") - if in_dygraph_mode() + if in_dynamic_mode() else paddle.full([0], 1, dtype="int32") ) paddle.distributed.all_reduce(tmp, sync_op=True) diff --git a/python/paddle/distributed/communication/all_gather.py b/python/paddle/distributed/communication/all_gather.py index 0e479841af5..47844cebdf0 100644 --- a/python/paddle/distributed/communication/all_gather.py +++ b/python/paddle/distributed/communication/all_gather.py @@ -102,7 +102,7 @@ def all_gather_object(object_list, obj, group=None): # [{'foo': [1, 2, 3]}, {'bar': [4, 5, 6]}] (2 GPUs) """ assert ( - framework.in_dygraph_mode() + framework.in_dynamic_mode() ), "all_gather_object doesn't support static graph mode." tensor, len_of_tensor = convert_object_to_tensor(obj) diff --git a/python/paddle/distributed/communication/batch_isend_irecv.py b/python/paddle/distributed/communication/batch_isend_irecv.py index abdeae5bff5..17d374fe99f 100644 --- a/python/paddle/distributed/communication/batch_isend_irecv.py +++ b/python/paddle/distributed/communication/batch_isend_irecv.py @@ -159,7 +159,7 @@ def batch_isend_irecv(p2p_op_list): if _warn_cur_rank_not_in_group(group): return - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = _get_global_group() if group is None else group backend = group.backend tasks = [] diff --git a/python/paddle/distributed/communication/broadcast.py b/python/paddle/distributed/communication/broadcast.py index dd15ee529f1..dc2b6919427 100644 --- a/python/paddle/distributed/communication/broadcast.py +++ b/python/paddle/distributed/communication/broadcast.py @@ -102,7 +102,7 @@ def broadcast_object_list(object_list, src, group=None): # [{"bar": [4, 5, 6]}] (2 GPUs) """ assert ( - framework.in_dygraph_mode() + framework.in_dynamic_mode() ), "broadcast_object_list doesn't support static graph mode." rank = dist.get_rank() diff --git a/python/paddle/distributed/communication/gather.py b/python/paddle/distributed/communication/gather.py index 089f73d1d0c..34d44a7e2b4 100644 --- a/python/paddle/distributed/communication/gather.py +++ b/python/paddle/distributed/communication/gather.py @@ -55,6 +55,6 @@ def gather(tensor, gather_list=None, dst=0, group=None, sync_op=True): # [] (2 GPUs, out for rank 1) """ assert ( - framework.in_dygraph_mode() + framework.in_dynamic_mode() ), "gather doesn't support static graph mode yet." 
return stream.gather(tensor, gather_list, dst, group, sync_op) diff --git a/python/paddle/distributed/communication/group.py b/python/paddle/distributed/communication/group.py index 5fff4440877..e722d6fed1d 100644 --- a/python/paddle/distributed/communication/group.py +++ b/python/paddle/distributed/communication/group.py @@ -231,7 +231,7 @@ def get_group(id=0): def _sync_calc_stream(tensor): - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): return paddle._legacy_C_ops.c_sync_calc_stream(tensor, tensor) else: op_type = 'c_sync_calc_stream' @@ -244,7 +244,7 @@ def _sync_calc_stream(tensor): def _sync_comm_stream(tensor, ring_id=0): - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): return paddle._legacy_C_ops.c_sync_comm_stream( [tensor], [tensor], 'ring_id', ring_id ) @@ -318,7 +318,7 @@ def barrier(group=None): if group is not None and not group.is_member(): return - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = _get_global_group() if group is None else group place = framework._current_expected_place() if isinstance(place, framework.CPUPlace): @@ -332,7 +332,7 @@ def barrier(group=None): ring_id = 0 if group is None else group.id barrier_tensor = paddle.full([1], 1, dtype="int32") - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): return paddle._legacy_C_ops.barrier( barrier_tensor, barrier_tensor, 'ring_id', ring_id ) diff --git a/python/paddle/distributed/communication/reduce.py b/python/paddle/distributed/communication/reduce.py index 4ee2142856f..a44c3144889 100644 --- a/python/paddle/distributed/communication/reduce.py +++ b/python/paddle/distributed/communication/reduce.py @@ -56,7 +56,7 @@ class ReduceOp: def _get_reduce_op(reduce_op, func_name): - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): if reduce_op == ReduceOp.SUM: return framework.core.ReduceOp.SUM elif reduce_op == ReduceOp.MAX: diff --git a/python/paddle/distributed/communication/scatter.py b/python/paddle/distributed/communication/scatter.py index 2826779d557..f3ae2e358e0 100644 --- a/python/paddle/distributed/communication/scatter.py +++ b/python/paddle/distributed/communication/scatter.py @@ -108,7 +108,7 @@ def scatter_object_list( # [{'bar': [4, 5, 6]}] (2 GPUs, out for rank 1) """ assert ( - framework.in_dygraph_mode() + framework.in_dynamic_mode() ), "scatter_object_list doesn't support static graph mode." rank = dist.get_rank() diff --git a/python/paddle/distributed/communication/stream/all_gather.py b/python/paddle/distributed/communication/stream/all_gather.py index 9f4b19fd5ce..83cbe7ac1fc 100644 --- a/python/paddle/distributed/communication/stream/all_gather.py +++ b/python/paddle/distributed/communication/stream/all_gather.py @@ -171,7 +171,7 @@ def all_gather( "use_calc_stream can only be true in sync op behavior." ) - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): if paddle.is_tensor(tensor_or_tensor_list): return _all_gather_into_tensor_in_dygraph( tensor_or_tensor_list, tensor, group, sync_op, use_calc_stream diff --git a/python/paddle/distributed/communication/stream/all_reduce.py b/python/paddle/distributed/communication/stream/all_reduce.py index 6b38bffc0bf..8ddeebd43cd 100644 --- a/python/paddle/distributed/communication/stream/all_reduce.py +++ b/python/paddle/distributed/communication/stream/all_reduce.py @@ -116,7 +116,7 @@ def all_reduce( "use_calc_stream can only be true in sync op behavior." 
) - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = _get_global_group() if group is None else group return _all_reduce_in_dygraph( tensor, op, group, sync_op, use_calc_stream diff --git a/python/paddle/distributed/communication/stream/all_to_all.py b/python/paddle/distributed/communication/stream/all_to_all.py index df9c72c1da4..2425d1c06c8 100644 --- a/python/paddle/distributed/communication/stream/all_to_all.py +++ b/python/paddle/distributed/communication/stream/all_to_all.py @@ -185,7 +185,7 @@ def alltoall( if in_tensor_or_tensor_list is None: raise RuntimeError("The input should be specified.") - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = _get_global_group() if group is None else group out_is_tensor = paddle.is_tensor(out_tensor_or_tensor_list) in_is_tensor = paddle.is_tensor(in_tensor_or_tensor_list) @@ -335,7 +335,7 @@ def alltoall_single( "use_calc_stream can only be true in sync op behavior." ) - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = _get_global_group() if group is None else group return _alltoall_single_in_dygraph( out_tensor, diff --git a/python/paddle/distributed/communication/stream/broadcast.py b/python/paddle/distributed/communication/stream/broadcast.py index 91fab7c7d5c..2b3634f1a61 100644 --- a/python/paddle/distributed/communication/stream/broadcast.py +++ b/python/paddle/distributed/communication/stream/broadcast.py @@ -117,7 +117,7 @@ def broadcast(tensor, src, group=None, sync_op=True, use_calc_stream=False): "use_calc_stream can only be True in sync op behavior." ) - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = _get_global_group() if group is None else group src_rank_in_group = _get_or_throw_group_rank(src, group) diff --git a/python/paddle/distributed/communication/stream/gather.py b/python/paddle/distributed/communication/stream/gather.py index df3db07eb59..67ec7bf248e 100644 --- a/python/paddle/distributed/communication/stream/gather.py +++ b/python/paddle/distributed/communication/stream/gather.py @@ -99,7 +99,7 @@ def gather( """ assert ( - framework.in_dygraph_mode() + framework.in_dynamic_mode() ), "gather doesn't support static graph mode yet." if _warn_cur_rank_not_in_group(group): diff --git a/python/paddle/distributed/communication/stream/recv.py b/python/paddle/distributed/communication/stream/recv.py index 8fbbfbf0988..d35b005544b 100644 --- a/python/paddle/distributed/communication/stream/recv.py +++ b/python/paddle/distributed/communication/stream/recv.py @@ -105,7 +105,7 @@ def recv(tensor, src=0, group=None, sync_op=True, use_calc_stream=False): "use_calc_stream can only be True in sync op behavior." ) - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = _get_global_group() if group is None else group src_rank_in_group = _get_or_throw_group_rank(src, group) diff --git a/python/paddle/distributed/communication/stream/reduce.py b/python/paddle/distributed/communication/stream/reduce.py index a952e06a910..ba97943103c 100644 --- a/python/paddle/distributed/communication/stream/reduce.py +++ b/python/paddle/distributed/communication/stream/reduce.py @@ -131,7 +131,7 @@ def reduce( "use_calc_stream can only be true in sync op behavior." 
) - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = _get_global_group() if group is None else group dst_rank_in_group = _get_or_throw_group_rank(dst, group) return _reduce_in_dygraph( diff --git a/python/paddle/distributed/communication/stream/reduce_scatter.py b/python/paddle/distributed/communication/stream/reduce_scatter.py index 5812ec5d8de..69d1b03aeb2 100644 --- a/python/paddle/distributed/communication/stream/reduce_scatter.py +++ b/python/paddle/distributed/communication/stream/reduce_scatter.py @@ -158,7 +158,7 @@ def reduce_scatter( "use_calc_stream can only be true in sync op behavior." ) - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = _get_global_group() if group is None else group if paddle.is_tensor(tensor_or_tensor_list): return _reduce_scatter_tensor_in_dygraph( @@ -243,7 +243,7 @@ def _reduce_scatter_base( "use_calc_stream can only be true in sync op behavior." ) - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = _get_global_group() if group is None else group return _reduce_scatter_tensor_in_dygraph( out_tensor, diff --git a/python/paddle/distributed/communication/stream/scatter.py b/python/paddle/distributed/communication/stream/scatter.py index 13f9c3ecf64..d6ab2a9305a 100644 --- a/python/paddle/distributed/communication/stream/scatter.py +++ b/python/paddle/distributed/communication/stream/scatter.py @@ -201,7 +201,7 @@ def scatter( ) tensor_or_tensor_list = [] - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = _get_global_group() if group is None else group src_rank_in_group = _get_or_throw_group_rank(src, group) if paddle.is_tensor(tensor_or_tensor_list): diff --git a/python/paddle/distributed/communication/stream/send.py b/python/paddle/distributed/communication/stream/send.py index 0de989042b9..43c6d6302ca 100644 --- a/python/paddle/distributed/communication/stream/send.py +++ b/python/paddle/distributed/communication/stream/send.py @@ -104,7 +104,7 @@ def send(tensor, dst=0, group=None, sync_op=True, use_calc_stream=False): "use_calc_stream can only be True in sync op behavior." ) - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = _get_global_group() if group is None else group dst_rank_in_group = _get_or_throw_group_rank(dst, group) diff --git a/python/paddle/distributed/fleet/base/role_maker.py b/python/paddle/distributed/fleet/base/role_maker.py index 0bf7db4a2c3..a490e09c9dd 100755 --- a/python/paddle/distributed/fleet/base/role_maker.py +++ b/python/paddle/distributed/fleet/base/role_maker.py @@ -1176,7 +1176,7 @@ class PaddleCloudRoleMaker(RoleMakerBase): else: self._collective_env() self._role_is_generated = True - if not paddle.framework.in_dynamic_mode(): + if not paddle.in_dynamic_mode(): self._gloo_init() diff --git a/python/paddle/distributed/fleet/fleet.py b/python/paddle/distributed/fleet/fleet.py index e9ee02b8e51..39948ab28e6 100755 --- a/python/paddle/distributed/fleet/fleet.py +++ b/python/paddle/distributed/fleet/fleet.py @@ -17,9 +17,8 @@ import os import paddle from paddle.fluid import compiler -from paddle.fluid.framework import in_dygraph_mode from paddle.fluid.wrapped_decorator import wrap_decorator -from paddle.framework import _global_flags +from paddle.framework import _global_flags, in_dynamic_mode from paddle.framework.ir import apply_build_strategy from .base import topology as tp @@ -281,7 +280,7 @@ class Fleet: "CUDA_VISIBLE_DEVICES shoule be set only 1 card if you use `python` to launch fleet program." 
) - if in_dygraph_mode(): + if in_dynamic_mode(): if self.worker_num() == 1: # if worker_num is 1, should construct default topology & hcg self._topology = tp.CommunicateTopology() @@ -1270,7 +1269,7 @@ class Fleet: ) else: if ( - in_dygraph_mode() + in_dynamic_mode() or self._role_maker._is_non_distributed() or self._is_collective ): @@ -1286,7 +1285,7 @@ class Fleet: context["user_defined_strategy"] = copy.deepcopy( self._user_defined_strategy ) - if in_dygraph_mode(): + if in_dynamic_mode(): # imitate target optimizer retrieval target_opt = self.user_defined_optimizer self._context = context diff --git a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py index b435795aded..322281d3c9f 100644 --- a/python/paddle/distributed/fleet/layers/mpu/mp_ops.py +++ b/python/paddle/distributed/fleet/layers/mpu/mp_ops.py @@ -16,7 +16,7 @@ import paddle from paddle import _legacy_C_ops from paddle.distributed import collective from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype -from paddle.framework import LayerHelper, _create_tensor, in_dygraph_mode +from paddle.framework import LayerHelper, _create_tensor, in_dynamic_mode from paddle.nn import Layer from paddle.nn.utils import dygraph_utils @@ -39,7 +39,7 @@ def _c_identity(tensor, group=None): return ring_id = 0 if group is None else group.id - if in_dygraph_mode(): + if in_dynamic_mode(): from paddle.autograd import PyLayer class c_identity_eager(PyLayer): @@ -108,7 +108,7 @@ def _c_concat(tensor, group=None): rank = group.rank nranks = group.nranks - if in_dygraph_mode(): + if in_dynamic_mode(): return _legacy_C_ops.c_concat( tensor, 'ring_id', @@ -174,7 +174,7 @@ def _c_split(tensor, group=None): else group.nranks ) - if in_dygraph_mode(): + if in_dynamic_mode(): return _legacy_C_ops.c_split( tensor, 'use_calc_stream', @@ -226,7 +226,7 @@ def _mp_allreduce( if group is not None and not group.is_member(): return - if in_dygraph_mode(): + if in_dynamic_mode(): group = collective._get_default_group() if group is None else group assert op == ReduceOp.SUM, f"Unknown parameter: {op}." @@ -308,7 +308,7 @@ def _c_lookup_table(table, index, start_index=0, name=None): Returns: Tensor. 
""" - if in_dygraph_mode(): + if in_dynamic_mode(): return _legacy_C_ops.c_embedding( table, index, "start_index", start_index ) @@ -401,7 +401,7 @@ def _c_softmax_with_cross_entropy( if input_dims - 1 == label_dims: label = paddle.unsqueeze(label, axis=-1) - if in_dygraph_mode(): + if in_dynamic_mode(): softmax, loss = _legacy_C_ops.c_softmax_with_cross_entropy( logits, label, @@ -445,7 +445,7 @@ def _linear(x, weight, bias=None, name=None): """ Fuction Linear """ - if in_dygraph_mode(): + if in_dynamic_mode(): pre_bias = _create_tensor(dtype=x.dtype) _legacy_C_ops.matmul( x, @@ -810,7 +810,7 @@ def split( supported_operations ) ) - if in_dygraph_mode(): + if in_dynamic_mode(): raise ValueError( "paddle.distributed.split cannot be used in dynamic " "graph mode, plese use ParallelEmbedding, ParallelRowLinear, " diff --git a/python/paddle/distributed/fleet/layers/mpu/random.py b/python/paddle/distributed/fleet/layers/mpu/random.py index 1447fccb66e..aabb8dc3fa1 100644 --- a/python/paddle/distributed/fleet/layers/mpu/random.py +++ b/python/paddle/distributed/fleet/layers/mpu/random.py @@ -21,8 +21,7 @@ from paddle import _legacy_C_ops from paddle.common_ops_import import Variable from paddle.fluid import core from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import in_dygraph_mode -from paddle.framework import LayerHelper +from paddle.framework import LayerHelper, in_dynamic_mode __all__ = [] @@ -218,7 +217,7 @@ def dropout( ) # semantic transfer # dygraph using tracker, doesn't need determinate seed - if in_dygraph_mode(): + if in_dynamic_mode(): out, mask = _legacy_C_ops.dropout( x, 'dropout_prob', diff --git a/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py index 27c1a5b651b..f1967b5e1de 100644 --- a/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py +++ b/python/paddle/distributed/fleet/meta_optimizers/dgc_optimizer.py @@ -22,7 +22,7 @@ import paddle from paddle.common_ops_import import LayerHelper from paddle.fluid.dygraph import base as imperative_base from paddle.fluid.optimizer import Momentum, Optimizer -from paddle.framework import core, in_dygraph_mode +from paddle.framework import core, in_dynamic_mode from paddle.nn.clip import ClipGradByNorm, append_gradient_clip_ops from paddle.regularizer import L1Decay, L2Decay from paddle.static import create_global_var @@ -46,7 +46,7 @@ class DGCMomentumOptimizer(Optimizer): grad_clip=None, name=None, ): - if in_dygraph_mode(): + if in_dynamic_mode(): raise Exception("In dygraph, don't support DGCMomentumOptimizer.") assert ( diff --git a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py index bc403da76ba..c18d6311618 100755 --- a/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py +++ b/python/paddle/distributed/fleet/meta_parallel/parallel_layers/pp_layers.py @@ -533,7 +533,7 @@ class PipelineLayer(nn.Layer): for key, comm in self.shared_comm.items(): param = getattr(self.shared_layers[key], comm['weight_attr']) # need use trace_op to allreduce weight - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): with paddle.framework.no_grad(): paddle.distributed.all_reduce( param.grad diff --git a/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py b/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py index 5ff01d35ec5..84a32379866 100755 --- 
a/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py +++ b/python/paddle/distributed/fleet/meta_parallel/pipeline_parallel.py @@ -555,7 +555,7 @@ class PipelineParallelWithInterleave(PipelineParallel): super().__init__(layers=layers, hcg=hcg, strategy=strategy) assert layers.get_num_virtual_stages() > 1 assert ( - framework.in_dygraph_mode() + framework.in_dynamic_mode() ), "virtual pipeline stage with interleave only support eager dygraph mode" # setup for interleave scheduler self.num_model_chunks = layers.get_num_virtual_stages() diff --git a/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py b/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py index 1da86c83640..4983de3f2ab 100644 --- a/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py +++ b/python/paddle/distributed/fleet/meta_parallel/pp_utils/p2p_communication.py @@ -220,7 +220,7 @@ def _partial_send_op( tensor, group, use_calc_stream, ring_id, dst, nranks, rank_id ): dst_rank_in_group = dst if group is None else group.get_group_rank(dst) - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): group = ( paddle.distributed.collective._get_default_group() if group is None @@ -291,7 +291,7 @@ def recv_partial( else: if use_calc_stream: recv_op = paddle.distributed.recv - elif framework.in_dygraph_mode(): + elif framework.in_dynamic_mode(): recv_op = paddle.distributed.irecv return recv_op(tensor.detach(), src=src_rank, group=group) @@ -656,7 +656,7 @@ def _p2p_helper( tasks.append(task) _xpu_comm_group_end() if not sync_recv: - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): # wait irecv tasks in eager dygraph mode with new comm library for task in tasks: assert task is not None diff --git a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py index bdf0ec29cd7..b1c47de593b 100644 --- a/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py +++ b/python/paddle/distributed/fleet/meta_parallel/sharding/group_sharded_stage3.py @@ -38,7 +38,7 @@ def _all_gather(tensor, buffer_size, group): """ assert group is not None - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): out = paddle.zeros([buffer_size], dtype=tensor.dtype) task = group.process_group.all_gather(tensor, out) return out, task diff --git a/python/paddle/distributed/fleet/optimizer.py b/python/paddle/distributed/fleet/optimizer.py index 5abe7c47e9b..6fc414d0a65 100755 --- a/python/paddle/distributed/fleet/optimizer.py +++ b/python/paddle/distributed/fleet/optimizer.py @@ -15,7 +15,7 @@ import copy from paddle.distributed import fleet -from paddle.fluid.framework import in_dygraph_mode +from paddle.framework import in_dynamic_mode from .meta_optimizers import HeterParallelOptimizer, HybridParallelOptimizer from .utils.log_util import logger @@ -81,7 +81,7 @@ def _dygraph_distributed_optimizer(optimizer, strategy=None): def distributed_optimizer(*args, **kwargs): - if in_dygraph_mode(): + if in_dynamic_mode(): return _dygraph_distributed_optimizer(*args, **kwargs) else: return fleet.fleet.distributed_optimizer(*args, **kwargs) diff --git a/python/paddle/distributed/fleet/recompute/recompute.py b/python/paddle/distributed/fleet/recompute/recompute.py index 8bbbe8e4e7e..ec0690a10bc 100755 --- a/python/paddle/distributed/fleet/recompute/recompute.py +++ b/python/paddle/distributed/fleet/recompute/recompute.py @@ -21,7 
+21,7 @@ from paddle.autograd import PyLayer from paddle.distributed.fleet.meta_parallel.parallel_layers.random import ( get_rng_state_tracker, ) -from paddle.framework import core, in_dygraph_mode +from paddle.framework import core, in_dynamic_mode from ..utils.log_util import logger @@ -198,7 +198,7 @@ class RecomputeFunction(PyLayer): forward_outputs_with_grad, backward_inputs_with_grad ) - if in_dygraph_mode(): + if in_dynamic_mode(): grads = tuple( inp._grad_ivar() for inp in detached_inputs diff --git a/python/paddle/distributed/fleet/recompute/recompute_hybrid.py b/python/paddle/distributed/fleet/recompute/recompute_hybrid.py index a5689020eb0..c83cf549217 100644 --- a/python/paddle/distributed/fleet/recompute/recompute_hybrid.py +++ b/python/paddle/distributed/fleet/recompute/recompute_hybrid.py @@ -161,7 +161,7 @@ class _HPRecomputeFunction(PyLayer): # If not marked non_differentiable, all output tensors' attr `stop gradient` # will be reset to `False` in c++ backend. # See https://github.com/PaddlePaddle/Paddle/blob/9d62efb0e6e5373823039d9eda96cd5905426c0a/paddle/fluid/pybind/eager_py_layer.cc#L388 - if framework.in_dygraph_mode() and state: + if framework.in_dynamic_mode() and state: ctx.mark_non_differentiable(arg) else: ctx.inputs.append(arg) diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py index 68b5581d3be..cdec2cfd63f 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_inference.py @@ -20,8 +20,7 @@ from paddle.distributed import fleet # (TODO: GhostScreaming) It will be removed later. from paddle.fluid import core -from paddle.fluid.framework import in_dygraph_mode -from paddle.framework import Block, Program +from paddle.framework import Block, Program, in_dynamic_mode class HybridParallelInferenceHelper: @@ -205,7 +204,7 @@ class HybridParallelInferenceHelper: self._device = "gpu" assert self._device, "Only gpu are supported." - assert not in_dygraph_mode(), "Only static graph mode is supported." + assert not in_dynamic_mode(), "Only static graph mode is supported." 
op_maker = core.op_proto_and_checker_maker self._op_role = op_maker.OpRole diff --git a/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py b/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py index cd0455c8069..e07fee4fb56 100644 --- a/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py +++ b/python/paddle/distributed/fleet/utils/hybrid_parallel_util.py @@ -17,7 +17,7 @@ from paddle import framework from paddle.distributed.parallel import ( _split_tensors, build_groups, - in_dygraph_mode, + in_dynamic_mode, sync_params_buffers, ) @@ -131,7 +131,7 @@ def _broadcast_data_help(data, shape, dtype, hcg): ) if mp_rank != 0: - if in_dygraph_mode(): + if in_dynamic_mode(): data._clear_data() input_data._share_buffer_to(data) else: @@ -174,7 +174,7 @@ def broadcast_input_data(hcg, *inputs, **kwargs): for v in inputs: if isinstance(v, core.eager.Tensor): with framework.no_grad(): - if in_dygraph_mode() and not eval(f"v.place.is_{dev}_place")(): + if in_dynamic_mode() and not eval(f"v.place.is_{dev}_place")(): v_gpu = v._copy_to(place, True) v._clear_data() v_gpu._share_buffer_to(v) @@ -185,7 +185,7 @@ def broadcast_input_data(hcg, *inputs, **kwargs): for k, v in kwargs.items(): if isinstance(v, core.eager.Tensor): with framework.no_grad(): - if in_dygraph_mode() and not eval(f"v.place.is_{dev}_place")(): + if in_dynamic_mode() and not eval(f"v.place.is_{dev}_place")(): v_gpu = v._copy_to(place, True) v._clear_data() v_gpu._share_buffer_to(v) @@ -217,7 +217,7 @@ def fused_allreduce_gradients_with_group( ): apply_func = ( _apply_collective_grads_eager - if in_dygraph_mode() + if in_dynamic_mode() else _apply_collective_grads ) with framework.no_grad(): diff --git a/python/paddle/distributed/fleet/utils/mix_precision_utils.py b/python/paddle/distributed/fleet/utils/mix_precision_utils.py index 9e172d840bd..ef8357cc309 100644 --- a/python/paddle/distributed/fleet/utils/mix_precision_utils.py +++ b/python/paddle/distributed/fleet/utils/mix_precision_utils.py @@ -120,7 +120,7 @@ class MixPrecisionOptimizer: if param.stop_gradient: continue grad_var = param.main_grad - if framework.in_dygraph_mode(): + if paddle.in_dynamic_mode(): if ( hasattr(grad_var, "is_selected_rows") and grad_var.is_selected_rows() @@ -151,7 +151,7 @@ class MixPrecisionOptimizer: if param.stop_gradient: continue grad_var = param.main_grad - if framework.in_dygraph_mode(): + if paddle.in_dynamic_mode(): if ( hasattr(grad_var, "is_selected_rows") and grad_var.is_selected_rows() diff --git a/python/paddle/distributed/models/moe/utils.py b/python/paddle/distributed/models/moe/utils.py index 8a9d199ceea..69d43862bdb 100644 --- a/python/paddle/distributed/models/moe/utils.py +++ b/python/paddle/distributed/models/moe/utils.py @@ -14,7 +14,7 @@ from paddle import _legacy_C_ops from paddle.common_ops_import import check_variable_and_dtype -from paddle.framework import LayerHelper, in_dygraph_mode +from paddle.framework import LayerHelper, in_dynamic_mode def _number_count(numbers, upper_range): @@ -39,7 +39,7 @@ def _number_count(numbers, upper_range): number_count = paddle.distributed.utils.number_count(numbers, upper_range) print(number_count) # the result: [2, 0, 2, 0, 0, 0] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _legacy_C_ops.number_count(numbers, 'upper_range', upper_range) else: op_type = 'number_count' @@ -86,7 +86,7 @@ def _assign_pos(x, cum_count): pos = paddle.distributed.utils.assign_pos(x=numbers, cum_count=num_cum) print(pos) # the result: (2, 0, 3, 1) """ - if 
in_dygraph_mode(): + if in_dynamic_mode(): return _legacy_C_ops.assign_pos(x, cum_count, cum_count[-1]) else: op_type = 'assign_pos' @@ -121,7 +121,7 @@ def _random_routing(topk_idx, topk_value, prob, topk=2): prob: random prob, shape=(topk_idx.shape[0],) """ if topk == 2: - if in_dygraph_mode(): + if in_dynamic_mode(): return _legacy_C_ops.random_routing(prob, topk_value, topk_idx) else: raise RuntimeError("Not supporting static graph mode now") @@ -150,7 +150,7 @@ def _limit_by_capacity(expert_count, capacity, n_worker): out = paddle.distributed.utils.limit_by_capacity(expert_count, capacity, n_work) print(out) # the result: [1, 2, 2, 4, 3, 3] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _legacy_C_ops.limit_by_capacity( expert_count, capacity, 'n_worker', n_worker ) @@ -195,7 +195,7 @@ def _prune_gate_by_capacity(gate_idx, expert_count, n_expert, n_worker): # Tensor(shape=[8], dtype=int32, place=CUDAPlace(0), stop_gradient=True, [1, 3, 3, 3, -1, 2, 1, 1]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _legacy_C_ops.prune_gate_by_capacity( gate_idx, expert_count, "n_expert", n_expert, "n_worker", n_worker ) diff --git a/python/paddle/distributed/parallel.py b/python/paddle/distributed/parallel.py index 1737d0731a9..f8888b9f71c 100644 --- a/python/paddle/distributed/parallel.py +++ b/python/paddle/distributed/parallel.py @@ -47,7 +47,7 @@ from paddle.distributed.fleet.launch_utils import check_backend # (TODO: GhostScreaming) It will be removed later. from paddle.framework import _set_expected_place from paddle.framework import base as imperative_base -from paddle.framework import core, in_dygraph_mode +from paddle.framework import core, in_dynamic_mode from paddle.nn.layer import layers from paddle.utils import deprecated @@ -101,7 +101,7 @@ def _reshape_inplace(x, shape): @framework.dygraph_only def _split_tensors(coalesced_grads_and_grad_vars): - if in_dygraph_mode(): + if in_dynamic_mode(): for ( coalesced_grad, origin_grad_vars, @@ -356,7 +356,7 @@ class DataParallel(layers.Layer): super().__init__(layers.full_name() + "_data_parallel") assert ( - in_dygraph_mode() + in_dynamic_mode() ), "It's not supported to construct DataParallel in static graph mode." self._layers = layers @@ -381,7 +381,7 @@ class DataParallel(layers.Layer): "constructing the DataParallel." ) - if in_dygraph_mode(): + if in_dynamic_mode(): self.group = ( paddle.distributed.collective._get_default_group() if self.group is None @@ -456,7 +456,7 @@ class DataParallel(layers.Layer): check_layer_sparse(sublayer) for sublayer, _ in layers_param ] - if in_dygraph_mode(): + if in_dynamic_mode(): self.group_indices = core.eager_assign_group_by_size( trainable_parameters, is_sparse_gradient, @@ -1041,7 +1041,7 @@ def init_parallel_env(): group = None - if backend in _valid_backend_list and in_dygraph_mode(): + if backend in _valid_backend_list and in_dynamic_mode(): if _default_group_name in _get_group_map_by_name(): return _get_group_map_by_name()[_default_group_name] _set_default_backend(backend) @@ -1212,7 +1212,7 @@ def get_rank(group=None): print("The rank is %d" % dist.get_rank()) # The rank is 0 """ - if in_dygraph_mode() and group: + if in_dynamic_mode() and group: return group.rank assert group is None, "Only support group argument in eager mode." 
@@ -1244,7 +1244,7 @@ def get_world_size(group=None): print("The world_size is %d" % dist.get_world_size()) # The world_size is 1 """ - if in_dygraph_mode() and group: + if in_dynamic_mode() and group: return group.world_size assert group is None, "Only support group argument in eager mode." diff --git a/python/paddle/distributed/utils/moe_utils.py b/python/paddle/distributed/utils/moe_utils.py index fa003c1a00d..ff0d04b0742 100644 --- a/python/paddle/distributed/utils/moe_utils.py +++ b/python/paddle/distributed/utils/moe_utils.py @@ -14,7 +14,7 @@ from paddle import _legacy_C_ops from paddle.common_ops_import import check_variable_and_dtype -from paddle.framework import LayerHelper, in_dygraph_mode +from paddle.framework import LayerHelper, in_dynamic_mode def global_scatter( @@ -102,7 +102,7 @@ def global_scatter( return ring_id = 0 if group is None else group.id - if in_dygraph_mode(): + if in_dynamic_mode(): return _legacy_C_ops.global_scatter( x, local_count, @@ -219,7 +219,7 @@ def global_gather( return ring_id = 0 if group is None else group.id - if in_dygraph_mode(): + if in_dynamic_mode(): return _legacy_C_ops.global_gather( x, local_count, diff --git a/python/paddle/distribution/bernoulli.py b/python/paddle/distribution/bernoulli.py index e961ee9d018..0cecc6024f3 100644 --- a/python/paddle/distribution/bernoulli.py +++ b/python/paddle/distribution/bernoulli.py @@ -18,8 +18,8 @@ import numpy as np import paddle from paddle.distribution import exponential_family from paddle.fluid.data_feeder import check_type, convert_dtype -from paddle.fluid.framework import _non_static_mode from paddle.fluid.layers import tensor +from paddle.framework import in_dynamic_mode from paddle.nn.functional import ( binary_cross_entropy_with_logits, sigmoid, @@ -93,7 +93,7 @@ class Bernoulli(exponential_family.ExponentialFamily): def __init__(self, probs, name=None): self.name = name or 'Bernoulli' - if not _non_static_mode(): + if not in_dynamic_mode(): check_type( probs, 'probs', @@ -110,7 +110,7 @@ class Bernoulli(exponential_family.ExponentialFamily): self.dtype = paddle.get_default_dtype() # Check probs range [0, 1]. 
- if _non_static_mode(): + if in_dynamic_mode(): """Not use `paddle.any` in static mode, which always be `True`.""" if ( paddle.any(self.probs < 0) @@ -176,7 +176,7 @@ class Bernoulli(exponential_family.ExponentialFamily): # [100, 2, 2] """ name = self.name + '_sample' - if not _non_static_mode(): + if not in_dynamic_mode(): check_type( shape, 'shape', @@ -255,7 +255,7 @@ class Bernoulli(exponential_family.ExponentialFamily): # 288.66418457) """ name = self.name + '_rsample' - if not _non_static_mode(): + if not in_dynamic_mode(): check_type( shape, 'shape', @@ -317,7 +317,7 @@ class Bernoulli(exponential_family.ExponentialFamily): # [1.]) """ name = self.name + '_cdf' - if not _non_static_mode(): + if not in_dynamic_mode(): check_type(value, 'value', tensor.Variable, name) value = self._check_values_dtype_in_probs(self.probs, value) @@ -355,7 +355,7 @@ class Bernoulli(exponential_family.ExponentialFamily): # [-1.20397282]) """ name = self.name + '_log_prob' - if not _non_static_mode(): + if not in_dynamic_mode(): check_type(value, 'value', tensor.Variable, name) value = self._check_values_dtype_in_probs(self.probs, value) @@ -394,7 +394,7 @@ class Bernoulli(exponential_family.ExponentialFamily): # [0.29999998]) """ name = self.name + '_prob' - if not _non_static_mode(): + if not in_dynamic_mode(): check_type(value, 'value', tensor.Variable, name) return self.log_prob(value).exp(name=name) @@ -459,7 +459,7 @@ class Bernoulli(exponential_family.ExponentialFamily): # 0.33891910) """ name = self.name + '_kl_divergence' - if not _non_static_mode(): + if not in_dynamic_mode(): check_type(other, 'other', Bernoulli, name) a_logits = self.logits diff --git a/python/paddle/distribution/categorical.py b/python/paddle/distribution/categorical.py index 7a02942b89c..6e3f7c44697 100644 --- a/python/paddle/distribution/categorical.py +++ b/python/paddle/distribution/categorical.py @@ -17,8 +17,8 @@ import numpy as np import paddle from paddle.distribution import distribution from paddle.fluid.data_feeder import check_type, convert_dtype -from paddle.fluid.framework import _non_static_mode from paddle.fluid.layers import tensor +from paddle.framework import in_dynamic_mode from paddle.tensor import multinomial @@ -90,7 +90,7 @@ class Categorical(distribution.Distribution): logits(list|tuple|numpy.ndarray|Tensor): The logits input of categorical distribution. The data type is float32 or float64. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. 
""" - if not _non_static_mode(): + if not in_dynamic_mode(): check_type( logits, 'logits', @@ -146,7 +146,7 @@ class Categorical(distribution.Distribution): """ name = self.name + '_sample' - if not _non_static_mode(): + if not in_dynamic_mode(): check_type(shape, 'shape', (list), 'sample') num_samples = np.prod(np.array(shape)) @@ -208,7 +208,7 @@ class Categorical(distribution.Distribution): """ name = self.name + '_kl_divergence' - if not _non_static_mode(): + if not in_dynamic_mode(): check_type(other, 'other', Categorical, 'kl_divergence') logits = self.logits - paddle.max(self.logits, axis=-1, keepdim=True) diff --git a/python/paddle/distribution/dirichlet.py b/python/paddle/distribution/dirichlet.py index ca55285540d..5639ef8e3c3 100644 --- a/python/paddle/distribution/dirichlet.py +++ b/python/paddle/distribution/dirichlet.py @@ -15,8 +15,8 @@ import paddle from paddle.distribution import exponential_family from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode class Dirichlet(exponential_family.ExponentialFamily): @@ -159,7 +159,7 @@ class Dirichlet(exponential_family.ExponentialFamily): def _dirichlet(concentration, name=None): - if in_dygraph_mode(): + if in_dynamic_mode(): return paddle._C_ops.dirichlet(concentration) else: op_type = 'dirichlet' diff --git a/python/paddle/distribution/distribution.py b/python/paddle/distribution/distribution.py index ae532f45f0a..af63c201810 100644 --- a/python/paddle/distribution/distribution.py +++ b/python/paddle/distribution/distribution.py @@ -26,8 +26,8 @@ import numpy as np import paddle from paddle import _C_ops from paddle.fluid.data_feeder import check_variable_and_dtype, convert_dtype -from paddle.fluid.framework import in_dygraph_mode from paddle.fluid.layers import tensor +from paddle.framework import in_dynamic_mode class Distribution: @@ -221,7 +221,7 @@ class Distribution: Returns: value (Tensor): Change value's dtype if value's dtype is different from param. 
""" - if in_dygraph_mode(): + if in_dynamic_mode(): if value.dtype != param.dtype and convert_dtype(value.dtype) in [ 'float32', 'float64', diff --git a/python/paddle/distribution/exponential_family.py b/python/paddle/distribution/exponential_family.py index 0939e73f726..fbe7e71ebb1 100644 --- a/python/paddle/distribution/exponential_family.py +++ b/python/paddle/distribution/exponential_family.py @@ -14,7 +14,7 @@ import paddle from paddle.distribution import distribution -from paddle.fluid.framework import _non_static_mode +from paddle.framework import in_dynamic_mode class ExponentialFamily(distribution.Distribution): @@ -61,7 +61,7 @@ class ExponentialFamily(distribution.Distribution): log_norm = self._log_normalizer(*natural_parameters) - if _non_static_mode(): + if in_dynamic_mode(): grads = paddle.grad( log_norm.sum(), natural_parameters, create_graph=True ) diff --git a/python/paddle/distribution/kl.py b/python/paddle/distribution/kl.py index 40be41e06c5..1b6e1a4b8d9 100644 --- a/python/paddle/distribution/kl.py +++ b/python/paddle/distribution/kl.py @@ -27,7 +27,7 @@ from paddle.distribution.laplace import Laplace from paddle.distribution.lognormal import LogNormal from paddle.distribution.normal import Normal from paddle.distribution.uniform import Uniform -from paddle.fluid.framework import _non_static_mode +from paddle.framework import in_dynamic_mode __all__ = ["register_kl", "kl_divergence"] @@ -229,7 +229,7 @@ def _kl_expfamily_expfamily(p, q): p_log_norm = p._log_normalizer(*p_natural_params) try: - if _non_static_mode(): + if in_dynamic_mode(): p_grads = paddle.grad( p_log_norm, p_natural_params, create_graph=True ) diff --git a/python/paddle/distribution/normal.py b/python/paddle/distribution/normal.py index 31a3750a048..7ba987819a3 100644 --- a/python/paddle/distribution/normal.py +++ b/python/paddle/distribution/normal.py @@ -20,8 +20,8 @@ import numpy as np import paddle from paddle.distribution import distribution from paddle.fluid.data_feeder import check_type, convert_dtype -from paddle.fluid.framework import _non_static_mode from paddle.fluid.layers import tensor +from paddle.framework import in_dynamic_mode from paddle.tensor import random @@ -87,7 +87,7 @@ class Normal(distribution.Distribution): """ def __init__(self, loc, scale, name=None): - if not _non_static_mode(): + if not in_dynamic_mode(): check_type( loc, 'loc', @@ -166,7 +166,7 @@ class Normal(distribution.Distribution): if not isinstance(shape, Iterable): raise TypeError('sample shape must be Iterable object.') - if not _non_static_mode(): + if not in_dynamic_mode(): check_type(seed, 'seed', (int), 'sample') shape = list(shape) @@ -321,7 +321,7 @@ class Normal(distribution.Distribution): Tensor, kl-divergence between two normal distributions.The data type is float32. 
""" - if not _non_static_mode(): + if not in_dynamic_mode(): check_type(other, 'other', Normal, 'kl_divergence') name = self.name + '_kl_divergence' diff --git a/python/paddle/distribution/uniform.py b/python/paddle/distribution/uniform.py index 8547c6f9900..1d2cb885b56 100644 --- a/python/paddle/distribution/uniform.py +++ b/python/paddle/distribution/uniform.py @@ -18,8 +18,8 @@ import paddle from paddle import _C_ops from paddle.distribution import distribution from paddle.fluid.data_feeder import check_type, convert_dtype -from paddle.fluid.framework import _non_static_mode, in_dygraph_mode from paddle.fluid.layers import tensor +from paddle.framework import in_dynamic_mode from paddle.tensor import random @@ -92,7 +92,7 @@ class Uniform(distribution.Distribution): """ def __init__(self, low, high, name=None): - if not _non_static_mode(): + if not in_dynamic_mode(): check_type( low, 'low', @@ -152,7 +152,7 @@ class Uniform(distribution.Distribution): Tensor, A tensor with prepended dimensions shape. The data type is float32. """ - if not _non_static_mode(): + if not in_dynamic_mode(): check_type(shape, 'shape', (list), 'sample') check_type(seed, 'seed', (int), 'sample') @@ -205,7 +205,7 @@ class Uniform(distribution.Distribution): """ value = self._check_values_dtype_in_probs(self.low, value) - if in_dygraph_mode(): + if in_dynamic_mode(): # ensure value in [low, high] lb_bool = self.low < value ub_bool = value < self.high @@ -234,7 +234,7 @@ class Uniform(distribution.Distribution): """ value = self._check_values_dtype_in_probs(self.low, value) - if in_dygraph_mode(): + if in_dynamic_mode(): lb_bool = self.low < value ub_bool = value < self.high lb = _C_ops.cast(lb_bool, value.dtype) diff --git a/python/paddle/fft.py b/python/paddle/fft.py index 438c65ae2f0..3e3b88cc3aa 100644 --- a/python/paddle/fft.py +++ b/python/paddle/fft.py @@ -20,8 +20,8 @@ import paddle from . 
import _C_ops from .fluid.data_feeder import check_variable_and_dtype -from .fluid.framework import in_dygraph_mode from .fluid.layer_helper import LayerHelper +from .framework import in_dynamic_mode from .tensor.attribute import is_floating_point, is_integer from .tensor.creation import _complex_to_real_dtype, _real_to_complex_dtype @@ -1437,7 +1437,7 @@ def fft_c2c(x, n, axis, norm, forward, name): s = [n] x = _resize_fft_input(x, s, axes) - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.fft_c2c(x, axes, norm, forward) else: op_type = 'fft_c2c' @@ -1468,7 +1468,7 @@ def fft_r2c(x, n, axis, norm, forward, onesided, name): _check_fft_n(n) s = [n] x = _resize_fft_input(x, s, axes) - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.fft_r2c(x, axes, norm, forward, onesided) else: op_type = 'fft_r2c' @@ -1511,7 +1511,7 @@ def fft_c2r(x, n, axis, norm, forward, name): s = [n // 2 + 1] x = _resize_fft_input(x, s, axes) - if in_dygraph_mode(): + if in_dynamic_mode(): if n is not None: out = _C_ops.fft_c2r(x, axes, norm, forward, n) else: @@ -1570,7 +1570,7 @@ def fftn_c2c(x, s, axes, norm, forward, name): if s is not None: x = _resize_fft_input(x, s, axes) - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.fft_c2c(x, axes, norm, forward) else: op_type = 'fft_c2c' @@ -1620,7 +1620,7 @@ def fftn_r2c(x, s, axes, norm, forward, onesided, name): if s is not None: x = _resize_fft_input(x, s, axes) - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.fft_r2c(x, axes, norm, forward, onesided) else: op_type = 'fft_r2c' @@ -1684,7 +1684,7 @@ def fftn_c2r(x, s, axes, norm, forward, name): fft_input_shape[-1] = fft_input_shape[-1] // 2 + 1 x = _resize_fft_input(x, fft_input_shape, axes) - if in_dygraph_mode(): + if in_dynamic_mode(): if s is not None: out = _C_ops.fft_c2r(x, axes, norm, forward, s[-1]) else: diff --git a/python/paddle/fluid/contrib/optimizer.py b/python/paddle/fluid/contrib/optimizer.py index 57f6e8114db..e97b9d8c49c 100644 --- a/python/paddle/fluid/contrib/optimizer.py +++ b/python/paddle/fluid/contrib/optimizer.py @@ -229,7 +229,7 @@ class Momentum(Optimizer): else None ) - if framework._non_static_mode(): + if framework.in_dygraph_mode(): _, _, _ = _legacy_C_ops.momentum( param_and_grad[0], param_and_grad[1], diff --git a/python/paddle/fluid/data_feeder.py b/python/paddle/fluid/data_feeder.py index ef711fb4fa1..687504e3670 100644 --- a/python/paddle/fluid/data_feeder.py +++ b/python/paddle/fluid/data_feeder.py @@ -22,8 +22,8 @@ import struct from .framework import ( Variable, default_main_program, + in_dygraph_mode, _current_expected_place, - _non_static_mode, ) from .framework import _cpu_num, _cuda_ids @@ -140,7 +140,7 @@ def check_type(input, input_name, expected_type, op_name, extra_message=''): # in dynamic graph mode. # 2. Performance considerations. Because these checks are executed at # each step in dynamic graph mode, it will bring a heavy performance burden. 
- if _non_static_mode(): + if in_dygraph_mode(): return # NOTE: `in_declarative_mode` is used to determined whether this op is called under @@ -171,7 +171,7 @@ def check_dtype( input_dtype, input_name, expected_dtype, op_name, extra_message='' ): # See NOTE [ Why skip dynamic graph check ] - if _non_static_mode(): + if in_dygraph_mode(): return if convert_dtype(input_dtype) in ['float16']: warnings.warn( @@ -208,7 +208,7 @@ def check_shape( expected_tensor_dtype=('int32', 'int64'), ): # See NOTE [ Why skip dynamic graph check ] - if _non_static_mode(): + if in_dygraph_mode(): return check_type(shape, 'shape', expected_shape_type, op_name) if expected_element_type is not None and not isinstance(shape, Variable): diff --git a/python/paddle/fluid/dygraph/base.py b/python/paddle/fluid/dygraph/base.py index 3430d78d6d7..538f5a0492f 100644 --- a/python/paddle/fluid/dygraph/base.py +++ b/python/paddle/fluid/dygraph/base.py @@ -106,7 +106,7 @@ def program_desc_tracing_guard(enable): @signature_safe_contextmanager def param_guard(parameters): # Note: parameters is a reference of self._parameters or self._buffers - if in_declarative_mode() and not framework.in_dygraph_mode() and parameters: + if in_declarative_mode() and not paddle.in_dynamic_mode() and parameters: origin_parameters = parameters.copy() for name, var_base in parameters.items(): if isinstance(var_base, list): diff --git a/python/paddle/fluid/dygraph/tensor_patch_methods.py b/python/paddle/fluid/dygraph/tensor_patch_methods.py index ad51d8ff31a..58f99b9e98a 100644 --- a/python/paddle/fluid/dygraph/tensor_patch_methods.py +++ b/python/paddle/fluid/dygraph/tensor_patch_methods.py @@ -270,7 +270,7 @@ def monkey_patch_tensor(): # 4: [5000.] """ - if framework._non_static_mode(): + if framework.in_dygraph_mode(): if in_profiler_mode(): record_event = profiler.RecordEvent( "Gradient Backward", profiler.TracerEventType.Backward @@ -978,21 +978,20 @@ def monkey_patch_tensor(): ("values", values), ("to_dense", to_dense), ("to_sparse_coo", to_sparse_coo), + ("_set_grad_ivar", _set_grad_ivar), + ("value", value), + ("cpu", cpu), + ("cuda", cuda), + ("pin_memory", pin_memory), + ("_slice", _slice), + ("_numel", _numel), + ("_uva", _uva), + ("_clear_data", _clear_data), + ("__hash__", __hash__), + ("_use_gpudnn", _use_gpudnn), ): setattr(core.eager.Tensor, method_name, method) - setattr(core.eager.Tensor, "_set_grad_ivar", _set_grad_ivar) - setattr(core.eager.Tensor, "value", value) - setattr(core.eager.Tensor, "cpu", cpu) - setattr(core.eager.Tensor, "cuda", cuda) - setattr(core.eager.Tensor, "pin_memory", pin_memory) - setattr(core.eager.Tensor, "_slice", _slice) - setattr(core.eager.Tensor, "_numel", _numel) - setattr(core.eager.Tensor, "_uva", _uva) - setattr(core.eager.Tensor, "_clear_data", _clear_data) - setattr(core.eager.Tensor, "__hash__", __hash__) - setattr(core.eager.Tensor, "_use_gpudnn", _use_gpudnn) - global _already_patch_repr if not _already_patch_repr: # NOTE(zhiqiu): pybind11 will set a default __str__ method of enum class. 
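All of the call-site hunks above converge on the same dispatch shape: one runtime check, with the eager path taken in dynamic mode and the graph-building path otherwise. A minimal sketch of that shape, using only the public paddle.in_dynamic_mode() entry point; the function name and the computation are illustrative and not taken from any file in this patch:

import paddle

def squared_l2(x):
    # Dynamic (eager) mode: calls execute immediately and return a result tensor.
    if paddle.in_dynamic_mode():
        return paddle.sum(paddle.square(x))
    # Static mode: the same calls only add ops to the default program, so any
    # graph-only plumbing (dtype checks, LayerHelper wiring) belongs on this branch.
    return paddle.sum(paddle.square(x))

paddle.disable_static()                          # enter dynamic mode
print(squared_l2(paddle.to_tensor([3.0, 4.0])))  # prints a float32 Tensor holding 25.0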
diff --git a/python/paddle/fluid/framework.py b/python/paddle/fluid/framework.py index ad7140c14b6..a3d589fc616 100644 --- a/python/paddle/fluid/framework.py +++ b/python/paddle/fluid/framework.py @@ -52,7 +52,6 @@ __all__ = [ 'cpu_places', 'xpu_places', 'cuda_pinned_places', - '_non_static_mode', 'in_dygraph_mode', 'is_compiled_with_cinn', 'is_compiled_with_cuda', @@ -156,28 +155,6 @@ extra_op_attrs = { "unique": ["is_sorted"], } -# Some explanation of our execution system 2022.03 -# For now we have 3 kinds of execution system, since we refactored dygraph mode to -# build a fast execution system for dynamic mode. But we can't just remove all legacy -# code once we present the new system for some historical reason. That's why we have -# these flags. -# -# 1. _non_static_mode(): -# _non_static_mode means we are now running in legacy dygraph mode or dygraph mode. -# 2. dygraph_mode(): -# This flags inidicates we are now running in dygraph mode which called eager mode before. -# 3. _in_legacy_dygraph(): -# This flags has been deprecated -# -# They have a relation ship as below: -# Since _in_legacy_graph is deprecated, so dygraph_mode is _non_static_mode -# -# Why we have to make different of _in_legacy_dygraph and dygraph_mode? -# In some performance issue, we find that python if statement cause server performance problem -# and we need our new dygraph mode becomes as fast as it could be. That's why we make these flags -# to make sure in most case, we find new dygraph mode first with only one if statement. - - # FIXME(dev): We haven't fully verified eager mode on XPU et.al but # only GPU/CPU. Remove this after we improve this feature. _is_first_import_ = True @@ -213,10 +190,6 @@ def in_dygraph_mode(): return global_var._dygraph_tracer_ is not None -def _non_static_mode(): - return global_var._dygraph_tracer_ is not None - - global_ipu_index = -1 global_ipu_stage = -1 ipu_index_attr_name = 'ipu_index' @@ -459,7 +432,7 @@ def require_version(min_version, max_version=None): def _dygraph_not_support_(func): def __impl__(*args, **kwargs): - assert not _non_static_mode(), ( + assert not in_dygraph_mode(), ( "We don't support %s in dynamic graph mode" % func.__name__ ) return func(*args, **kwargs) @@ -469,7 +442,7 @@ def _dygraph_not_support_(func): def _dygraph_only_(func): def __impl__(*args, **kwargs): - assert _non_static_mode(), ( + assert in_dygraph_mode(), ( "We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." % func.__name__ ) @@ -482,7 +455,7 @@ def _non_static_only_(func): def __impl__(*args, **kwargs): from .dygraph.base import in_declarative_mode - assert _non_static_mode() or in_declarative_mode(), ( + assert in_dygraph_mode() or in_declarative_mode(), ( "We only support '%s()' in dynamic graph mode, please call 'paddle.disable_static()' to enter dynamic graph mode." % func.__name__ ) @@ -493,7 +466,7 @@ def _non_static_only_(func): def _static_only_(func): def __impl__(*args, **kwargs): - assert not _non_static_mode(), ( + assert not in_dygraph_mode(), ( "In PaddlePaddle 2.x, we turn on dynamic graph mode by default, and '%s()' is only supported in static graph mode. So if you want to use this api, please call 'paddle.enable_static()' before this api to enter static graph mode." % func.__name__ ) @@ -971,7 +944,7 @@ def name_scope(prefix=None): """ # TODO(panyx0718): Only [0-9a-z]. 
# in dygraph we don't need namescope since it will cause mem leak - if _non_static_mode(): + if in_dygraph_mode(): yield else: assert prefix, "namescope prefix can not be empty." @@ -2738,7 +2711,7 @@ class Operator: except ValueError: pass - if _non_static_mode(): + if in_dygraph_mode(): if type is None: raise ValueError( "`type` to initialized an Operator can not be None." @@ -2924,7 +2897,7 @@ class Operator: else: out_arg_names.append(arg.name) # TODO(minqiyang): could we remove variable's op in static graph mode? - if not _non_static_mode(): + if not in_dygraph_mode(): if isinstance(arg, str): block.var(arg).op = self else: @@ -3799,7 +3772,7 @@ class Block: ) def create_var(self, *args, **kwargs): - if _non_static_mode(): + if in_dygraph_mode(): var = _create_tensor(*args, **kwargs) else: var = Variable(block=self, *args, **kwargs) @@ -3956,7 +3929,7 @@ class Block: Operator: the append Operator. """ op_type = kwargs.get("type", None) - if _non_static_mode(): + if in_dygraph_mode(): attrs = kwargs.get("attrs", {}) inplace_map = kwargs.get("inplace_map", None) warnings.warn( @@ -4093,7 +4066,7 @@ class Block: return self.ops[start:end] def _prepend_op(self, *args, **kwargs): - if _non_static_mode(): + if in_dygraph_mode(): type = kwargs.get("type", None) attrs = kwargs.get("attrs", {}) op = Operator( @@ -7469,7 +7442,7 @@ def _cuda_graph_guard(cuda_graph_attr=None): cuda_graph_capture_mode;memory_pool_id;cuda_graph_id """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "cuda_graph_guard only works under static graph mode" assert ( core.is_compiled_with_cuda() diff --git a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py b/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py index 33c95f03b1f..a840b702140 100644 --- a/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py +++ b/python/paddle/fluid/incubate/checkpoint/auto_checkpoint.py @@ -24,7 +24,7 @@ from contextlib import contextmanager from paddle.fluid import unique_name, compiler from .checkpoint_saver import SerializableBase, CheckpointSaver, PaddleModel -from paddle.fluid.framework import _non_static_mode, Program +from paddle.fluid.framework import in_dygraph_mode, Program g_train_epoch_range = None g_checker = None @@ -138,7 +138,7 @@ class AutoCheckpointChecker: return self._save_checkpoint_inter def valid(self): - if _non_static_mode(): + if in_dygraph_mode(): return False return ( diff --git a/python/paddle/fluid/layer_helper.py b/python/paddle/fluid/layer_helper.py index 0342017822c..96722b231ac 100644 --- a/python/paddle/fluid/layer_helper.py +++ b/python/paddle/fluid/layer_helper.py @@ -17,7 +17,7 @@ import paddle from .framework import ( Parameter, dtype_is_floating, - _non_static_mode, + in_dygraph_mode, OpProtoHolder, _global_flags, ) @@ -159,7 +159,7 @@ class LayerHelper(LayerHelperBase): if use_mkldnn: act['use_mkldnn'] = use_mkldnn act_type = act.pop('type') - if _non_static_mode(): + if in_dygraph_mode(): res = _append_activation_in_dygraph( input_var, act_type, use_cudnn, use_mkldnn ) diff --git a/python/paddle/fluid/layer_helper_base.py b/python/paddle/fluid/layer_helper_base.py index 4a9b71c0cdf..0579bc98563 100644 --- a/python/paddle/fluid/layer_helper_base.py +++ b/python/paddle/fluid/layer_helper_base.py @@ -20,7 +20,7 @@ from .framework import ( Variable, default_main_program, default_startup_program, - _non_static_mode, + in_dygraph_mode, _current_expected_place, ) from . 
import unique_name @@ -409,7 +409,7 @@ class LayerHelperBase: param = self._create_weight_normalize(attr, shape, dtype) WeightNormParamAttr.params_with_weight_norm.append(param) return param - if _non_static_mode(): + if in_dygraph_mode(): # In dygraph mode, we want the returned parameter to be # initialized so that it can be used imperatively. # check parameter name @@ -527,7 +527,7 @@ class LayerHelperBase: initializer: initializer to use """ assert isinstance(var, Variable) - if _non_static_mode(): + if in_dygraph_mode(): initializer(var, self.main_program.global_block()) else: self.startup_program.global_block().create_var( diff --git a/python/paddle/fluid/lazy_init.py b/python/paddle/fluid/lazy_init.py index 1851056f2c2..3a577eb9084 100644 --- a/python/paddle/fluid/lazy_init.py +++ b/python/paddle/fluid/lazy_init.py @@ -37,7 +37,7 @@ class LazyInitHelper: if self._state: return assert ( - framework._non_static_mode() + framework.in_dygraph_mode() ), "LazyInit.enable() is only available in dygraph mode." self._state = True diff --git a/python/paddle/fluid/reader.py b/python/paddle/fluid/reader.py index 6dfd487ef49..c08fdedba3b 100644 --- a/python/paddle/fluid/reader.py +++ b/python/paddle/fluid/reader.py @@ -26,7 +26,7 @@ from .framework import ( program_guard, default_main_program, default_startup_program, - _non_static_mode, + in_dygraph_mode, cpu_places, _current_expected_place, ) @@ -417,7 +417,7 @@ class DataLoader: epoch_id, batch_id, np.mean(loss.numpy()))) """ - if _non_static_mode(): + if in_dygraph_mode(): return DygraphGeneratorLoader( feed_list, capacity, @@ -1605,7 +1605,7 @@ class DatasetLoader(DataLoaderBase): dataset, paddle.distributed.fleet.dataset.DatasetBase ), "dataset must be type of DatasetBase" assert ( - not _non_static_mode() + not in_dygraph_mode() ), "DatasetLoader is not supported in dygraph mode yet" if isinstance(places, (list, tuple)): places = _get_paddle_place_list(places) diff --git a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_recompute.py b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_recompute.py index 36fc010fd8e..793f2effd09 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_recompute.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/hybrid_parallel_pp_recompute.py @@ -87,7 +87,7 @@ class TransformerNet(Layer): class EmbeddingPipe(EmbeddingNet): def forward(self, tensors): - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): stable, x = tensors return stable, super().forward(x) else: @@ -96,7 +96,7 @@ class EmbeddingPipe(EmbeddingNet): class TransformerNetPipe(TransformerNet): def forward(self, tensors): - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): stable, x = tensors output = super().forward(x) return stable, output @@ -109,7 +109,7 @@ class CriterionPipe(Layer): super().__init__() def forward(self, out, label): - if framework.in_dygraph_mode(): + if framework.in_dynamic_mode(): out = out[-1] loss = out.mean() return loss @@ -179,7 +179,7 @@ class TestDistPPTraning(unittest.TestCase): x_data = np.random.randint(0, vocab_size, size=[batch_size, length]) x = paddle.to_tensor(x_data) x.stop_gradient = True - input_ = (x, x) if framework.in_dygraph_mode() else x + input_ = (x, x) if framework.in_dynamic_mode() else x loss = model.train_batch([input_, x], optimizer, scheduler) # TODO(shenliang03) add utest for loss print("loss: ", loss) diff --git 
a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py index 0aae9d7cc63..2e6026e99bc 100644 --- a/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/collective/fleet/parallel_dygraph_se_resnext.py @@ -55,7 +55,7 @@ def optimizer_setting(params, parameter_list=None): bd = [step * e for e in ls["epochs"]] lr = params["lr"] num_epochs = params["num_epochs"] - if fluid._non_static_mode(): + if fluid.in_dygraph_mode(): optimizer = fluid.optimizer.Momentum( learning_rate=fluid.layers.cosine_decay( learning_rate=lr, step_each_epoch=step, epochs=num_epochs diff --git a/python/paddle/fluid/tests/unittests/test_context_manager.py b/python/paddle/fluid/tests/unittests/test_context_manager.py index b11b37050e1..43ddca5fb83 100644 --- a/python/paddle/fluid/tests/unittests/test_context_manager.py +++ b/python/paddle/fluid/tests/unittests/test_context_manager.py @@ -29,8 +29,8 @@ class TestContextManagerRaiseException(unittest.TestCase): def test_func2(self): # After test_func1 executed, if fluid.dygraph.guard() in test_func1 safely exited, - # fluid._non_static_mode() should be false. - self.assertEqual(fluid._non_static_mode(), False) + # fluid.in_dygraph_mode() should be false. + self.assertEqual(fluid.in_dygraph_mode(), False) if __name__ == '__main__': diff --git a/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py b/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py index e2d5ae8685c..070e0f102a8 100644 --- a/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py +++ b/python/paddle/fluid/tests/unittests/test_dropout_nd_op.py @@ -21,7 +21,7 @@ import paddle from paddle import _legacy_C_ops, fluid from paddle.fluid import core from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import _non_static_mode +from paddle.fluid.framework import in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper from paddle.static import default_main_program @@ -34,7 +34,7 @@ def dropout_nd( mode = ( 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode ) # semantic transfer - if _non_static_mode(): + if in_dygraph_mode(): if default_main_program().random_seed != 0: seed = default_main_program().random_seed diff --git a/python/paddle/fluid/tests/unittests/test_imperative_decorator.py b/python/paddle/fluid/tests/unittests/test_imperative_decorator.py index e7c78189eea..d02819ff293 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_decorator.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_decorator.py @@ -27,7 +27,7 @@ class TestTracerMode(unittest.TestCase): self.init_mode = True def get_tracer_mode(self): - assert fluid._non_static_mode(), "Dygraph mode must be enabled" + assert framework.in_dygraph_mode(), "Dygraph mode must be enabled" @fluid.dygraph.no_grad def no_grad_func(self, a): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py index 30e8119ce16..069a99b5587 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_double_grad.py @@ -26,7 +26,7 @@ from paddle.vision.models import resnet50, resnet101 def _dygraph_guard_(func): def __impl__(*args, **kwargs): - if fluid._non_static_mode(): + if fluid.in_dygraph_mode(): return func(*args, **kwargs) 
else: with fluid.dygraph.guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py index 7adda0bd9f3..7afbf1ba5f9 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_ocr_attention_model.py @@ -206,7 +206,7 @@ class EncoderNet(paddle.nn.Layer): initializer=paddle.nn.initializer.Normal(0.0, 0.02), learning_rate=2.0, ) - if fluid.framework._non_static_mode(): + if fluid.framework.in_dygraph_mode(): h_0 = np.zeros( (Config.batch_size, rnn_hidden_size), dtype="float32" ) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py index af046305ff7..d130f97686a 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet.py @@ -58,7 +58,7 @@ def optimizer_setting(params, parameter_list=None): base_lr = params["lr"] lr = [] lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] - if fluid._non_static_mode(): + if fluid.in_dygraph_mode(): optimizer = fluid.optimizer.SGD( learning_rate=0.01, parameter_list=parameter_list ) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py index bc1dbd49af3..c537da047b8 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_resnet_sorted_gradient.py @@ -54,7 +54,7 @@ def optimizer_setting(params, parameter_list=None): base_lr = params["lr"] lr = [] lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] - if fluid._non_static_mode(): + if fluid.in_dygraph_mode(): optimizer = fluid.optimizer.SGD( learning_rate=0.01, parameter_list=parameter_list ) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py index 1f98ac13155..e727c16ccf3 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_se_resnext.py @@ -54,7 +54,7 @@ def optimizer_setting(params, parameter_list=None): # bd = [step * e for e in ls["epochs"]] # base_lr = params["lr"] # lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)] - if fluid._non_static_mode(): + if fluid.in_dygraph_mode(): optimizer = fluid.optimizer.SGD( learning_rate=0.01, parameter_list=parameter_list ) diff --git a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py index 693dd5c7b88..0eda5bcc349 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_star_gan_with_gradient_penalty.py @@ -114,7 +114,7 @@ class InstanceNorm(paddle.nn.Layer): self.bias = self.create_parameter(shape=[num_channels], is_bias=True) def forward(self, input): - if fluid._non_static_mode(): + if fluid.in_dygraph_mode(): out, _, _ = _legacy_C_ops.instance_norm( input, self.scale, self.bias, 'epsilon', self.epsilon ) @@ -387,7 +387,7 @@ def loss_cls(cls, label, cfg): def calc_gradients(outputs, inputs, no_grad_set): - if fluid._non_static_mode(): + if fluid.in_dygraph_mode(): return fluid.dygraph.grad( 
outputs=outputs, inputs=inputs, @@ -481,7 +481,7 @@ def build_optimizer(layer, cfg, loss=None): learning_rate = 1e-3 beta1 = 0.5 beta2 = 0.999 - if fluid._non_static_mode(): + if fluid.in_dygraph_mode(): return fluid.optimizer.Adam( learning_rate=learning_rate, beta1=beta1, diff --git a/python/paddle/fluid/tests/unittests/test_imperative_tensor_clear_gradient.py b/python/paddle/fluid/tests/unittests/test_imperative_tensor_clear_gradient.py index e068b595da4..11864a59027 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_tensor_clear_gradient.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_tensor_clear_gradient.py @@ -24,7 +24,7 @@ from paddle.fluid.wrapped_decorator import wrap_decorator def _dygraph_guard_(func): def __impl__(*args, **kwargs): - if fluid._non_static_mode(): + if fluid.in_dygraph_mode(): return func(*args, **kwargs) else: with fluid.dygraph.guard(): diff --git a/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py b/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py index 70df83b98e1..72e2689da77 100644 --- a/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py +++ b/python/paddle/fluid/tests/unittests/test_imperative_triple_grad.py @@ -24,7 +24,7 @@ from paddle.fluid.wrapped_decorator import wrap_decorator def _dygraph_guard_(func): def __impl__(*args, **kwargs): - if fluid._non_static_mode(): + if fluid.in_dygraph_mode(): return func(*args, **kwargs) else: with fluid.dygraph.guard(): diff --git a/python/paddle/fluid/tests/unittests/test_logical_op.py b/python/paddle/fluid/tests/unittests/test_logical_op.py index 14da477f18d..80a42d41451 100755 --- a/python/paddle/fluid/tests/unittests/test_logical_op.py +++ b/python/paddle/fluid/tests/unittests/test_logical_op.py @@ -18,7 +18,7 @@ import numpy as np from eager_op_test import convert_float_to_uint16 import paddle -from paddle.framework import _non_static_mode +from paddle.framework import in_dynamic_mode from paddle.static import Executor, Program, program_guard SUPPORTED_DTYPES = [ @@ -182,11 +182,11 @@ def test_type_error(unit_test, use_gpu, type_str_map): if binary_op: if type_str_map['x'] != type_str_map['y']: unit_test.assertRaises(error_type, op, x=x, y=y) - if not _non_static_mode(): + if not in_dynamic_mode(): error_type = TypeError unit_test.assertRaises(error_type, op, x=x, y=y, out=1) else: - if not _non_static_mode(): + if not in_dynamic_mode(): error_type = TypeError unit_test.assertRaises(error_type, op, x=x, out=1) diff --git a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py index 423e7039dd9..29ce4be023f 100644 --- a/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py +++ b/python/paddle/fluid/tests/unittests/test_multiclass_nms_op.py @@ -19,8 +19,8 @@ import numpy as np from eager_op_test import OpTest import paddle -from paddle import _C_ops, _legacy_C_ops -from paddle.fluid import _non_static_mode, core, in_dygraph_mode +from paddle import _C_ops +from paddle.fluid import core from paddle.fluid.layer_helper import LayerHelper @@ -42,7 +42,7 @@ def multiclass_nms3( helper = LayerHelper('multiclass_nms3', **locals()) - if in_dygraph_mode(): + if paddle.in_dynamic_mode(): attrs = ( score_threshold, nms_top_k, @@ -58,30 +58,6 @@ def multiclass_nms3( if not return_index: index = None return output, index, nms_rois_num - elif _non_static_mode(): - attrs = ( - 'background_label', - background_label, - 'score_threshold', - score_threshold, - 'nms_top_k', 
- nms_top_k, - 'nms_threshold', - nms_threshold, - 'keep_top_k', - keep_top_k, - 'nms_eta', - nms_eta, - 'normalized', - normalized, - ) - output, index, nms_rois_num = _legacy_C_ops.multiclass_nms3( - bboxes, scores, rois_num, *attrs - ) - if not return_index: - index = None - return output, index, nms_rois_num - else: output = helper.create_variable_for_type_inference(dtype=bboxes.dtype) index = helper.create_variable_for_type_inference(dtype='int32') diff --git a/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py b/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py index 6ad9f721d66..17586d94f3b 100755 --- a/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py +++ b/python/paddle/fluid/tests/unittests/test_squared_l2_norm_op.py @@ -20,11 +20,11 @@ from numpy import linalg as LA import paddle from paddle import _C_ops, _legacy_C_ops -from paddle.framework import in_dygraph_mode +from paddle.framework import in_dynamic_mode def test_squared_l2_norm(x): - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.squared_l2_norm(x) else: return _legacy_C_ops.squared_l2_norm(x) diff --git a/python/paddle/fluid/unique_name.py b/python/paddle/fluid/unique_name.py index c881196ef56..863883a220c 100644 --- a/python/paddle/fluid/unique_name.py +++ b/python/paddle/fluid/unique_name.py @@ -120,9 +120,9 @@ def generate(key): # NOTE(zhiqiu): use c++ unique_name_generator in dygraph mode, # in order to keep name consistency. def generate_with_ignorable_key(key): - from .framework import _non_static_mode, _dygraph_tracer + from .framework import in_dygraph_mode, _dygraph_tracer - if _non_static_mode(): + if in_dygraph_mode(): return _dygraph_tracer()._generate_unique_name() return generator(key) diff --git a/python/paddle/fluid/variable_index.py b/python/paddle/fluid/variable_index.py index fbd55ba83aa..dae0deb135c 100644 --- a/python/paddle/fluid/variable_index.py +++ b/python/paddle/fluid/variable_index.py @@ -436,8 +436,7 @@ def _getitem_impl_(var, item): start = 0 if step > 0 else MAX_INTEGER if end is None: if ( - paddle.fluid.framework._non_static_mode() - or not is_tensor_array + paddle.in_dynamic_mode() or not is_tensor_array ) and var.shape[dim] != -1: end = var.shape[dim] if step > 0 else -1 else: @@ -550,7 +549,7 @@ def _getitem_impl_(var, item): out = var if len(axes) > 0: op_type = "strided_slice" if use_strided_slice else "slice" - if paddle.fluid.framework.in_dygraph_mode() and op_type == "slice": + if paddle.in_dynamic_mode() and op_type == "slice": if "StartsTensorList" in inputs.keys(): st = inputs['StartsTensorList'] else: @@ -620,11 +619,11 @@ def _setitem_for_tensor_array(var, item, value): If item is case (1), we perform paddle.tensor.array_write, in other cases, we raise a NotImplementedError. """ - from ..framework import LayerHelper, core, _non_static_mode + from ..framework import LayerHelper, core from .framework import Variable assert ( - not _non_static_mode() + not paddle.in_dynamic_mode() ), "setitem for tensor_array must be called in static graph mode." 
if isinstance(item, (Variable, int)): from paddle.jit.dy2static.variable_trans_func import ( @@ -808,7 +807,7 @@ def _setitem_impl_(var, item, value): ) ) - if paddle.fluid.framework._non_static_mode(): + if paddle.in_dynamic_mode(): var._bump_inplace_version() cur_block = default_main_program().current_block() diff --git a/python/paddle/framework/__init__.py b/python/paddle/framework/__init__.py index 0ae39627d9a..16f12fd41ec 100755 --- a/python/paddle/framework/__init__.py +++ b/python/paddle/framework/__init__.py @@ -55,10 +55,7 @@ from ..fluid.framework import set_flags # noqa: F401 from ..fluid.framework import Parameter from ..fluid.dygraph.base import enable_dygraph as disable_static # noqa: F401 from ..fluid.dygraph.base import disable_dygraph as enable_static # noqa: F401 -from ..fluid.framework import _non_static_mode as in_dynamic_mode # noqa: F401 -from ..fluid.framework import ( # noqa: F401 - _non_static_mode, # temporary used for hackson -) +from ..fluid.framework import in_dygraph_mode as in_dynamic_mode # noqa: F401 from ..fluid.framework import ( _current_expected_place, _get_paddle_place, @@ -74,7 +71,6 @@ from ..fluid.framework import _dygraph_tracer # noqa: F401 from ..fluid.framework import generate_control_dev_var_name # noqa: F401 from ..fluid.layer_helper import LayerHelper # noqa: F401 -from ..fluid.framework import in_dygraph_mode # noqa: F401 from ..fluid.framework import _global_flags # noqa: F401 from ..fluid.framework import _apply_pass # noqa: F401 from ..fluid.framework import switch_main_program diff --git a/python/paddle/framework/io.py b/python/paddle/framework/io.py index e5d011453f2..055b4aa5fa4 100644 --- a/python/paddle/framework/io.py +++ b/python/paddle/framework/io.py @@ -34,7 +34,7 @@ from paddle.fluid.framework import ( _create_tensor, _current_expected_place, _dygraph_tracer, - _non_static_mode, + in_dygraph_mode, ) from .io_utils import ( @@ -438,7 +438,7 @@ def _to_LodTensor(ndarray): def _tuple_to_tensor(obj, return_numpy): if return_numpy: return obj[1] - if _non_static_mode(): + if in_dygraph_mode(): t = paddle.to_tensor(obj[1]) # This function does modify the name of return value. # Loading the same variable multiple times may cause the same name. @@ -451,7 +451,7 @@ def _tuple_to_tensor(obj, return_numpy): def _ndarray_to_tensor(obj, return_numpy): if return_numpy: return obj - if _non_static_mode(): + if in_dygraph_mode(): return paddle.to_tensor(obj) else: return _to_LodTensor(obj) @@ -508,7 +508,7 @@ def _parse_load_result(obj, return_numpy): return obj if _contain_x(obj, is_layer): - if not _non_static_mode(): + if not in_dygraph_mode(): raise ValueError( "Layer can only be loaded in dynamic graph mode, but now in static graph mode." 
) @@ -819,7 +819,7 @@ def save(obj, path, protocol=4, **configs): f.write(obj.desc.serialize_to_string()) elif _is_state_dict(obj): - if _non_static_mode(): + if in_dygraph_mode(): _legacy_save(obj, path, protocol) else: _legacy_static_save(obj, path, protocol) @@ -1110,7 +1110,7 @@ def load(path, **configs): if config.return_numpy: return np.array(tensor) else: - if _non_static_mode(): + if in_dygraph_mode(): return _lod_tensor2varbase(tensor) return tensor except: diff --git a/python/paddle/geometric/math.py b/python/paddle/geometric/math.py index 9e07e1c77a7..9db0303a2f6 100644 --- a/python/paddle/geometric/math.py +++ b/python/paddle/geometric/math.py @@ -14,8 +14,8 @@ from paddle import _C_ops from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode __all__ = [] @@ -50,7 +50,7 @@ def segment_sum(data, segment_ids, name=None): #Outputs: [[4., 4., 4.], [4., 5., 6.]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.segment_pool(data, segment_ids, "SUM") else: check_variable_and_dtype( @@ -107,7 +107,7 @@ def segment_mean(data, segment_ids, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.segment_pool(data, segment_ids, "MEAN") else: @@ -164,7 +164,7 @@ def segment_min(data, segment_ids, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.segment_pool(data, segment_ids, "MIN") else: check_variable_and_dtype( @@ -220,7 +220,7 @@ def segment_max(data, segment_ids, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.segment_pool(data, segment_ids, "MAX") else: check_variable_and_dtype( diff --git a/python/paddle/geometric/message_passing/send_recv.py b/python/paddle/geometric/message_passing/send_recv.py index 5f3ca7afe3f..e37868a229d 100644 --- a/python/paddle/geometric/message_passing/send_recv.py +++ b/python/paddle/geometric/message_passing/send_recv.py @@ -20,8 +20,9 @@ from paddle.fluid.data_feeder import ( check_type, check_variable_and_dtype, ) -from paddle.fluid.framework import Variable, in_dygraph_mode +from paddle.fluid.framework import Variable from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode from .utils import ( convert_out_size_to_list, @@ -118,7 +119,7 @@ def send_u_recv( # TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1. - if in_dygraph_mode(): + if in_dynamic_mode(): out_size = convert_out_size_to_list(out_size) return _C_ops.send_u_recv( x, src_index, dst_index, reduce_op.upper(), out_size @@ -295,7 +296,7 @@ def send_ue_recv( # TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1. 
- if in_dygraph_mode(): + if in_dynamic_mode(): out_size = convert_out_size_to_list(out_size) return _C_ops.send_ue_recv( x, @@ -451,7 +452,7 @@ def send_uv(x, y, src_index, dst_index, message_op="add", name=None): message_op = 'mul' y = 1.0 / (y + 1e-12) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.send_uv(x, y, src_index, dst_index, message_op.upper()) else: diff --git a/python/paddle/geometric/reindex.py b/python/paddle/geometric/reindex.py index 0456750d4a5..c65fc36a417 100644 --- a/python/paddle/geometric/reindex.py +++ b/python/paddle/geometric/reindex.py @@ -15,8 +15,9 @@ import paddle from paddle import _C_ops from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import Variable, _non_static_mode +from paddle.fluid.framework import Variable from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode __all__ = [] @@ -86,7 +87,7 @@ def reindex_graph( True if value_buffer is not None and index_buffer is not None else False ) - if _non_static_mode(): + if in_dynamic_mode(): reindex_src, reindex_dst, out_nodes = _C_ops.reindex_graph( x, neighbors, @@ -205,7 +206,7 @@ def reindex_heter_graph( True if value_buffer is not None and index_buffer is not None else False ) - if _non_static_mode(): + if in_dynamic_mode(): neighbors = paddle.concat(neighbors, axis=0) count = paddle.concat(count, axis=0) reindex_src, reindex_dst, out_nodes = _C_ops.reindex_graph( diff --git a/python/paddle/geometric/sampling/neighbors.py b/python/paddle/geometric/sampling/neighbors.py index c8d907c078b..1bd7c001d48 100644 --- a/python/paddle/geometric/sampling/neighbors.py +++ b/python/paddle/geometric/sampling/neighbors.py @@ -14,8 +14,8 @@ from paddle import _C_ops, _legacy_C_ops from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import _non_static_mode, in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode __all__ = [] @@ -100,7 +100,7 @@ def sample_neighbors( use_perm_buffer = True if perm_buffer is not None else False - if _non_static_mode(): + if in_dynamic_mode(): ( out_neighbors, out_count, @@ -251,7 +251,7 @@ def weighted_sample_neighbors( "`eids` should not be None if `return_eids` is True." 
) - if in_dygraph_mode(): + if in_dynamic_mode(): ( out_neighbors, out_count, diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py index 4169902401f..a20ee3169ad 100644 --- a/python/paddle/hapi/model.py +++ b/python/paddle/hapi/model.py @@ -33,7 +33,8 @@ from paddle.fluid.dygraph.base import to_variable from paddle.fluid.executor import global_scope from paddle.fluid.framework import Variable from paddle.fluid.framework import _current_expected_place as _get_device -from paddle.fluid.framework import _get_paddle_place, _non_static_mode +from paddle.fluid.framework import _get_paddle_place +from paddle.framework import in_dynamic_mode from paddle.framework.io_utils import is_belong_to_optimizer from paddle.io import DataLoader, Dataset, DistributedBatchSampler from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX @@ -256,7 +257,7 @@ def prepare_distributed_context(place=None): exe = fluid.Executor(place) exe.run(communicator_prog) - if fluid._non_static_mode(): + if in_dynamic_mode(): fluid.disable_dygraph() _init_context() fluid.enable_dygraph(place) @@ -1170,7 +1171,7 @@ class Model: self._test_dataloader = None self.stop_training = False - if not _non_static_mode(): + if not in_dynamic_mode(): if not isinstance(inputs, (list, tuple, dict, Input)): raise TypeError( "'inputs' must be list or tuple or dict, and couldn't be None." @@ -1182,7 +1183,7 @@ class Model: self._labels = self._verify_spec(labels) # init backend - if fluid._non_static_mode(): + if in_dynamic_mode(): self._adapter = DynamicGraphAdapter(self) else: self._adapter = StaticGraphAdapter(self) @@ -1238,7 +1239,7 @@ class Model: """ loss = self._adapter.train_batch(inputs, labels, update) - if fluid._non_static_mode() and self._input_info is None: + if in_dynamic_mode() and self._input_info is None: self._update_inputs() return loss @@ -1292,7 +1293,7 @@ class Model: """ loss = self._adapter.eval_batch(inputs, labels) - if fluid._non_static_mode() and self._input_info is None: + if in_dynamic_mode() and self._input_info is None: self._update_inputs() return loss @@ -1341,7 +1342,7 @@ class Model: """ loss = self._adapter.predict_batch(inputs) - if fluid._non_static_mode() and self._input_info is None: + if in_dynamic_mode() and self._input_info is None: self._update_inputs() return loss @@ -1527,7 +1528,7 @@ class Model: ) # TODO: support save/load scaler state in static graph - if _non_static_mode(): + if in_dynamic_mode(): scaler_state = None if hasattr(self, '_scaler') and self._scaler is not None: if os.path.exists(path + '.pdscaler'): @@ -1644,7 +1645,7 @@ class Model: ) if 'use_fp16_guard' in amp_config_key_set: - if _non_static_mode(): + if in_dynamic_mode(): raise ValueError( "'use_fp16_guard' is supported in static graph mode only." 
) @@ -1702,7 +1703,7 @@ class Model: paddle.distributed.ParallelEnv().nranks > 1 and not _parallel_context_initialized ): - if fluid._non_static_mode(): + if in_dynamic_mode(): main_prog_seed = fluid.default_main_program().random_seed startup_prog_seed = ( fluid.default_startup_program().random_seed @@ -2228,7 +2229,7 @@ class Model: None """ - if fluid._non_static_mode(): + if in_dynamic_mode(): with fluid.framework._dygraph_guard(None): layer = self.network if self._input_info is None: # No provided or inferred @@ -2428,7 +2429,7 @@ class Model: if ( shapes is not None and dtypes is not None - and fluid._non_static_mode() + and in_dynamic_mode() ): out_specs = [ Input(name=n, dtype=dtypes[i], shape=shapes[i]) diff --git a/python/paddle/incubate/autograd/functional.py b/python/paddle/incubate/autograd/functional.py index 218c125a92e..8f6f012ac39 100644 --- a/python/paddle/incubate/autograd/functional.py +++ b/python/paddle/incubate/autograd/functional.py @@ -69,7 +69,7 @@ def vjp(func, xs, v=None): # ``_seprate`` breaks the dependencies between ``xs`` and other # variables. See more ``_seprate`` . - if paddle.fluid._non_static_mode() or not utils.prim_enabled(): + if framework.in_dygraph_mode() or not utils.prim_enabled(): xs, v = _separate(xs), _separate(v) ys = func(*xs) if isinstance(xs, typing.Sequence) else func(xs) _check_v_shape(v, ys) @@ -130,12 +130,12 @@ def jvp(func, xs, v=None): _check_inputs(func, xs, v) # ``_seprate`` breaks the dependencies between ``xs`` and other # variables. See more ``_seprate`` . - if paddle.fluid._non_static_mode() or not utils.prim_enabled(): + if framework.in_dygraph_mode() or not utils.prim_enabled(): xs, v = _separate(xs), _separate(v) ys = func(*xs) if isinstance(xs, typing.Sequence) else func(xs) _check_v_shape(v, xs) - if not paddle.fluid._non_static_mode() and utils.prim_enabled(): + if not framework.in_dygraph_mode() and utils.prim_enabled(): return ys, primapi.forward_grad(ys, xs, v) else: return ys, _double_backward_trick(ys, xs, v) @@ -352,7 +352,7 @@ class _Jacobian: def __init__(self, func, xs): # Skip separating in prim mode temporarily, as detach and clone are not # primitive operators. - if not paddle.fluid._non_static_mode() and utils.prim_enabled(): + if not framework.in_dygraph_mode() and utils.prim_enabled(): self._xs = xs else: self._xs = _separate(xs) @@ -580,7 +580,7 @@ def _grad(ys, xs, v=None): Tensor is the sum of gradients of outputs with respect to the i-th inputs. """ - if paddle.fluid._non_static_mode(): + if framework.in_dygraph_mode(): # paddle.grad returns a list though the inputs is a signle Tensor. The # follow code snippet fixes the problem by return the first element of # xs_grad when the xs is a signle Tensor. 
diff --git a/python/paddle/incubate/distributed/models/moe/moe_layer.py b/python/paddle/incubate/distributed/models/moe/moe_layer.py index 9fd8081306c..1923db43b78 100644 --- a/python/paddle/incubate/distributed/models/moe/moe_layer.py +++ b/python/paddle/incubate/distributed/models/moe/moe_layer.py @@ -26,7 +26,7 @@ from paddle import nn from paddle.autograd import PyLayer from paddle.distributed.utils.moe_utils import global_gather, global_scatter from paddle.distributed.utils.nccl_utils import check_nccl_version_for_p2p -from paddle.framework import in_dygraph_mode +from paddle.framework import in_dynamic_mode from paddle.incubate.distributed.fleet import recompute_hybrid from .gate import BaseGate, GShardGate, NaiveGate, SwitchGate @@ -63,7 +63,7 @@ def _all_gather(tensor, group=None, use_calc_stream=True): if group is not None and not group.is_member(): return - if in_dygraph_mode(): + if in_dynamic_mode(): group = ( paddle.distributed.collective._get_default_group() if group is None diff --git a/python/paddle/incubate/distributed/models/moe/utils.py b/python/paddle/incubate/distributed/models/moe/utils.py index e7c0fa9af54..f385baa1c2f 100644 --- a/python/paddle/incubate/distributed/models/moe/utils.py +++ b/python/paddle/incubate/distributed/models/moe/utils.py @@ -26,14 +26,14 @@ from paddle.distributed.models.moe.utils import ( _number_count, _prune_gate_by_capacity, ) -from paddle.framework import in_dygraph_mode +from paddle.framework import in_dynamic_mode def _alltoall(in_tensor_list, group=None, use_calc_stream=True): if group is not None and not group.is_member(): return - if in_dygraph_mode(): + if in_dynamic_mode(): group = ( paddle.distributed.collective._get_default_group() if group is None diff --git a/python/paddle/incubate/layers/nn.py b/python/paddle/incubate/layers/nn.py index 113b35b6e09..5ceea828109 100644 --- a/python/paddle/incubate/layers/nn.py +++ b/python/paddle/incubate/layers/nn.py @@ -1174,7 +1174,7 @@ def bilateral_slice(x, guide, grid, has_offset, name=None): output = paddle.incubate.layers.bilateral_slice(x, guide, grid, has_offset=True) """ - if paddle.fluid._non_static_mode(): + if paddle.in_dynamic_mode(): attrs = ('has_offset', has_offset) return _legacy_C_ops.bilateral_slice(x, grid, guide, *attrs) @@ -1252,7 +1252,7 @@ def correlation( """ - if paddle.fluid._non_static_mode(): + if paddle.in_dynamic_mode(): attrs = ( "pad_size", pad_size, @@ -1501,7 +1501,7 @@ def fused_bn_add_act( def pow2_decay_with_linear_warmup( warmup_steps, total_steps, base_lr, end_lr, dtype='float32', name=None ): - if paddle.fluid._non_static_mode(): + if paddle.in_dynamic_mode(): raise NotImplementedError( "pow2_decay_with_linear_warmup does not support dygraph mode yet." 
) diff --git a/python/paddle/incubate/nn/functional/fused_dropout_add.py b/python/paddle/incubate/nn/functional/fused_dropout_add.py index 5f1b94cd896..79f5adfcc33 100644 --- a/python/paddle/incubate/nn/functional/fused_dropout_add.py +++ b/python/paddle/incubate/nn/functional/fused_dropout_add.py @@ -16,8 +16,7 @@ from paddle import _C_ops from paddle.common_ops_import import default_main_program from paddle.fluid import core -from paddle.fluid.framework import in_dygraph_mode -from paddle.framework import LayerHelper +from paddle.framework import LayerHelper, in_dynamic_mode def fused_dropout_add( @@ -73,7 +72,7 @@ def fused_dropout_add( "mode argument should be 'downscale_in_infer' or 'upscale_in_train'" ) seed = None - if in_dygraph_mode(): + if in_dynamic_mode(): if default_main_program().random_seed != 0: seed = default_main_program().random_seed out, seed_offset = _C_ops.fused_dropout_add( diff --git a/python/paddle/incubate/nn/functional/fused_gate_attention.py b/python/paddle/incubate/nn/functional/fused_gate_attention.py index 78f4abac823..5bc2211c33c 100644 --- a/python/paddle/incubate/nn/functional/fused_gate_attention.py +++ b/python/paddle/incubate/nn/functional/fused_gate_attention.py @@ -13,7 +13,7 @@ # limitations under the License. from paddle import _legacy_C_ops -from paddle.fluid.framework import _non_static_mode +from paddle.framework import in_dynamic_mode def fused_gate_attention( @@ -142,7 +142,7 @@ def fused_gate_attention( # [2, 4, 2, 4] """ - if _non_static_mode(): + if in_dynamic_mode(): _, _, _, _, _, _, _, _, out = _legacy_C_ops.fused_gate_attention( query, key, diff --git a/python/paddle/incubate/nn/functional/fused_matmul_bias.py b/python/paddle/incubate/nn/functional/fused_matmul_bias.py index c80a437b390..72fbdea4535 100644 --- a/python/paddle/incubate/nn/functional/fused_matmul_bias.py +++ b/python/paddle/incubate/nn/functional/fused_matmul_bias.py @@ -13,8 +13,8 @@ # limitations under the License. 
from paddle import _legacy_C_ops -from paddle.fluid.framework import _non_static_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode from paddle.tensor.linalg import matmul @@ -53,7 +53,7 @@ def fused_matmul_bias( """ if bias is None: return matmul(x, y, transpose_x, transpose_y, name) - if _non_static_mode(): + if in_dynamic_mode(): return _legacy_C_ops.fused_gemm_epilogue( x, y, bias, 'trans_x', transpose_x, 'trans_y', transpose_y ) diff --git a/python/paddle/incubate/nn/functional/fused_transformer.py b/python/paddle/incubate/nn/functional/fused_transformer.py index 59a15750c7d..469aea26cc6 100644 --- a/python/paddle/incubate/nn/functional/fused_transformer.py +++ b/python/paddle/incubate/nn/functional/fused_transformer.py @@ -15,8 +15,9 @@ from paddle import _legacy_C_ops from paddle.fluid import core from paddle.fluid.data_feeder import check_dtype, check_variable_and_dtype -from paddle.fluid.framework import _non_static_mode, default_main_program +from paddle.fluid.framework import default_main_program from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode __all__ = [] @@ -132,7 +133,7 @@ def fused_feedforward( 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode ) # semantic transfer - if _non_static_mode(): + if in_dynamic_mode(): if default_main_program().random_seed != 0: seed = default_main_program().random_seed out, _, _, _, _, _, _, _, _, _, _ = _legacy_C_ops.fused_feedforward( @@ -363,7 +364,7 @@ def fused_bias_dropout_residual_layer_norm( x.shape[len(x.shape) - 1] == ln_bias.shape[0] ), "The dim of ln_bias must equal to the last dim of x." - if _non_static_mode(): + if in_dynamic_mode(): if default_main_program().random_seed != 0: seed = default_main_program().random_seed ( @@ -620,7 +621,7 @@ def fused_multi_head_attention( f"The rank of the x should be 3, but received {x.ndim}." 
) - if _non_static_mode(): + if in_dynamic_mode(): if default_main_program().random_seed != 0: seed = default_main_program().random_seed # pre_ln_mean, pre_ln_variance, pre_ln_out, qkv_out, qkv_bias_out, transpose_out, qk_out, @@ -1046,7 +1047,7 @@ def fused_multi_transformer( 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode ) # semantic transfer - if _non_static_mode(): + if in_dynamic_mode(): cache_kv_out, final_out = _legacy_C_ops.fused_multi_transformer( x, ln_scales, diff --git a/python/paddle/incubate/nn/layer/fused_dropout_nd.py b/python/paddle/incubate/nn/layer/fused_dropout_nd.py index b8aecf0c397..156880f7328 100644 --- a/python/paddle/incubate/nn/layer/fused_dropout_nd.py +++ b/python/paddle/incubate/nn/layer/fused_dropout_nd.py @@ -14,7 +14,7 @@ import paddle from paddle import _legacy_C_ops -from paddle.fluid.framework import _non_static_mode +from paddle.framework import in_dynamic_mode class FusedDropout(paddle.nn.Layer): @@ -104,7 +104,7 @@ class FusedDropout(paddle.nn.Layer): if self.p == 0: return input - if self.axis is not None and _non_static_mode(): + if self.axis is not None and in_dynamic_mode(): seed = None if paddle.static.default_main_program().random_seed != 0: seed = paddle.static.default_main_program().random_seed diff --git a/python/paddle/incubate/nn/layer/fused_transformer.py b/python/paddle/incubate/nn/layer/fused_transformer.py index 31ea1e8d663..c385d0284a1 100644 --- a/python/paddle/incubate/nn/layer/fused_transformer.py +++ b/python/paddle/incubate/nn/layer/fused_transformer.py @@ -17,7 +17,8 @@ import paddle from paddle.fluid import core from paddle.fluid.core import VarDesc from paddle.fluid.dygraph import no_grad -from paddle.fluid.framework import _non_static_mode, convert_np_dtype_to_dtype_ +from paddle.fluid.framework import convert_np_dtype_to_dtype_ +from paddle.framework import in_dynamic_mode from paddle.incubate.nn import functional as incubate_f from paddle.nn import Layer from paddle.nn.initializer import Constant @@ -34,7 +35,7 @@ def _set_var_distributed(var): var.is_distributed = True - if not _non_static_mode(): + if not in_dynamic_mode(): # NOTE: use current_block and find_var_recursive to support while_loop startup_block = paddle.static.default_startup_program().current_block() main_block = paddle.static.default_main_program().current_block() diff --git a/python/paddle/incubate/nn/loss.py b/python/paddle/incubate/nn/loss.py index 9d5d2618391..09d41e3f82d 100644 --- a/python/paddle/incubate/nn/loss.py +++ b/python/paddle/incubate/nn/loss.py @@ -15,7 +15,7 @@ from paddle import _legacy_C_ops from paddle.fluid.data_feeder import check_variable_and_dtype from paddle.fluid.layer_helper import LayerHelper -from paddle.framework import _non_static_mode +from paddle.framework import in_dynamic_mode def identity_loss(x, reduction="none"): @@ -59,7 +59,7 @@ def identity_loss(x, reduction="none"): if reduction is None: raise Exception("Unsupported reduction type.") - if _non_static_mode(): + if in_dynamic_mode(): return _legacy_C_ops.identity_loss(x, "reduction", reduction) check_variable_and_dtype(x, 'x', ['float32', 'float64'], "identity_loss") diff --git a/python/paddle/incubate/nn/memory_efficient_attention.py b/python/paddle/incubate/nn/memory_efficient_attention.py index 049f00a70cf..565809cf7df 100644 --- a/python/paddle/incubate/nn/memory_efficient_attention.py +++ b/python/paddle/incubate/nn/memory_efficient_attention.py @@ -21,8 +21,8 @@ import paddle from paddle import _C_ops -from paddle.fluid.framework import 
in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode from .attn_bias import ( BlockDiagonalCausalMask, @@ -99,7 +99,7 @@ def memory_efficient_attention( bias = _get_tensor_bias(attn_bias) is_test = not training - if in_dygraph_mode(): + if in_dynamic_mode(): output, logsumexp, seed_and_offset = _C_ops.memory_efficient_attention( query, key, diff --git a/python/paddle/incubate/operators/graph_khop_sampler.py b/python/paddle/incubate/operators/graph_khop_sampler.py index 7bea5dbe762..06b079d7f1c 100644 --- a/python/paddle/incubate/operators/graph_khop_sampler.py +++ b/python/paddle/incubate/operators/graph_khop_sampler.py @@ -14,8 +14,8 @@ from paddle import _legacy_C_ops from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import _non_static_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode def graph_khop_sampler( @@ -84,7 +84,7 @@ def graph_khop_sampler( """ - if _non_static_mode(): + if in_dynamic_mode(): if return_eids: if sorted_eids is None: raise ValueError( diff --git a/python/paddle/incubate/operators/graph_reindex.py b/python/paddle/incubate/operators/graph_reindex.py index 2e2a0a1642f..2594ed7ce05 100644 --- a/python/paddle/incubate/operators/graph_reindex.py +++ b/python/paddle/incubate/operators/graph_reindex.py @@ -14,8 +14,8 @@ from paddle import _C_ops from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import _non_static_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode from paddle.utils import deprecated @@ -116,7 +116,7 @@ def graph_reindex( "be None if `flag_buffer_hashtable` is True." ) - if _non_static_mode(): + if in_dynamic_mode(): reindex_src, reindex_dst, out_nodes = _C_ops.reindex_graph( x, neighbors, diff --git a/python/paddle/incubate/operators/graph_sample_neighbors.py b/python/paddle/incubate/operators/graph_sample_neighbors.py index b670674c565..169acca5fdc 100644 --- a/python/paddle/incubate/operators/graph_sample_neighbors.py +++ b/python/paddle/incubate/operators/graph_sample_neighbors.py @@ -14,8 +14,8 @@ from paddle import _legacy_C_ops from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import _non_static_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode from paddle.utils import deprecated @@ -109,7 +109,7 @@ def graph_sample_neighbors( "is True." ) - if _non_static_mode(): + if in_dynamic_mode(): ( out_neighbors, out_count, diff --git a/python/paddle/incubate/operators/graph_send_recv.py b/python/paddle/incubate/operators/graph_send_recv.py index 701f7a7fa15..7a874f19249 100644 --- a/python/paddle/incubate/operators/graph_send_recv.py +++ b/python/paddle/incubate/operators/graph_send_recv.py @@ -22,8 +22,9 @@ from paddle.fluid.data_feeder import ( check_variable_and_dtype, convert_dtype, ) -from paddle.fluid.framework import Variable, in_dygraph_mode +from paddle.fluid.framework import Variable from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode from paddle.utils import deprecated @@ -124,7 +125,7 @@ def graph_send_recv( # TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1. 
- if in_dygraph_mode(): + if in_dynamic_mode(): out_size = convert_out_size_to_list(out_size) return _C_ops.send_u_recv( x, src_index, dst_index, pool_type.upper(), out_size diff --git a/python/paddle/incubate/operators/softmax_mask_fuse.py b/python/paddle/incubate/operators/softmax_mask_fuse.py index 399f8e9bd98..178cfd9a046 100644 --- a/python/paddle/incubate/operators/softmax_mask_fuse.py +++ b/python/paddle/incubate/operators/softmax_mask_fuse.py @@ -13,8 +13,8 @@ # limitations under the License. from paddle import _legacy_C_ops -from paddle.fluid.framework import _non_static_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode def softmax_mask_fuse(x, mask, name=None): @@ -55,7 +55,7 @@ def softmax_mask_fuse(x, mask, name=None): rst = incubate.softmax_mask_fuse(x, mask) # [[[[0.02404429, 0.04658398, 0.02746007, ..., 0.01489375, 0.02397441, 0.02851614] ... ]]] """ - if _non_static_mode(): + if in_dynamic_mode(): out = _legacy_C_ops.fused_softmax_mask(x, mask) return out helper = LayerHelper('fused_softmax_mask', **locals()) diff --git a/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py b/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py index ffe8d8ac5ad..dd8e229a1e9 100644 --- a/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py +++ b/python/paddle/incubate/operators/softmax_mask_fuse_upper_triangle.py @@ -13,8 +13,8 @@ # limitations under the License. from paddle import _legacy_C_ops -from paddle.fluid.framework import _non_static_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode def softmax_mask_fuse_upper_triangle(x): @@ -55,7 +55,7 @@ def softmax_mask_fuse_upper_triangle(x): # [0.32674268, 0.28156221, 0.39169508, ..., 0., 0., 0.] # ... ]]] """ - if _non_static_mode(): + if in_dynamic_mode(): out = _legacy_C_ops.fused_softmax_mask_upper_triangle(x) return out diff --git a/python/paddle/incubate/optimizer/distributed_fused_lamb.py b/python/paddle/incubate/optimizer/distributed_fused_lamb.py index a9d9e941d25..1efc75cb7d6 100644 --- a/python/paddle/incubate/optimizer/distributed_fused_lamb.py +++ b/python/paddle/incubate/optimizer/distributed_fused_lamb.py @@ -15,7 +15,7 @@ import os import paddle -from paddle.fluid import core, framework, unique_name +from paddle.fluid import core, unique_name from paddle.fluid.executor import global_scope from paddle.fluid.framework import Variable, name_scope from paddle.fluid.layer_helper import LayerHelper @@ -129,7 +129,7 @@ class DistributedFusedLamb(Optimizer): name=None, ): assert ( - not framework._non_static_mode() + not paddle.in_dynamic_mode() ), "DistributedFusedLamb does not support dygraph mode" super().__init__(learning_rate=learning_rate, grad_clip=None, name=name) diff --git a/python/paddle/incubate/optimizer/modelaverage.py b/python/paddle/incubate/optimizer/modelaverage.py index 338b3351021..d0c73e306e7 100644 --- a/python/paddle/incubate/optimizer/modelaverage.py +++ b/python/paddle/incubate/optimizer/modelaverage.py @@ -13,12 +13,13 @@ # limitations under the License. 
import paddle -from paddle import _C_ops, _legacy_C_ops +from paddle import _C_ops from paddle.fluid import framework from paddle.fluid.dygraph import base as imperative_base -from paddle.fluid.framework import Program, in_dygraph_mode +from paddle.fluid.framework import Program from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.wrapped_decorator import signature_safe_contextmanager +from paddle.framework import in_dynamic_mode from paddle.optimizer import Optimizer __all__ = [] @@ -183,7 +184,7 @@ class ModelAverage(Optimizer): self.max_average_window = max_average_window self.type = "average_accumulates" - if not framework._non_static_mode(): + if not in_dynamic_mode(): global_block = framework.default_main_program().global_block() all_parameters = ( parameters if parameters else global_block.all_parameters() @@ -235,7 +236,7 @@ class ModelAverage(Optimizer): ) num_updates = self._get_accumulator('num_updates', param_and_grad[0]) - if in_dygraph_mode(): + if in_dynamic_mode(): _, _, _, _, _, _ = _C_ops.average_accumulates_( param_and_grad[0], sum_1, @@ -249,29 +250,6 @@ class ModelAverage(Optimizer): self.min_average_window, ) return None - elif framework._non_static_mode(): - _, _, _, _, _, _ = _legacy_C_ops.average_accumulates( - param_and_grad[0], - sum_1, - sum_2, - sum_3, - num_accumulates, - old_num_accumulates, - num_updates, - sum_1, - sum_2, - sum_3, - num_accumulates, - old_num_accumulates, - num_updates, - 'average_window', - self.average_window, - 'min_average_window', - self.min_average_window, - 'max_average_window', - self.max_average_window, - ) - return None block = framework.default_main_program().global_block() attrs = { @@ -358,7 +336,7 @@ class ModelAverage(Optimizer): modelaverage.clear_grad() """ - if framework._non_static_mode(): + if in_dynamic_mode(): self.step() @framework.dygraph_only @@ -443,7 +421,7 @@ class ModelAverage(Optimizer): for param in linear.parameters(): print(param) """ - if framework._non_static_mode(): + if in_dynamic_mode(): for param in self._parameter_list: num_accumulates = self._get_accumulator( 'num_accumulates', param @@ -522,7 +500,7 @@ class ModelAverage(Optimizer): for param in linear.parameters(): print(param) """ - if framework._non_static_mode(): + if in_dynamic_mode(): for param in self._parameter_list: param_restore = self._get_accumulator('restore', param) paddle.assign(param_restore, param) diff --git a/python/paddle/incubate/tensor/manipulation.py b/python/paddle/incubate/tensor/manipulation.py index 4e39dfa0c79..4e1380a0bd7 100644 --- a/python/paddle/incubate/tensor/manipulation.py +++ b/python/paddle/incubate/tensor/manipulation.py @@ -14,8 +14,8 @@ from paddle import _C_ops from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode __all__ = [] @@ -47,7 +47,7 @@ def _npu_identity(x, format=-1): y = paddle.incubate._npu_identity(x, 3) # ACL_FORMAT_NC1HWC0 = 3 # y.shape = [1, 1, 1, 1, 16] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.npu_identity(x, format) else: check_variable_and_dtype( diff --git a/python/paddle/incubate/tensor/math.py b/python/paddle/incubate/tensor/math.py index e7f8246ba90..04ed1da3e19 100644 --- a/python/paddle/incubate/tensor/math.py +++ b/python/paddle/incubate/tensor/math.py @@ -14,8 +14,8 @@ from paddle import _C_ops from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import 
in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode from paddle.utils import deprecated __all__ = [] @@ -63,7 +63,7 @@ def segment_sum(data, segment_ids, name=None): #Outputs: [[4., 4., 4.], [4., 5., 6.]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.segment_pool(data, segment_ids, "SUM") else: check_variable_and_dtype( @@ -129,7 +129,7 @@ def segment_mean(data, segment_ids, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.segment_pool(data, segment_ids, "MEAN") check_variable_and_dtype( @@ -194,7 +194,7 @@ def segment_min(data, segment_ids, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.segment_pool(data, segment_ids, "MIN") check_variable_and_dtype( @@ -259,7 +259,7 @@ def segment_max(data, segment_ids, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.segment_pool(data, segment_ids, "MAX") return out diff --git a/python/paddle/incubate/xpu/resnet_block.py b/python/paddle/incubate/xpu/resnet_block.py index d5c70e08605..1694679babd 100644 --- a/python/paddle/incubate/xpu/resnet_block.py +++ b/python/paddle/incubate/xpu/resnet_block.py @@ -61,7 +61,7 @@ def resnet_basic_block( find_conv_max=True, ): - if fluid.framework._non_static_mode(): + if fluid.framework.in_dygraph_mode(): attrs = ( 'stride1', stride1, diff --git a/python/paddle/io/dataloader/dataloader_iter.py b/python/paddle/io/dataloader/dataloader_iter.py index 43b749c869d..b212dd42f1d 100644 --- a/python/paddle/io/dataloader/dataloader_iter.py +++ b/python/paddle/io/dataloader/dataloader_iter.py @@ -29,7 +29,7 @@ from paddle.fluid.framework import _current_expected_place, _set_expected_place from paddle.profiler.timer import benchmark from paddle.profiler.utils import in_profiler_mode -from ...framework import core, in_dygraph_mode +from ...framework import core, in_dynamic_mode from ..multiprocess_utils import ( MP_STATUS_CHECK_INTERVAL, CleanupFuncRegistrar, @@ -286,7 +286,7 @@ class _DataLoaderIterSingleProcess(_DataLoaderIterBase): try: benchmark().check_if_need_record(self) benchmark().before_reader() - if in_dygraph_mode(): + if in_dynamic_mode(): data = core.eager.read_next_tensor_list( self._reader.read_next_list()[0] ) @@ -535,7 +535,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): # in order not to restart the thread, we just clear # the blocking_queue cachees instead of recreating one while self._blocking_queue.size() >= len(self._places): - if in_dygraph_mode(): + if in_dynamic_mode(): data = core.eager.read_next_tensor_list( self._reader.read_next_list()[0] ) @@ -820,7 +820,7 @@ class _DataLoaderIterMultiProcess(_DataLoaderIterBase): self._thread_done_event.set() self._blocking_queue.close() - if in_dygraph_mode(): + if in_dynamic_mode(): data = core.eager.read_next_tensor_list( self._reader.read_next_list()[0] ) diff --git a/python/paddle/io/dataloader/dataset.py b/python/paddle/io/dataloader/dataset.py index e8bb6bbd364..4442531f7d1 100755 --- a/python/paddle/io/dataloader/dataset.py +++ b/python/paddle/io/dataloader/dataset.py @@ -268,7 +268,7 @@ class TensorDataset(Dataset): """ def __init__(self, tensors): - if not framework._non_static_mode(): + if not framework.in_dynamic_mode(): raise RuntimeError( "TensorDataset con only be used in imperative mode" ) diff --git a/python/paddle/io/reader.py b/python/paddle/io/reader.py index 6698caa435f..861d1253dcf 100644 --- a/python/paddle/io/reader.py +++ b/python/paddle/io/reader.py @@ -27,9 
+27,8 @@ from ..fluid.framework import ( _current_expected_place, _get_paddle_place, _get_paddle_place_list, - _non_static_mode, ) -from ..framework import core +from ..framework import core, in_dynamic_mode from .dataloader import BatchSampler, IterableDataset, Subset from .dataloader.batch_sampler import _InfiniteIterableSampler from .dataloader.dataloader_iter import ( @@ -413,7 +412,7 @@ class DataLoader: self.dataset = dataset - if not return_list and not _non_static_mode(): + if not return_list and not in_dynamic_mode(): assert ( feed_list is not None ), "feed_list should be set when return_list=False" @@ -494,7 +493,7 @@ class DataLoader: self.auto_collate_batch = self.batch_sampler is not None self.pin_memory = False - if _non_static_mode(): + if in_dynamic_mode(): self.pin_memory = ( True if use_pinned_memory() is None else use_pinned_memory() ) diff --git a/python/paddle/jit/api.py b/python/paddle/jit/api.py index 14c5a4cc0fc..827bb9806a1 100644 --- a/python/paddle/jit/api.py +++ b/python/paddle/jit/api.py @@ -69,9 +69,10 @@ from paddle.fluid.framework import ( _dygraph_guard, _dygraph_tracer, ) -from paddle.fluid.framework import dygraph_only, _non_static_mode +from paddle.fluid.framework import dygraph_only from paddle.fluid.wrapped_decorator import wrap_decorator from paddle.fluid.io import save_inference_model +from paddle.framework import in_dynamic_mode def create_program_from_desc(program_desc): @@ -154,7 +155,7 @@ def _dygraph_to_static_func_(dygraph_func): # TODO: remove this decorator after we finalize training API def __impl__(*args, **kwargs): program_translator = ProgramTranslator() - if _non_static_mode() or not program_translator.enable_to_static: + if in_dynamic_mode() or not program_translator.enable_to_static: logging_utils.warn( "The decorator 'dygraph_to_static_func' doesn't work in " "dygraph mode or set 'paddle.jit.enable_to_static' to False. " @@ -1711,7 +1712,7 @@ class TracedLayer: ), "Inputs should be a list or tuple of variables" assert len(inputs) == len(self._feed_names) feed_dict = {} - if _non_static_mode(): + if in_dynamic_mode(): for x, name in zip(inputs, self._feed_names): feed_dict[name] = x.value().get_tensor() else: diff --git a/python/paddle/jit/dy2static/program_translator.py b/python/paddle/jit/dy2static/program_translator.py index a8be1abb2a1..38801a36606 100644 --- a/python/paddle/jit/dy2static/program_translator.py +++ b/python/paddle/jit/dy2static/program_translator.py @@ -19,13 +19,14 @@ import threading import warnings import weakref -from paddle.fluid import _non_static_mode, core, framework +from paddle.fluid import core, framework from paddle.fluid.data_feeder import check_type from paddle.fluid.dygraph.base import ( _switch_declarative_mode_guard_, param_guard, switch_to_static_graph, ) +from paddle.framework import in_dynamic_mode from paddle.nn.layer import layers from paddle.utils import flatten, gast @@ -457,7 +458,7 @@ class StaticFunction: ) return self._call_dygraph_function(*args, **kwargs) - if not _non_static_mode(): + if not in_dynamic_mode(): raise RuntimeError( "Failed to run the callable object {} decorated by '@paddle.jit.to_static', " "because it is NOT in dynamic mode. 
Please disable the static graph mode to enter dynamic mode with the " diff --git a/python/paddle/jit/translated_layer.py b/python/paddle/jit/translated_layer.py index 06a89ce3640..80706eac762 100644 --- a/python/paddle/jit/translated_layer.py +++ b/python/paddle/jit/translated_layer.py @@ -22,7 +22,8 @@ from paddle import _legacy_C_ops from paddle.fluid import backward, core, framework, unique_name from paddle.fluid.data_feeder import check_type from paddle.fluid.dygraph.base import switch_to_static_graph -from paddle.fluid.framework import OpProtoHolder, _non_static_mode +from paddle.fluid.framework import OpProtoHolder +from paddle.framework import in_dynamic_mode from paddle.jit.dy2static.partial_program import ( LazyInitialized, add_build_strategy_for, @@ -1486,7 +1487,7 @@ class TranslatedLayer(layers.Layer): program_holder = self._program_holder_dict[__i_m_p_l__.__name__] # When using jit.save, it runs in static graph mode. # Run in dynamic graph mode when the model is inferring. - if _non_static_mode(): + if in_dynamic_mode(): return _run_dygraph(self, input, program_holder) else: # NOTE(weixin): [ why not use 'program_holder.infer_program' directly? ] diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py index bf3f164c338..826e781c61d 100644 --- a/python/paddle/metric/metrics.py +++ b/python/paddle/metric/metrics.py @@ -20,8 +20,9 @@ import paddle from paddle import _legacy_C_ops from ..fluid.data_feeder import check_variable_and_dtype -from ..fluid.framework import _create_tensor, _non_static_mode +from ..fluid.framework import _create_tensor from ..fluid.layer_helper import LayerHelper +from ..framework import in_dynamic_mode __all__ = [] @@ -802,7 +803,7 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None): """ if label.dtype == paddle.int32: label = paddle.cast(label, paddle.int64) - if _non_static_mode(): + if in_dynamic_mode(): if correct is None: correct = _create_tensor(dtype="int32") if total is None: diff --git a/python/paddle/nn/clip.py b/python/paddle/nn/clip.py index 1952ea0514b..f8c674c8a0b 100644 --- a/python/paddle/nn/clip.py +++ b/python/paddle/nn/clip.py @@ -18,11 +18,11 @@ from sqlite3 import NotSupportedError import paddle import paddle.autograd as imperative_base -from paddle import _C_ops, _legacy_C_ops +from paddle import _C_ops from paddle.common_ops_import import Variable, check_type, default_main_program from paddle.fluid import core, framework, unique_name from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.framework import LayerHelper, _non_static_mode, in_dygraph_mode +from paddle.framework import LayerHelper, in_dynamic_mode from paddle.tensor.layer_function_generator import templatedoc __all__ = [] @@ -57,10 +57,8 @@ def clip_by_norm(x, max_norm, name=None): # [[0.5, 0.5], [0.5, 0.5]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.clip_by_norm(x, max_norm) - if _non_static_mode(): - return _legacy_C_ops.clip_by_norm(x, 'max_norm', max_norm) helper = LayerHelper("clip_by_norm", **locals()) check_variable_and_dtype( @@ -109,12 +107,9 @@ def merge_selected_rows(x, name=None): type=fluid.core.VarDesc.VarType.SELECTED_ROWS) y = nn.merge_selected_rows(var) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.merge_selected_rows(x) - if _non_static_mode(): - return _legacy_C_ops.merge_selected_rows(x) - helper = LayerHelper("merge_selected_rows", **locals()) out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( @@ -216,7 +211,7 @@ def 
_squared_l2_norm(x): sum_square = paddle.sum(square) return sum_square - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.squared_l2_norm(x) op_type = 'squared_l2_norm' @@ -335,7 +330,7 @@ class ClipGradBase: raise NotImplementedError def __call__(self, params_grads): - if _non_static_mode(): + if in_dynamic_mode(): return self._dygraph_clip(params_grads) else: for p, g in params_grads: @@ -655,7 +650,7 @@ class ClipGradByGlobalNorm(ClipGradBase): continue merge_grad = g - if in_dygraph_mode() and g.is_selected_rows(): + if in_dynamic_mode() and g.is_selected_rows(): merge_grad = merge_selected_rows(g) merge_grad = merge_grad._get_tensor_from_selected_rows() diff --git a/python/paddle/nn/decode.py b/python/paddle/nn/decode.py index 44598d3917a..030a2a04881 100644 --- a/python/paddle/nn/decode.py +++ b/python/paddle/nn/decode.py @@ -20,7 +20,7 @@ import numpy as np import paddle from paddle.common_ops_import import default_main_program -from paddle.framework import _non_static_mode +from paddle.framework import in_dynamic_mode from ..fluid.data_feeder import convert_dtype @@ -1071,7 +1071,7 @@ def dynamic_decode( inits=decoder_cell.get_initial_states(encoder_output), max_step_num=10) """ - if _non_static_mode(): + if in_dynamic_mode(): return _dynamic_decode_imperative( decoder, inits, diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py index 5bb9d2b1d03..9220d14ab80 100644 --- a/python/paddle/nn/functional/activation.py +++ b/python/paddle/nn/functional/activation.py @@ -18,7 +18,7 @@ from paddle.framework import core from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only from ...fluid.data_feeder import check_dtype, check_variable_and_dtype -from ...fluid.framework import convert_np_dtype_to_dtype_, in_dygraph_mode +from ...fluid.framework import convert_np_dtype_to_dtype_ from ...fluid.layer_helper import LayerHelper from ...tensor.manipulation import chunk from ...tensor.math import tanh # noqa: F401 @@ -58,7 +58,7 @@ def celu(x, alpha=1.0, name=None): """ if alpha == 0: raise ZeroDivisionError("alpha cannot be 0 for celu") - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.celu(x, alpha) else: check_variable_and_dtype( @@ -109,7 +109,7 @@ def elu(x, alpha=1.0, name=None): # [ 1. 15.6 ]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.elu(x, alpha) else: @@ -134,7 +134,7 @@ def elu_(x, alpha=1.0, name=None): Please refer to :ref:`api_nn_cn_elu`. """ assert alpha >= 0.0, "elu_ only support alpha >= 0, please use elu instead." - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.elu_(x, alpha) return _legacy_C_ops.elu_(x, 'alpha', alpha) @@ -180,7 +180,7 @@ def gelu(x, approximate=False, name=None): # [ 0.84119201, 1.39957154]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.gelu(x, approximate) else: check_variable_and_dtype( @@ -230,7 +230,7 @@ def hardshrink(x, threshold=0.5, name=None): out = F.hardshrink(x) # [-1., 0., 2.5] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.hardshrink(x, threshold) else: check_variable_and_dtype( @@ -281,7 +281,7 @@ def hardtanh(x, min=-1.0, max=1.0, name=None): out = F.hardtanh(x) # [-1., 0.3, 1.] 
""" - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.hardtanh(x, min, max) else: check_variable_and_dtype( @@ -335,7 +335,7 @@ def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None): out = F.hardsigmoid(x) # [0., 1., 0.666667] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.hardsigmoid(x, slope, offset) else: check_variable_and_dtype( @@ -386,7 +386,7 @@ def hardswish(x, name=None): x = paddle.to_tensor([-4., 5., 1.]) out = F.hardswish(x) # [0., 5., 0.666667] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.hardswish(x) else: check_variable_and_dtype( @@ -435,7 +435,7 @@ def leaky_relu(x, negative_slope=0.01, name=None): # [-0.02, 0., 1.] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.leaky_relu(x, negative_slope) else: check_variable_and_dtype( @@ -534,7 +534,7 @@ def prelu(x, weight, data_format="NCHW", name=None): ), "The weight size should be equal to x input channel in prelu() when weight shape is not [1]." mode = 'channel' - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.prelu(x, weight, data_format, mode) else: check_variable_and_dtype( @@ -657,7 +657,7 @@ def rrelu(x, lower=1.0 / 8.0, upper=1.0 / 3.0, training=True, name=None): is_test = not training - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.rrelu(x, lower, upper, is_test) else: check_variable_and_dtype( @@ -705,7 +705,7 @@ def relu(x, name=None): # [0., 0., 1.] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.relu(x) else: check_variable_and_dtype( @@ -751,7 +751,7 @@ def log_sigmoid(x, name=None): out = F.log_sigmoid(x) # [-0.313262 -0.126928 -0.0485874 -0.0181499] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.logsigmoid(x) else: check_variable_and_dtype( @@ -819,7 +819,7 @@ def maxout(x, groups, axis=1, name=None): # [0.95313174 0.6228939 0.7129065 0.7087491 ] # [0.7142536 0.88725346 0.61093384 0.38833922]]]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.maxout(x, groups, axis) else: check_variable_and_dtype( @@ -871,10 +871,8 @@ def relu6(x, name=None): # [0, 0.3, 6] """ threshold = 6.0 - if in_dygraph_mode(): - return _C_ops.relu6(x) if in_dynamic_mode(): - return _legacy_C_ops.relu6(x, 'threshold', threshold) + return _C_ops.relu6(x) check_variable_and_dtype( x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'relu6' @@ -939,7 +937,7 @@ def selu( f"The alpha must be no less than zero. Received: {alpha}." 
) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.selu(x, scale, alpha) else: check_variable_and_dtype( @@ -983,7 +981,7 @@ def silu(x, name=None): out = F.silu(x) # [ 0.731059, 1.761594, 2.857722, 3.928055 ] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.silu(x) else: check_variable_and_dtype( @@ -1110,7 +1108,7 @@ def softmax(x, axis=-1, dtype=None, name=None): if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)): dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): outs_cast = x if dtype is None else _C_ops.cast(x, dtype) return _C_ops.softmax(outs_cast, axis) else: @@ -1197,7 +1195,7 @@ def softplus(x, beta=1, threshold=20, name=None): out = F.softplus(x) # [0.513015, 0.598139, 0.744397, 0.854355] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.softplus(x, beta, threshold) else: check_variable_and_dtype( @@ -1256,7 +1254,7 @@ def softshrink(x, threshold=0.5, name=None): ) ) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.softshrink(x, threshold) else: check_variable_and_dtype( @@ -1300,10 +1298,8 @@ def softsign(x, name=None): # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True, # [-0.28571430, -0.16666666, 0.09090909, 0.23076925]) """ - if in_dygraph_mode(): - return _C_ops.softsign(x) if in_dynamic_mode(): - return _legacy_C_ops.softsign(x) + return _C_ops.softsign(x) check_variable_and_dtype( x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'softsign' @@ -1341,7 +1337,7 @@ def swish(x, name=None): # Tensor(shape=[3], dtype=float32, place=Place(gpu:0), stop_gradient=True, # [-0.23840584, 0. , 0.73105854]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.swish(x) else: check_variable_and_dtype( @@ -1387,7 +1383,7 @@ def mish(x, name=None): x = paddle.to_tensor([-5., 0., 5.]) out = F.mish(x) # [-0.03357624, 0., 4.99955208] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.mish(x, 20) else: check_variable_and_dtype( @@ -1426,7 +1422,7 @@ def tanhshrink(x, name=None): # Tensor(shape=[4], dtype=float32, place=Place(gpu:0), stop_gradient=True, # [-0.02005106, -0.00262468, 0.00033200, 0.00868741]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.tanh_shrink(x) else: check_variable_and_dtype( @@ -1476,7 +1472,7 @@ def thresholded_relu(x, threshold=1.0, name=None): # [2., 0., 0.]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.thresholded_relu(x, threshold) else: check_variable_and_dtype( @@ -1554,7 +1550,7 @@ def log_softmax(x, axis=-1, dtype=None, name=None): if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)): dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): if dtype is not None: x = _C_ops.cast(x, dtype) return _C_ops.log_softmax(x, axis) @@ -1709,13 +1705,8 @@ def gumbel_softmax(x, temperature=1.0, hard=False, axis=-1, name=None): # [0.00000000, 0.00000000, 0.00000000, 0.00001258, 0.99998736, 0.00000000]] """ - if in_dygraph_mode(): - return _C_ops.gumbel_softmax(x, temperature, hard, axis) - if in_dynamic_mode(): - return _legacy_C_ops.gumbel_softmax( - x, 'temperature', temperature, 'hard', hard, 'axis', axis - ) + return _C_ops.gumbel_softmax(x, temperature, hard, axis) helper = LayerHelper("gumbel_softmax", **locals()) check_variable_and_dtype( diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index 01c323d6596..d788cc46d0a 100644 --- 
a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -15,7 +15,7 @@ import numpy import paddle -from paddle import _C_ops, _legacy_C_ops +from paddle import _C_ops from paddle.common_ops_import import Variable, default_main_program from paddle.fluid.layer_helper import LayerHelper from paddle.framework import core, in_dynamic_mode @@ -26,7 +26,6 @@ from ...fluid.data_feeder import ( check_type, check_variable_and_dtype, ) -from ...fluid.framework import in_dygraph_mode from ...tensor import clip, concat, sqrt, sum from ...tensor.creation import zeros @@ -147,7 +146,7 @@ def unfold(x, kernel_sizes, strides=1, paddings=0, dilations=1, name=None): "of 2 or 4 integers" ) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.unfold(x, kernel_sizes, strides, paddings, dilations) out = helper.create_variable_for_type_inference(dtype=x.dtype) @@ -605,95 +604,80 @@ def interpolate( dy_attr = tuple(attr_list) if resample_type == "linear": - if in_dygraph_mode(): - out = _C_ops.linear_interp( - x, - inputs['OutSize'] if 'OutSize' in inputs else None, - inputs['SizeTensor'] if 'SizeTensor' in inputs else None, - inputs['Scale'] if 'Scale' in inputs else None, - attrs['data_layout'], - attrs['out_d'], - attrs['out_h'], - attrs['out_w'], - attrs['scale'] if 'scale' in attrs else [], - attrs['interp_method'], - attrs['align_corners'], - attrs['align_mode'], - ) - else: - out = _legacy_C_ops.linear_interp_v2(x, *dy_attr) + out = _C_ops.linear_interp( + x, + inputs['OutSize'] if 'OutSize' in inputs else None, + inputs['SizeTensor'] if 'SizeTensor' in inputs else None, + inputs['Scale'] if 'Scale' in inputs else None, + attrs['data_layout'], + attrs['out_d'], + attrs['out_h'], + attrs['out_w'], + attrs['scale'] if 'scale' in attrs else [], + attrs['interp_method'], + attrs['align_corners'], + attrs['align_mode'], + ) elif resample_type == "bilinear": - if in_dygraph_mode(): - out = _C_ops.bilinear_interp( - x, - inputs['OutSize'] if 'OutSize' in inputs else None, - inputs['SizeTensor'] if 'SizeTensor' in inputs else None, - inputs['Scale'] if 'Scale' in inputs else None, - attrs['data_layout'], - attrs['out_d'], - attrs['out_h'], - attrs['out_w'], - attrs['scale'] if 'scale' in attrs else [], - attrs['interp_method'], - attrs['align_corners'], - attrs['align_mode'], - ) - else: - out = _legacy_C_ops.bilinear_interp_v2(x, *dy_attr) + out = _C_ops.bilinear_interp( + x, + inputs['OutSize'] if 'OutSize' in inputs else None, + inputs['SizeTensor'] if 'SizeTensor' in inputs else None, + inputs['Scale'] if 'Scale' in inputs else None, + attrs['data_layout'], + attrs['out_d'], + attrs['out_h'], + attrs['out_w'], + attrs['scale'] if 'scale' in attrs else [], + attrs['interp_method'], + attrs['align_corners'], + attrs['align_mode'], + ) elif resample_type == "trilinear": - if in_dygraph_mode(): - out = _C_ops.trilinear_interp( - x, - inputs['OutSize'] if 'OutSize' in inputs else None, - inputs['SizeTensor'] if 'SizeTensor' in inputs else None, - inputs['Scale'] if 'Scale' in inputs else None, - attrs['data_layout'], - attrs['out_d'], - attrs['out_h'], - attrs['out_w'], - attrs['scale'] if 'scale' in attrs else [], - attrs['interp_method'], - attrs['align_corners'], - attrs['align_mode'], - ) - else: - out = _legacy_C_ops.trilinear_interp_v2(x, *dy_attr) + out = _C_ops.trilinear_interp( + x, + inputs['OutSize'] if 'OutSize' in inputs else None, + inputs['SizeTensor'] if 'SizeTensor' in inputs else None, + inputs['Scale'] if 'Scale' in inputs else None, + attrs['data_layout'], 
+ attrs['out_d'], + attrs['out_h'], + attrs['out_w'], + attrs['scale'] if 'scale' in attrs else [], + attrs['interp_method'], + attrs['align_corners'], + attrs['align_mode'], + ) elif resample_type == "nearest": - if in_dygraph_mode(): - out = _C_ops.nearest_interp( - x, - inputs['OutSize'] if 'OutSize' in inputs else None, - inputs['SizeTensor'] if 'SizeTensor' in inputs else None, - inputs['Scale'] if 'Scale' in inputs else None, - attrs['data_layout'], - attrs['out_d'], - attrs['out_h'], - attrs['out_w'], - attrs['scale'] if 'scale' in attrs else [], - attrs['interp_method'], - attrs['align_corners'], - attrs['align_mode'], - ) - else: - out = _legacy_C_ops.nearest_interp_v2(x, *dy_attr) + out = _C_ops.nearest_interp( + x, + inputs['OutSize'] if 'OutSize' in inputs else None, + inputs['SizeTensor'] if 'SizeTensor' in inputs else None, + inputs['Scale'] if 'Scale' in inputs else None, + attrs['data_layout'], + attrs['out_d'], + attrs['out_h'], + attrs['out_w'], + attrs['scale'] if 'scale' in attrs else [], + attrs['interp_method'], + attrs['align_corners'], + attrs['align_mode'], + ) elif resample_type == "bicubic": - if in_dygraph_mode(): - out = _C_ops.bicubic_interp( - x, - inputs['OutSize'] if 'OutSize' in inputs else None, - inputs['SizeTensor'] if 'SizeTensor' in inputs else None, - inputs['Scale'] if 'Scale' in inputs else None, - attrs['data_layout'], - attrs['out_d'], - attrs['out_h'], - attrs['out_w'], - attrs['scale'] if 'scale' in attrs else [], - attrs['interp_method'], - attrs['align_corners'], - attrs['align_mode'], - ) - else: - out = _legacy_C_ops.bicubic_interp_v2(x, *dy_attr) + out = _C_ops.bicubic_interp( + x, + inputs['OutSize'] if 'OutSize' in inputs else None, + inputs['SizeTensor'] if 'SizeTensor' in inputs else None, + inputs['Scale'] if 'Scale' in inputs else None, + attrs['data_layout'], + attrs['out_d'], + attrs['out_h'], + attrs['out_w'], + attrs['scale'] if 'scale' in attrs else [], + attrs['interp_method'], + attrs['align_corners'], + attrs['align_mode'], + ) return out dtype = helper.input_dtype(input_param_name='x') @@ -941,7 +925,7 @@ def bilinear(x1, x2, weight, bias=None, name=None): # [5, 1000] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.bilinear(x1, x2, weight, bias) else: check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear') @@ -1132,7 +1116,7 @@ def dropout( 'downgrade_in_infer' if mode == 'downscale_in_infer' else mode ) # semantic transfer - if in_dygraph_mode(): + if in_dynamic_mode(): if default_main_program().random_seed != 0: seed = default_main_program().random_seed @@ -1583,7 +1567,7 @@ def pad(x, pad, mode='constant', value=0.0, data_format="NCHW", name=None): paddings = pad pad_value = value - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.pad(x, paddings, float(pad_value)) return out @@ -1681,7 +1665,7 @@ def pad(x, pad, mode='constant', value=0.0, data_format="NCHW", name=None): unsqueezed_dim = [1] x = unsqueeze(x, axis=unsqueezed_dim) - if in_dygraph_mode(): + if in_dynamic_mode(): if isinstance(pad, Variable): pad = pad.tolist() out = _C_ops.pad3d(x, pad, mode, value, data_format) @@ -1853,7 +1837,7 @@ def linear(x, weight, bias=None, name=None): # [0.9440598 0.9440598 0.9440598 0.9440598 ] # [2.1077576 2.1077576 2.1077576 2.1077576 ]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): # TODO(jiabin): using addmm for fast forward route return _C_ops.linear(x, weight, bias) else: @@ -1951,14 +1935,9 @@ def label_smooth(label, prior_dist=None, epsilon=0.1, name=None): if epsilon > 1.0 
or epsilon < 0.0: raise ValueError("The value of epsilon must be between 0 and 1.") - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.label_smooth(label, prior_dist, float(epsilon)) - elif paddle.in_dynamic_mode(): - return _legacy_C_ops.label_smooth( - label, prior_dist, 'epsilon', float(epsilon) - ) - check_variable_and_dtype( label, 'label', @@ -2141,7 +2120,7 @@ def class_center_sample(label, num_classes, num_samples, group=None): if (seed is None or seed == 0) and default_main_program().random_seed != 0: seed = default_main_program().random_seed - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.class_center_sample( label, num_classes, @@ -2152,28 +2131,6 @@ def class_center_sample(label, num_classes, num_samples, group=None): seed is not None, seed if seed is not None else 0, ) - elif paddle.in_dynamic_mode(): - ( - remapped_label, - sampled_class_center, - ) = _legacy_C_ops.class_center_sample( - label, - 'num_classes', - num_classes, - 'num_samples', - num_samples, - 'ring_id', - ring_id, - 'nranks', - nranks, - 'rank', - rank, - 'fix_seed', - seed is not None, - 'seed', - seed if seed is not None else 0, - ) - return remapped_label, sampled_class_center check_variable_and_dtype( label, 'label', ['int64', 'int32'], 'class_center_sample' @@ -2321,24 +2278,10 @@ def fold( "of 2 or 4 integers" ) - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.fold( x, output_sizes, kernel_sizes, strides, paddings, dilations ) - elif in_dynamic_mode(): - out = _legacy_C_ops.fold( - x, - "output_sizes", - output_sizes, - "kernel_sizes", - kernel_sizes, - "strides", - strides, - "paddings", - paddings, - "dilations", - dilations, - ) else: out = helper.create_variable_for_type_inference(dtype=x.dtype) helper.append_op( diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py index e44847b0a9b..671ed1765cf 100644 --- a/python/paddle/nn/functional/conv.py +++ b/python/paddle/nn/functional/conv.py @@ -18,7 +18,7 @@ from paddle.device import ( is_compiled_with_cuda, is_compiled_with_rocm, ) -from paddle.fluid.framework import _global_flags, in_dygraph_mode +from paddle.fluid.framework import _global_flags from paddle.tensor.manipulation import reshape from paddle.tensor.math import _add_with_axis @@ -129,7 +129,7 @@ def _conv_nd( ): # Due to the poor performance of NHWC, we transpose the input to NCHW. 
- if in_dygraph_mode() and op_type == "conv2d": + if in_dynamic_mode() and op_type == "conv2d": pre_bias = _C_ops.conv2d( x, weight, @@ -158,7 +158,7 @@ def _conv_nd( else: return pre_bias - if in_dygraph_mode() and op_type == "depthwise_conv2d": + if in_dynamic_mode() and op_type == "depthwise_conv2d": pre_bias = _C_ops.depthwise_conv2d( x, weight, @@ -177,7 +177,7 @@ def _conv_nd( else: return pre_bias - if in_dygraph_mode() and op_type == "conv3d": + if in_dynamic_mode() and op_type == "conv3d": pre_bias = _C_ops.conv3d( x, weight, @@ -467,7 +467,7 @@ def conv1d( squeeze_aixs = -3 if channel_last else -2 x = unsqueeze(x, axis=[squeeze_aixs]) - if in_dygraph_mode(): + if in_dynamic_mode(): if l_type == 'conv2d': out = _C_ops.conv2d( x, @@ -707,7 +707,7 @@ def conv2d( else: use_cudnn = False else: - if in_dygraph_mode(): + if in_dynamic_mode(): pre_bias = _C_ops.conv2d( x, weight, @@ -1012,7 +1012,7 @@ def conv1d_transpose( x = unsqueeze(x, axis=[squeeze_axis]) weight = unsqueeze(weight, axis=[-1]) - if in_dygraph_mode(): + if in_dynamic_mode(): out = getattr(_C_ops, op_type)( x, weight, @@ -1293,7 +1293,7 @@ def conv2d_transpose( op_type = 'depthwise_conv2d_transpose' use_cudnn = False - if in_dygraph_mode(): + if in_dynamic_mode(): op = ( _C_ops.conv2d_transpose if op_type == 'conv2d_transpose' @@ -1776,7 +1776,7 @@ def conv3d_transpose( op_type = 'conv3d_transpose' data_format_ = "NHWC" if channel_last else "NCHW" - if in_dygraph_mode(): + if in_dynamic_mode(): pre_bias = _C_ops.conv3d_transpose( x, weight, diff --git a/python/paddle/nn/functional/distance.py b/python/paddle/nn/functional/distance.py index 067a49816dd..f1155852b00 100644 --- a/python/paddle/nn/functional/distance.py +++ b/python/paddle/nn/functional/distance.py @@ -14,7 +14,7 @@ import paddle from paddle import _C_ops -from paddle.fluid.framework import in_dygraph_mode +from paddle.framework import in_dynamic_mode from ...fluid.data_feeder import check_type, check_variable_and_dtype from ...fluid.layer_helper import LayerHelper @@ -68,7 +68,7 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None): # [4.99999860, 4.99999860]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): sub = _C_ops.subtract(x, y) # p_norm op has not used epsilon, so change it to the following. 
if epsilon != 0.0: diff --git a/python/paddle/nn/functional/extension.py b/python/paddle/nn/functional/extension.py index b9d226374ae..a5c1e2ef42e 100644 --- a/python/paddle/nn/functional/extension.py +++ b/python/paddle/nn/functional/extension.py @@ -24,7 +24,6 @@ from ...fluid.data_feeder import ( check_type, check_variable_and_dtype, ) -from ...fluid.framework import in_dygraph_mode from ...fluid.layer_helper import LayerHelper from ...framework import convert_np_dtype_to_dtype_, core from ...tensor.creation import assign @@ -102,12 +101,8 @@ def diag_embed(input, offset=0, dim1=-2, dim2=-1): if not isinstance(input, Variable): input = assign(input) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.diag_embed(input, offset, dim1, dim2) - elif in_dynamic_mode(): - return _legacy_C_ops.diag_embed( - input, "offset", offset, "dim1", dim1, "dim2", dim2 - ) inputs = {'Input': [input]} attrs = {'offset': offset, 'dim1': dim1, 'dim2': dim2} @@ -218,7 +213,7 @@ def sequence_mask(x, maxlen=None, dtype='int64', name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) if maxlen is not None: @@ -318,7 +313,7 @@ def gather_tree(ids, parents): if ids.ndim != parents.ndim: raise ValueError("The ids's shape must be the same as parents' shape. ") - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.gather_tree(ids, parents) else: helper = LayerHelper('gather_tree', **locals()) @@ -404,7 +399,7 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"): "Attr(data_format) should be 'NCHW' or 'NHWC'. " "Received Attr(data_format): {}.".format(data_format) ) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.temporal_shift(x, seg_num, shift_ratio, data_format) else: helper = LayerHelper("temporal_shift", **locals()) diff --git a/python/paddle/nn/functional/input.py b/python/paddle/nn/functional/input.py index eccaffcb729..793daddd684 100644 --- a/python/paddle/nn/functional/input.py +++ b/python/paddle/nn/functional/input.py @@ -16,8 +16,8 @@ from paddle import _C_ops from ...common_ops_import import Variable from ...fluid.data_feeder import check_variable_and_dtype -from ...fluid.framework import in_dygraph_mode from ...fluid.layer_helper import LayerHelper +from ...framework import in_dynamic_mode __all__ = [] @@ -85,7 +85,7 @@ def one_hot(x, num_classes, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.one_hot(x, num_classes) else: check_variable_and_dtype(x, 'input', ['int32', 'int64'], 'one_hot_v2') @@ -201,7 +201,7 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None): ) ) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.embedding(x, weight, padding_idx, sparse) else: helper = LayerHelper('embedding', **locals()) diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 7d71178d4a3..403fb299cbc 100644 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -23,7 +23,7 @@ from paddle.utils import deprecated from ...common_ops_import import Variable from ...fluid.data_feeder import check_variable_and_dtype -from ...fluid.framework import _current_expected_place, in_dygraph_mode +from ...fluid.framework import _current_expected_place from ...fluid.layer_helper import LayerHelper from ...tensor.manipulation import reshape @@ -143,7 +143,7 @@ def log_loss(input, label, epsilon=1e-4, name=None): prob = paddle.randn((10,1)) cost = 
F.log_loss(input=prob, label=label) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.log_loss(input, label, epsilon) helper = LayerHelper('log_loss', **locals()) @@ -268,7 +268,7 @@ def fluid_softmax_with_cross_entropy( ) if input_dims - 1 == label_dims: label = paddle.unsqueeze(label, axis=axis) - if in_dygraph_mode(): + if in_dynamic_mode(): softmax, loss = _C_ops.cross_entropy_with_softmax( logits, label, @@ -418,7 +418,7 @@ def square_error_cost(input, label): # [0.01, 0.01] """ - if in_dygraph_mode(): + if in_dynamic_mode(): minus_out = _C_ops.subtract(input, label) square_out = _C_ops.square(minus_out) return square_out @@ -545,7 +545,7 @@ def edit_distance( ) label = erased_label - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.edit_distance( input, label, input_length, label_length, normalized ) @@ -644,7 +644,7 @@ def binary_cross_entropy( % reduction ) - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.bce_loss(input, label) if weight is not None: out = _C_ops.multiply(out, weight, 'axis', -1) @@ -784,7 +784,7 @@ def binary_cross_entropy_with_logits( % reduction ) - if in_dygraph_mode(): + if in_dynamic_mode(): one = _C_ops.full( [1], float(1.0), @@ -963,7 +963,7 @@ def hsigmoid_loss( if num_classes < 2: raise ValueError(f'Expected num_classes >= 2 (got {num_classes})') - if in_dygraph_mode(): + if in_dynamic_mode(): out, _, _ = _C_ops.hsigmoid_loss( input, label, @@ -1080,7 +1080,7 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None): # 0.068004 """ - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.huber_loss(input, label, delta) else: check_variable_and_dtype( @@ -1172,7 +1172,7 @@ def margin_ranking_loss( "The value of 'reduction' in MarginRankingLoss should be 'sum', 'mean' or 'none', but " "received %s, which is not allowed." % reduction ) - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.subtract(other, input) out = _C_ops.multiply(out, label) if margin != 0.0: @@ -1303,7 +1303,7 @@ def l1_loss(input, label, reduction='mean', name=None): "received %s, which is not allowed." % reduction ) - if in_dygraph_mode(): + if in_dynamic_mode(): unreduced = _C_ops.abs(_C_ops.subtract(input, label)) if reduction == 'mean': @@ -1416,7 +1416,7 @@ def nll_loss( n = input_shape[0] c = input_shape[1] - if in_dygraph_mode(): + if in_dynamic_mode(): if input_dims != 2 and input_dims != 4: input = _C_ops.reshape(input, [n, c, 1, -1]) label = _C_ops.reshape(label, [n, 1, -1]) @@ -1646,7 +1646,7 @@ def kl_div(input, label, reduction='mean', name=None): ): label = paddle.cast(label, 'float64') - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.kldiv_loss(input, label, 'none') if reduction == 'mean': out = paddle.mean(out) @@ -1853,7 +1853,7 @@ def ctc_loss( input_length=None, label_length=None, ): - if in_dygraph_mode(): + if in_dynamic_mode(): if input_length is None or label_length is None: raise ValueError( "input_length and label_length must not be None in dygraph mode!" 
@@ -1974,7 +1974,7 @@ def rnnt_loss( def warprnnt( input, label, input_length, label_length, blank=0, fastemit_lambda=0.001 ): - if in_dygraph_mode(): + if in_dynamic_mode(): loss_out = _C_ops.warprnnt( input, label, @@ -2268,7 +2268,7 @@ def margin_cross_entropy( if input_dims - 1 == label_dims: label = paddle.unsqueeze(label, axis=-1) - if in_dygraph_mode(): + if in_dynamic_mode(): softmax, loss = _C_ops.margin_cross_entropy( logits, label, @@ -2699,7 +2699,7 @@ def cross_entropy( ) ) - if in_dygraph_mode(): + if in_dynamic_mode(): if not soft_label: valid_label = ( paddle.cast(label != ignore_index, dtype=label.dtype) * label @@ -3058,7 +3058,7 @@ def sigmoid_focal_loss( ) ) - if in_dygraph_mode(): + if in_dynamic_mode(): place = _current_expected_place() one = _C_ops.full(logit.shape, float(1.0), logit.dtype, place) @@ -3203,7 +3203,7 @@ def multi_label_soft_margin_loss( "but received {}!={}".format(input.shape, label.shape) ) - if not in_dygraph_mode(): + if not in_dynamic_mode(): check_variable_and_dtype( input, 'input', @@ -3223,7 +3223,7 @@ def multi_label_soft_margin_loss( ) if weight is not None: - if not in_dygraph_mode(): + if not in_dynamic_mode(): check_variable_and_dtype( weight, 'weight', @@ -3322,7 +3322,7 @@ def hinge_embedding_loss(input, label, margin=1.0, reduction='mean', name=None): "but received {}.".format(reduction) ) - if not in_dygraph_mode(): + if not in_dynamic_mode(): check_variable_and_dtype( input, 'input', ['float32', 'float64'], 'hinge_embedding_loss' ) @@ -3547,7 +3547,7 @@ def triplet_margin_with_distance_loss( raise ValueError( "The margin between positive samples and negative samples should be greater than 0." ) - if not in_dygraph_mode(): + if not in_dynamic_mode(): check_variable_and_dtype( input, 'input', @@ -3696,7 +3696,7 @@ def triplet_margin_loss( raise ValueError( "The margin between positive samples and negative samples should be greater than 0." 
) - if not in_dygraph_mode(): + if not in_dynamic_mode(): check_variable_and_dtype( input, 'input', ['float32', 'float64'], 'triplet_margin_loss' ) @@ -3806,7 +3806,7 @@ def multi_margin_loss( "but received {}.".format(reduction) ) - if not in_dygraph_mode(): + if not in_dynamic_mode(): check_variable_and_dtype( input, 'input', ['float32', 'float64'], 'multi_margin_loss' ) @@ -3823,7 +3823,7 @@ def multi_margin_loss( label = label.reshape((-1, 1)) index_sample = paddle.index_sample(input, label) if weight is not None: - if not in_dygraph_mode(): + if not in_dynamic_mode(): check_variable_and_dtype( weight, 'weight', ['float32', 'float64'], 'multi_margin_loss' ) @@ -3927,7 +3927,7 @@ def soft_margin_loss(input, label, reduction='mean', name=None): % reduction ) - if not in_dygraph_mode(): + if not in_dynamic_mode(): fluid.data_feeder.check_variable_and_dtype( input, 'input', ['float32', 'float64'], 'soft_margin_loss' ) @@ -4071,7 +4071,7 @@ def gaussian_nll_loss( 'gaussian_nll_loss', ) # Entries of variance must be non-negative - if not in_dygraph_mode(): + if not in_dynamic_mode(): condition = paddle.all(variance > 0) Assert(condition, [variance], 6) else: diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py index 3e6cb3d387c..0320cf2a6bf 100644 --- a/python/paddle/nn/layer/norm.py +++ b/python/paddle/nn/layer/norm.py @@ -32,9 +32,8 @@ import warnings import numpy as np -from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode +from paddle import _C_ops, in_dynamic_mode from paddle.device import get_all_custom_device_type -from paddle.fluid.framework import in_dygraph_mode from ...fluid import dygraph_utils from ...fluid.data_feeder import check_variable_and_dtype @@ -476,7 +475,7 @@ class GroupNorm(Layer): ) def forward(self, input): - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.group_norm( input, self.weight, @@ -1015,7 +1014,7 @@ class BatchNorm(Layer): self._trainable_statistics = trainable_statistics def forward(self, input): - if in_dygraph_mode(): + if in_dynamic_mode(): batch_norm_out, t1, t2, t3, t4, _ = _C_ops.batch_norm( input, self._mean, @@ -1543,7 +1542,7 @@ class SyncBatchNorm(_BatchNormBase): # train mode: use mini-batch stats, eval mode: use global stats # use_global_stats only support False in sync_batch_norm - if in_dygraph_mode(): + if in_dynamic_mode(): sync_batch_norm_out, _, _, _, _, _ = _C_ops.sync_batch_norm_( x, self._mean, @@ -1559,37 +1558,6 @@ class SyncBatchNorm(_BatchNormBase): ) return sync_batch_norm_out - elif in_dynamic_mode(): - attrs = ( - "momentum", - self._momentum, - "epsilon", - self._epsilon, - "is_test", - not self.training, - "data_layout", - self._data_format, - "use_mkldnn", - False, - "fuse_with_relu", - False, - "use_global_stats", - False, - 'trainable_statistics', - False, - ) - sync_batch_norm_out, _, _, _, _, _ = _legacy_C_ops.sync_batch_norm( - x, - self.weight, - self.bias, - self._mean, - self._variance, - mean_out, - variance_out, - *attrs, - ) - return sync_batch_norm_out - check_variable_and_dtype( x, 'input', @@ -1885,7 +1853,7 @@ class SpectralNorm(Layer): def forward(self, x): weight = x - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.spectral_norm( weight, self.weight_u, diff --git a/python/paddle/nn/layer/rnn.py b/python/paddle/nn/layer/rnn.py index 2a0c9157a7a..8990cccce36 100644 --- a/python/paddle/nn/layer/rnn.py +++ b/python/paddle/nn/layer/rnn.py @@ -24,7 +24,6 @@ from paddle.common_ops_import import Variable from paddle.fluid.data_feeder import check_type, 
check_variable_and_dtype from paddle.fluid.dygraph.base import NON_PERSISTABLE_VAR_NAME_SUFFIX from paddle.fluid.framework import ( - _non_static_mode, default_startup_program, in_dygraph_mode, program_guard, @@ -102,7 +101,7 @@ def rnn( """ - if _non_static_mode(): + if in_dygraph_mode(): return _rnn_dynamic_graph( cell, inputs, diff --git a/python/paddle/nn/quant/format.py b/python/paddle/nn/quant/format.py index edd72ad8ce3..245c65510ab 100644 --- a/python/paddle/nn/quant/format.py +++ b/python/paddle/nn/quant/format.py @@ -17,7 +17,7 @@ from typing import List, Tuple import paddle from paddle import _legacy_C_ops as _C_ops -from paddle.framework import in_dygraph_mode +from paddle.framework import in_dynamic_mode from paddle.nn import Layer @@ -57,7 +57,7 @@ class LinearQuanter(Layer): self._bit_length = bit_length def forward(self, input): - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.quantize_linear( input, self._scales, @@ -108,7 +108,7 @@ class LinearDequanter(Layer): self._bit_length = bit_length def forward(self, input): - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.dequantize_linear( input, self._scales, diff --git a/python/paddle/nn/utils/weight_norm_hook.py b/python/paddle/nn/utils/weight_norm_hook.py index 36e62128c08..0c0851cdf31 100644 --- a/python/paddle/nn/utils/weight_norm_hook.py +++ b/python/paddle/nn/utils/weight_norm_hook.py @@ -16,7 +16,7 @@ from paddle import _C_ops from ...fluid.data_feeder import check_variable_and_dtype from ...fluid.layer_helper import LayerHelper -from ...framework import in_dygraph_mode +from ...framework import in_dynamic_mode __all__ = [] @@ -25,7 +25,7 @@ def l2_norm(x, axis, epsilon=1e-12, name=None): if len(x.shape) == 1: axis = 0 - if in_dygraph_mode(): + if in_dynamic_mode(): out, norm = _C_ops.norm(x, 1 if axis is None else axis, epsilon, False) return paddle.squeeze(norm, axis=[axis]) diff --git a/python/paddle/optimizer/adadelta.py b/python/paddle/optimizer/adadelta.py index c760c535da0..0edebb17625 100644 --- a/python/paddle/optimizer/adadelta.py +++ b/python/paddle/optimizer/adadelta.py @@ -18,7 +18,7 @@ from paddle import _C_ops from ..fluid import framework from ..fluid.dygraph import no_grad -from ..framework import in_dygraph_mode +from ..framework import in_dynamic_mode from .optimizer import Optimizer __all__ = [] @@ -190,7 +190,7 @@ class Adadelta(Optimizer): else None ) - if in_dygraph_mode(): + if in_dynamic_mode(): with no_grad(): _C_ops.adadelta_( param_and_grad[0], diff --git a/python/paddle/optimizer/adamw.py b/python/paddle/optimizer/adamw.py index 3af9457142c..284beec9ba5 100644 --- a/python/paddle/optimizer/adamw.py +++ b/python/paddle/optimizer/adamw.py @@ -205,7 +205,7 @@ class AdamW(Optimizer): self._parameter_list = None self._name = name - if framework._non_static_mode(): + if framework.in_dygraph_mode(): if self._parameter_list is None: raise AttributeError( "parameters argument given to the Optimizer should not be None in dygraph mode." 
diff --git a/python/paddle/optimizer/momentum.py b/python/paddle/optimizer/momentum.py index f46cde0b099..e01466bc0f6 100644 --- a/python/paddle/optimizer/momentum.py +++ b/python/paddle/optimizer/momentum.py @@ -16,7 +16,7 @@ import warnings import paddle from paddle import _C_ops -from paddle.fluid.framework import in_dygraph_mode +from paddle.framework import in_dynamic_mode from paddle.regularizer import L2Decay from ..fluid import core, framework @@ -200,7 +200,7 @@ class Momentum(Optimizer): def _create_accumulators(self, block, parameters): ''' - if framework._non_static_mode(): + if framework.in_dynamic_mode(): return ''' assert isinstance(block, framework.Block) @@ -275,7 +275,7 @@ class Momentum(Optimizer): else None ) - if in_dygraph_mode(): + if in_dynamic_mode(): if isinstance(param_and_grad, dict): self._update_regularization(param_and_grad['weight_decay']) return _C_ops.momentum_( @@ -471,7 +471,7 @@ class Momentum(Optimizer): else None ) - if in_dygraph_mode(): + if in_dynamic_mode(): found_inf = self._get_auxiliary_var('found_inf') if found_inf: if isinstance(found_inf, core.eager.Tensor): diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py index df20134e923..5792497a2a3 100644 --- a/python/paddle/optimizer/optimizer.py +++ b/python/paddle/optimizer/optimizer.py @@ -194,7 +194,7 @@ class Optimizer: self._parameter_list = None self._name = name - if framework._non_static_mode(): + if framework.in_dygraph_mode(): if self._parameter_list is None: raise AttributeError( "parameters argument given to the Optimizer should not be None in dygraph mode." @@ -743,7 +743,7 @@ class Optimizer: name in self._accumulators and param.name in self._accumulators[name] ): - if framework._non_static_mode(): + if framework.in_dygraph_mode(): return self._accumulators[name][param.name] raise Exception( "Accumulator {} already exists for parameter {}".format( @@ -790,7 +790,7 @@ class Optimizer: ), ) - if framework._non_static_mode(): + if framework.in_dygraph_mode(): if len(self._accumulators_holder) > 0: assert ( var_name in self._accumulators_holder @@ -949,7 +949,7 @@ class Optimizer: ], param_group_idx, ) - if framework._non_static_mode(): + if framework.in_dygraph_mode(): self._append_optimize_multi_tensor_op( target_block, parameters_and_grads, @@ -980,7 +980,7 @@ class Optimizer: param_group_idx=param_group_idx, ) else: - if not framework._non_static_mode(): + if not framework.in_dygraph_mode(): params_grads_device_map = ( parameters_and_grads['params'] if isinstance(parameters_and_grads, dict) @@ -1010,7 +1010,7 @@ class Optimizer: with paddle.fluid.framework.dygraph_guard_if_declarative(): self._create_accumulators(target_block, params_acc_dict) - if framework._non_static_mode(): + if framework.in_dygraph_mode(): found_inf = self._get_auxiliary_var('found_inf') if found_inf: if isinstance(found_inf, core.eager.Tensor): @@ -1114,7 +1114,7 @@ class Optimizer: adam.clear_grad() """ act_no_grad_set = None - if framework._non_static_mode(): + if framework.in_dygraph_mode(): pass else: act_no_grad_set = self._get_no_grad_set(loss, no_grad_set) @@ -1219,7 +1219,7 @@ class Optimizer: Returns: list: A list of operators appended to the current program. 
""" - if framework._non_static_mode(): + if framework.in_dygraph_mode(): with program_guard( framework.default_main_program(), framework.default_startup_program(), @@ -1320,7 +1320,7 @@ class Optimizer: Exception: Unknown regularization type """ params_and_grads = [] - if framework._non_static_mode(): + if framework.in_dygraph_mode(): for param, grad in parameters_and_grads: new_grad = self._create_regularization_of_grad( param, grad, regularization diff --git a/python/paddle/quantization/quanters/abs_max.py b/python/paddle/quantization/quanters/abs_max.py index ce1e0233e2f..23e50d4f775 100644 --- a/python/paddle/quantization/quanters/abs_max.py +++ b/python/paddle/quantization/quanters/abs_max.py @@ -212,7 +212,7 @@ class FakeQuanterWithAbsMaxObserverLayer(BaseQuanter): return quant_out def forward(self, input): - if paddle.framework.in_dynamic_mode(): + if paddle.in_dynamic_mode(): return self.dynamic_forward(input) else: return self.static_forward(input) diff --git a/python/paddle/regularizer.py b/python/paddle/regularizer.py index 8c245f99b8b..e10bde313fd 100644 --- a/python/paddle/regularizer.py +++ b/python/paddle/regularizer.py @@ -13,7 +13,7 @@ # limitations under the License. -from paddle import _C_ops, _legacy_C_ops +from paddle import _C_ops from paddle.fluid import framework from paddle.fluid.framework import in_dygraph_mode @@ -122,9 +122,9 @@ class L1Decay(WeightDecayRegularizer): assert isinstance(param, framework.Variable) assert isinstance(block, framework.Block) - if framework._non_static_mode(): - sign = block.create_var(dtype=param.dtype, shape=param.shape) - decay = block.create_var(dtype=param.dtype, shape=param.shape) + if in_dygraph_mode(): + sign = _C_ops.sign(param) + return _C_ops.scale(sign, self._coeff, 0.0, True) else: sign = block.create_var( dtype=param.dtype, shape=param.shape, lod_level=param.lod_level @@ -132,22 +132,19 @@ class L1Decay(WeightDecayRegularizer): decay = block.create_var( dtype=param.dtype, shape=param.shape, lod_level=param.lod_level ) - if in_dygraph_mode(): - sign = _C_ops.sign(param) - return _C_ops.scale(sign, self._coeff, 0.0, True) - - # Append sign op - block.append_op(type='sign', inputs={"X": param}, outputs={"Out": sign}) - - # Append scale op to the output of sign op - block.append_op( - type='scale', - inputs={"X": sign}, - outputs={"Out": decay}, - attrs={"scale": self._coeff}, - ) + # Append sign op + block.append_op( + type='sign', inputs={"X": param}, outputs={"Out": sign} + ) - return decay + # Append scale op to the output of sign op + block.append_op( + type='scale', + inputs={"X": sign}, + outputs={"Out": decay}, + attrs={"scale": self._coeff}, + ) + return decay def __str__(self): return "L1Decay, coeff=%f" % self._coeff @@ -231,11 +228,8 @@ class L2Decay(WeightDecayRegularizer): assert isinstance(param, framework.Variable) assert isinstance(block, framework.Block) - if framework._non_static_mode(): - if framework.in_dygraph_mode(): - return _C_ops.scale(param, self._coeff, 0.0, True) - else: - return _legacy_C_ops.scale(param, "scale", self._coeff) + if in_dygraph_mode(): + return _C_ops.scale(param, self._coeff, 0.0, True) else: decay = block.create_var( dtype=param.dtype, shape=param.shape, lod_level=param.lod_level diff --git a/python/paddle/signal.py b/python/paddle/signal.py index e1580b00075..f61df7a2b07 100644 --- a/python/paddle/signal.py +++ b/python/paddle/signal.py @@ -13,12 +13,11 @@ # limitations under the License. 
import paddle -from paddle import _C_ops, _legacy_C_ops -from paddle.fluid.framework import in_dygraph_mode +from paddle import _C_ops +from paddle.framework import in_dynamic_mode from .fft import fft_c2c, fft_c2r, fft_r2c from .fluid.data_feeder import check_variable_and_dtype -from .fluid.framework import _non_static_mode from .fluid.layer_helper import LayerHelper from .tensor.attribute import is_complex @@ -118,14 +117,12 @@ def frame(x, frame_length, hop_length, axis=-1, name=None): f'Unexpected hop_length: {hop_length}. It should be an positive integer.' ) - if _non_static_mode(): + if in_dynamic_mode(): if frame_length > x.shape[axis]: raise ValueError( f'Attribute frame_length should be less equal than sequence length, ' f'but got ({frame_length}) > ({x.shape[axis]}).' ) - - if in_dygraph_mode(): return _C_ops.frame(x, frame_length, hop_length, axis) else: op_type = 'frame' @@ -211,12 +208,8 @@ def overlap_add(x, hop_length, axis=-1, name=None): op_type = 'overlap_add' - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.overlap_add(x, hop_length, axis) - elif paddle.in_dynamic_mode(): - attrs = ('hop_length', hop_length, 'axis', axis) - op = getattr(_legacy_C_ops, op_type) - out = op(x, *attrs) else: check_variable_and_dtype( x, @@ -328,7 +321,7 @@ def stft( if win_length is None: win_length = n_fft - if _non_static_mode(): + if in_dynamic_mode(): assert ( 0 < n_fft <= x.shape[-1] ), f'n_fft should be in (0, seq_length({x.shape[-1]})], but got {n_fft}.' @@ -522,7 +515,7 @@ def istft( n_frames = x.shape[-1] fft_size = x.shape[-2] - if _non_static_mode(): + if in_dynamic_mode(): if onesided: assert ( fft_size == n_fft // 2 + 1 @@ -610,7 +603,7 @@ def istft( window_envelop = window_envelop[start : start + length] # Check whether the Nonzero Overlap Add (NOLA) constraint is met. - if _non_static_mode() and window_envelop.abs().min().item() < 1e-11: + if in_dynamic_mode() and window_envelop.abs().min().item() < 1e-11: raise ValueError( 'Abort istft because Nonzero Overlap Add (NOLA) condition failed. For more information about NOLA constraint please see `scipy.signal.check_NOLA`(https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.check_NOLA.html).' 
) diff --git a/python/paddle/sparse/unary.py b/python/paddle/sparse/unary.py index 45398022589..ef7daa5bbb6 100644 --- a/python/paddle/sparse/unary.py +++ b/python/paddle/sparse/unary.py @@ -22,7 +22,7 @@ from paddle.fluid.framework import ( core, dygraph_only, ) -from paddle.framework import LayerHelper, in_dygraph_mode +from paddle.framework import LayerHelper __all__ = [] @@ -198,7 +198,7 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None): dtype_flag = True dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.sparse_sum(x, axis, dtype, keepdim) else: if axis is None: diff --git a/python/paddle/static/nn/common.py b/python/paddle/static/nn/common.py index cd0b96c04e8..6aeef4934e5 100644 --- a/python/paddle/static/nn/common.py +++ b/python/paddle/static/nn/common.py @@ -26,7 +26,7 @@ from paddle.common_ops_import import ( ) from paddle.fluid import core from paddle.fluid.data_feeder import check_dtype -from paddle.fluid.framework import Variable, _non_static_mode, static_only +from paddle.fluid.framework import Variable, in_dygraph_mode, static_only from paddle.fluid.layers.layer_function_generator import templatedoc from paddle.fluid.param_attr import ParamAttr from paddle.nn.initializer import Constant, Normal @@ -1656,7 +1656,7 @@ def conv2d_transpose( if filter_size is None: if output_size is []: raise ValueError("output_size must be set when filter_size is None") - if not _non_static_mode(): + if not in_dygraph_mode(): if isinstance(output_size, Variable) or paddle.utils._contain_var( output_size ): @@ -2808,7 +2808,7 @@ def batch_norm( # variance and variance_out share the same memory variance_out = variance - if _non_static_mode(): + if in_dygraph_mode(): inputs_has_MomemtumTensor = False attrs_has_momentum = False tmp_tensor_type = core.eager.Tensor @@ -3478,7 +3478,7 @@ def spectral_norm(weight, dim=0, power_iters=1, eps=1e-12, name=None): ) v.stop_gradient = True - if paddle.framework.in_dygraph_mode(): + if in_dygraph_mode(): return paddle._C_ops.spectral_norm(weight, u, v, dim, power_iters, eps) inputs = {'Weight': weight} @@ -3583,7 +3583,7 @@ def layer_norm( print(output.shape) # [8, 32, 32] """ assert ( - _non_static_mode() is not True + in_dygraph_mode() is not True ), "please use LayerNorm instead of layer_norm in dygraph mode!" helper = LayerHelper('layer_norm', **locals()) check_variable_and_dtype( diff --git a/python/paddle/static/nn/control_flow.py b/python/paddle/static/nn/control_flow.py index 79cc848804b..093d63c96e7 100644 --- a/python/paddle/static/nn/control_flow.py +++ b/python/paddle/static/nn/control_flow.py @@ -18,10 +18,10 @@ from functools import partial, reduce import paddle from paddle.common_ops_import import ( LayerHelper, - _non_static_mode, check_type, check_variable_and_dtype, convert_dtype, + in_dygraph_mode, ) from paddle.fluid import core from paddle.fluid.framework import Operator, Program, Variable, static_only @@ -469,7 +469,7 @@ def while_loop(cond, body, loop_vars, is_test=False, name=None): f"but given shape as {list(pre_cond.shape)}." 
) - if _non_static_mode(): + if in_dygraph_mode(): now_cond = pre_cond.item() while now_cond: output_vars = body(*loop_vars) @@ -969,7 +969,7 @@ def cond(pred, true_fn=None, false_fn=None, name=None, return_names=None): # [ True True True]] """ - if _non_static_mode(): + if in_dygraph_mode(): assert isinstance(pred, Variable), "The pred in cond must be Variable" assert pred.size == 1, "condition input's numel should be 1" pred = pred.item() diff --git a/python/paddle/static/nn/metric.py b/python/paddle/static/nn/metric.py index 3c1e0bf3852..e63a5236726 100644 --- a/python/paddle/static/nn/metric.py +++ b/python/paddle/static/nn/metric.py @@ -19,7 +19,7 @@ import numpy as np import paddle from paddle import _legacy_C_ops from paddle.fluid.data_feeder import check_variable_and_dtype -from paddle.fluid.framework import Variable, _create_tensor, _non_static_mode +from paddle.fluid.framework import Variable, _create_tensor, in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper from paddle.nn.initializer import ConstantInitializer @@ -72,7 +72,7 @@ def accuracy(input, label, k=1, correct=None, total=None): # [array(0.33333334, dtype=float32)] """ - if _non_static_mode(): + if in_dygraph_mode(): if correct is None: correct = _create_tensor(dtype="int32") if total is None: diff --git a/python/paddle/static/nn/sequence_lod.py b/python/paddle/static/nn/sequence_lod.py index 882d4a05acd..f9b9ea355e2 100644 --- a/python/paddle/static/nn/sequence_lod.py +++ b/python/paddle/static/nn/sequence_lod.py @@ -15,7 +15,7 @@ import paddle from paddle.fluid.core import VarDesc from paddle.fluid.data_feeder import check_type, check_variable_and_dtype -from paddle.fluid.framework import Variable, _non_static_mode +from paddle.fluid.framework import Variable, in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper from paddle.fluid.layers.layer_function_generator import templatedoc @@ -131,7 +131,7 @@ def sequence_conv( """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." check_variable_and_dtype( input, 'input', ['float32', 'float64'], 'sequence_conv' @@ -233,7 +233,7 @@ def sequence_softmax(input, use_cudnn=False, name=None): x_sequence_softmax_2 = paddle.static.nn.sequence_softmax(input=y) """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_softmax', **locals()) check_variable_and_dtype( @@ -338,7 +338,7 @@ def sequence_pool(input, pool_type, is_test=False, pad_value=0.0): first_x = paddle.static.nn.sequence_pool(input=x, pool_type='first') """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." check_variable_and_dtype( input, 'input', ['float32', 'float64'], 'sequence_pool' @@ -416,7 +416,7 @@ def sequence_concat(input, name=None): out = paddle.static.nn.sequence_concat(input=[x, y]) """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_concat', **locals()) @@ -617,7 +617,7 @@ def sequence_slice(input, offset, length, name=None): length=length) """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." 
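The while_loop hunk from control_flow.py above shows what the unified check buys in eager mode: evaluate the condition once, then run the body in an ordinary Python loop until the condition turns false. A rough sketch of that branch spelled out directly, using a hypothetical one-element counter as the single loop variable:

    import paddle

    i = paddle.zeros([1], dtype='int64')   # loop_vars = [i]
    cond = lambda i: i < 5                  # returns a 1-element bool Tensor
    body = lambda i: [i + 1]

    # what the in_dygraph_mode() branch of while_loop does, expressed in plain Python:
    now_cond = cond(i).item()
    while now_cond:
        (i,) = body(i)
        now_cond = cond(i).item()
    print(i)                                # Tensor holding 5
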
helper = LayerHelper("sequence_slice", **locals()) @@ -771,7 +771,7 @@ def sequence_expand(x, y, ref_level=-1, name=None): # data: [1 2 1 2 3 4 3 4] """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." check_variable_and_dtype( x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sequence_expand' @@ -893,7 +893,7 @@ def sequence_expand_as(x, y, name=None): # data: [1 1 1 2 2 2 3 4] """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." check_variable_and_dtype( x, 'x', ['float32', 'float64', 'int32', 'int64'], 'sequence_expand_as' @@ -996,7 +996,7 @@ def sequence_pad(x, pad_value, maxlen=None, name=None): """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_pad', **locals()) check_variable_and_dtype( @@ -1085,7 +1085,7 @@ def sequence_unpad(x, length, name=None): """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_unpad', **locals()) check_variable_and_dtype( @@ -1163,7 +1163,7 @@ def sequence_reshape(input, new_dim): x_reshaped = paddle.static.nn.sequence_reshape(input=x, new_dim=4) """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_reshape', **locals()) check_variable_and_dtype( @@ -1248,7 +1248,7 @@ def sequence_scatter(input, index, updates, name=None): """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." helper = LayerHelper('sequence_scatter', **locals()) @@ -1330,7 +1330,7 @@ def sequence_enumerate(input, win_size, pad_value=0, name=None): out = paddle.static.nn.sequence_enumerate(input=x, win_size=3, pad_value=0) """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." check_variable_and_dtype( input, 'input', ['int32', 'int64'], 'sequence_enumerate' @@ -1459,7 +1459,7 @@ def sequence_reverse(x, name=None): x_reversed = paddle.static.nn.sequence_reverse(x) """ assert ( - not _non_static_mode() + not in_dygraph_mode() ), "sequence layer is not supported in dygraph mode yet." 
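The sequence_* guards above only swap the name of the mode check; the behaviour is unchanged: these layers are static-graph only and refuse to run under eager execution. A small sketch of how that flag is driven from user code, assuming paddle.in_dynamic_mode() together with paddle.enable_static()/paddle.disable_static() as the public counterparts of the internal check:

    import paddle

    paddle.disable_static()                 # eager (dygraph) execution, the default
    assert paddle.in_dynamic_mode()         # the sequence_* asserts above would fire here

    paddle.enable_static()                  # static graph mode
    assert not paddle.in_dynamic_mode()     # guards pass; ops are appended to the Program

Note that enable_static/disable_static flip process-wide state, which is why a single unified check is enough for every branch touched by this patch.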
helper = LayerHelper("sequence_reverse", **locals()) check_variable_and_dtype( diff --git a/python/paddle/tensor/array.py b/python/paddle/tensor/array.py index eefdf27c1a1..c4503bb1aec 100644 --- a/python/paddle/tensor/array.py +++ b/python/paddle/tensor/array.py @@ -16,7 +16,7 @@ from ..common_ops_import import Variable from ..fluid.data_feeder import check_type, check_variable_and_dtype -from ..framework import LayerHelper, core, in_dygraph_mode +from ..framework import LayerHelper, core, in_dynamic_mode __all__ = [] @@ -45,7 +45,7 @@ def array_length(array): arr_len = paddle.tensor.array_length(arr) print(arr_len) # 1 """ - if in_dygraph_mode(): + if in_dynamic_mode(): assert isinstance( array, list ), "The 'array' in array_write must be a list in dygraph mode" @@ -109,7 +109,7 @@ def array_read(array, i): item = paddle.tensor.array_read(arr, i) print(item) # [[5., 5., 5.]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): assert isinstance( array, list ), "The 'array' in array_read must be list in dygraph mode" @@ -169,7 +169,7 @@ def array_write(x, i, array=None): item = paddle.tensor.array_read(arr, i) print(item) # [[5., 5., 5.]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): assert isinstance( x, Variable ), "The input data 'x' in array_write must be Variable in dygraph mode" @@ -267,7 +267,7 @@ def create_array(dtype, initialized_list=None): ) ) - if in_dygraph_mode(): + if in_dynamic_mode(): return array else: helper = LayerHelper("array", **locals()) diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py index bf6c603a7ec..bc9790e5cb1 100644 --- a/python/paddle/tensor/creation.py +++ b/python/paddle/tensor/creation.py @@ -38,7 +38,7 @@ from ..framework import ( _get_paddle_place, convert_np_dtype_to_dtype_, core, - in_dygraph_mode, + in_dynamic_mode, ) __all__ = [] @@ -312,7 +312,7 @@ def linspace(start, stop, num, dtype=None, name=None): if not isinstance(num, Variable): with device_guard("cpu"): tensor_num = fill_constant([1], 'int32', num, force_cpu=True) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.linspace( tensor_start, tensor_stop, @@ -445,7 +445,7 @@ def logspace(start, stop, num, base=10.0, dtype=None, name=None): if not isinstance(base, Variable): with device_guard("cpu"): tensor_base = fill_constant([1], dtype, base) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.logspace( tensor_start, tensor_stop, @@ -569,12 +569,12 @@ def _to_tensor_non_static(data, dtype=None, place=None, stop_gradient=True): "\n\tFaild to convert input data to a regular ndarray :\n\t - Usually " "this means the input data contains nested lists with different lengths. " ) - elif isinstance(data, paddle.Tensor) and not in_dygraph_mode(): + elif isinstance(data, paddle.Tensor) and not in_dynamic_mode(): data = data._copy_to(place, False) data = _handle_tensor_dtype(data, dtype) data.stop_gradient = stop_gradient return data - elif isinstance(data, core.eager.Tensor) and in_dygraph_mode(): + elif isinstance(data, core.eager.Tensor) and in_dynamic_mode(): data = data._copy_to(place, False) data = _handle_tensor_dtype(data, dtype) data.stop_gradient = stop_gradient @@ -583,7 +583,7 @@ def _to_tensor_non_static(data, dtype=None, place=None, stop_gradient=True): # should't expose it to users, just for internal use. 
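In the array.py hunks above, the dynamic branch represents a tensor array as a plain Python list, which is exactly what the isinstance(array, list) asserts check. A short eager-mode sketch following the docstring examples visible in those hunks; the create_array/full/zeros setup lines are assumed here, matching the usual docstring preamble rather than text shown in the patch:

    import paddle

    arr = paddle.tensor.create_array(dtype="float32")   # just an empty list in dynamic mode
    x = paddle.full(shape=[1, 3], fill_value=5, dtype="float32")
    i = paddle.zeros(shape=[1], dtype="int32")

    arr = paddle.tensor.array_write(x, i, array=arr)    # appends x to the list
    print(paddle.tensor.array_length(arr))              # 1
    print(paddle.tensor.array_read(arr, i))              # [[5., 5., 5.]]
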
# convert core.Tensor/core.LoDTensor to Tensor first # Currenly, there is no copy when places are same - if in_dygraph_mode(): + if in_dynamic_mode(): data = core.eager.Tensor(data) else: data = paddle.Tensor(data) @@ -777,7 +777,7 @@ def to_tensor(data, dtype=None, place=None, stop_gradient=True): if place is None: place = _current_expected_place() - if paddle.fluid.framework._non_static_mode(): + if in_dynamic_mode(): return _to_tensor_non_static(data, dtype, place, stop_gradient) # call assign for static graph @@ -821,7 +821,7 @@ def full_like(x, fill_value, dtype=None, name=None): else: if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.full_like(x, fill_value, dtype, x.place) else: helper = LayerHelper("full_like", **locals()) @@ -868,7 +868,7 @@ def full_like(x, fill_value, dtype=None, name=None): def fill_constant(shape, dtype, value, force_cpu=False, out=None, name=None): - if in_dygraph_mode(): + if in_dynamic_mode(): place = _current_expected_place() if force_cpu: place = core.CPUPlace() @@ -1154,7 +1154,7 @@ def eye(num_rows, num_columns=None, dtype=None, name=None): else: num_columns = num_rows - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.eye( num_rows, num_columns, dtype, _current_expected_place() ) @@ -1302,7 +1302,7 @@ def arange(start=0, end=None, step=1, dtype=None, name=None): start = 0 out_shape = None - if not in_dygraph_mode() and ( + if not in_dynamic_mode() and ( not isinstance(start, Variable) and not isinstance(end, Variable) and not isinstance(step, Variable) @@ -1330,7 +1330,7 @@ def arange(start=0, end=None, step=1, dtype=None, name=None): elif step.dtype != dtype: step = paddle.cast(step, dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.arange(start, end, step, dtype, _current_expected_place()) else: check_dtype( @@ -1445,7 +1445,7 @@ def tril(x, diagonal=0, name=None): # [5 , 0 , 0 , 0 ], # [9 , 10, 0 , 0 ]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.tril(x, diagonal) else: return _tril_triu_op(LayerHelper('tril', **locals())) @@ -1507,7 +1507,7 @@ def triu(x, diagonal=0, name=None): # [0 , 10, 11, 12]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.triu(x, diagonal) else: return _tril_triu_op(LayerHelper('triu', **locals())) @@ -1548,7 +1548,7 @@ def meshgrid(*args, **kwargs): if len(args) == 1 and isinstance(args[0], (list, tuple)): args = args[0] - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.meshgrid(list(args)) else: name = kwargs.get("name", None) @@ -1664,7 +1664,7 @@ def diagflat(x, offset=0, name=None): # [0, 0, 3, 0, 0], # [0, 0, 0, 4, 0]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): if len(x.shape) <= 1: return _C_ops.diag(x, offset, 0) else: @@ -1787,7 +1787,7 @@ def diag(x, offset=0, padding_value=0, name=None): # Tensor(shape=[1], dtype=int64, place=Place(cpu), stop_gradient=True, # [4]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.diag(x, offset, padding_value) else: check_type(x, 'x', (Variable), 'diag_v2') @@ -1869,7 +1869,7 @@ def empty(shape, dtype=None, name=None): dtype = convert_dtype(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): shape = paddle.utils.convert_shape_to_list(shape) out = _C_ops.empty( shape, convert_np_dtype_to_dtype_(dtype), _current_expected_place() @@ -1950,7 +1950,7 @@ def empty_like(x, dtype=None, name=None): dtype = x.dtype dtype = convert_dtype(dtype) - if in_dygraph_mode(): + if 
in_dynamic_mode(): out = _C_ops.empty( x.shape, convert_np_dtype_to_dtype_(dtype), @@ -2056,11 +2056,11 @@ def assign(x, output=None): input = np.array(input) # NOTE(Aurelius84): Why we judge core.Tensor? # In case of @to_static, a Tensor can be as input of `assign`, - # but _non_static_mode()==False under @to_static, which means + # but in_dynamic_mode()==False under @to_static, which means # isinstance(Tensor, Variable) == False. It will cause return None # after this api. if isinstance(input, (Variable, core.eager.Tensor)): - if in_dygraph_mode(): + if in_dynamic_mode(): if output is None: output = _C_ops.assign(input) else: @@ -2154,7 +2154,7 @@ def assign(x, output=None): "The size of input is too big. Please consider " "saving it to file and 'load_op' to load it" ) - if in_dygraph_mode(): + if in_dynamic_mode(): if output is None: output = zeros(list(input.shape), dtype) _C_ops.assign_value_( @@ -2313,7 +2313,7 @@ def complex(real, imag, name=None): # [[0j , 1j , 2j ], # [(1+0j), (1+1j), (1+2j)]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.complex(real, imag) else: check_variable_and_dtype( @@ -2385,7 +2385,7 @@ def tril_indices(row, col, offset=0, dtype='int64'): if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): if col is None: col = row out = _C_ops.tril_indices( @@ -2464,7 +2464,7 @@ def triu_indices(row, col=None, offset=0, dtype='int64'): if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): if col is None: col = row out = _C_ops.triu_indices( diff --git a/python/paddle/tensor/layer_function_generator.py b/python/paddle/tensor/layer_function_generator.py index a2ab51d286d..884cecac0f5 100644 --- a/python/paddle/tensor/layer_function_generator.py +++ b/python/paddle/tensor/layer_function_generator.py @@ -27,7 +27,7 @@ from ..framework import ( OpProtoHolder, convert_np_dtype_to_dtype_, core, - in_dygraph_mode, + in_dynamic_mode, ) __all__ = [] @@ -267,7 +267,7 @@ def generate_activation_fn(op_type): op_proto = OpProtoHolder.instance().get_op_proto(op_type) def func(x, name=None): - if in_dygraph_mode(): + if in_dynamic_mode(): if hasattr(_C_ops, op_type): op = getattr(_C_ops, op_type) return op(x) @@ -346,7 +346,7 @@ def generate_inplace_fn(inplace_op_type): def func(x, name=None): - if in_dygraph_mode(): + if in_dynamic_mode(): if hasattr(_C_ops, inplace_op_type): op = getattr(_C_ops, inplace_op_type) return op(x) diff --git a/python/paddle/tensor/linalg.py b/python/paddle/tensor/linalg.py index 3b3c7565ab3..db0805669a1 100644 --- a/python/paddle/tensor/linalg.py +++ b/python/paddle/tensor/linalg.py @@ -24,7 +24,7 @@ from ..fluid.data_feeder import ( check_type, check_variable_and_dtype, ) -from ..framework import LayerHelper, in_dygraph_mode +from ..framework import LayerHelper, in_dynamic_mode from .creation import full from .manipulation import cast @@ -84,7 +84,7 @@ def transpose(x, perm, name=None): # [3L, 2L, 4L] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.transpose(x, perm) else: check_variable_and_dtype( @@ -229,7 +229,7 @@ def matmul(x, y, transpose_x=False, transpose_y=False, name=None): # (10, 3, 5, 5) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.matmul(x, y, transpose_x, transpose_y) else: attrs = { @@ -367,7 +367,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): "The dim of frobenius norm op should be None or two 
elements list!" ) - if in_dygraph_mode(): + if in_dynamic_mode(): if dim is None: return _C_ops.frobenius_norm(input, [], keepdim, True) return _C_ops.frobenius_norm(input, dim, keepdim, False) @@ -403,7 +403,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): axis (int, optional): None for last dimension. keepdim (bool, optional): Whether keep the dimensions as the `input`, Default False. """ - if in_dygraph_mode(): + if in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.p_norm(input, porder, axis, 1e-12, keepdim, asvector) @@ -442,7 +442,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): def inf_norm( input, porder=None, axis=axis, keepdim=False, asvector=False, name=None ): - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.abs(input) if porder == np.float64('inf'): return _C_ops.max(out, axis, keepdim) @@ -486,7 +486,7 @@ def norm(x, p='fro', axis=None, keepdim=False, name=None): NOTE: This function actually treats the matrix as flattened vector to calculate vector norm instead of matrix norm. """ - if in_dygraph_mode(): + if in_dynamic_mode(): abs_out = _C_ops.abs(input) pow_out = _C_ops.pow(abs_out, porder) sum_out = _C_ops.sum(pow_out, axis, None, keepdim) @@ -701,7 +701,7 @@ def dist(x, y, p=2, name=None): out = paddle.dist(x, y, float("-inf")) print(out) # out = 0. """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.dist(x, y, p) check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'dist') @@ -825,7 +825,7 @@ def cond(x, p=None, name=None): Calculate the matrix norm of a square matrix or batches of square matrices, when porder is in (1, -1, inf, -inf) """ - if in_dygraph_mode(): + if in_dynamic_mode(): abs_out = _C_ops.abs(input) sum_out = _C_ops.sum(abs_out, axis, None, False) @@ -888,7 +888,7 @@ def cond(x, p=None, name=None): NOTE: Calculate the frobenius norm of a square matrix or batches of square matrices. """ - if in_dygraph_mode(): + if in_dynamic_mode(): pow_out = _C_ops.pow(input, porder) sum_out_1 = _C_ops.sum(pow_out, axis, None, False) sum_out_2 = _C_ops.sum(sum_out_1, axis, None, False) @@ -950,7 +950,7 @@ def cond(x, p=None, name=None): """ u, s, vh = svd(input, full_matrices=False) - if in_dygraph_mode(): + if in_dynamic_mode(): if porder == "nuc": return _C_ops.sum(s, axis, None, False) max_out = _C_ops.max(s, axis, False) @@ -1021,7 +1021,7 @@ def cond(x, p=None, name=None): return out def empty_tensor(input, shape): - if in_dygraph_mode(): + if in_dynamic_mode(): return input.reshape(shape) raise ValueError( "only support x is nonempty tensor in static graph mode" @@ -1104,7 +1104,7 @@ def dot(x, y, name=None): print(z) # [32, 64] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.dot(x, y) else: op_type = 'dot' @@ -1306,7 +1306,7 @@ def t(input, name=None): "length of Input(input) is %s. Perhaps you can use paddle." "tensor.transpose() instead." % len(input.shape) ) - if in_dygraph_mode(): + if in_dynamic_mode(): if len(input.shape) <= 1: return input # 2-D tensor @@ -1374,7 +1374,7 @@ def cross(x, y, axis=9, name=None): # [0. 0. 0.] # [0. 0. 
0.]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): axis = K_DEFAULT_DIM if axis is None else axis return _C_ops.cross(x, y, axis) else: @@ -1440,7 +1440,7 @@ def cholesky(x, upper=False, name=None): out = paddle.linalg.cholesky(x, upper=False) print(out) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.cholesky(x, upper) else: check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'cholesky') @@ -1495,7 +1495,7 @@ def matrix_rank(x, tol=None, hermitian=False, name=None): # [1, 1, 1, 1]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): if isinstance(tol, Variable): if tol.dtype != x.dtype: tol_tensor = cast(tol, x.dtype) @@ -1580,7 +1580,7 @@ def bmm(x, y, name=None): # [60., 60.]]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.bmm(x, y) else: x_shape = x.shape @@ -1636,7 +1636,7 @@ def histogram(input, bins=100, min=0, max=0, name=None): result = paddle.histogram(inputs, bins=4, min=0, max=3) print(result) # [0, 2, 1, 0] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.histogram(input, bins, min, max) else: helper = LayerHelper('histogram', **locals()) @@ -1683,7 +1683,7 @@ def bincount(x, weights=None, minlength=0, name=None): if x.dtype not in [paddle.int32, paddle.int64]: raise TypeError("Elements in Input(x) should all be integers") - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.bincount(x, weights, minlength) else: helper = LayerHelper('bincount', **locals()) @@ -1739,7 +1739,7 @@ def mv(x, vec, name=None): # Tensor(shape=[2], dtype=float64, place=Place(cpu), stop_gradient=True, # [14., 10.]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.mv(x, vec) else: @@ -1804,7 +1804,7 @@ def det(x, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.det(x) else: check_dtype(x.dtype, 'Input', ['float16', 'float32', 'float64'], 'det') @@ -1863,7 +1863,7 @@ def slogdet(x, name=None): # [-0.98610914, -0.43010661, -0.10872950]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.slogdet(x) else: check_dtype(x.dtype, 'Input', ['float32', 'float64'], 'slogdet') @@ -1945,7 +1945,7 @@ def svd(x, full_matrices=False, name=None): # V * VH == I """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.svd(x, full_matrices) else: check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'svd') @@ -2019,7 +2019,7 @@ def matrix_power(x, n, name=None): # [-7.66666667 , 8. 
, -1.83333333 ], # [ 1.80555556 , -1.91666667 , 0.44444444 ]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.matrix_power(x, n) else: check_variable_and_dtype( @@ -2079,7 +2079,7 @@ def qr(x, mode="reduced", name=None): # one can verify : X = Q * R ; """ - if in_dygraph_mode(): + if in_dynamic_mode(): q, r = _C_ops.qr(x, mode) if mode == "r": return r @@ -2182,7 +2182,7 @@ def lu(x, pivot=True, get_infos=False, name=None): # one can verify : X = P @ L @ U ; """ - if in_dygraph_mode(): + if in_dynamic_mode(): lu, p, info = _C_ops.lu(x, pivot) else: check_variable_and_dtype(x, 'dtype', ['float32', 'float64'], 'lu') @@ -2277,7 +2277,7 @@ def lu_unpack(x, y, unpack_ludata=True, unpack_pivots=True, name=None): # one can verify : X = P @ L @ U ; """ - if in_dygraph_mode(): + if in_dynamic_mode(): P, L, U = _C_ops.lu_unpack(x, y, unpack_ludata, unpack_pivots) return P, L, U else: @@ -2348,7 +2348,7 @@ def eig(x, name=None): # (-0.21026087843552282+0j)]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.eig(x) else: check_variable_and_dtype( @@ -2417,7 +2417,7 @@ def eigvals(x, name=None): ) ) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.eigvals(x) else: check_variable_and_dtype( @@ -2488,7 +2488,7 @@ def multi_dot(x, name=None): # [10, 7] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.multi_dot(x) else: check_type(x, 'x', (list, tuple), 'multi_dot') @@ -2546,7 +2546,7 @@ def eigh(x, UPLO='L', name=None): #[ 0.3826834323650898j , -0.9238795325112867j ]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.eigh(x, UPLO) else: @@ -2653,7 +2653,7 @@ def pinv(x, rcond=1e-15, hermitian=False, name=None): # one can verify : x * out * x = x ; # or out * x * out = x ; """ - if in_dygraph_mode(): + if in_dynamic_mode(): if not hermitian: # combine svd and matmul op u, s, vt = _C_ops.svd(x, False) @@ -2876,7 +2876,7 @@ def solve(x, y, name=None): print(out) # [2., 3.]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.solve(x, y) else: inputs = {"X": [x], "Y": [y]} @@ -2945,7 +2945,7 @@ def triangular_solve( print(out) # [7, -2, -5] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.triangular_solve(x, y, upper, transpose, unitriangular) else: inputs = {"X": [x], "Y": [y]} @@ -3004,7 +3004,7 @@ def cholesky_solve(x, y, upper=False, name=None): print(out) # [-2.5, -7, 9.5] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.cholesky_solve(x, y, upper) else: helper = LayerHelper("cholesky_solve", **locals()) @@ -3051,7 +3051,7 @@ def eigvalsh(x, UPLO='L', name=None): # Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True, # [0.17157286, 5.82842731]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): values, _ = _C_ops.eigvalsh(x, UPLO, x.stop_gradient) return values else: @@ -3202,7 +3202,7 @@ def lstsq(x, y, rcond=None, driver=None, name=None): elif x.dtype == paddle.float64: rcond = 1e-15 * max(x.shape[-2], x.shape[-1]) - if in_dygraph_mode(): + if in_dynamic_mode(): solution, residuals, rank, singular_values = _C_ops.lstsq( x, y, rcond, driver ) diff --git a/python/paddle/tensor/logic.py b/python/paddle/tensor/logic.py index cf3c1f5284d..8848f80949d 100755 --- a/python/paddle/tensor/logic.py +++ b/python/paddle/tensor/logic.py @@ -25,13 +25,13 @@ Tensor = paddle.fluid.framework.core.eager.Tensor from paddle import _C_ops from paddle.tensor.creation import full -from ..framework import LayerHelper, in_dygraph_mode +from ..framework import LayerHelper, 
in_dynamic_mode __all__ = [] def _logical_op(op_name, x, y, out=None, name=None, binary_op=True): - if in_dygraph_mode(): + if in_dynamic_mode(): op = getattr(_C_ops, op_name) if binary_op: return op(x, y) @@ -131,7 +131,7 @@ def logical_and(x, y, out=None, name=None): res = paddle.logical_and(x, y) print(res) # [True False True False] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.logical_and(x, y) return _logical_op( @@ -176,7 +176,7 @@ def logical_or(x, y, out=None, name=None): # [[True , True ], # [True , False]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.logical_or(x, y) return _logical_op( op_name="logical_or", x=x, y=y, name=name, out=out, binary_op=True @@ -220,7 +220,7 @@ def logical_xor(x, y, out=None, name=None): # [[False, True ], # [True , False]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.logical_xor(x, y) return _logical_op( @@ -260,7 +260,7 @@ def logical_not(x, out=None, name=None): res = paddle.logical_not(x) print(res) # [False True False True] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.logical_not(x) return _logical_op( op_name="logical_not", x=x, y=None, name=name, out=out, binary_op=False @@ -292,7 +292,7 @@ def is_empty(x, name=None): # False) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.is_empty(x) else: check_variable_and_dtype( @@ -338,7 +338,7 @@ def equal_all(x, y, name=None): result2 = paddle.equal_all(x, z) print(result2) # result2 = False """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.equal_all(x, y) else: helper = LayerHelper("equal_all", **locals()) @@ -400,7 +400,7 @@ def allclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): # True """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.allclose(x, y, rtol, atol, equal_nan) else: check_variable_and_dtype( @@ -464,7 +464,7 @@ def equal(x, y, name=None): if not isinstance(y, Variable): y = full(shape=[], dtype=x.dtype, fill_value=y) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.equal(x, y) else: check_variable_and_dtype( @@ -533,7 +533,7 @@ def greater_equal(x, y, name=None): result1 = paddle.greater_equal(x, y) print(result1) # result1 = [True False True] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.greater_equal(x, y) else: check_variable_and_dtype( @@ -602,7 +602,7 @@ def greater_than(x, y, name=None): result1 = paddle.greater_than(x, y) print(result1) # result1 = [False False True] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.greater_than(x, y) else: check_variable_and_dtype( @@ -672,7 +672,7 @@ def less_equal(x, y, name=None): result1 = paddle.less_equal(x, y) print(result1) # result1 = [True True False] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.less_equal(x, y) else: check_variable_and_dtype( @@ -742,7 +742,7 @@ def less_than(x, y, name=None): result1 = paddle.less_than(x, y) print(result1) # result1 = [False True False] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.less_than(x, y) else: check_variable_and_dtype( @@ -812,7 +812,7 @@ def not_equal(x, y, name=None): result1 = paddle.not_equal(x, y) print(result1) # result1 = [False True True] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.not_equal(x, y) else: check_variable_and_dtype( @@ -880,14 +880,14 @@ def is_tensor(x): print(check) #False """ - if in_dygraph_mode(): + if in_dynamic_mode(): return isinstance(x, (Tensor, paddle.fluid.core.eager.Tensor)) else: return isinstance(x, 
Variable) def _bitwise_op(op_name, x, y, out=None, name=None, binary_op=True): - if in_dygraph_mode(): + if in_dynamic_mode(): op = getattr(_C_ops, op_name) if binary_op: return op(x, y) @@ -959,7 +959,7 @@ def bitwise_and(x, y, out=None, name=None): res = paddle.bitwise_and(x, y) print(res) # [0, 2, 1] """ - if in_dygraph_mode() and out is None: + if in_dynamic_mode() and out is None: return _C_ops.bitwise_and(x, y) return _bitwise_op( op_name="bitwise_and", x=x, y=y, name=name, out=out, binary_op=True @@ -996,7 +996,7 @@ def bitwise_or(x, y, out=None, name=None): res = paddle.bitwise_or(x, y) print(res) # [-1, -1, -3] """ - if in_dygraph_mode() and out is None: + if in_dynamic_mode() and out is None: return _C_ops.bitwise_or(x, y) return _bitwise_op( @@ -1034,7 +1034,7 @@ def bitwise_xor(x, y, out=None, name=None): res = paddle.bitwise_xor(x, y) print(res) # [-1, -3, -4] """ - if in_dygraph_mode() and out is None: + if in_dynamic_mode() and out is None: return _C_ops.bitwise_xor(x, y) return _bitwise_op( op_name="bitwise_xor", x=x, y=y, name=name, out=out, binary_op=True @@ -1069,7 +1069,7 @@ def bitwise_not(x, out=None, name=None): res = paddle.bitwise_not(x) print(res) # [4, 0, -2] """ - if in_dygraph_mode() and out is None: + if in_dynamic_mode() and out is None: return _C_ops.bitwise_not(x) return _bitwise_op( @@ -1126,7 +1126,7 @@ def isclose(x, y, rtol=1e-05, atol=1e-08, equal_nan=False, name=None): # [True, True] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.isclose(x, y, rtol, atol, equal_nan) else: check_variable_and_dtype( diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 1371db3ea93..3828b097f8d 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -33,7 +33,7 @@ from ..framework import ( convert_np_dtype_to_dtype_, core, dygraph_only, - in_dygraph_mode, + in_dynamic_mode, ) from .creation import _complex_to_real_dtype, _real_to_complex_dtype, zeros @@ -120,7 +120,7 @@ def tensor_array_to_tensor(input, axis=1, use_stack=False, name=None): paddle.tensor.array.array_write(x1, i + 1, array) output, output_index = paddle.tensor.manipulation.tensor_array_to_tensor(input=array) """ - if in_dygraph_mode(): + if in_dynamic_mode(): assert isinstance( input, list ), "The 'input' in tensor_array_to_tensor must be list" @@ -178,7 +178,7 @@ def cast(x, dtype): x = paddle.to_tensor([2, 3, 4], 'float64') y = paddle.cast(x, 'uint8') """ - if in_dygraph_mode(): + if in_dynamic_mode(): if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) return _C_ops.cast(x, dtype) @@ -298,7 +298,7 @@ def slice(input, axes, starts, ends): sliced_2 = paddle.slice(input, axes=axes, starts=[minus_3, 0, 2], ends=ends) # sliced_2 is input[1:3, 0:2, 2:4]. 
""" - if in_dygraph_mode(): + if in_dynamic_mode(): attrs = () starts_tensor = None ends_tensor = None @@ -465,7 +465,7 @@ def transpose(x, perm, name=None): # [3L, 2L, 4L] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.transpose(x, perm) else: check_variable_and_dtype( @@ -545,7 +545,7 @@ def unstack(x, axis=0, num=None): raise ValueError( '`axis` must be in the range [-{0}, {0})'.format(x.ndim) ) - if in_dygraph_mode(): + if in_dynamic_mode(): if num is None: num = x.shape[axis] if num == 0: @@ -618,7 +618,7 @@ def shard_index(input, index_num, nshards, shard_id, ignore_value=-1): print(shard_label) # [[-1], [1]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.shard_index( input, index_num, nshards, shard_id, ignore_value ) @@ -751,7 +751,7 @@ def crop(x, shape=None, offsets=None, name=None): if shape is None: shape = x.shape - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.crop(x, shape, offsets) out = helper.create_variable_for_type_inference(x.dtype) @@ -934,7 +934,7 @@ def fill_diagonal_(x, value, offset=0, wrap=False, name=None): x.fill_diagonal_(1.0) print(x.tolist()) #[[1.0, 2.0, 2.0], [2.0, 1.0, 2.0], [2.0, 2.0, 1.0], [2.0, 2.0, 2.0]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): if len(x.shape) == 2: return _C_ops.fill_diagonal_(x, value, offset, wrap) return _C_ops.fill_diagonal_(x, value, offset, True) @@ -1113,7 +1113,7 @@ def concat(x, axis=0, name=None): # [14 15 16]] """ input = x - if in_dygraph_mode(): + if in_dynamic_mode(): if isinstance(axis, Variable): axis = axis.item(0) if not isinstance(input, Variable): @@ -1226,7 +1226,7 @@ def broadcast_tensors(input, name=None): """ num_inputs = len(input) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.broadcast_tensors(input) else: check_type(input, 'input', (list, tuple), 'broadcast_tensors') @@ -1340,7 +1340,7 @@ def flip(x, axis, name=None): if isinstance(axis, int): axis = [axis] - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.flip(x, axis) else: helper = LayerHelper("flip", **locals()) @@ -1580,7 +1580,7 @@ def flatten(x, start_axis=0, stop_axis=-1, name=None): if start_axis > stop_axis: raise ValueError("The stop_axis should be larger than stat_axis") - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.flatten(x, start_axis, stop_axis) else: check_variable_and_dtype( @@ -1644,7 +1644,7 @@ def flatten_(x, start_axis=0, stop_axis=-1, name=None): if start_axis > stop_axis: raise ValueError("The stop_axis should be larger than stat_axis") - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.flatten_(x, start_axis, stop_axis) @@ -1709,7 +1709,7 @@ def roll(x, shifts, axis=None, name=None): else: axis = [] - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.roll(x, shifts, axis) else: check_variable_and_dtype( @@ -1838,7 +1838,7 @@ def stack(x, axis=0, name=None): """ axis = 0 if axis is None else axis - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.stack(x, axis) else: if not isinstance(x, list) and not isinstance(x, tuple): @@ -1952,7 +1952,7 @@ def split(x, num_or_sections, axis=0, name=None): """ input = x dim = axis - if in_dygraph_mode(): + if in_dynamic_mode(): if isinstance(dim, Variable): dim = dim.item(0) assert len(input.shape) + dim >= 0, "(rank(x) + axis) must >= 0" @@ -2201,7 +2201,7 @@ def squeeze(x, axis=None, name=None): input = x axes = axis - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.squeeze(input, axes) else: helper = LayerHelper("squeeze", **locals()) @@ -2261,7 
+2261,7 @@ def squeeze_(x, axis=None, name=None): input = x axes = axis - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.squeeze_(input, axes) @@ -2342,7 +2342,7 @@ def unique_consecutive( else: axis = [axis] attr_dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): out, inverse, counts = _C_ops.unique_consecutive( x, return_inverse, return_counts, axis, attr_dtype ) @@ -2469,7 +2469,7 @@ def unique( else: axis = [axis] attr_dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): out, indices, inverse, counts = _C_ops.unique( x, return_index, return_inverse, return_counts, axis, attr_dtype ) @@ -2592,7 +2592,7 @@ def unsqueeze(x, axis, name=None): """ input = x axes = axis - if in_dygraph_mode(): + if in_dynamic_mode(): if isinstance(axes, int): axes = [axes] elif isinstance(axes, Variable): @@ -2721,7 +2721,7 @@ def gather(x, index, axis=None, name=None): if axis is None: axis = 0 - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.gather(x, index, axis) else: check_variable_and_dtype( @@ -2805,7 +2805,7 @@ def unbind(input, axis=0): f'The axis must in range({-input.ndim}, {input.ndim}).' ) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.unbind(input, axis) else: if isinstance(axis, np.generic): @@ -2919,7 +2919,7 @@ def scatter(x, index, updates, overwrite=True, name=None): # [2., 2.], # [1., 1.]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.scatter(x, index, updates, overwrite) else: check_variable_and_dtype( @@ -3018,7 +3018,7 @@ def scatter_nd_add(x, index, updates, name=None): print(output.shape) # [3, 5, 9, 10] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.scatter_nd_add(x, index, updates) else: if x.dtype != updates.dtype: @@ -3156,7 +3156,7 @@ def tile(x, repeat_times, name=None): # Tensor(shape=[1, 6], dtype=int32, place=Place(gpu:0), stop_gradient=True, # [[1, 2, 3, 1, 2, 3]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): if isinstance(repeat_times, core.eager.Tensor): assert ( repeat_times.ndim == 1 @@ -3269,7 +3269,7 @@ def expand_as(x, y, name=None): # [[1, 2, 3], # [1, 2, 3]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.expand_as(x, None, y.shape) else: check_variable_and_dtype( @@ -3336,7 +3336,7 @@ def broadcast_to(x, shape, name=None): print(out) # [[1, 2, 3], [1, 2, 3]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.expand(x, shape) else: if isinstance(shape, Variable): @@ -3438,7 +3438,7 @@ def expand(x, shape, name=None): print(out) # [[1, 2, 3], [1, 2, 3]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.expand(x, shape) else: if isinstance(shape, Variable): @@ -3572,7 +3572,7 @@ def reshape(x, shape, name=None): # the value is [10.] """ - if in_dygraph_mode(): + if in_dynamic_mode(): if isinstance(shape, (list, tuple)): new_shape = [] for ele in shape: @@ -3681,7 +3681,7 @@ def reshape_(x, shape, name=None): Inplace version of ``reshape`` API, the output Tensor will be inplaced with input ``x``. Please refer to :ref:`api_paddle_tensor_reshape`. 
""" - if in_dygraph_mode(): + if in_dynamic_mode(): tmp_tensor_type = core.eager.Tensor if isinstance(shape, (list, tuple)): shape = [ @@ -3776,7 +3776,7 @@ def gather_nd(x, index, name=None): output = paddle.gather_nd(x, index) #[[3, 4]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.gather_nd(x, index) else: check_variable_and_dtype( @@ -3894,7 +3894,7 @@ def strided_slice(x, axes, starts, ends, strides, name=None): sliced_2 = paddle.strided_slice(x, axes=axes, starts=[minus_3, 0, 2], ends=ends, strides=strides_2) # sliced_2 is x[:, 1:3:1, 0:2:1, 2:4:2]. """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.strided_slice(x, axes, starts, ends, strides) else: helper = LayerHelper('strided_slice', **locals()) @@ -4143,7 +4143,7 @@ def tensordot(x, y, axes=2, name=None): check_type(axes, 'axes', (int, tuple, list, Variable), op_type) def _var_to_list(var): - if in_dygraph_mode(): + if in_dynamic_mode(): return tolist(var) raise TypeError( "The 'axes' with type 'Tensor' in " @@ -4266,7 +4266,7 @@ def as_complex(x, name=None): # [[1j , (2+3j) , (4+5j) ], # [(6+7j) , (8+9j) , (10+11j)]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.as_complex(x) else: check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'as_complex') @@ -4319,7 +4319,7 @@ def as_real(x, name=None): # [8. , 9. ], # [10., 11.]]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.as_real(x) else: check_variable_and_dtype(x, 'x', ['complex64', 'complex128'], 'as_real') @@ -4374,7 +4374,7 @@ def repeat_interleave(x, repeats, axis=None, name=None): x = paddle.flatten(x) axis = 0 - if in_dygraph_mode(): + if in_dynamic_mode(): if isinstance(repeats, Variable): return _C_ops.repeat_interleave_with_tensor_index(x, repeats, axis) return _C_ops.repeat_interleave(x, repeats, axis) @@ -4485,7 +4485,7 @@ def moveaxis(x, source, destination, name=None): for i in range(len(src_dims)): perm[dst_dims[i]] = src_dims[i] - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.transpose(x, perm) return out else: @@ -4578,7 +4578,7 @@ def take_along_axis(arr, indices, axis): if not broadcast_shape: # if indices matrix have larger size than arr, arr should broadcast into indices shape. 
broadcast_shape = indices.shape - if in_dygraph_mode(): + if in_dynamic_mode(): indices = paddle.broadcast_to(indices, broadcast_shape) broadcast_shape_list = list(broadcast_shape) broadcast_shape_list[axis] = list(arr.shape)[axis] @@ -4655,7 +4655,7 @@ def put_along_axis(arr, indices, values, axis, reduce='assign'): ) axis = non_negative_axis(arr, axis) broadcast_shape = infer_broadcast_shape(arr, indices, axis) - if in_dygraph_mode(): + if in_dynamic_mode(): values = ( paddle.to_tensor(values) if not isinstance(values, paddle.Tensor) @@ -4752,7 +4752,7 @@ def index_add(x, index, axis, value, name=None): # [1., 1., 1.], # [2., 2., 2.]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.index_add(x, index, value, axis) helper = LayerHelper("index_add", **locals()) @@ -4887,7 +4887,7 @@ def index_put(x, indices, value, accumulate=False, name=None): # [0., 0., 1.], # [0., 1., 0.]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.index_put(x, indices, value, accumulate) helper = LayerHelper("index_put", **locals()) diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index d36e0ff3064..e5a4cbe9a8e 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -37,7 +37,7 @@ from ..framework import ( _dygraph_tracer, convert_np_dtype_to_dtype_, core, - in_dygraph_mode, + in_dynamic_mode, ) from .creation import _complex_to_real_dtype from .layer_function_generator import generate_layer_fn, templatedoc @@ -155,7 +155,7 @@ def log(x, name=None): res = paddle.log(x) # [[0.693147, 1.09861, 1.38629], [1.94591, 2.07944, 2.19722]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.log(x) else: check_variable_and_dtype( @@ -216,7 +216,7 @@ def scale(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): if act is None: return _C_ops.scale(x, scale, float(bias), bias_after_scale) out = _C_ops.scale(x, scale, float(bias), bias_after_scale) @@ -284,7 +284,7 @@ def stanh(x, scale_a=0.67, scale_b=1.7159, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.stanh(x, scale_a, scale_b) else: check_variable_and_dtype( @@ -352,7 +352,7 @@ def multiplex(inputs, index, name=None): print(res) # Tensor([[5., 6.], [3., 4.]], dtype=float32) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.multiplex(inputs, index) else: helper = LayerHelper('multiplex', **locals()) @@ -388,7 +388,7 @@ def scale_(x, scale=1.0, bias=0.0, bias_after_scale=True, act=None, name=None): Inplace version of ``scale`` API, the output Tensor will be inplaced with input ``x``. Please refer to :ref:`api_tensor_scale`. """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.scale_(x, scale, float(bias), bias_after_scale) @@ -440,7 +440,7 @@ def pow(x, y, name=None): """ # in dynamic graph mode - if in_dygraph_mode(): + if in_dynamic_mode(): if isinstance(y, (int, float)): return _C_ops.pow(x, y) elif isinstance(y, (paddle.Tensor, Variable)): @@ -612,7 +612,7 @@ def add(x, y, name=None): print(z) # [3., 8., 6. ] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.add(x, y) else: return _elementwise_op(LayerHelper('elementwise_add', **locals())) @@ -750,7 +750,7 @@ def subtract(x, y, name=None): # Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True, # [ 4. 
, inf., -inf.]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.subtract(x, y) else: return _elementwise_op(LayerHelper('elementwise_sub', **locals())) @@ -806,7 +806,7 @@ def divide(x, y, name=None): print(z) # [2., 0.6, 2.] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.divide(x, y) else: return _elementwise_op(LayerHelper('elementwise_div', **locals())) @@ -849,7 +849,7 @@ def floor_divide(x, y, name=None): print(z) # [2, 0, 2, 2] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.floor_divide(x, y) else: return _elementwise_op(LayerHelper('elementwise_floordiv', **locals())) @@ -888,7 +888,7 @@ def remainder(x, y, name=None): print(z) # [0, 3, 2, 1] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.remainder(x, y) else: return _elementwise_op(LayerHelper('elementwise_mod', **locals())) @@ -951,7 +951,7 @@ def multiply(x, y, name=None): print(res) # [[[2, 4, 6], [2, 4, 6]]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.multiply(x, y) else: if x.dtype != y.dtype: @@ -990,8 +990,8 @@ def _elementwise_op_with_axis_in_dygraph( x, y, axis=-1, name=None, op_type="Undifined" ): assert ( - in_dygraph_mode() - ), "You can only call `_elementwise_op_with_axis_in_dygraph` function within in_dygraph_mode" + in_dynamic_mode() + ), "You can only call `_elementwise_op_with_axis_in_dygraph` function within in_dynamic_mode" assert op_type in ["add", "subtract", "multiply", "divide"], ( "op_name input error! _elementwise_op_with_axis is an inner function to replace elementwise_add/sub/mul/div. Input op_name=%s, Expect op_name=[add|subtract|multiply|divide]\n" % op_type @@ -1012,7 +1012,7 @@ def _elementwise_op_with_axis_in_dygraph( def _add_with_axis(x, y, axis=-1, name=None): # opt performance, only dynamic mode needs reshape - if in_dygraph_mode(): + if in_dynamic_mode(): return _elementwise_op_with_axis_in_dygraph(x, y, axis, name, "add") else: op_type = 'elementwise_add' @@ -1021,7 +1021,7 @@ def _add_with_axis(x, y, axis=-1, name=None): def _subtract_with_axis(x, y, axis=-1, name=None): # opt performance, only dynamic mode needs reshape - if in_dygraph_mode(): + if in_dynamic_mode(): return _elementwise_op_with_axis_in_dygraph( x, y, axis, name, "subtract" ) @@ -1032,7 +1032,7 @@ def _subtract_with_axis(x, y, axis=-1, name=None): def _multiply_with_axis(x, y, axis=-1, name=None): # opt performance, only dynamic mode needs reshape - if in_dygraph_mode(): + if in_dynamic_mode(): return _elementwise_op_with_axis_in_dygraph( x, y, axis, name, "multiply" ) @@ -1043,7 +1043,7 @@ def _multiply_with_axis(x, y, axis=-1, name=None): def _divide_with_axis(x, y, axis=-1, name=None): # opt performance, only dynamic mode needs reshape - if in_dygraph_mode(): + if in_dynamic_mode(): return _elementwise_op_with_axis_in_dygraph(x, y, axis, name, "divide") else: op_type = 'elementwise_div' @@ -1106,7 +1106,7 @@ def maximum(x, y, name=None): # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True, # [5. , 3. , inf.]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.maximum(x, y) else: return _elementwise_op(LayerHelper('elementwise_max', **locals())) @@ -1168,7 +1168,7 @@ def minimum(x, y, name=None): # Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True, # [ 1. , -inf., 5. 
]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.minimum(x, y) else: return _elementwise_op(LayerHelper('elementwise_min', **locals())) @@ -1232,7 +1232,7 @@ def fmax(x, y, name=None): # Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True, # [5. , 3. , inf.]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.fmax(x, y) else: return _elementwise_op(LayerHelper('elementwise_fmax', **locals())) @@ -1296,7 +1296,7 @@ def fmin(x, y, name=None): # Tensor(shape=[3], dtype=float64, place=Place(cpu), stop_gradient=True, # [ 1. , -inf., 5. ]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.fmin(x, y) else: return _elementwise_op(LayerHelper('elementwise_fmin', **locals())) @@ -1367,7 +1367,7 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None): dtype_flag = True dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.sum(x, axis, dtype, keepdim) else: reduce_all, axis = _get_reduce_axis_with_tensor(axis, x) @@ -1711,7 +1711,7 @@ def add_n(inputs, name=None): # [[8., 10., 12.], # [14., 16., 18.]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): if isinstance(inputs, Variable): inputs = [inputs] return _C_ops.add_n(inputs) @@ -1783,7 +1783,7 @@ def trunc(input, name=None): # [[0., 0.], # [0., 0.]])) ''' - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.trunc(input) else: inputs = {"X": input} @@ -1868,7 +1868,7 @@ def mm(input, mat2, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.matmul(input, mat2, False, False) else: @@ -2009,7 +2009,7 @@ def addmm(input, x, y, beta=1.0, alpha=1.0, name=None): ) ) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.addmm(input, x, y, beta, alpha) else: inputs = {'Input': input, "X": x, "Y": y} @@ -2082,7 +2082,7 @@ def renorm(x, p, axis, max_norm): ) ) axis = axis + len(input_shape) - if in_dygraph_mode(): + if in_dynamic_mode(): out = _C_ops.renorm(x, p, axis, max_norm) return out else: @@ -2137,7 +2137,7 @@ def inner(x, y, name=None): nx = x.reshape((-1, xshape[-1])) ny = y.reshape((-1, yshape[-1])) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.matmul(nx, ny.T, False, False).reshape(dstshape) else: @@ -2204,7 +2204,7 @@ def outer(x, y, name=None): nx = x.reshape((-1, 1)) ny = y.reshape((1, -1)) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.matmul(nx, ny, False, False) else: @@ -2269,7 +2269,7 @@ def logsumexp(x, axis=None, keepdim=False, name=None): """ reduce_all, axis = _get_reduce_axis(axis, x) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.logsumexp(x, axis, keepdim, reduce_all) else: check_variable_and_dtype( @@ -2312,7 +2312,7 @@ def inverse(x, name=None): print(inv) # [[0.5, 0], [0, 0.5]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.inverse(x) else: @@ -2412,7 +2412,7 @@ def max(x, axis=None, keepdim=False, name=None): #[7., 8.], [[[0., 0.], [0., 0.]], [[0., 0.], [1., 1.]]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.max(x, axis, keepdim) else: reduce_all, axis = _get_reduce_axis_with_tensor(axis, x) @@ -2513,7 +2513,7 @@ def min(x, axis=None, keepdim=False, name=None): #[1., 2.], [[[1., 1.], [0., 0.]], [[0., 0.], [0., 0.]]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.min(x, axis, keepdim) else: reduce_all, axis = _get_reduce_axis_with_tensor(axis, x) @@ -2624,7 +2624,7 @@ def amax(x, axis=None, keepdim=False, name=None): print(result6, y.grad) #[0.9., 0.9], [[[0., 
0.3333], [0.5, 0.3333]], [[0.5, 0.3333], [1., 1.]]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.amax(x, axis, keepdim) else: @@ -2734,7 +2734,7 @@ def amin(x, axis=None, keepdim=False, name=None): print(result6, y.grad) #[0.1., 0.1], [[[0., 0.3333], [0.5, 0.3333]], [[0.5, 0.3333], [1., 1.]]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.amin(x, axis, keepdim) else: @@ -2778,7 +2778,7 @@ def log1p(x, name=None): # [[0.], [0.6931472]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.log1p(x) else: check_variable_and_dtype( @@ -2830,7 +2830,7 @@ def log2(x, name=None): res = paddle.log2(x_i) print(res) # [1.0] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.log2(x) else: check_variable_and_dtype( @@ -2882,7 +2882,7 @@ def log10(x, name=None): res = paddle.log10(x_i) print(res) # [1.0] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.log10(x) else: check_variable_and_dtype( @@ -2946,7 +2946,7 @@ def clip(x, min=None, max=None, name=None): min_ = float(np.finfo(np.float32).min) max_ = float(np.finfo(np.float32).max) - if in_dygraph_mode(): + if in_dynamic_mode(): if isinstance(min, Variable): min = min.item(0) if isinstance(max, Variable): @@ -3024,7 +3024,7 @@ def clip_(x, min=None, max=None, name=None): min = fmin if min is None else min max = fmax if max is None else max - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.clip_(x, min, max) @@ -3101,7 +3101,7 @@ def trace(x, offset=0, axis1=0, axis2=1, name=None): "But received axis1 = %d, axis2 = %d\n" % (axis1, axis2) ) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.trace(x, offset, axis1, axis2) else: __check_input(x, offset, axis1, axis2) @@ -3183,7 +3183,7 @@ def diagonal(x, offset=0, axis1=0, axis2=1, name=None): # [0.17020577, 0.27325270]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.diagonal(x, offset, axis1, axis2) else: @@ -3284,7 +3284,7 @@ def kron(x, y, name=None): # [12, 15, 18, 16, 20, 24], # [21, 24, 27, 28, 32, 36]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _legacy_C_ops.kron(x, y) else: helper = LayerHelper('kron', **locals()) @@ -3350,7 +3350,7 @@ def cumsum(x, axis=None, dtype=None, name=None): if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype): x = cast(x, dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.cumsum(x, axis, flatten, False, False) @@ -3426,7 +3426,7 @@ def logcumsumexp(x, axis=None, dtype=None, name=None): if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype): x = cast(x, dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): if axis is None: axis = -1 return _C_ops.logcumsumexp(x, axis, flatten, False, False) @@ -3500,7 +3500,7 @@ def cumprod(x, dim=None, dtype=None, name=None): if dtype is not None and x.dtype != convert_np_dtype_to_dtype_(dtype): x = cast(x, dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.cumprod(x, dim) else: check_variable_and_dtype( @@ -3552,7 +3552,7 @@ def isfinite(x, name=None): out = paddle.isfinite(x) print(out) # [False True True False True False False] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.isfinite(x) else: helper = LayerHelper("isfinite_v2", **locals()) @@ -3597,7 +3597,7 @@ def isinf(x, name=None): out = paddle.isinf(x) print(out) # [ True False False True False False False] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.isinf(x) else: helper = LayerHelper("isinf_v2", 
**locals()) @@ -3640,7 +3640,7 @@ def isnan(x, name=None): out = paddle.isnan(x) print(out) # [False False False False False True True] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.isnan(x) else: helper = LayerHelper("isnan_v2", **locals()) @@ -3727,7 +3727,7 @@ def prod(x, axis=None, keepdim=False, dtype=None, name=None): x = cast(x, dtype) reduce_all, axis = _get_reduce_axis_with_tensor(axis, x) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.prod(x, axis, keepdim, reduce_all) else: helper = LayerHelper('reduce_prod', **locals()) @@ -3769,7 +3769,7 @@ def sign(x, name=None): out = paddle.sign(x=x) print(out) # [1.0, 0.0, -1.0, 1.0] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.sign(x) else: check_variable_and_dtype( @@ -3808,7 +3808,7 @@ def tanh(x, name=None): print(out) # [-0.37994896 -0.19737532 0.09966799 0.29131261] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.tanh(x) else: check_variable_and_dtype( @@ -3853,7 +3853,7 @@ def increment(x, value=1.0, name=None): # [1.] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.increment_(x, value) else: check_variable_and_dtype( @@ -3918,7 +3918,7 @@ def all(x, axis=None, keepdim=False, name=None): print(out4) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.all(x, axis, keepdim) else: reduce_all, axis = _get_reduce_axis(axis, x) @@ -3992,7 +3992,7 @@ def any(x, axis=None, keepdim=False, name=None): print(out4) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.any(x, axis, keepdim) else: reduce_all, axis = _get_reduce_axis(axis, x) @@ -4078,7 +4078,7 @@ def conj(x, name=None): # [(4-4j), (5-5j), (6-6j)]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.conj(x) else: check_variable_and_dtype( @@ -4132,7 +4132,7 @@ def digamma(x, name=None): # [ nan , 5.32286835]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.digamma(x) else: check_variable_and_dtype( @@ -4169,7 +4169,7 @@ def lgamma(x, name=None): print(out) # [1.31452441, 1.76149750, 2.25271273, 1.09579802] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.lgamma(x) else: check_variable_and_dtype( @@ -4251,7 +4251,7 @@ def atan2(x, y, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.atan2(x, y) else: check_variable_and_dtype( @@ -4316,7 +4316,7 @@ def logit(x, eps=None, name=None): """ if eps is None: eps = 0.0 - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.logit(x, eps) else: check_variable_and_dtype( @@ -4366,7 +4366,7 @@ def lerp(x, y, weight, name=None): if isinstance(weight, float): weight = paddle.full(shape=[], fill_value=weight, dtype=x.dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.lerp(x, y, weight) else: check_variable_and_dtype( @@ -4432,7 +4432,7 @@ def erfinv(x, name=None): # out: [0, 0.4769, -inf] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.erfinv(x) else: check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'erfinv') @@ -4494,7 +4494,7 @@ def rad2deg(x, name=None): # 57.29578018) """ rad2deg_scale = 180 / np.pi - if in_dygraph_mode(): + if in_dynamic_mode(): if convert_dtype(x.dtype) in ['int32', 'int64']: x = cast(x, dtype="float32") return _C_ops.scale(x, rad2deg_scale, 0.0, True) @@ -4558,7 +4558,7 @@ def deg2rad(x, name=None): # 3.14159274) """ deg2rad_scale = np.pi / 180.0 - if in_dygraph_mode(): + if in_dynamic_mode(): if convert_dtype(x.dtype) in ['int32', 'int64']: x = cast(x, dtype="float32") return 
_C_ops.scale(x, deg2rad_scale, 0.0, True) @@ -4661,7 +4661,7 @@ def gcd(x, y, name=None): ) return (paddle.where(x < y, y, x), paddle.where(x < y, x, y)) - if in_dygraph_mode(): + if in_dynamic_mode(): while _gcd_cond_fn(x, y): x, y = _gcd_body_fn(x, y) @@ -4798,7 +4798,7 @@ def diff(x, n=1, axis=-1, prepend=None, append=None, name=None): dtype = x.dtype axes = [axis] infer_flags = [1 for i in range(len(axes))] - if in_dygraph_mode(): + if in_dynamic_mode(): has_pend = False input_list = [] if prepend is not None and append is not None: @@ -4950,7 +4950,7 @@ def angle(x, name=None): # [-1.10714877, -0.78539819, 0. , 0.78539819]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.angle(x) else: check_variable_and_dtype( @@ -5018,7 +5018,7 @@ def heaviside(x, y, name=None): # [[0. , 0.20000000, 1. ], # [0. , 1. , 0.30000001]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.heaviside(x, y) else: op_type = 'elementwise_heaviside' @@ -5060,7 +5060,7 @@ def frac(x, name=None): x.dtype ) ) - if in_dygraph_mode(): + if in_dynamic_mode(): y = _C_ops.trunc(x) return _C_ops.subtract(x, y) else: @@ -5200,7 +5200,7 @@ def take(x, index, mode='raise', name=None): ) ) - if in_dygraph_mode(): + if in_dynamic_mode(): if not isinstance(index, (paddle.Tensor, Variable)): raise TypeError( "The type of 'index' must be Tensor, but got {}".format( @@ -5575,7 +5575,7 @@ def nextafter(x, y, name=None): #Tensor(shape=[2], dtype=float32, place=Place(cpu), stop_gradient=True, # [1.00000012, 1.99999988]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.nextafter(x, y) else: check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'nextafter') @@ -5614,7 +5614,7 @@ def i0(x, name=None): print(paddle.i0(x)) # (Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True, [0.99999994 , 1.26606596 , 2.27958512 , 4.88079262 , 11.30192089]), """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.i0(x) else: check_variable_and_dtype(x, "x", ["float32", "float64"], "i0") @@ -5651,7 +5651,7 @@ def i0e(x, name=None): print(paddle.i0e(x)) # (Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True, [1., 0.46575961, 0.30850832, 0.24300035, 0.20700192]), """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.i0e(x) else: check_variable_and_dtype(x, "x", ["float32", "float64"], "i0e") @@ -5682,7 +5682,7 @@ def i1(x, name=None): print(paddle.i1(x)) # (Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True, [0., 0.5651591 , 1.59063685 , 3.95337022 , 9.75946515]), """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.i1(x) else: check_variable_and_dtype(x, "x", ["float32", "float64"], "i1") @@ -5716,7 +5716,7 @@ def i1e(x, name=None): print(paddle.i1e(x)) # (Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True, [0., 0.20791042, 0.21526929, 0.24300035, 0.17875084]), """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.i1e(x) else: check_variable_and_dtype(x, "x", ["float32", "float64"], "i1e") diff --git a/python/paddle/tensor/ops.py b/python/paddle/tensor/ops.py index 5fd9372c9cd..2218235d172 100644 --- a/python/paddle/tensor/ops.py +++ b/python/paddle/tensor/ops.py @@ -14,8 +14,7 @@ from .. 
import _C_ops from ..fluid.data_feeder import check_variable_and_dtype -from ..fluid.framework import in_dygraph_mode -from ..framework import LayerHelper +from ..framework import LayerHelper, in_dynamic_mode from .layer_function_generator import ( add_sample_code, generate_activation_fn, @@ -217,7 +216,7 @@ def acos(x, name=None): # [1.98231317 1.77215425 1.47062891 1.26610367] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.acos(x) else: check_variable_and_dtype( @@ -254,7 +253,7 @@ def acosh(x, name=None): # [0. , 1.76274729, 2.06343699, 2.29243159] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.acosh(x) else: check_variable_and_dtype( @@ -291,7 +290,7 @@ def asin(x, name=None): # [-0.41151685 -0.20135792 0.10016742 0.30469265] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.asin(x) else: check_variable_and_dtype( @@ -328,7 +327,7 @@ def asinh(x, name=None): # [-0.39003533, -0.19869010, 0.09983408, 0.29567307] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.asinh(x) else: check_variable_and_dtype( @@ -365,7 +364,7 @@ def atan(x, name=None): # [-0.38050638 -0.19739556 0.09966865 0.29145679] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.atan(x) else: check_variable_and_dtype( @@ -402,7 +401,7 @@ def atanh(x, name=None): # [-0.42364895, -0.20273256, 0.10033535, 0.30951962] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.atanh(x) else: check_variable_and_dtype( @@ -440,7 +439,7 @@ def ceil(x, name=None): # [-0. -0. 1. 1.] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.ceil(x) else: check_variable_and_dtype( @@ -479,7 +478,7 @@ def cos(x, name=None): # [0.92106099 0.98006658 0.99500417 0.95533649] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.cos(x) else: check_variable_and_dtype( @@ -518,7 +517,7 @@ def cosh(x, name=None): # [1.08107237 1.02006676 1.00500417 1.04533851] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.cosh(x) else: check_variable_and_dtype( @@ -556,7 +555,7 @@ def exp(x, name=None): # [0.67032005 0.81873075 1.10517092 1.34985881] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.exp(x) else: check_variable_and_dtype( @@ -607,7 +606,7 @@ def expm1(x, name=None): # [-0.32967997, -0.18126924, 0.10517092, 0.34985882] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.expm1(x) else: check_variable_and_dtype( @@ -645,7 +644,7 @@ def floor(x, name=None): # [-1. -1. 0. 0.] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.floor(x) else: check_variable_and_dtype( @@ -683,7 +682,7 @@ def reciprocal(x, name=None): # [-2.5 -5. 10. 3.33333333] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.reciprocal(x) else: check_variable_and_dtype( @@ -730,7 +729,7 @@ def round(x, name=None): # [-1. -0. 1. 2.] 
""" - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.round(x) else: check_variable_and_dtype( @@ -769,7 +768,7 @@ def rsqrt(x, name=None): # [3.16227766 2.23606798 1.82574186 1.58113883] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.rsqrt(x) else: check_variable_and_dtype( @@ -807,7 +806,7 @@ def sigmoid(x, name=None): # [0.40131234 0.450166 0.52497919 0.57444252] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.sigmoid(x) else: check_variable_and_dtype( @@ -844,7 +843,7 @@ def sin(x, name=None): # [-0.38941834 -0.19866933 0.09983342 0.29552021] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.sin(x) else: check_variable_and_dtype( @@ -881,7 +880,7 @@ def sinh(x, name=None): # [-0.41075233 -0.201336 0.10016675 0.30452029] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.sinh(x) else: check_variable_and_dtype( @@ -917,7 +916,7 @@ def sqrt(x, name=None): print(out) # [0.31622777 0.4472136 0.54772256 0.63245553] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.sqrt(x) else: check_variable_and_dtype( @@ -956,7 +955,7 @@ def square(x, name=None): print(out) # [0.16 0.04 0.01 0.09] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.square(x) else: check_variable_and_dtype( @@ -1006,7 +1005,7 @@ def tan(x, name=None): # [-0.42279324, -0.20271005, 0.10033467, 0.30933627] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.tan(x) else: check_variable_and_dtype( @@ -1022,7 +1021,7 @@ _erf_ = generate_layer_fn('erf') def erf(x, name=None): - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.erf(x) locals_var = locals().copy() diff --git a/python/paddle/tensor/random.py b/python/paddle/tensor/random.py index 109dbcdfd97..a8206ff95bf 100644 --- a/python/paddle/tensor/random.py +++ b/python/paddle/tensor/random.py @@ -17,7 +17,8 @@ import paddle from paddle import _C_ops, _legacy_C_ops from paddle.common_ops_import import Variable -from paddle.fluid.framework import _current_expected_place, in_dygraph_mode +from paddle.fluid.framework import _current_expected_place +from paddle.framework import in_dynamic_mode from ..fluid.data_feeder import ( check_dtype, @@ -73,7 +74,7 @@ def bernoulli(x, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.bernoulli(x) else: check_variable_and_dtype(x, "x", ["float32", "float64"], "bernoulli") @@ -119,7 +120,7 @@ def poisson(x, name=None): # [5., 1., 3.]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.poisson(x) else: check_variable_and_dtype(x, "x", ["float32", "float64"], "poisson") @@ -184,7 +185,7 @@ def multinomial(x, num_samples=1, replacement=False, name=None): not core.is_compiled_with_rocm() ), "multinomial op is not supported on ROCM yet." 
- if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.multinomial(x, num_samples, replacement) else: check_variable_and_dtype( @@ -337,7 +338,7 @@ def gaussian(shape, mean=0.0, std=1.0, seed=0, dtype=None, name=None): if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): shape = paddle.utils.convert_shape_to_list(shape) place = _current_expected_place() return _C_ops.gaussian( @@ -525,7 +526,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None): # [1.00780561 3.78457445 5.81058198] # random """ - if not in_dygraph_mode(): + if not in_dynamic_mode(): check_type(mean, 'mean', (int, float, Variable), 'normal') check_type(std, 'std', (int, float, Variable), 'normal') if isinstance(mean, Variable): @@ -563,7 +564,7 @@ def normal(mean=0.0, std=1.0, shape=None, name=None): return gaussian(shape=shape, mean=mean, std=std, name=name) out = out * std + mean - if not in_dygraph_mode(): + if not in_dynamic_mode(): out.stop_grediant = True return out @@ -646,7 +647,7 @@ def uniform(shape, dtype=None, min=-1.0, max=1.0, seed=0, name=None): if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): shape = paddle.utils.convert_shape_to_list(shape) return _C_ops.uniform( shape, @@ -796,7 +797,7 @@ def randint(low=0, high=None, shape=[1], dtype=None, name=None): elif not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): shape = paddle.utils.convert_shape_to_list(shape) place = _current_expected_place() return _C_ops.randint(low, high, shape, dtype, place) @@ -969,7 +970,7 @@ def randint_like(x, low=0, high=None, dtype=None, name=None): f"high = {high}" ) - if in_dygraph_mode(): + if in_dynamic_mode(): shape = paddle.utils.convert_shape_to_list(shape) out = _legacy_C_ops.randint( 'shape', @@ -1047,7 +1048,7 @@ def randperm(n, dtype="int64", name=None): if not isinstance(dtype, core.VarDesc.VarType): dtype = convert_np_dtype_to_dtype_(dtype) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.randperm(n, dtype, _current_expected_place()) else: if n < 1: @@ -1151,7 +1152,7 @@ def exponential_(x, lam=1.0, name=None): # [0.72520673, 0.45208144, 0.30234432]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.exponential_(x, lam) else: check_variable_and_dtype( diff --git a/python/paddle/tensor/search.py b/python/paddle/tensor/search.py index 9fc8e39a9ed..908f0836bf8 100755 --- a/python/paddle/tensor/search.py +++ b/python/paddle/tensor/search.py @@ -25,7 +25,7 @@ from ..framework import ( LayerHelper, convert_np_dtype_to_dtype_, core, - in_dygraph_mode, + in_dynamic_mode, ) # from ..fluid.layers import has_inf #DEFINE_ALIAS @@ -94,7 +94,7 @@ def argsort(x, axis=-1, descending=False, name=None): # [1 1 0 2] # [0 2 1 1]]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): _, ids = _C_ops.argsort(x, axis, descending) return ids else: @@ -186,7 +186,7 @@ def argmax(x, axis=None, keepdim=False, dtype="int64", name=None): flatten = True axis = 0 - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.argmax(x, axis, keepdim, flatten, var_dtype) else: helper = LayerHelper("argmax", **locals()) @@ -276,7 +276,7 @@ def argmin(x, axis=None, keepdim=False, dtype="int64", name=None): flatten = True axis = 0 - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.argmin(x, axis, keepdim, flatten, var_dtype) else: helper = 
LayerHelper("argmin", **locals()) @@ -345,7 +345,7 @@ def index_select(x, index, axis=0, name=None): # [ 9. 10. 10.]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.index_select(x, index, axis) else: helper = LayerHelper("index_select", **locals()) @@ -429,7 +429,7 @@ def nonzero(x, as_tuple=False): shape = x.shape rank = len(shape) - if in_dygraph_mode(): + if in_dynamic_mode(): outs = _C_ops.nonzero(x) else: check_variable_and_dtype( @@ -526,7 +526,7 @@ def sort(x, axis=-1, descending=False, name=None): # [4. 7. 4. 6.] # [5. 7. 7. 9.]]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): outs, _ = _C_ops.argsort(x, axis, descending) return outs else: @@ -577,7 +577,7 @@ def mode(x, axis=-1, keepdim=False, name=None): # [2, 1]])) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.mode(x, axis, keepdim) else: helper = LayerHelper("mode", **locals()) @@ -676,7 +676,7 @@ def where(condition, x=None, y=None, name=None): broadcast_condition = paddle.add(cast_cond, broadcast_zeros) broadcast_condition = paddle.cast(broadcast_condition, 'bool') - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.where(broadcast_condition, broadcast_x, broadcast_y) else: check_variable_and_dtype(condition, 'condition', ['bool'], 'where') @@ -781,7 +781,7 @@ def index_sample(x, index): # [1200 1100]] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.index_sample(x, index) else: helper = LayerHelper("index_sample", **locals()) @@ -836,7 +836,7 @@ def masked_select(x, mask, name=None): #[1.0 5.0 6.0 9.0] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.masked_select(x, mask) else: @@ -907,7 +907,7 @@ def topk(x, k, axis=None, largest=True, sorted=True, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): if axis is None: axis = -1 out, indices = _C_ops.topk(x, k, axis, largest, sorted) @@ -1040,7 +1040,7 @@ def searchsorted( # [1, 3, 4, 5]]) """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.searchsorted(sorted_sequence, values, out_int32, right) else: check_variable_and_dtype( @@ -1107,7 +1107,7 @@ def kthvalue(x, k, axis=None, keepdim=False, name=None): # [[0, 2], # [1, 2]])) """ - if in_dygraph_mode(): + if in_dynamic_mode(): if axis is not None: return _C_ops.kthvalue(x, k, axis, keepdim) else: diff --git a/python/paddle/tensor/stat.py b/python/paddle/tensor/stat.py index c2332da9ea7..fa41f5a93b8 100644 --- a/python/paddle/tensor/stat.py +++ b/python/paddle/tensor/stat.py @@ -16,7 +16,7 @@ import paddle from paddle import _C_ops -from paddle.fluid.framework import in_dygraph_mode +from paddle.framework import in_dynamic_mode from ..common_ops_import import Variable from ..fluid.data_feeder import check_type, check_variable_and_dtype @@ -79,7 +79,7 @@ def mean(x, axis=None, keepdim=False, name=None): out4 = paddle.mean(x, axis=[0, 2]) # [ 8.5 12.5 16.5] """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.mean(x, axis, keepdim) else: reduce_all, axis = _get_reduce_axis_with_tensor(axis, x) @@ -144,7 +144,7 @@ def var(x, axis=None, unbiased=True, keepdim=False, name=None): out2 = paddle.var(x, axis=1) # [1. 4.33333333] """ - if not in_dygraph_mode(): + if not in_dynamic_mode(): check_variable_and_dtype( x, 'x', ['float16', 'float32', 'float64'], 'var' ) @@ -212,7 +212,7 @@ def std(x, axis=None, unbiased=True, keepdim=False, name=None): # [1. 
2.081666] """ - if not in_dygraph_mode(): + if not in_dynamic_mode(): check_variable_and_dtype( x, 'x', ['float16', 'float32', 'float64'], 'std' ) @@ -242,7 +242,7 @@ def numel(x, name=None): """ - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.numel(x) else: if not isinstance(x, Variable): @@ -330,7 +330,7 @@ def nanmedian(x, axis=None, keepdim=True, name=None): if len(axis) != len(set(axis)): raise ValueError("Axis has duplicated elements.") - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.nanmedian(x, axis, keepdim) else: check_variable_and_dtype( @@ -549,7 +549,7 @@ def _compute_quantile(x, q, axis=None, keepdim=False, ignore_nan=False): for q_num in q: if q_num < 0 or q_num > 1: raise ValueError("q should be in range [0, 1]") - if in_dygraph_mode(): + if in_dynamic_mode(): q_num = paddle.to_tensor(q_num, dtype='float64') if ignore_nan: indices.append(q_num * (valid_counts - 1)) diff --git a/python/paddle/text/viterbi_decode.py b/python/paddle/text/viterbi_decode.py index 0fbb2c20a6f..d0e8d120faa 100644 --- a/python/paddle/text/viterbi_decode.py +++ b/python/paddle/text/viterbi_decode.py @@ -12,10 +12,10 @@ # See the License for the specific language governing permissions and # limitations under the License. -from paddle import _C_ops, _legacy_C_ops +from paddle import _C_ops from ..fluid.data_feeder import check_type, check_variable_and_dtype -from ..fluid.framework import _non_static_mode, in_dygraph_mode +from ..fluid.framework import in_dygraph_mode from ..fluid.layer_helper import LayerHelper from ..nn import Layer @@ -62,14 +62,6 @@ def viterbi_decode( potentials, transition_params, lengths, include_bos_eos_tag ) - if _non_static_mode(): - return _legacy_C_ops.viterbi_decode( - potentials, - transition_params, - lengths, - 'include_bos_eos_tag', - include_bos_eos_tag, - ) check_variable_and_dtype( potentials, 'input', ['float32', 'float64'], 'viterbi_decode' ) diff --git a/python/paddle/utils/cpp_extension/extension_utils.py b/python/paddle/utils/cpp_extension/extension_utils.py index f5fbcc35d46..1d6fa487c16 100644 --- a/python/paddle/utils/cpp_extension/extension_utils.py +++ b/python/paddle/utils/cpp_extension/extension_utils.py @@ -1142,13 +1142,13 @@ def _custom_api_content(op_name): API_TEMPLATE = textwrap.dedent( """ import paddle.fluid.core as core - from paddle.fluid.framework import in_dygraph_mode + from paddle.framework import in_dynamic_mode from paddle.fluid.layer_helper import LayerHelper def {op_name}({params_list}): # The output variable's dtype use default value 'float32', # and the actual dtype of output variable will be inferred in runtime. 
- if in_dygraph_mode(): + if in_dynamic_mode(): outs = core.eager._run_custom_op("{op_name}", {params_list}) {dynamic_content} else: diff --git a/python/paddle/utils/dlpack.py b/python/paddle/utils/dlpack.py index af4f9fe6c95..d9821698f08 100644 --- a/python/paddle/utils/dlpack.py +++ b/python/paddle/utils/dlpack.py @@ -16,7 +16,7 @@ import paddle from ..fluid.core import LoDTensor from ..fluid.data_feeder import check_type -from ..fluid.framework import _non_static_mode +from ..fluid.framework import in_dygraph_mode __all__ = [ 'to_dlpack', @@ -48,7 +48,7 @@ def to_dlpack(x): # """ - if _non_static_mode(): + if in_dygraph_mode(): if not isinstance(x, (paddle.Tensor, paddle.fluid.core.eager.Tensor)): raise TypeError( "The type of 'x' in to_dlpack must be paddle.Tensor," @@ -96,7 +96,7 @@ def from_dlpack(dlpack): " but received {}.".format(type(dlpack)) ) - if _non_static_mode(): + if in_dygraph_mode(): out = paddle.fluid.core.from_dlpack(dlpack) out = paddle.to_tensor(out) return out diff --git a/python/paddle/utils/inplace_utils.py b/python/paddle/utils/inplace_utils.py index b56f766f115..e02ddbeb758 100644 --- a/python/paddle/utils/inplace_utils.py +++ b/python/paddle/utils/inplace_utils.py @@ -15,8 +15,8 @@ import warnings import paddle # noqa: F401 -from paddle.fluid.framework import _non_static_mode from paddle.fluid.wrapped_decorator import wrap_decorator +from paddle.framework import in_dynamic_mode # NOTE(pangyoki): The Inplace APIs with underline(`_`) is only valid for the method of calling `_C_ops` @@ -24,7 +24,7 @@ from paddle.fluid.wrapped_decorator import wrap_decorator # of the original API will be called. def _inplace_apis_in_dygraph_only_(func): def __impl__(*args, **kwargs): - if not _non_static_mode(): + if not in_dynamic_mode(): origin_api_name = func.__name__[:-1] warnings.warn( "In static graph mode, {}() is the same as {}() and does not perform inplace operation.".format( diff --git a/python/paddle/utils/layers_utils.py b/python/paddle/utils/layers_utils.py index d04c2bfeac6..fbb0781db03 100644 --- a/python/paddle/utils/layers_utils.py +++ b/python/paddle/utils/layers_utils.py @@ -21,7 +21,7 @@ from weakref import WeakKeyDictionary import paddle from ..fluid.data_feeder import check_dtype, convert_dtype -from ..fluid.framework import Block, Variable, _non_static_mode +from ..fluid.framework import Block, Variable, in_dygraph_mode def convert_to_list(value, n, name, dtype=int): @@ -493,7 +493,7 @@ def try_set_static_shape_tensor(tensor, shape): # (-1, 2) """ - if not _non_static_mode(): + if not in_dygraph_mode(): # static graph mode, and shape is not all inferred (contains -1) if -1 in tensor.shape: if isinstance(shape, Variable): @@ -516,7 +516,7 @@ def try_get_constant_shape_from_tensor(shape_tensor): # (-1, 2) """ - if not _non_static_mode(): + if not in_dygraph_mode(): try: if shape_tensor.op is not None: generate_op = shape_tensor.op diff --git a/test/amp/amp_base_models.py b/test/amp/amp_base_models.py index 29e8234e913..6d08b0d1483 100644 --- a/test/amp/amp_base_models.py +++ b/test/amp/amp_base_models.py @@ -21,7 +21,7 @@ import numpy as np import paddle from paddle import nn from paddle.fluid import core -from paddle.fluid.framework import _non_static_mode +from paddle.framework import in_dynamic_mode def copy_bits_from_float_to_uint16(f): @@ -68,7 +68,7 @@ def _build_optimizer( grad_clip = paddle.nn.ClipGradByGlobalNorm(clip_norm=1.0) else: grad_clip = None - if _non_static_mode(): + if in_dynamic_mode(): assert model is not None parameters = 
model.parameters() else: @@ -82,7 +82,7 @@ def _build_optimizer( epsilon=1e-4, weight_decay=0.01, ) - if not _non_static_mode() and use_amp: + if not in_dynamic_mode() and use_amp: optimizer = paddle.static.amp.decorate( optimizer, amp_lists, @@ -178,7 +178,7 @@ class SimpleConvNet(nn.Layer): def build_conv_model( use_amp, amp_dtype="float16", amp_level="O1", use_promote=False ): - if _non_static_mode(): + if in_dynamic_mode(): model = SimpleConvNet() optimizer = _build_optimizer(use_amp=False, model=model) if use_amp and amp_dtype == "float16": diff --git a/test/auto_parallel/test_to_static.py b/test/auto_parallel/test_to_static.py index 04d2591fb89..2057d509ad1 100644 --- a/test/auto_parallel/test_to_static.py +++ b/test/auto_parallel/test_to_static.py @@ -21,7 +21,7 @@ import paddle.nn.functional as F from paddle import LazyGuard, nn from paddle.distributed.auto_parallel.helper import ProgramHelper, ProxyLayer from paddle.distributed.fleet import auto -from paddle.fluid.framework import _non_static_mode +from paddle.framework import in_dynamic_mode from paddle.io import Dataset from paddle.jit.dy2static.utils import is_paddle_func from paddle.nn import Sequential @@ -144,7 +144,7 @@ class TestToStatic(unittest.TestCase): # inputs = InputSpec([batch_size, hidden_size], 'float32', 'x') # labels = InputSpec([batch_size], 'int64', 'label') - assert _non_static_mode() + assert in_dynamic_mode() engine = auto.Engine( model=mlp, loss=loss, @@ -155,7 +155,7 @@ class TestToStatic(unittest.TestCase): engine.fit(dataset, batch_size=batch_size) engine.evaluate(dataset, batch_size=batch_size) engine.predict(dataset, batch_size=batch_size) - assert not _non_static_mode() + assert not in_dynamic_mode() class TestLazyInit(unittest.TestCase): diff --git a/test/dygraph_to_static/test_convert_call.py b/test/dygraph_to_static/test_convert_call.py index 4b325f6282c..11f947d1832 100644 --- a/test/dygraph_to_static/test_convert_call.py +++ b/test/dygraph_to_static/test_convert_call.py @@ -282,13 +282,13 @@ class TestConvertPaddleAPI(unittest.TestCase): func = paddle.nn.functional.relu func = paddle.jit.to_static(func) self.assertNotIn("_jst.IfElse", func.code) - self.assertIn("if in_dygraph_mode()", func.code) + self.assertIn("if in_dynamic_mode()", func.code) def test_class_api(self): bn = paddle.nn.SyncBatchNorm(2) paddle.jit.to_static(bn) self.assertNotIn("_jst.IfElse", bn.forward.code) - self.assertIn("if in_dygraph_mode()", bn.forward.code) + self.assertIn("if in_dynamic_mode()", bn.forward.code) def test_class_patch_api(self): paddle.nn.SyncBatchNorm.forward = forward diff --git a/test/dygraph_to_static/test_lac.py b/test/dygraph_to_static/test_lac.py index 933fc00d735..ead65802136 100644 --- a/test/dygraph_to_static/test_lac.py +++ b/test/dygraph_to_static/test_lac.py @@ -25,7 +25,7 @@ os.environ["CUDA_VISIBLE_DEVICES"] = "2" import paddle from paddle import _legacy_C_ops, fluid from paddle.fluid.dygraph import to_variable -from paddle.fluid.framework import _non_static_mode +from paddle.framework import in_dynamic_mode from paddle.jit.api import to_static from paddle.jit.translated_layer import INFER_MODEL_SUFFIX, INFER_PARAMS_SUFFIX @@ -172,7 +172,7 @@ class LinearChainCRF(paddle.nn.Layer): self._transition = value def forward(self, input, label, length=None): - if _non_static_mode(): + if in_dynamic_mode(): _, _, _, log_likelihood = _legacy_C_ops.linear_chain_crf( input, self._transition, label, length, "is_test", self._is_test ) @@ -236,7 +236,7 @@ class CRFDecoding(paddle.nn.Layer): 
self._transition = value def forward(self, input, label=None, length=None): - if _non_static_mode(): + if in_dynamic_mode(): return _legacy_C_ops.crf_decoding( input, self._transition, label, length, "is_test", self._is_test ) @@ -272,7 +272,7 @@ class ChunkEval(paddle.nn.Layer): self.excluded_chunk_types = excluded_chunk_types def forward(self, input, label, seq_length=None): - if _non_static_mode(): + if in_dynamic_mode(): return _legacy_C_ops.chunk_eval( input, label, diff --git a/test/ir/inference/test_trt_multiclass_nms3_op.py b/test/ir/inference/test_trt_multiclass_nms3_op.py index 6ad37b18985..9b83863a87e 100644 --- a/test/ir/inference/test_trt_multiclass_nms3_op.py +++ b/test/ir/inference/test_trt_multiclass_nms3_op.py @@ -22,8 +22,8 @@ import paddle from paddle import fluid from paddle.fluid import core from paddle.fluid.core import AnalysisConfig, PassVersionChecker -from paddle.fluid.framework import in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode from paddle.static import nn @@ -131,7 +131,7 @@ def multiclass_nms( normalized=False, return_index=True) """ - if in_dygraph_mode(): + if in_dynamic_mode(): attrs = ( 'background_label', background_label, diff --git a/test/legacy_test/test_dlpack.py b/test/legacy_test/test_dlpack.py index dbfd08316c5..d908b316c06 100644 --- a/test/legacy_test/test_dlpack.py +++ b/test/legacy_test/test_dlpack.py @@ -27,7 +27,7 @@ class TestDLPack(unittest.TestCase): tensor = paddle.to_tensor(np.array([1, 2, 3, 4]).astype('int')) dlpack = paddle.utils.dlpack.to_dlpack(tensor) out_from_dlpack = paddle.utils.dlpack.from_dlpack(dlpack) - if paddle.fluid.framework.in_dygraph_mode(): + if paddle.in_dynamic_mode(): self.assertTrue( isinstance(out_from_dlpack, paddle.fluid.core.eager.Tensor) ) diff --git a/test/prim/composite_ops/test_composite_layer_norm.py b/test/prim/composite_ops/test_composite_layer_norm.py index f56dbea1556..31893397abd 100644 --- a/test/prim/composite_ops/test_composite_layer_norm.py +++ b/test/prim/composite_ops/test_composite_layer_norm.py @@ -20,8 +20,8 @@ from utils import SUB_TOLERANCE import paddle from paddle import _C_ops from paddle.fluid import core, framework -from paddle.fluid.framework import in_dygraph_mode from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode from paddle.incubate.autograd import primapi from paddle.nn import LayerNorm @@ -56,7 +56,7 @@ def layer_norm_wrapper( + str(input_shape) ) - if in_dygraph_mode(): + if in_dynamic_mode(): return _C_ops.layer_norm(x, weight, bias, epsilon, begin_norm_axis) else: diff --git a/test/tokenizer/test_faster_tokenizer_op.py b/test/tokenizer/test_faster_tokenizer_op.py index 37bb09a514a..a5abbef8ab6 100755 --- a/test/tokenizer/test_faster_tokenizer_op.py +++ b/test/tokenizer/test_faster_tokenizer_op.py @@ -21,8 +21,9 @@ from bert_tokenizer import BertTokenizer import paddle from paddle import _legacy_C_ops, nn -from paddle.fluid.framework import _non_static_mode, core +from paddle.fluid.framework import core from paddle.fluid.layer_helper import LayerHelper +from paddle.framework import in_dynamic_mode def to_string_tensor(string_values, name): @@ -77,7 +78,7 @@ class FasterTokenizer(nn.Layer): is_split_into_words=False, pad_to_max_seq_len=False, ): - if _non_static_mode(): + if in_dynamic_mode(): input_ids, seg_ids = _legacy_C_ops.faster_tokenizer( self.vocab, text, -- GitLab
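For readers less familiar with Paddle internals, below is a minimal hand-written sketch (not part of the patch, placed after the signature delimiter so it does not affect `git am`) of the dispatch pattern these hunks converge on, modeled on the `tanh` hunk in python/paddle/tensor/ops.py: when in_dynamic_mode() is true the eager path calls the C++ op directly, otherwise the static-graph path validates dtypes and appends an operator through LayerHelper. The static-graph body is reconstructed from the surrounding hunks and abbreviated, so treat it as illustrative rather than a verbatim copy of the file; the dtype list in particular is an assumption.

    from paddle import _C_ops
    from paddle.fluid.data_feeder import check_variable_and_dtype
    from paddle.framework import LayerHelper, in_dynamic_mode

    def tanh_like(x, name=None):
        # Eager (dygraph) path: after this patch the single switch is
        # in_dynamic_mode(), and the C++ kernel is invoked directly.
        if in_dynamic_mode():
            return _C_ops.tanh(x)
        else:
            # Static-graph path: validate the input dtype, then append a
            # 'tanh' operator to the current program via LayerHelper.
            # The accepted dtypes here are assumed for illustration.
            check_variable_and_dtype(
                x, 'x', ['float16', 'float32', 'float64'], 'tanh'
            )
            helper = LayerHelper('tanh', **locals())
            out = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type='tanh', inputs={'X': x}, outputs={'Out': out}
            )
            return out

Under paddle.enable_static() the else branch runs and builds the program; in the default eager mode the first branch runs, which is why replacing in_dygraph_mode()/_non_static_mode() with in_dynamic_mode() throughout is a behavior-preserving rename of that single switch.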