diff --git a/paddle/fluid/pybind/eager_method.cc b/paddle/fluid/pybind/eager_method.cc
index 977d2931cc4df8577edfb41f96466c55d30b8e99..e7978b8e23ac49e0d542abf96634f28961b6bdfd 100644
--- a/paddle/fluid/pybind/eager_method.cc
+++ b/paddle/fluid/pybind/eager_method.cc
@@ -825,8 +825,7 @@ static PyObject* tensor__getitem_index_not_tensor(TensorObject* self,
       }
       paddle::experimental::Tensor new_out;
-      framework::AttributeMap attrs = {{"axes", none_axes}};
-      new_out =
-          std::get<0>(unsqueeze2_dygraph_function(out, std::move(attrs)));
+      new_out = unsqueeze_final_state_dygraph_function(out, none_axes);
       return ToPyObject(new_out);
     }
   }
diff --git a/paddle/phi/api/yaml/legacy_api.yaml b/paddle/phi/api/yaml/legacy_api.yaml
index 4822994fef00891b4ac7032b7620acfb41917fb4..8b74c62cf435123e69f1e46bfb258e5c095c9077 100755
--- a/paddle/phi/api/yaml/legacy_api.yaml
+++ b/paddle/phi/api/yaml/legacy_api.yaml
@@ -276,7 +276,7 @@
     func : assign_value
     param : [shape, dtype, values]
     data_type : dtype
-    backend : place > output
+    backend : place > output
 
 # atan
 - api : atan
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index 4ba496f4499cdae3ced3ffc9754950e075d2056b..87b5f8265d3ee85a206c8ddbbe04a3dcfd28a550 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -2614,16 +2614,22 @@ void SliceRawInferMeta(const MetaTensor& input,
     // To be compatible with other op tests in which infer_flags is not set.
     infer_flags = std::vector<int64_t>(axes.size(), 1);
   }
+  auto new_axes = axes;
+  for (auto& axis : new_axes) {
+    if (axis < 0) {
+      axis = std::max(int64_t(0), axis + int64_t(in_dims.size()));
+    }
+  }
 
   // 2.1 Check attrs.
   std::vector<int64_t> starts = starts_arr.GetData();
   std::vector<int64_t> ends = ends_arr.GetData();
 
   phi::funcs::CheckAndUpdateSliceAttrs<int64_t>(
-      in_dims, axes, &starts, &ends, nullptr, &infer_flags);
+      in_dims, new_axes, &starts, &ends, nullptr, &infer_flags);
 
   auto slice_dims = phi::funcs::GetSliceDims<int64_t>(
-      in_dims, axes, starts, ends, nullptr, &infer_flags);
+      in_dims, new_axes, starts, ends, nullptr, &infer_flags);
   if (config.is_runtime) {
     out_dims = phi::funcs::GetDecreasedDims<int64_t>(
         slice_dims, decrease_axis, &infer_flags);
@@ -2633,7 +2639,7 @@ void SliceRawInferMeta(const MetaTensor& input,
   }
 
   out->set_dims(out_dims);
-  if (axes.size() > 0 && axes[0] != 0) {
+  if (new_axes.size() > 0 && new_axes[0] != 0) {
     out->share_lod(input);
   }
 }
@@ -2662,6 +2668,13 @@ void SplitInferMeta(const MetaTensor& x,
                     const Scalar& axis,
                     std::vector<MetaTensor*> out,
                     MetaConfig config) {
+  if (axis.dtype() == DataType::FLOAT32 || axis.dtype() == DataType::FLOAT64) {
+    PADDLE_THROW(
+        phi::errors::InvalidArgument("%s(): argument (position 3) must be "
+                                     "int, but got %s",
+                                     "split",
+                                     "float"));  // NOLINT
+  }
   int axis_value = axis.to<int>();
   int rank = x.dims().size();
   PADDLE_ENFORCE_EQ(
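The `SliceRawInferMeta` hunk above normalizes negative axes before `CheckAndUpdateSliceAttrs` runs, so shape inference agrees with the kernel when callers pass axes such as `-1`. A minimal sketch of the equivalent logic in Python (the helper name is illustrative, not part of the patch):

```python
def normalize_axes(axes, rank):
    # A negative axis counts from the end; clamp at 0, mirroring
    # std::max(int64_t(0), axis + int64_t(in_dims.size())) in the C++ above.
    return [max(0, axis + rank) if axis < 0 else axis for axis in axes]

assert normalize_axes([-1, 1], rank=3) == [2, 1]
```
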
diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index a7b399307e1fd8d4a2cfe519aaf758a8839c747e..1a59d2f95fd4d307410df9b3a10d5adf09e55a75 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -290,6 +290,8 @@ def monkey_patch_math_varbase():
                 axis = -1
             math_op = getattr(_C_ops, op_type)
         if call_final_api:
+            if op_type == "final_state_matmul":
+                return math_op(self, other_var, False, False)
             return math_op(self, other_var, -1)
         return math_op(self, other_var, 'axis', axis)
 
@@ -385,10 +387,16 @@ def monkey_patch_math_varbase():
                              None)),
         ('__floordiv__', _binary_creator_('__floordiv__',
                                           'elementwise_floordiv', False, None)),
-        ('__mod__', _binary_creator_('__mod__', 'elementwise_mod', False,
-                                     None)),
-        ('__matmul__', _binary_creator_('__matmul__', "matmul_v2", False,
-                                        None)),
+        ('__mod__',
+         _binary_creator_('__mod__', 'final_state_modulo', False, None, True))
+        if framework._in_eager_mode_ else
+        ('__mod__',
+         _binary_creator_('__mod__', 'elementwise_mod', False, None)),
+        ('__matmul__',
+         _binary_creator_('__matmul__', "final_state_matmul", False, None,
+                          True)) if framework._in_eager_mode_ else
+        ('__matmul__',
+         _binary_creator_('__matmul__', "matmul_v2", False, None)),
         ## for logical compare
         ('__eq__', _binary_creator_('__eq__', 'final_state_equal', False, None,
                                     True))
diff --git a/python/paddle/fluid/initializer.py b/python/paddle/fluid/initializer.py
index 4ffaedee55e59f9ba6b484ab40cc4a19fd4a13a0..0e636d8f72dfc65de87a1ea7d3e05adc360e12cd 100644
--- a/python/paddle/fluid/initializer.py
+++ b/python/paddle/fluid/initializer.py
@@ -23,6 +23,7 @@ from .core import VarDesc
 from . import unique_name
 from .data_feeder import check_variable_and_dtype, check_type, check_dtype
 from paddle import _C_ops
+import paddle
 
 __all__ = [
     'Constant', 'Uniform', 'Normal', 'TruncatedNormal', 'Xavier', 'Bilinear',
@@ -599,9 +600,15 @@ class XavierInitializer(Initializer):
         if framework._non_static_mode():
             if self._uniform:
                 limit = math.sqrt(6.0 / float(fan_in + fan_out))
-                out_var = _C_ops.uniform_random('shape', out_var.shape, 'min',
-                                                -limit, 'max', limit, 'seed',
-                                                self._seed, 'dtype', out_dtype)
+                if in_dygraph_mode():
+                    out_var = _C_ops.final_state_uniform_random(
+                        out_var.shape, out_dtype, -limit, limit, self._seed,
+                        _current_expected_place())
+                elif _in_legacy_dygraph():
+                    out_var = _C_ops.uniform_random('shape', out_var.shape,
+                                                    'min', -limit, 'max', limit,
+                                                    'seed', self._seed, 'dtype',
+                                                    out_dtype)
             else:
                 std = math.sqrt(2.0 / float(fan_in + fan_out))
 
@@ -617,8 +624,11 @@ class XavierInitializer(Initializer):
             if var.dtype == VarDesc.VarType.FP16 or (
                     var.dtype == VarDesc.VarType.BF16 and not self._uniform):
-                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
-                                      'out_dtype', var.dtype)
+                if in_dygraph_mode():
+                    var_tmp = _C_ops.final_state_cast(out_var, var.dtype)
+                elif _in_legacy_dygraph():
+                    var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
+                                          'out_dtype', var.dtype)
                 var_tmp._share_underline_tensor_to(var)
             else:
                 out_var._share_underline_tensor_to(var)
 
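With `__matmul__` and `__mod__` now bound to final-state ops under `_in_eager_mode_`, the operators route to `final_state_matmul` (which takes explicit `transpose_x`/`transpose_y` flags, passed as `False, False` above) and `final_state_modulo` with no user-facing change. A small usage sketch, assuming an eager-mode session:

```python
import paddle

x = paddle.randn([2, 3])
y = paddle.randn([3, 4])

out = x @ y  # eager mode dispatches to final_state_matmul(x, y, False, False)
rem = paddle.to_tensor([5.0]) % paddle.to_tensor([3.0])  # final_state_modulo path
print(out.shape)  # [2, 4]
print(rem)        # [2.]
```
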
diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py
index 3b341d793667621b60b072166b6f97b996ec00e2..cdffb6fb37fb862aac95b4a869b22fed62a74c1c 100644
--- a/python/paddle/fluid/tests/unittests/test_slice_op.py
+++ b/python/paddle/fluid/tests/unittests/test_slice_op.py
@@ -21,7 +21,7 @@ from op_test import OpTest, convert_float_to_uint16
 import paddle.fluid as fluid
 import paddle.fluid.layers as layers
 import paddle
-from paddle.fluid.framework import _test_eager_guard
+from paddle.fluid.framework import _test_eager_guard, _enable_legacy_dygraph
 
 paddle.enable_static()
 
@@ -787,7 +787,6 @@ class TestInferShape(unittest.TestCase):
         self.assertEqual(out0.shape, (3, 3, 5))
 
     def test_axis_less_than_zero(self):
-        # Using paddle.disable_static will make other unittests fail.
         with fluid.dygraph.guard():
             x_arr = np.arange(0, 24, dtype=np.float32).reshape([2, 3, 4])
 
@@ -829,6 +828,7 @@
 class TestImperativeCUDAPinnedInput(unittest.TestCase):
 
     def test_input_cuda_pinned_var(self):
+        _enable_legacy_dygraph()
         with fluid.dygraph.guard():
             data = np.random.random((2, 80, 16128)).astype('float32')
             var = core.VarBase(value=data,
diff --git a/python/paddle/fluid/variable_index.py b/python/paddle/fluid/variable_index.py
index e24a6a3aff7ffcf1917c0354bebc4a818f18a67c..9038e10658177ffe4df5766b5691674b95df01eb 100644
--- a/python/paddle/fluid/variable_index.py
+++ b/python/paddle/fluid/variable_index.py
@@ -490,18 +490,31 @@ def _getitem_impl_(var, item):
 
     out = var
     if len(axes) > 0:
-        target_block = default_main_program().current_block()
         op_type = "strided_slice" if use_strided_slice else "slice"
-
-        slice_out_var = target_block.create_var(
-            name=unique_name.generate_with_ignorable_key(var.name + "_" +
-                                                         op_type),
-            dtype=var.dtype)
-        target_block.append_op(type=op_type,
-                               inputs=inputs,
-                               outputs={'Out': [slice_out_var]},
-                               attrs=attrs)
-        out = slice_out_var
+        if paddle.fluid.framework.in_dygraph_mode() and op_type == "slice":
+            if "StartsTensorList" in inputs.keys():
+                st = inputs['StartsTensorList']
+            else:
+                st = attrs['starts']
+            if "EndsTensorList" in inputs.keys():
+                end = inputs['EndsTensorList']
+            else:
+                end = attrs['ends']
+            out = paddle._C_ops.final_state_slice(var, axes, st, end,
+                                                  attrs['infer_flags'],
+                                                  attrs['decrease_axis'])
+        else:
+            target_block = default_main_program().current_block()
+
+            slice_out_var = target_block.create_var(
+                name=unique_name.generate_with_ignorable_key(var.name + "_" +
+                                                             op_type),
+                dtype=var.dtype)
+            target_block.append_op(type=op_type,
+                                   inputs=inputs,
+                                   outputs={'Out': [slice_out_var]},
+                                   attrs=attrs)
+            out = slice_out_var
 
     if len(reverse_axes) > 0:
         from .layers.tensor import reverse
diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py
index 373186096bda0ccb9973205c5fd808ceb4241859..8281cf37c4f67c13c7b918ee7699006841ed51af 100644
--- a/python/paddle/nn/functional/activation.py
+++ b/python/paddle/nn/functional/activation.py
@@ -964,7 +964,9 @@ def silu(x, name=None):
             out = F.silu(x) # [ 0.731059, 1.761594, 2.857722, 3.928055 ]
     """
 
-    if in_dynamic_mode():
+    if in_dygraph_mode():
+        return _C_ops.final_state_silu(x)
+    if _in_legacy_dygraph():
         return _C_ops.silu(x)
 
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'silu')
diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py
index e40731b828d978ab6ff2460a9fa413b537d218cd..f9c7b5f8e6b172c1f4dd05792512e28e9f6239eb 100644
--- a/python/paddle/nn/functional/norm.py
+++ b/python/paddle/nn/functional/norm.py
@@ -82,7 +82,7 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
     if in_dygraph_mode():
         eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
         out = _C_ops.final_state_p_norm(x, float(p), axis, epsilon, True, False)
-        return x / _C_ops.elementwise_max(out, eps)
+        return x / _C_ops.final_state_maximum(out, eps)
 
     if _in_legacy_dygraph():
         eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
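The `normalize` hunk swaps the legacy `elementwise_max` for `final_state_maximum` in the eager branch; the computation is unchanged, `x / max(||x||_p, epsilon)`. A quick behavioral check, assuming eager mode:

```python
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[3.0, 4.0]])
# ||(3, 4)||_2 = 5, so the row normalizes to [0.6, 0.8]
out = F.normalize(x, p=2, axis=1)
print(out.numpy())  # [[0.6 0.8]]
```
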
diff --git a/python/paddle/nn/layer/norm.py b/python/paddle/nn/layer/norm.py
index b9081d0c8e682370d0bf478636e4e28abf61d999..d637c6dff8d218429f4c47c6537f78c83ad10bcc 100644
--- a/python/paddle/nn/layer/norm.py
+++ b/python/paddle/nn/layer/norm.py
@@ -49,7 +49,7 @@ from .. import functional as F
 from paddle import _C_ops
 from .. import Layer
 from paddle import in_dynamic_mode
-from paddle.fluid.framework import in_dygraph_mode
+from paddle.fluid.framework import in_dygraph_mode, _in_legacy_dygraph
 
 __all__ = []
 
@@ -411,7 +411,15 @@
         variance_out = self._helper.create_variable_for_type_inference(
             dtype=input.dtype, stop_gradient=True)
 
-        if _non_static_mode():
+        if in_dygraph_mode():
+            pre_act = _C_ops.final_state_group_norm(input, self.weight,
+                                                    self.bias, self._epsilon,
+                                                    self._num_groups, "NCHW")
+
+            return dygraph_utils._append_activation_in_dygraph(pre_act,
+                                                               act=None)
+
+        elif _in_legacy_dygraph():
             pre_act, _, _ = _C_ops.group_norm(
                 input,
                 self.weight,
diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py
index eb264723e797f4c528adc3aeda3d14c8172b615a..b58ba75270fee92095914d63362b5885ea2d5724 100755
--- a/python/paddle/tensor/manipulation.py
+++ b/python/paddle/tensor/manipulation.py
@@ -197,10 +197,9 @@ def slice(input, axes, starts, ends):
                 if isinstance(item, tmp_tensor_type) else item
                 for item in starts
             ]
-            attrs += ('starts', starts)
         elif isinstance(starts, tmp_tensor_type):
-            starts_tensor = starts
-            starts.stop_gradient = True
+            tensor_t = starts.numpy()
+            starts = [ele for ele in tensor_t]
             infer_flags = list(-1 for i in range(len(axes)))
 
         if isinstance(ends, (list, tuple)):
@@ -208,13 +207,13 @@ def slice(input, axes, starts, ends):
                 item.numpy().item(0)
                 if isinstance(item, tmp_tensor_type) else item
                 for item in ends
             ]
-            attrs += ('ends', ends)
         elif isinstance(ends, tmp_tensor_type):
-            ends_tensor = ends
-            ends_tensor.stop_gradient = True
+            etensor_t = ends.numpy()
+            ends = [ele for ele in etensor_t]
             infer_flags = list(-1 for i in range(len(axes)))
 
-        return _C_ops.slice(input, starts_tensor, ends_tensor, None, None,
-                            'axes', axes, 'infer_flags', infer_flags, *attrs)
+
+        return _C_ops.final_state_slice(input, axes, starts, ends, infer_flags,
+                                        [])
     else:
         if _in_legacy_dygraph():
             attrs = ()
@@ -1817,9 +1816,14 @@ def split(x, num_or_sections, axis=0, name=None):
             raise TypeError(
                 "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but "
                 "received %s." % (type(num_or_sections)))
-        out = [_varbase_creator() for n in range(num)]
-        _C_ops.split(input, out, *attrs)
-        return out
+        if in_dygraph_mode():
+            return _C_ops.final_state_split(
+                input, [num_or_sections]
+                if isinstance(num_or_sections, int) else num_or_sections, dim)
+        elif _in_legacy_dygraph():
+            out = [_varbase_creator() for n in range(num)]
+            _C_ops.split(input, out, *attrs)
+            return out
 
     check_variable_and_dtype(input, 'input', [
         'bool', 'float16', 'float32', 'float64', 'int32', 'int64', 'uint8',
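The `split` hunk wraps an integer `num_or_sections` into a single-element list before calling `final_state_split`, and the new `SplitInferMeta` check rejects a float `axis` with an `InvalidArgument` error instead of silently truncating it. A usage sketch of the two accepted forms, assuming eager mode:

```python
import paddle

x = paddle.randn([6, 4])
# An int num_or_sections is passed on as [num_or_sections]; both forms
# below take the same final-state path in eager mode.
a, b, c = paddle.split(x, num_or_sections=3, axis=0)    # three [2, 4] pieces
d, e = paddle.split(x, num_or_sections=[2, 4], axis=0)  # shapes [2, 4], [4, 4]
```
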