Unverified commit 140d786d, authored by 姜永久, committed by GitHub

rm in_legacy_dygraph python/paddle/nn/functional/ part1 (#49258)

* rm in_legacy_dygraph nn part1

* rm non_static_mode

* modify rrelu
Parent 861fef52
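For readers skimming the diff, the hunks below all apply the same mechanical refactor: drop the `_in_legacy_dygraph()` / `_non_static_mode()` branches and their `_legacy_C_ops` calls, keep the `in_dygraph_mode()` fast path, and move the static-graph code under an explicit `else:`. The following is a minimal before/after sketch distilled from the `relu` hunk in this diff; the function names `relu_before` / `relu_after` are illustrative only and not part of the PR.

```python
# Minimal sketch of the pattern applied throughout this PR. The imports are
# the ones the touched modules already use; relu_before/relu_after are
# illustrative names, not functions in the codebase.
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper


def relu_before(x, name=None):
    # Old layout: a dedicated legacy-dygraph branch next to the new dygraph one.
    if in_dygraph_mode():
        return _C_ops.relu(x)
    if _in_legacy_dygraph():  # this is the branch the PR removes
        return _legacy_C_ops.relu(x)
    check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
    helper = LayerHelper('relu', **locals())
    out = helper.create_variable_for_type_inference(x.dtype)
    helper.append_op(type='relu', inputs={'X': x}, outputs={'Out': out})
    return out


def relu_after(x, name=None):
    # New layout: only the dygraph fast path, with the static-graph path
    # moved under an explicit else.
    if in_dygraph_mode():
        return _C_ops.relu(x)
    else:
        check_variable_and_dtype(
            x, 'x', ['float16', 'float32', 'float64'], 'relu'
        )
        helper = LayerHelper('relu', **locals())
        out = helper.create_variable_for_type_inference(x.dtype)
        helper.append_op(type='relu', inputs={'X': x}, outputs={'Out': out})
        return out
```

The same shape is applied to the activation, conv, dropout, pad, linear, distance, and norm hunks further down.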
......@@ -18,11 +18,7 @@ from paddle.framework import core
from paddle.utils.inplace_utils import inplace_apis_in_dygraph_only
from ...fluid.data_feeder import check_dtype, check_variable_and_dtype
from ...fluid.framework import (
_in_legacy_dygraph,
convert_np_dtype_to_dtype_,
in_dygraph_mode,
)
from ...fluid.framework import convert_np_dtype_to_dtype_, in_dygraph_mode
from ...fluid.layer_helper import LayerHelper
from ...tensor.manipulation import chunk
from ...tensor.math import tanh # noqa: F401
......@@ -62,13 +58,12 @@ def celu(x, alpha=1.0, name=None):
"""
if alpha == 0:
raise ZeroDivisionError("alpha cannot be 0 for celu")
if _in_legacy_dygraph():
return _legacy_C_ops.celu(x, 'alpha', alpha)
if in_dygraph_mode():
return _C_ops.celu(x, alpha)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'celu')
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'celu'
)
helper = LayerHelper("celu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
......@@ -117,10 +112,10 @@ def elu(x, alpha=1.0, name=None):
if in_dygraph_mode():
return _C_ops.elu(x, alpha)
if _in_legacy_dygraph():
return _legacy_C_ops.elu(x, 'alpha', alpha)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'elu')
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'elu'
)
helper = LayerHelper("elu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
......@@ -187,11 +182,10 @@ def gelu(x, approximate=False, name=None):
if in_dygraph_mode():
return _C_ops.gelu(x, approximate)
if _in_legacy_dygraph():
return _legacy_C_ops.gelu(x, 'approximate', approximate)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'gelu')
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'gelu'
)
helper = LayerHelper("gelu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
......@@ -238,10 +232,7 @@ def hardshrink(x, threshold=0.5, name=None):
"""
if in_dygraph_mode():
return _C_ops.hardshrink(x, threshold)
if _in_legacy_dygraph():
return _legacy_C_ops.hard_shrink(x, 'threshold', threshold)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'hardshrink'
)
......@@ -292,10 +283,7 @@ def hardtanh(x, min=-1.0, max=1.0, name=None):
if in_dygraph_mode():
return _C_ops.hardtanh(x, min, max)
if _in_legacy_dygraph():
return _legacy_C_ops.brelu(x, 't_min', min, 't_max', max)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'hardtanh'
)
......@@ -349,10 +337,7 @@ def hardsigmoid(x, slope=0.1666667, offset=0.5, name=None):
if in_dygraph_mode():
return _C_ops.hardsigmoid(x, slope, offset)
if _in_legacy_dygraph():
return _legacy_C_ops.hard_sigmoid(x, 'slope', slope, 'offset', offset)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'hardsigmoid'
)
......@@ -401,19 +386,18 @@ def hardswish(x, name=None):
x = paddle.to_tensor([-4., 5., 1.])
out = F.hardswish(x) # [0., 5., 0.666667]
"""
if _in_legacy_dygraph():
return _legacy_C_ops.hard_swish(x)
if in_dygraph_mode():
return _C_ops.hardswish(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'hardswish'
)
helper = LayerHelper('hardswish', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='hard_swish', inputs={'X': x}, outputs={'Out': out})
helper.append_op(
type='hard_swish', inputs={'X': x}, outputs={'Out': out}
)
return out
......@@ -453,10 +437,7 @@ def leaky_relu(x, negative_slope=0.01, name=None):
"""
if in_dygraph_mode():
return _C_ops.leaky_relu(x, negative_slope)
if _in_legacy_dygraph():
return _legacy_C_ops.leaky_relu(x, 'alpha', negative_slope)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'leaky_relu'
)
......@@ -559,11 +540,7 @@ def prelu(x, weight, data_format="NCHW", name=None):
if in_dygraph_mode():
return _C_ops.prelu(x, weight, data_format, mode)
if _in_legacy_dygraph():
return _legacy_C_ops.prelu(
x, weight, 'mode', mode, 'data_format', data_format
)
else:
helper = LayerHelper('prelu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
......@@ -681,12 +658,12 @@ def rrelu(x, lower=1.0 / 8.0, upper=1.0 / 3.0, training=True, name=None):
is_test = not training
if _in_legacy_dygraph():
if in_dygraph_mode():
out, noise = _legacy_C_ops.rrelu(
x, 'lower', lower, 'upper', upper, 'is_test', is_test
)
return out
else:
helper = LayerHelper('rrelu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
noise = helper.create_variable_for_type_inference(dtype=x.dtype)
......@@ -729,9 +706,10 @@ def relu(x, name=None):
if in_dygraph_mode():
return _C_ops.relu(x)
if _in_legacy_dygraph():
return _legacy_C_ops.relu(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'relu')
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'relu'
)
helper = LayerHelper('relu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='relu', inputs={'X': x}, outputs={'Out': out})
......@@ -744,10 +722,7 @@ def relu_(x, name=None):
Inplace version of ``relu`` API, the output Tensor will be inplaced with input ``x``.
Please refer to :ref:`api_nn_cn_relu`.
"""
if in_dygraph_mode():
return _C_ops.relu_(x)
if _in_legacy_dygraph():
return _legacy_C_ops.relu_(x)
def log_sigmoid(x, name=None):
......@@ -777,16 +752,15 @@ def log_sigmoid(x, name=None):
if in_dygraph_mode():
return _C_ops.logsigmoid(x)
if _in_legacy_dygraph():
return _legacy_C_ops.logsigmoid(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'log_sigmoid'
)
helper = LayerHelper("log_sigmoid", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='logsigmoid', inputs={'X': x}, outputs={'Out': out})
helper.append_op(
type='logsigmoid', inputs={'X': x}, outputs={'Out': out}
)
return out
......@@ -844,10 +818,9 @@ def maxout(x, groups, axis=1, name=None):
# [0.95313174 0.6228939 0.7129065 0.7087491 ]
# [0.7142536 0.88725346 0.61093384 0.38833922]]]]
"""
if _in_legacy_dygraph():
return _legacy_C_ops.maxout(x, 'groups', groups, 'axis', axis)
if in_dygraph_mode():
return _C_ops.maxout(x, groups, axis)
else:
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
if axis not in [1, -1, 3]:
raise ValueError(
......@@ -963,10 +936,10 @@ def selu(
if in_dygraph_mode():
return _C_ops.selu(x, scale, alpha)
if _in_legacy_dygraph():
return _legacy_C_ops.selu(x, 'scale', scale, 'alpha', alpha)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'selu')
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'selu'
)
helper = LayerHelper('selu', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
......@@ -1007,10 +980,10 @@ def silu(x, name=None):
if in_dygraph_mode():
return _C_ops.silu(x)
if _in_legacy_dygraph():
return _legacy_C_ops.silu(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'silu')
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'silu'
)
helper = LayerHelper("silu", **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='silu', inputs={'X': x}, outputs={'Out': out})
......@@ -1132,22 +1105,11 @@ def softmax(x, axis=-1, dtype=None, name=None):
if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
dtype = convert_np_dtype_to_dtype_(dtype)
use_cudnn = True
if in_dygraph_mode():
outs_cast = x if dtype is None else _C_ops.cast(x, dtype)
return _C_ops.softmax(outs_cast, axis)
if _in_legacy_dygraph():
outs_cast = (
x
if dtype is None
else _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
)
return _legacy_C_ops.softmax(
outs_cast, 'axis', axis, 'use_cudnn', use_cudnn
)
else:
use_cudnn = True
if dtype is None:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'softmax'
......@@ -1172,7 +1134,9 @@ def softmax(x, axis=-1, dtype=None, name=None):
attrs={'in_dtype': x.dtype, 'out_dtype': dtype},
)
outs_softmax = helper.create_variable_for_type_inference(outs_cast.dtype)
outs_softmax = helper.create_variable_for_type_inference(
outs_cast.dtype
)
helper.append_op(
type='softmax',
inputs={'X': outs_cast},
......@@ -1191,9 +1155,6 @@ def softmax_(x, axis=-1, dtype=None, name=None):
"""
if (dtype is not None) and (not isinstance(dtype, core.VarDesc.VarType)):
dtype = convert_np_dtype_to_dtype_(dtype)
use_cudnn = True
if in_dygraph_mode():
outs_cast = (
x
if dtype is None
......@@ -1201,16 +1162,6 @@ def softmax_(x, axis=-1, dtype=None, name=None):
)
return _C_ops.softmax_(outs_cast, axis)
if _in_legacy_dygraph():
outs_cast = (
x
if dtype is None
else _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
)
return _legacy_C_ops.softmax_(
outs_cast, 'axis', axis, 'use_cudnn', use_cudnn
)
def softplus(x, beta=1, threshold=20, name=None):
r"""
......@@ -1243,10 +1194,7 @@ def softplus(x, beta=1, threshold=20, name=None):
if in_dygraph_mode():
return _C_ops.softplus(x, beta, threshold)
if _in_legacy_dygraph():
return _legacy_C_ops.softplus(x, 'beta', beta, 'threshold', threshold)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'softplus'
)
......@@ -1305,9 +1253,7 @@ def softshrink(x, threshold=0.5, name=None):
if in_dygraph_mode():
return _C_ops.softshrink(x, threshold)
if _in_legacy_dygraph():
return _legacy_C_ops.softshrink(x, 'lambda', threshold)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'softshrink'
)
......@@ -1392,14 +1338,17 @@ def swish(x, name=None):
"""
if in_dygraph_mode():
return _C_ops.swish(x)
if _in_legacy_dygraph():
return _legacy_C_ops.swish(x, 'beta', 1.0)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'swish')
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'swish'
)
helper = LayerHelper('swish', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(
type='swish', inputs={'X': x}, outputs={'Out': out}, attrs={'beta': 1.0}
type='swish',
inputs={'X': x},
outputs={'Out': out},
attrs={'beta': 1.0},
)
return out
......@@ -1435,10 +1384,10 @@ def mish(x, name=None):
"""
if in_dygraph_mode():
return _C_ops.mish(x, 20)
if _in_legacy_dygraph():
return _legacy_C_ops.mish(x)
check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'mish')
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'mish'
)
helper = LayerHelper('mish', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='mish', inputs={'X': x}, outputs={'Out': out})
......@@ -1474,16 +1423,15 @@ def tanhshrink(x, name=None):
"""
if in_dygraph_mode():
return _C_ops.tanh_shrink(x)
if _in_legacy_dygraph():
return _legacy_C_ops.tanh_shrink(x)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'tanhshrink'
)
helper = LayerHelper('tanh_shrink', **locals())
out = helper.create_variable_for_type_inference(x.dtype)
helper.append_op(type='tanh_shrink', inputs={'X': x}, outputs={'Out': out})
helper.append_op(
type='tanh_shrink', inputs={'X': x}, outputs={'Out': out}
)
return out
......@@ -1525,10 +1473,7 @@ def thresholded_relu(x, threshold=1.0, name=None):
if in_dygraph_mode():
return _C_ops.thresholded_relu(x, threshold)
if _in_legacy_dygraph():
return _legacy_C_ops.thresholded_relu(x, 'threshold', threshold)
else:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'thresholded_relu'
)
......@@ -1605,12 +1550,7 @@ def log_softmax(x, axis=-1, dtype=None, name=None):
if dtype is not None:
x = _C_ops.cast(x, dtype)
return _C_ops.log_softmax(x, axis)
if _in_legacy_dygraph():
if dtype is not None:
x = _legacy_C_ops.cast(x, 'in_dtype', x.dtype, 'out_dtype', dtype)
return _legacy_C_ops.log_softmax(x, 'axis', axis)
else:
if dtype is None:
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'log_softmax'
......
......@@ -25,11 +25,7 @@ from ...fluid.data_feeder import (
check_type,
check_variable_and_dtype,
)
from ...fluid.framework import (
_in_legacy_dygraph,
_non_static_mode,
in_dygraph_mode,
)
from ...fluid.framework import in_dygraph_mode
from ...tensor import clip, concat, sqrt, sum
from ...tensor.creation import zeros
......@@ -927,9 +923,7 @@ def bilinear(x1, x2, weight, bias=None, name=None):
if in_dygraph_mode():
return _C_ops.bilinear_tensor_product(x1, x2, weight, bias)
elif _non_static_mode():
return _legacy_C_ops.bilinear_tensor_product(x1, x2, weight, bias)
else:
check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear')
check_variable_and_dtype(x2, 'x2', ['float32', 'float64'], 'bilinear')
......@@ -1118,11 +1112,10 @@ def dropout(
'downgrade_in_infer' if mode == 'downscale_in_infer' else mode
) # semantic transfer
if _non_static_mode():
if in_dygraph_mode():
if default_main_program().random_seed != 0:
seed = default_main_program().random_seed
if in_dygraph_mode():
out, mask = _C_ops.dropout(
x,
None,
......@@ -1134,21 +1127,7 @@ def dropout(
)
return out
out, mask = _legacy_C_ops.dropout(
x,
'dropout_prob',
p,
'is_test',
not training,
'fix_seed',
seed is not None,
'seed',
seed if seed is not None else 0,
'dropout_implementation',
mode,
)
return out
else:
helper = LayerHelper('dropout', **locals())
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'dropout'
......@@ -1683,23 +1662,6 @@ def pad(x, pad, mode='constant', value=0.0, data_format="NCHW", name=None):
if isinstance(pad, Variable):
pad = pad.numpy().tolist()
out = _C_ops.pad3d(x, pad, mode, value, data_format)
else:
if _in_legacy_dygraph():
if isinstance(pad, Variable):
pad = pad.numpy().tolist()
out = _legacy_C_ops.pad3d(
x,
"paddings",
pad,
"mode",
mode,
"value",
value,
"data_format",
data_format,
"name",
name,
)
else:
attrs = {'mode': mode, 'value': value, 'data_format': data_format}
inputs = {'X': [x]}
......@@ -1872,16 +1834,6 @@ def linear(x, weight, bias=None, name=None):
if in_dygraph_mode():
# TODO(jiabin): using addmm for fast forward route
return _C_ops.linear(x, weight, bias)
else:
if _in_legacy_dygraph():
pre_bias = _legacy_C_ops.matmul_v2(
x, weight, 'trans_x', False, 'trans_y', False
)
if bias is None:
return pre_bias
return _legacy_C_ops.elementwise_add(pre_bias, bias)
else:
helper = LayerHelper('linear', **locals())
dtype = x.dtype
......@@ -1889,9 +1841,7 @@ def linear(x, weight, bias=None, name=None):
check_variable_and_dtype(
x, 'x', ['float16', 'float32', 'float64'], 'linear'
)
check_dtype(
dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear'
)
check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear')
inputs = {'X': [x], 'Y': [weight]}
attrs = {'trans_x': False, 'trans_y': False}
......
......@@ -19,11 +19,7 @@ from paddle.device import (
is_compiled_with_npu,
is_compiled_with_rocm,
)
from paddle.fluid.framework import (
_global_flags,
_in_legacy_dygraph,
in_dygraph_mode,
)
from paddle.fluid.framework import _global_flags, in_dygraph_mode
from paddle.tensor.math import _add_with_axis
from ...device import get_cudnn_version
......@@ -489,30 +485,6 @@ def conv1d(
)
if bias is not None:
out = _add_with_axis(out, bias, axis=channel_dim)
elif _in_legacy_dygraph():
attrs = (
'strides',
stride,
'paddings',
padding,
'dilations',
dilation,
'groups',
groups,
'use_cudnn',
use_cudnn,
'use_mkldnn',
False,
'fuse_relu_before_depthwise_conv',
False,
"padding_algorithm",
padding_algorithm,
"data_format",
conv2d_data_format,
)
out = getattr(_legacy_C_ops, l_type)(x, weight, *attrs)
if bias is not None:
out = _add_with_axis(out, bias, axis=channel_dim)
else:
inputs = {'Input': [x], 'Filter': [weight]}
attrs = {
......@@ -1044,30 +1016,6 @@ def conv1d_transpose(
)
if bias is not None:
out = _add_with_axis(out, bias, axis=channel_dim)
elif _in_legacy_dygraph():
attrs = (
'output_padding',
output_padding,
'output_size',
output_size,
'strides',
stride,
'paddings',
padding,
'padding_algorithm',
padding_algorithm,
'dilations',
dilation,
'groups',
groups,
'use_cudnn',
use_cudnn,
'data_format',
conv2d_data_format,
)
out = getattr(_legacy_C_ops, op_type)(x, weight, *attrs)
if bias is not None:
out = _add_with_axis(out, bias, axis=channel_dim)
else:
inputs = {'Input': [x], 'Filter': [weight]}
attrs = {
......@@ -1350,33 +1298,6 @@ def conv2d_transpose(
return _add_with_axis(pre_bias, bias, axis=channel_dim)
else:
return pre_bias
if _in_legacy_dygraph():
attrs = (
'output_padding',
output_padding,
'output_size',
output_size,
'strides',
stride,
'paddings',
padding,
'padding_algorithm',
padding_algorithm,
'dilations',
dilation,
'groups',
groups,
'use_cudnn',
use_cudnn,
'data_format',
data_format,
)
pre_bias = getattr(_legacy_C_ops, op_type)(x, weight, *attrs)
if bias is not None:
out = _add_with_axis(pre_bias, bias, axis=channel_dim)
else:
out = pre_bias
else:
inputs = {'Input': [x], 'Filter': [weight]}
attrs = {
......@@ -1823,33 +1744,6 @@ def conv3d_transpose(
return _add_with_axis(pre_bias, bias, axis=channel_dim)
else:
return pre_bias
if _in_legacy_dygraph():
attrs = (
'output_padding',
output_padding,
'output_size',
output_size,
'paddings',
padding,
"padding_algorithm",
padding_algorithm,
'strides',
stride,
'dilations',
dilation,
'groups',
groups,
'use_cudnn',
use_cudnn,
"data_format",
data_format_,
)
pre_bias = getattr(_legacy_C_ops, op_type)(x, weight, *attrs)
if bias is not None:
out = _add_with_axis(pre_bias, bias, axis=channel_dim)
else:
out = pre_bias
else:
inputs = {'Input': [x], 'Filter': [weight]}
attrs = {
......
......@@ -13,8 +13,8 @@
# limitations under the License.
import paddle
from paddle import _C_ops, _legacy_C_ops
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode
from ...fluid.data_feeder import check_type, check_variable_and_dtype
from ...fluid.layer_helper import LayerHelper
......@@ -81,19 +81,13 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
sub = _C_ops.add(sub, epsilon)
return _C_ops.p_norm(sub, p, -1, 0.0, keepdim, False)
if _in_legacy_dygraph():
sub = _legacy_C_ops.elementwise_sub(x, y)
if epsilon != 0.0:
epsilon = paddle.fluid.dygraph.base.to_variable(
[epsilon], dtype=sub.dtype
)
sub = _legacy_C_ops.elementwise_add(sub, epsilon)
return _legacy_C_ops.p_norm(
sub, 'axis', -1, 'porder', p, 'keepdim', keepdim, 'epsilon', 0.0
)
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'PairwiseDistance')
check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'PairwiseDistance')
else:
check_variable_and_dtype(
x, 'x', ['float32', 'float64'], 'PairwiseDistance'
)
check_variable_and_dtype(
y, 'y', ['float32', 'float64'], 'PairwiseDistance'
)
sub = paddle.subtract(x, y)
if epsilon != 0.0:
epsilon_var = sub.block.create_var(dtype=sub.dtype)
......
......@@ -34,17 +34,11 @@ import numpy as np
from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from paddle.device import get_all_custom_device_type
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from paddle.fluid.framework import in_dygraph_mode
from ...fluid import dygraph_utils
from ...fluid.data_feeder import check_variable_and_dtype
from ...framework import (
ParamAttr,
_global_flags,
_non_static_mode,
get_default_dtype,
no_grad,
)
from ...framework import ParamAttr, _global_flags, get_default_dtype, no_grad
from .. import Layer
from .. import functional as F
from ..functional import batch_norm, instance_norm, layer_norm
......@@ -492,20 +486,6 @@ class GroupNorm(Layer):
dtype=input.dtype, stop_gradient=True
)
if _in_legacy_dygraph():
pre_act, _, _ = _legacy_C_ops.group_norm(
input,
self.weight,
self.bias,
mean_out,
variance_out,
'epsilon',
self._epsilon,
'groups',
self._num_groups,
)
return pre_act
inputs = {'X': input}
if self.bias is not None:
inputs['Bias'] = self.bias
......@@ -1005,13 +985,6 @@ class BatchNorm(Layer):
self._trainable_statistics = trainable_statistics
def forward(self, input):
# create output
# mean and mean_out share the same memory
mean_out = self._mean
# variance and variance out share the same memory
variance_out = self._variance
if _non_static_mode():
if in_dygraph_mode():
batch_norm_out, t1, t2, t3, t4, _ = _C_ops.batch_norm(
input,
......@@ -1029,42 +1002,12 @@ class BatchNorm(Layer):
return dygraph_utils._append_activation_in_dygraph(
batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn
)
elif _in_legacy_dygraph():
attrs = (
"momentum",
self._momentum,
"epsilon",
self._epsilon,
"is_test",
not self.training,
"data_layout",
self._data_layout,
"use_mkldnn",
self._use_mkldnn,
"fuse_with_relu",
self._fuse_with_relu,
"use_global_stats",
self._use_global_stats,
'trainable_statistics',
self._trainable_statistics,
)
batch_norm_out, _, _, _, _, _ = _legacy_C_ops.batch_norm(
input,
self.weight,
self.bias,
self._mean,
self._variance,
None,
mean_out,
variance_out,
*attrs
)
return dygraph_utils._append_activation_in_dygraph(
batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn
)
else:
# create output
# mean and mean_out share the same memory
mean_out = self._mean
# variance and variance out share the same memory
variance_out = self._variance
check_variable_and_dtype(
input, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
)
......@@ -1101,7 +1044,9 @@ class BatchNorm(Layer):
batch_norm_out = (
input
if self._in_place
else self._helper.create_variable_for_type_inference(self._dtype)
else self._helper.create_variable_for_type_inference(
self._dtype
)
)
outputs = {
......
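As a quick sanity check (a usage sketch only, not part of the PR), the user-facing `paddle.nn.functional` API touched here should behave the same before and after the cleanup; the expected `hardswish` values below are copied from the docstring shown in the diff above.

```python
# Usage sketch: the cleanup removes internal dispatch branches, not
# user-facing behavior. Expected hardswish output is taken from the
# docstring in the diff above.
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-4.0, 5.0, 1.0])
print(F.hardswish(x))  # [0., 5., 0.666667]
print(F.relu(paddle.to_tensor([-2.0, 0.0, 1.0])))  # [0., 0., 1.]
```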