Unverified · commit 140d786d, authored by 姜永久, committed by GitHub

rm in_legacy_dygraph python/paddle/nn/functional/ part1 (#49258)

* rm in_legacy_dygraph nn part1

* rm non_static_mode

* modify rrelu
Parent commit: 861fef52
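Every file touched below follows the same pattern: the old three-way dispatch (new dygraph `_C_ops`, legacy `_in_legacy_dygraph()` / `_non_static_mode()` ops, static graph) collapses into a plain two-way branch on `in_dygraph_mode()`. A minimal sketch of that shape, using a hypothetical `example_op` rather than any operator actually changed in this diff:

# Minimal sketch of the refactor applied throughout this commit (not actual
# Paddle source). The legacy "_in_legacy_dygraph()" / "_non_static_mode()"
# branch is dropped, leaving the eager _C_ops path and the static-graph
# LayerHelper path.
from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper


def example_op(x, name=None):
    # "example_op" is a made-up wrapper used only to show the shape of the change.
    if in_dygraph_mode():
        # eager / dynamic graph: call the generated C++ op directly
        return _C_ops.relu(x)
    else:
        # static graph: append the op to the program via LayerHelper
        helper = LayerHelper('relu', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='relu', inputs={'X': [x]}, outputs={'Out': [out]})
        return out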
@@ -25,11 +25,7 @@ from ...fluid.data_feeder import (
     check_type,
     check_variable_and_dtype,
 )
-from ...fluid.framework import (
-    _in_legacy_dygraph,
-    _non_static_mode,
-    in_dygraph_mode,
-)
+from ...fluid.framework import in_dygraph_mode
 from ...tensor import clip, concat, sqrt, sum
 from ...tensor.creation import zeros
@@ -927,24 +923,22 @@ def bilinear(x1, x2, weight, bias=None, name=None):
     if in_dygraph_mode():
         return _C_ops.bilinear_tensor_product(x1, x2, weight, bias)
-    elif _non_static_mode():
-        return _legacy_C_ops.bilinear_tensor_product(x1, x2, weight, bias)
+    else:
         check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear')
         check_variable_and_dtype(x2, 'x2', ['float32', 'float64'], 'bilinear')

         inputs = {"X": x1, "Y": x2, "Weight": weight}
         if bias is not None:
             inputs["Bias"] = bias

         helper = LayerHelper("bilinear", **locals())
         out = helper.create_variable_for_type_inference(dtype=x1.dtype)
         helper.append_op(
             type="bilinear_tensor_product", inputs=inputs, outputs={"Out": out}
         )

         return out


 def dropout(
@@ -1118,77 +1112,62 @@ def dropout(
             'downgrade_in_infer' if mode == 'downscale_in_infer' else mode
         )  # semantic transfer

-        if _non_static_mode():
+        if in_dygraph_mode():
             if default_main_program().random_seed != 0:
                 seed = default_main_program().random_seed
-            if in_dygraph_mode():
-                out, mask = _C_ops.dropout(
-                    x,
-                    None,
-                    p,
-                    not training,
-                    mode,
-                    seed if seed is not None else 0,
-                    seed is not None,
-                )
-                return out
-            out, mask = _legacy_C_ops.dropout(
-                x,
-                'dropout_prob',
-                p,
-                'is_test',
-                not training,
-                'fix_seed',
-                seed is not None,
-                'seed',
-                seed if seed is not None else 0,
-                'dropout_implementation',
-                mode,
-            )
-            return out
-
-        helper = LayerHelper('dropout', **locals())
-        check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'dropout'
-        )
+            out, mask = _C_ops.dropout(
+                x,
+                None,
+                p,
+                not training,
+                mode,
+                seed if seed is not None else 0,
+                seed is not None,
+            )
+            return out
+        else:
+            helper = LayerHelper('dropout', **locals())
+            check_variable_and_dtype(
+                x, 'x', ['float16', 'float32', 'float64'], 'dropout'
+            )

             out = helper.create_variable_for_type_inference(dtype=x.dtype)
             mask = helper.create_variable_for_type_inference(
                 dtype=core.VarDesc.VarType.UINT8, stop_gradient=True
             )

             def get_attrs(prog, dropout_prob, is_test, seed):
                 if (seed is None or seed == 0) and prog.random_seed != 0:
                     seed = prog.random_seed
                 if isinstance(
                     dropout_prob, Variable
                 ) and not dropout_prob.shape != [1]:
                     raise TypeError(
                         "Required p.shape == [1] if type(p) is Variable, but received p.shape = {}".format(
                             p.shape
                         )
                     )
                 attrs = {
                     'dropout_prob': dropout_prob,
                     'is_test': is_test,
                     'fix_seed': seed is not None,
                     'seed': seed if seed is not None else 0,
                     'dropout_implementation': mode,
                 }
                 return attrs

             attrs = get_attrs(helper.main_program, p, not training, seed)

             helper.append_op(
                 type='dropout',
                 inputs={'X': [x]},
                 outputs={'Out': [out], 'Mask': [mask]},
                 attrs=attrs,
             )
             return out
     else:  # sometimes called dropout_nd #TODO: optimize with c++
         if not in_dynamic_mode():
             check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'dropout')
@@ -1684,38 +1663,21 @@ def pad(x, pad, mode='constant', value=0.0, data_format="NCHW", name=None):
             pad = pad.numpy().tolist()
         out = _C_ops.pad3d(x, pad, mode, value, data_format)
     else:
-        if _in_legacy_dygraph():
-            if isinstance(pad, Variable):
-                pad = pad.numpy().tolist()
-            out = _legacy_C_ops.pad3d(
-                x,
-                "paddings",
-                pad,
-                "mode",
-                mode,
-                "value",
-                value,
-                "data_format",
-                data_format,
-                "name",
-                name,
-            )
-        else:
         attrs = {'mode': mode, 'value': value, 'data_format': data_format}
         inputs = {'X': [x]}
         if isinstance(pad, Variable):
             inputs['Paddings'] = [pad]
             attrs['paddings'] = []
         else:
             attrs['paddings'] = pad

         helper = LayerHelper('pad3d', **locals())
         dtype = helper.input_dtype(input_param_name='input')
         out = helper.create_variable_for_type_inference(dtype)
         helper.append_op(
             type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs
         )

     if len(unsqueezed_dim) != 0:
         out = squeeze(out, axis=unsqueezed_dim)
@@ -1873,46 +1835,34 @@ def linear(x, weight, bias=None, name=None):
         # TODO(jiabin): using addmm for fast forward route
         return _C_ops.linear(x, weight, bias)
     else:
-        if _in_legacy_dygraph():
-            pre_bias = _legacy_C_ops.matmul_v2(
-                x, weight, 'trans_x', False, 'trans_y', False
-            )
-            if bias is None:
-                return pre_bias
-            return _legacy_C_ops.elementwise_add(pre_bias, bias)
-        else:
-            helper = LayerHelper('linear', **locals())
-            dtype = x.dtype
+        helper = LayerHelper('linear', **locals())
+        dtype = x.dtype

         check_variable_and_dtype(
             x, 'x', ['float16', 'float32', 'float64'], 'linear'
         )
-            check_dtype(
-                dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear'
-            )
+        check_dtype(dtype, 'dtype', ['float16', 'float32', 'float64'], 'linear')

         inputs = {'X': [x], 'Y': [weight]}
         attrs = {'trans_x': False, 'trans_y': False}
         tmp = helper.create_variable_for_type_inference(dtype)
         helper.append_op(
             type='matmul_v2',
             inputs=inputs,
             outputs={'Out': tmp},
             attrs=attrs,
         )
         if bias is not None:
             res = helper.create_variable_for_type_inference(dtype)
             helper.append_op(
                 type='elementwise_add',
                 inputs={'X': [tmp], 'Y': [bias]},
                 outputs={'Out': [res]},
                 attrs={'axis': len(x.shape) - 1},
             )
         else:
             res = tmp
         return res


 def label_smooth(label, prior_dist=None, epsilon=0.1, name=None):
......
@@ -19,11 +19,7 @@ from paddle.device import (
     is_compiled_with_npu,
     is_compiled_with_rocm,
 )
-from paddle.fluid.framework import (
-    _global_flags,
-    _in_legacy_dygraph,
-    in_dygraph_mode,
-)
+from paddle.fluid.framework import _global_flags, in_dygraph_mode
 from paddle.tensor.math import _add_with_axis

 from ...device import get_cudnn_version
@@ -489,30 +485,6 @@ def conv1d(
         )
         if bias is not None:
             out = _add_with_axis(out, bias, axis=channel_dim)
-    elif _in_legacy_dygraph():
-        attrs = (
-            'strides',
-            stride,
-            'paddings',
-            padding,
-            'dilations',
-            dilation,
-            'groups',
-            groups,
-            'use_cudnn',
-            use_cudnn,
-            'use_mkldnn',
-            False,
-            'fuse_relu_before_depthwise_conv',
-            False,
-            "padding_algorithm",
-            padding_algorithm,
-            "data_format",
-            conv2d_data_format,
-        )
-        out = getattr(_legacy_C_ops, l_type)(x, weight, *attrs)
-        if bias is not None:
-            out = _add_with_axis(out, bias, axis=channel_dim)
     else:
         inputs = {'Input': [x], 'Filter': [weight]}
         attrs = {
@@ -1044,30 +1016,6 @@ def conv1d_transpose(
         )
         if bias is not None:
             out = _add_with_axis(out, bias, axis=channel_dim)
-    elif _in_legacy_dygraph():
-        attrs = (
-            'output_padding',
-            output_padding,
-            'output_size',
-            output_size,
-            'strides',
-            stride,
-            'paddings',
-            padding,
-            'padding_algorithm',
-            padding_algorithm,
-            'dilations',
-            dilation,
-            'groups',
-            groups,
-            'use_cudnn',
-            use_cudnn,
-            'data_format',
-            conv2d_data_format,
-        )
-        out = getattr(_legacy_C_ops, op_type)(x, weight, *attrs)
-        if bias is not None:
-            out = _add_with_axis(out, bias, axis=channel_dim)
     else:
         inputs = {'Input': [x], 'Filter': [weight]}
         attrs = {
@@ -1350,33 +1298,6 @@ def conv2d_transpose(
             return _add_with_axis(pre_bias, bias, axis=channel_dim)
         else:
             return pre_bias
-    if _in_legacy_dygraph():
-        attrs = (
-            'output_padding',
-            output_padding,
-            'output_size',
-            output_size,
-            'strides',
-            stride,
-            'paddings',
-            padding,
-            'padding_algorithm',
-            padding_algorithm,
-            'dilations',
-            dilation,
-            'groups',
-            groups,
-            'use_cudnn',
-            use_cudnn,
-            'data_format',
-            data_format,
-        )
-        pre_bias = getattr(_legacy_C_ops, op_type)(x, weight, *attrs)
-        if bias is not None:
-            out = _add_with_axis(pre_bias, bias, axis=channel_dim)
-        else:
-            out = pre_bias
     else:
         inputs = {'Input': [x], 'Filter': [weight]}
         attrs = {
@@ -1823,33 +1744,6 @@ def conv3d_transpose(
             return _add_with_axis(pre_bias, bias, axis=channel_dim)
         else:
             return pre_bias
-    if _in_legacy_dygraph():
-        attrs = (
-            'output_padding',
-            output_padding,
-            'output_size',
-            output_size,
-            'paddings',
-            padding,
-            "padding_algorithm",
-            padding_algorithm,
-            'strides',
-            stride,
-            'dilations',
-            dilation,
-            'groups',
-            groups,
-            'use_cudnn',
-            use_cudnn,
-            "data_format",
-            data_format_,
-        )
-        pre_bias = getattr(_legacy_C_ops, op_type)(x, weight, *attrs)
-        if bias is not None:
-            out = _add_with_axis(pre_bias, bias, axis=channel_dim)
-        else:
-            out = pre_bias
     else:
         inputs = {'Input': [x], 'Filter': [weight]}
         attrs = {
......
@@ -13,8 +13,8 @@
 # limitations under the License.

 import paddle
-from paddle import _C_ops, _legacy_C_ops
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from paddle import _C_ops
+from paddle.fluid.framework import in_dygraph_mode

 from ...fluid.data_feeder import check_type, check_variable_and_dtype
 from ...fluid.layer_helper import LayerHelper
@@ -81,36 +81,30 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
             sub = _C_ops.add(sub, epsilon)
         return _C_ops.p_norm(sub, p, -1, 0.0, keepdim, False)
-    if _in_legacy_dygraph():
-        sub = _legacy_C_ops.elementwise_sub(x, y)
-        if epsilon != 0.0:
-            epsilon = paddle.fluid.dygraph.base.to_variable(
-                [epsilon], dtype=sub.dtype
-            )
-            sub = _legacy_C_ops.elementwise_add(sub, epsilon)
-        return _legacy_C_ops.p_norm(
-            sub, 'axis', -1, 'porder', p, 'keepdim', keepdim, 'epsilon', 0.0
-        )
-
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'PairwiseDistance')
-    check_variable_and_dtype(y, 'y', ['float32', 'float64'], 'PairwiseDistance')
-    sub = paddle.subtract(x, y)
-    if epsilon != 0.0:
-        epsilon_var = sub.block.create_var(dtype=sub.dtype)
-        epsilon_var = paddle.full(
-            shape=[1], fill_value=epsilon, dtype=sub.dtype
-        )
-        sub = paddle.add(sub, epsilon_var)
-    helper = LayerHelper("PairwiseDistance", name=name)
-    attrs = {
-        'axis': -1,
-        'porder': p,
-        'keepdim': keepdim,
-        'epsilon': 0.0,
-    }
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='p_norm', inputs={'X': sub}, outputs={'Out': out}, attrs=attrs
-    )
-    return out
+    else:
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64'], 'PairwiseDistance'
+        )
+        check_variable_and_dtype(
+            y, 'y', ['float32', 'float64'], 'PairwiseDistance'
+        )
+        sub = paddle.subtract(x, y)
+        if epsilon != 0.0:
+            epsilon_var = sub.block.create_var(dtype=sub.dtype)
+            epsilon_var = paddle.full(
+                shape=[1], fill_value=epsilon, dtype=sub.dtype
+            )
+            sub = paddle.add(sub, epsilon_var)
+        helper = LayerHelper("PairwiseDistance", name=name)
+        attrs = {
+            'axis': -1,
+            'porder': p,
+            'keepdim': keepdim,
+            'epsilon': 0.0,
+        }
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='p_norm', inputs={'X': sub}, outputs={'Out': out}, attrs=attrs
+        )
+        return out
@@ -34,17 +34,11 @@ import numpy as np
 from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
 from paddle.device import get_all_custom_device_type
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from paddle.fluid.framework import in_dygraph_mode

 from ...fluid import dygraph_utils
 from ...fluid.data_feeder import check_variable_and_dtype
-from ...framework import (
-    ParamAttr,
-    _global_flags,
-    _non_static_mode,
-    get_default_dtype,
-    no_grad,
-)
+from ...framework import ParamAttr, _global_flags, get_default_dtype, no_grad
 from .. import Layer
 from .. import functional as F
 from ..functional import batch_norm, instance_norm, layer_norm
@@ -492,20 +486,6 @@ class GroupNorm(Layer):
             dtype=input.dtype, stop_gradient=True
         )

-        if _in_legacy_dygraph():
-            pre_act, _, _ = _legacy_C_ops.group_norm(
-                input,
-                self.weight,
-                self.bias,
-                mean_out,
-                variance_out,
-                'epsilon',
-                self._epsilon,
-                'groups',
-                self._num_groups,
-            )
-            return pre_act
-
         inputs = {'X': input}
         if self.bias is not None:
             inputs['Bias'] = self.bias
@@ -1005,121 +985,86 @@ class BatchNorm(Layer):
         self._trainable_statistics = trainable_statistics

     def forward(self, input):
-        # create output
-        # mean and mean_out share the same memory
-        mean_out = self._mean
-        # variance and variance out share the same memory
-        variance_out = self._variance
-
-        if _non_static_mode():
-            if in_dygraph_mode():
-                batch_norm_out, t1, t2, t3, t4, _ = _C_ops.batch_norm(
-                    input,
-                    self._mean,
-                    self._variance,
-                    self.weight,
-                    self.bias,
-                    not self.training,
-                    self._momentum,
-                    self._epsilon,
-                    self._data_layout,
-                    self._use_global_stats,
-                    self._trainable_statistics,
-                )
-                return dygraph_utils._append_activation_in_dygraph(
-                    batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn
-                )
-            elif _in_legacy_dygraph():
-                attrs = (
-                    "momentum",
-                    self._momentum,
-                    "epsilon",
-                    self._epsilon,
-                    "is_test",
-                    not self.training,
-                    "data_layout",
-                    self._data_layout,
-                    "use_mkldnn",
-                    self._use_mkldnn,
-                    "fuse_with_relu",
-                    self._fuse_with_relu,
-                    "use_global_stats",
-                    self._use_global_stats,
-                    'trainable_statistics',
-                    self._trainable_statistics,
-                )
-                batch_norm_out, _, _, _, _, _ = _legacy_C_ops.batch_norm(
-                    input,
-                    self.weight,
-                    self.bias,
-                    self._mean,
-                    self._variance,
-                    None,
-                    mean_out,
-                    variance_out,
-                    *attrs
-                )
-                return dygraph_utils._append_activation_in_dygraph(
-                    batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn
-                )
-
-        check_variable_and_dtype(
-            input, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
-        )
+        if in_dygraph_mode():
+            batch_norm_out, t1, t2, t3, t4, _ = _C_ops.batch_norm(
+                input,
+                self._mean,
+                self._variance,
+                self.weight,
+                self.bias,
+                not self.training,
+                self._momentum,
+                self._epsilon,
+                self._data_layout,
+                self._use_global_stats,
+                self._trainable_statistics,
+            )
+            return dygraph_utils._append_activation_in_dygraph(
+                batch_norm_out, act=self._act, use_mkldnn=self._use_mkldnn
+            )
+        else:
+            # create output
+            # mean and mean_out share the same memory
+            mean_out = self._mean
+            # variance and variance out share the same memory
+            variance_out = self._variance
+            check_variable_and_dtype(
+                input, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
+            )

             attrs = {
                 "momentum": self._momentum,
                 "epsilon": self._epsilon,
                 "is_test": self._is_test,
                 "data_layout": self._data_layout,
                 "use_mkldnn": False,
                 "fuse_with_relu": self._fuse_with_relu,
                 "use_global_stats": self._use_global_stats,
                 "trainable_statistics": self._trainable_statistics,
             }

             inputs = {
                 "X": [input],
                 "Scale": [self.weight],
                 "Bias": [self.bias],
                 "Mean": [self._mean],
                 "Variance": [self._variance],
             }

             saved_mean = self._helper.create_variable_for_type_inference(
                 dtype=self._dtype, stop_gradient=True
             )
             saved_variance = self._helper.create_variable_for_type_inference(
                 dtype=self._dtype, stop_gradient=True
             )
             reserve_space = self._helper.create_variable_for_type_inference(
                 dtype=self._helper.input_dtype(input), stop_gradient=True
             )

-        batch_norm_out = (
-            input
-            if self._in_place
-            else self._helper.create_variable_for_type_inference(self._dtype)
-        )
+            batch_norm_out = (
+                input
+                if self._in_place
+                else self._helper.create_variable_for_type_inference(
+                    self._dtype
+                )
+            )

             outputs = {
                 "Y": [batch_norm_out],
                 "MeanOut": [mean_out],
                 "VarianceOut": [variance_out],
                 "SavedMean": [saved_mean],
                 "SavedVariance": [saved_variance],
             }
             if reserve_space is not None:
                 outputs["ReserveSpace"] = [reserve_space]

             self._helper.append_op(
                 type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
             )

             # Currently, we don't support inplace in dygraph mode
             return self._helper.append_activation(batch_norm_out, self._act)


 class BatchNorm1D(_BatchNormBase):
......