Unverified · Commit 69e51c77 authored by 姜永久, committed by GitHub

rm legacy nn part2 (#49259)

* rm legacy nn part2

* rm _non_static_mode

* modify

* modify unpool test

* modify unpool test

* modify loss

* keep legacy for layer_norm
Parent e34e634a
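
Note (illustrative, not part of the original commit page): every function touched below is converged onto the same two-way dispatch: the eager (dygraph) path calls the new _C_ops kernel directly, while the static-graph path builds the op through LayerHelper. A minimal sketch of that pattern, using a hypothetical my_op operator purely for illustration:

from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper


def my_op(x, name=None):
    # hypothetical operator, used only to illustrate the dispatch pattern
    if in_dygraph_mode():
        # eager mode: call the C++ kernel directly
        return _C_ops.my_op(x)
    else:
        # static graph: register the op in the program via LayerHelper
        helper = LayerHelper('my_op', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='my_op', inputs={'X': x}, outputs={'Out': out})
        return out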
@@ -414,6 +414,7 @@ class TestUnpoolOpAPI_st(unittest.TestCase):
             pool_out_np, indices_np, [2, 2], [2, 2], [0, 0], [5, 5]
         ).astype("float64")
         np.testing.assert_allclose(results[0], expect_res, rtol=1e-05)
+        paddle.disable_static()


 class TestOutputSizeTensor(UnittestBase):
...
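
An illustrative note on the added paddle.disable_static() call (a sketch based on the test's apparent structure, not taken from the diff): TestUnpoolOpAPI_st runs its body under paddle.enable_static(), so without switching back, tests that run later in the same process would still execute in static-graph mode. Roughly:

import paddle
import paddle.nn.functional as F

paddle.enable_static()
# ... build and run the static max_unpool2d program, compare against expect_res ...
paddle.disable_static()  # restore eager mode so subsequent tests are unaffected

# back in eager mode, the same APIs can be called imperatively
x = paddle.rand([1, 1, 6, 6])
pooled, indices = F.max_pool2d(x, kernel_size=2, stride=2, return_mask=True)
unpooled = F.max_unpool2d(pooled, indices, kernel_size=2, stride=2)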
@@ -23,11 +23,7 @@ from ...fluid.data_feeder import (
     check_type,
     check_variable_and_dtype,
 )
-from ...fluid.framework import (
-    _in_legacy_dygraph,
-    _non_static_mode,
-    in_dygraph_mode,
-)
+from ...fluid.framework import in_dygraph_mode
 from ...fluid.layer_helper import LayerHelper
 from ...framework import convert_np_dtype_to_dtype_, core
 from ...static import Variable
@@ -326,25 +322,20 @@ def gather_tree(ids, parents):
     if in_dygraph_mode():
         return _C_ops.gather_tree(ids, parents)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.gather_tree(ids, parents)
-        else:
-            helper = LayerHelper('gather_tree', **locals())
-            check_variable_and_dtype(
-                ids, 'ids', ['int32', 'int64'], 'gather_tree'
-            )
-            check_variable_and_dtype(
-                parents, 'parents', ['int32', 'int64'], 'gather_tree'
-            )
-            out = helper.create_variable_for_type_inference(dtype=ids.dtype)
-            helper.append_op(
-                type="gather_tree",
-                inputs={"Ids": ids, "Parents": parents},
-                outputs={"Out": out},
-            )
-            return out
+        helper = LayerHelper('gather_tree', **locals())
+        check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], 'gather_tree')
+        check_variable_and_dtype(
+            parents, 'parents', ['int32', 'int64'], 'gather_tree'
+        )
+        out = helper.create_variable_for_type_inference(dtype=ids.dtype)
+        helper.append_op(
+            type="gather_tree",
+            inputs={"Ids": ids, "Parents": parents},
+            outputs={"Out": out},
+        )
+        return out


 @templatedoc()
@@ -385,35 +376,27 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"):
         )
     if in_dygraph_mode():
         return _C_ops.temporal_shift(x, seg_num, shift_ratio, data_format)
-    if _non_static_mode():
-        return _legacy_C_ops.temporal_shift(
-            x,
-            'seg_num',
-            seg_num,
-            'shift_ratio',
-            shift_ratio,
-            'data_format',
-            data_format,
-        )
-
-    helper = LayerHelper("temporal_shift", **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift')
-    check_type(seg_num, 'seg_num', int, 'temporal_shift')
-    check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')
-
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    if not isinstance(seg_num, int):
-        raise TypeError("seg_num must be int type.")
-
-    helper.append_op(
-        type="temporal_shift",
-        inputs={"X": x},
-        outputs={"Out": out},
-        attrs={
-            "seg_num": seg_num,
-            "shift_ratio": shift_ratio,
-            "data_format": data_format,
-        },
-    )
-    return out
+    else:
+        helper = LayerHelper("temporal_shift", **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64'], 'temporal_shift'
+        )
+        check_type(seg_num, 'seg_num', int, 'temporal_shift')
+        check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')
+
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        if not isinstance(seg_num, int):
+            raise TypeError("seg_num must be int type.")
+
+        helper.append_op(
+            type="temporal_shift",
+            inputs={"X": x},
+            outputs={"Out": out},
+            attrs={
+                "seg_num": seg_num,
+                "shift_ratio": shift_ratio,
+                "data_format": data_format,
+            },
+        )
+        return out
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops

 from ...fluid.data_feeder import check_variable_and_dtype
-from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ...fluid.framework import in_dygraph_mode
 from ...fluid.layer_helper import LayerHelper
 from ...static import Variable
@@ -88,35 +88,26 @@ def one_hot(x, num_classes, name=None):
     if in_dygraph_mode():
         return _C_ops.one_hot(x, num_classes)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.one_hot_v2(
-                x, 'depth', num_classes, 'allow_out_of_range', False
-            )
-        else:
-            check_variable_and_dtype(
-                x, 'input', ['int32', 'int64'], 'one_hot_v2'
-            )
-            helper = LayerHelper("one_hot_v2", **locals())
-
-            one_hot_out = helper.create_variable_for_type_inference(
-                dtype='float32'
-            )
-            if not isinstance(num_classes, Variable):
-                # user attribute
-                inputs = {'X': x}
-                attrs = {'depth': num_classes, 'allow_out_of_range': False}
-            else:
-                num_classes.stop_gradient = True
-                inputs = {'X': x, 'depth_tensor': num_classes}
-                attrs = {'allow_out_of_range': False}
-            helper.append_op(
-                type="one_hot_v2",
-                inputs=inputs,
-                attrs=attrs,
-                outputs={'Out': one_hot_out},
-                stop_gradient=True,
-            )
-            return one_hot_out
+        check_variable_and_dtype(x, 'input', ['int32', 'int64'], 'one_hot_v2')
+        helper = LayerHelper("one_hot_v2", **locals())
+
+        one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
+        if not isinstance(num_classes, Variable):
+            # user attribute
+            inputs = {'X': x}
+            attrs = {'depth': num_classes, 'allow_out_of_range': False}
+        else:
+            num_classes.stop_gradient = True
+            inputs = {'X': x, 'depth_tensor': num_classes}
+            attrs = {'allow_out_of_range': False}
+        helper.append_op(
+            type="one_hot_v2",
+            inputs=inputs,
+            attrs=attrs,
+            outputs={'Out': one_hot_out},
+            stop_gradient=True,
+        )
+        return one_hot_out


 def embedding(x, weight, padding_idx=None, sparse=False, name=None):
@@ -212,19 +203,6 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None):

     if in_dygraph_mode():
         return _C_ops.embedding(x, weight, padding_idx, sparse)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.lookup_table_v2(
-            weight,
-            x,
-            'is_sparse',
-            sparse,
-            'is_distributed',
-            False,
-            'remote_prefetch',
-            False,
-            'padding_idx',
-            padding_idx,
-        )
     else:
         helper = LayerHelper('embedding', **locals())
         dtype = helper.input_dtype(input_param_name='weight')
...
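
For reference, an illustrative eager-mode usage of the two APIs whose legacy branches are removed in this file (a sketch, not part of the diff; shapes and values are arbitrary):

import paddle
import paddle.nn.functional as F

labels = paddle.to_tensor([1, 0, 3], dtype='int64')
onehot = F.one_hot(labels, num_classes=4)   # float32 tensor of shape [3, 4]

weight = paddle.rand([10, 8])               # vocabulary size 10, embedding dim 8
ids = paddle.to_tensor([[1, 2], [3, 9]], dtype='int64')
emb = F.embedding(ids, weight)              # tensor of shape [2, 2, 8]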
This file's diff is collapsed.
@@ -83,47 +83,33 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
         out = _C_ops.p_norm(x, float(p), axis, epsilon, True, False)
         return x / _C_ops.maximum(out, eps)

-    if _in_legacy_dygraph():
-        eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
-        out = _legacy_C_ops.p_norm(
-            x,
-            'axis',
-            axis,
-            'porder',
-            float(p),
-            'keepdim',
-            True,
-            'epsilon',
-            epsilon,
-        )
-        return x / _legacy_C_ops.elementwise_max(out, eps)
-
-    check_type(p, 'p', (float, int), 'normalize')
-    check_type(axis, 'axis', (int), 'normalize')
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'normalize'
-    )
-    if len(x.shape) == 1 and axis != 0 and axis != -1:
-        raise ValueError(
-            "Axis must be 0 or -1 when x is a 1-D tensor, but received axis = {}".format(
-                axis
-            )
-        )
-
-    attrs = {
-        'axis': axis,
-        'porder': float(p),
-        'keepdim': True,
-        'epsilon': epsilon,
-    }
-    helper = LayerHelper('p_norm', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='p_norm', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
-    )
-    eps = out.block.create_var(dtype=out.dtype)
-    eps = paddle.full(shape=[1], fill_value=epsilon, dtype=out.dtype)
-    return paddle.divide(x, paddle.maximum(out, eps), name=name)
+    else:
+        check_type(p, 'p', (float, int), 'normalize')
+        check_type(axis, 'axis', (int), 'normalize')
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'normalize'
+        )
+        if len(x.shape) == 1 and axis != 0 and axis != -1:
+            raise ValueError(
+                "Axis must be 0 or -1 when x is a 1-D tensor, but received axis = {}".format(
+                    axis
+                )
+            )
+
+        attrs = {
+            'axis': axis,
+            'porder': float(p),
+            'keepdim': True,
+            'epsilon': epsilon,
+        }
+        helper = LayerHelper('p_norm', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='p_norm', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
+        )
+        eps = out.block.create_var(dtype=out.dtype)
+        eps = paddle.full(shape=[1], fill_value=epsilon, dtype=out.dtype)
+        return paddle.divide(x, paddle.maximum(out, eps), name=name)


 def batch_norm(
@@ -229,98 +215,62 @@ def batch_norm(
             batch_norm_out, act=None
         )

-    elif _in_legacy_dygraph():
-        # for dygraph need tuple
-        attrs = (
-            "momentum",
-            momentum,
-            "epsilon",
-            epsilon,
-            "is_test",
-            not training,
-            "data_layout",
-            data_format,
-            "use_mkldnn",
-            False,
-            "fuse_with_relu",
-            False,
-            "use_global_stats",
-            use_global_stats,
-            "trainable_statistics",
-            trainable_statistics,
-        )
-        batch_norm_out, _, _, _, _, _ = _legacy_C_ops.batch_norm(
-            x,
-            weight,
-            bias,
-            running_mean,
-            running_var,
-            None,
-            mean_out,
-            variance_out,
-            *attrs
-        )
-
-        return dygraph_utils._append_activation_in_dygraph(
-            batch_norm_out, act=None
-        )
-
-    check_variable_and_dtype(
-        x, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
-    )
-    # for static need dict
-    attrs = {
-        "momentum": momentum,
-        "epsilon": epsilon,
-        "is_test": not training,
-        "data_layout": data_format,
-        "use_mkldnn": False,
-        "fuse_with_relu": False,
-        "use_global_stats": use_global_stats,
-        "trainable_statistics": trainable_statistics,
-    }
-    inputs = {
-        "X": [x],
-        "Scale": [weight],
-        "Bias": [bias],
-        "Mean": [running_mean],
-        "Variance": [running_var],
-    }
-    helper = LayerHelper('batch_norm', **locals())
-    param_dtype = x.dtype if x.dtype != 'float16' else 'float32'
-    saved_mean = helper.create_variable_for_type_inference(
-        dtype=param_dtype, stop_gradient=True
-    )
-    saved_variance = helper.create_variable_for_type_inference(
-        dtype=param_dtype, stop_gradient=True
-    )
-    batch_norm_out = helper.create_variable_for_type_inference(x.dtype)
-    outputs = {
-        "Y": [batch_norm_out],
-        "MeanOut": [running_mean],
-        "VarianceOut": [running_var],
-        "SavedMean": [saved_mean],
-        "SavedVariance": [saved_variance],
-    }
-    if training or trainable_statistics:
-        # reserve_space is only used for training.
-        reserve_space = helper.create_variable_for_type_inference(
-            dtype=x.dtype, stop_gradient=True
-        )
-        outputs["ReserveSpace"] = [reserve_space]
-    helper.append_op(
-        type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
-    )
-    return helper.append_activation(batch_norm_out)
+    else:
+        check_variable_and_dtype(
+            x, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
+        )
+        # for static need dict
+        attrs = {
+            "momentum": momentum,
+            "epsilon": epsilon,
+            "is_test": not training,
+            "data_layout": data_format,
+            "use_mkldnn": False,
+            "fuse_with_relu": False,
+            "use_global_stats": use_global_stats,
+            "trainable_statistics": trainable_statistics,
+        }
+        inputs = {
+            "X": [x],
+            "Scale": [weight],
+            "Bias": [bias],
+            "Mean": [running_mean],
+            "Variance": [running_var],
+        }
+        helper = LayerHelper('batch_norm', **locals())
+        param_dtype = x.dtype if x.dtype != 'float16' else 'float32'
+        saved_mean = helper.create_variable_for_type_inference(
+            dtype=param_dtype, stop_gradient=True
+        )
+        saved_variance = helper.create_variable_for_type_inference(
+            dtype=param_dtype, stop_gradient=True
+        )
+        batch_norm_out = helper.create_variable_for_type_inference(x.dtype)
+        outputs = {
+            "Y": [batch_norm_out],
+            "MeanOut": [running_mean],
+            "VarianceOut": [running_var],
+            "SavedMean": [saved_mean],
+            "SavedVariance": [saved_variance],
+        }
+        if training or trainable_statistics:
+            # reserve_space is only used for training.
+            reserve_space = helper.create_variable_for_type_inference(
+                dtype=x.dtype, stop_gradient=True
+            )
+            outputs["ReserveSpace"] = [reserve_space]
+
+        helper.append_op(
+            type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
+        )
+
+        return helper.append_activation(batch_norm_out)


 def layer_norm(
@@ -483,48 +433,41 @@ def instance_norm(
     if in_dygraph_mode():
         out = _C_ops.instance_norm(x, weight, bias, eps)
         return out
-    if _in_legacy_dygraph():
-        out, _, _ = _legacy_C_ops.instance_norm(
-            x,
-            weight,
-            bias,
-            "epsilon",
-            eps,
-            "momentum",
-            momentum,
-            "data_format",
-            data_format,
-        )
-        return out
-
-    check_variable_and_dtype(x, 'input', ['float32', 'float64'], "InstanceNorm")
-
-    attrs = {"epsilon": eps, "momentum": momentum, "data_format": data_format}
-
-    if weight and bias:
-        inputs = {"X": [x], "Scale": [weight], "Bias": [bias]}
-    else:
-        inputs = {"X": [x]}
-
-    helper = LayerHelper('instance_norm', **locals())
-    saved_mean = helper.create_variable_for_type_inference(
-        dtype=x.dtype, stop_gradient=True
-    )
-    saved_variance = helper.create_variable_for_type_inference(
-        dtype=x.dtype, stop_gradient=True
-    )
-    instance_norm_out = helper.create_variable_for_type_inference(x.dtype)
-
-    outputs = {
-        "Y": [instance_norm_out],
-        "SavedMean": [saved_mean],
-        "SavedVariance": [saved_variance],
-    }
-
-    helper.append_op(
-        type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs
-    )
-    return instance_norm_out
+    else:
+        check_variable_and_dtype(
+            x, 'input', ['float32', 'float64'], "InstanceNorm"
+        )
+
+        attrs = {
+            "epsilon": eps,
+            "momentum": momentum,
+            "data_format": data_format,
+        }
+
+        if weight and bias:
+            inputs = {"X": [x], "Scale": [weight], "Bias": [bias]}
+        else:
+            inputs = {"X": [x]}
+
+        helper = LayerHelper('instance_norm', **locals())
+        saved_mean = helper.create_variable_for_type_inference(
+            dtype=x.dtype, stop_gradient=True
+        )
+        saved_variance = helper.create_variable_for_type_inference(
+            dtype=x.dtype, stop_gradient=True
+        )
+        instance_norm_out = helper.create_variable_for_type_inference(x.dtype)

+        outputs = {
+            "Y": [instance_norm_out],
+            "SavedMean": [saved_mean],
+            "SavedVariance": [saved_variance],
+        }
+
+        helper.append_op(
+            type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs
+        )
+        return instance_norm_out


 def local_response_norm(
...
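
A quick eager-mode sanity check for F.normalize, one of the functions whose legacy branch is removed above (illustrative only, not part of the diff):

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[3.0, 4.0], [6.0, 8.0]])
y = F.normalize(x, p=2, axis=1)                 # each row scaled to unit L2 norm
# y is [[0.6, 0.8], [0.6, 0.8]]
print(paddle.linalg.norm(y, p=2, axis=1))       # ~[1.0, 1.0]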
@@ -13,8 +13,7 @@
 # limitations under the License.

 from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
-from paddle.framework import _non_static_mode
+from paddle.fluid.framework import in_dygraph_mode

 from ...device import get_cudnn_version, is_compiled_with_rocm
 from ...fluid.data_feeder import check_variable_and_dtype
@@ -381,22 +380,22 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
         )
     if in_dygraph_mode():
         return _C_ops.pixel_shuffle(x, upscale_factor, data_format)
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.pixel_shuffle(
-            x, "upscale_factor", upscale_factor, "data_format", data_format
-        )
-
-    helper = LayerHelper("pixel_shuffle", **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_shuffle')
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type="pixel_shuffle",
-        inputs={"X": x},
-        outputs={"Out": out},
-        attrs={"upscale_factor": upscale_factor, "data_format": data_format},
-    )
-    return out
+    else:
+        helper = LayerHelper("pixel_shuffle", **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64'], 'pixel_shuffle'
+        )
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type="pixel_shuffle",
+            inputs={"X": x},
+            outputs={"Out": out},
+            attrs={
+                "upscale_factor": upscale_factor,
+                "data_format": data_format,
+            },
+        )
+        return out


 def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None):
@@ -442,7 +441,7 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None):
             "But recevie Attr(data_format): {} ".format(data_format)
         )

-    if _non_static_mode():
+    if in_dygraph_mode():
         return _legacy_C_ops.pixel_unshuffle(
             x, "downscale_factor", downscale_factor, "data_format", data_format
         )
@@ -516,7 +515,7 @@ def channel_shuffle(x, groups, data_format="NCHW", name=None):
             "But recevie Attr(data_format): {} ".format(data_format)
         )

-    if _non_static_mode():
+    if in_dygraph_mode():
         return _legacy_C_ops.channel_shuffle(
             x, "groups", groups, "data_format", data_format
         )
...
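
And, for orientation, the expected shapes for the shuffle ops touched in the last file (an illustrative sketch, not part of the diff):

import paddle
import paddle.nn.functional as F

x = paddle.rand([2, 9, 4, 4])                   # NCHW input
y = F.pixel_shuffle(x, upscale_factor=3)        # shape [2, 1, 12, 12]
z = F.pixel_unshuffle(y, downscale_factor=3)    # back to shape [2, 9, 4, 4]
s = F.channel_shuffle(x, groups=3)              # same shape, channels regrouped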