Unverified · Commit 69e51c77 authored by 姜永久, committed by GitHub

rm legacy nn part2 (#49259)

* rm legacy nn part2

* rm _non_static_mode

* modify

* modify unpool test

* modify unpool test

* modify loss

* keep legacy for layer_norm
Parent e34e634a
@@ -414,6 +414,7 @@ class TestUnpoolOpAPI_st(unittest.TestCase):
             pool_out_np, indices_np, [2, 2], [2, 2], [0, 0], [5, 5]
         ).astype("float64")
         np.testing.assert_allclose(results[0], expect_res, rtol=1e-05)
+        paddle.disable_static()


 class TestOutputSizeTensor(UnittestBase):
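The test case runs its checks under `paddle.enable_static()`, so the added `paddle.disable_static()` returns the process to dynamic-graph mode before later tests run. A minimal sketch of that enable/disable pairing (not taken from the test file itself):

```python
import paddle

paddle.enable_static()   # build and execute a static-graph Program here
# ... static-graph assertions ...
paddle.disable_static()  # restore the default dynamic-graph mode
assert paddle.in_dynamic_mode()
```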
@@ -23,11 +23,7 @@ from ...fluid.data_feeder import (
     check_type,
     check_variable_and_dtype,
 )
-from ...fluid.framework import (
-    _in_legacy_dygraph,
-    _non_static_mode,
-    in_dygraph_mode,
-)
+from ...fluid.framework import in_dygraph_mode
 from ...fluid.layer_helper import LayerHelper
 from ...framework import convert_np_dtype_to_dtype_, core
 from ...static import Variable
@@ -326,25 +322,20 @@ def gather_tree(ids, parents):
     if in_dygraph_mode():
         return _C_ops.gather_tree(ids, parents)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.gather_tree(ids, parents)
-        else:
-            helper = LayerHelper('gather_tree', **locals())
-            check_variable_and_dtype(
-                ids, 'ids', ['int32', 'int64'], 'gather_tree'
-            )
-            check_variable_and_dtype(
-                parents, 'parents', ['int32', 'int64'], 'gather_tree'
-            )
-            out = helper.create_variable_for_type_inference(dtype=ids.dtype)
-            helper.append_op(
-                type="gather_tree",
-                inputs={"Ids": ids, "Parents": parents},
-                outputs={"Out": out},
-            )
-            return out
+        helper = LayerHelper('gather_tree', **locals())
+        check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], 'gather_tree')
+        check_variable_and_dtype(
+            parents, 'parents', ['int32', 'int64'], 'gather_tree'
+        )
+        out = helper.create_variable_for_type_inference(dtype=ids.dtype)
+        helper.append_op(
+            type="gather_tree",
+            inputs={"Ids": ids, "Parents": parents},
+            outputs={"Out": out},
+        )
+        return out


 @templatedoc()
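For reference, `gather_tree` now has exactly two branches: `_C_ops.gather_tree` in dynamic-graph mode and the `LayerHelper`-built op in static-graph mode. An illustrative dynamic-graph call (values arbitrary, inputs shaped `[max_time, batch_size, beam_size]`):

```python
import paddle
import paddle.nn.functional as F

ids = paddle.to_tensor([[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]])
parents = paddle.to_tensor(
    [[[0, 0], [1, 1]], [[1, 0], [0, 0]], [[0, 0], [0, 1]]]
)
final = F.gather_tree(ids, parents)  # back-traces beam-search ids, same shape
print(final.shape)  # [3, 2, 2]
```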
@@ -385,35 +376,27 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"):
         )
     if in_dygraph_mode():
         return _C_ops.temporal_shift(x, seg_num, shift_ratio, data_format)
-    if _non_static_mode():
-        return _legacy_C_ops.temporal_shift(
-            x,
-            'seg_num',
-            seg_num,
-            'shift_ratio',
-            shift_ratio,
-            'data_format',
-            data_format,
-        )
-
-    helper = LayerHelper("temporal_shift", **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift')
-    check_type(seg_num, 'seg_num', int, 'temporal_shift')
-    check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')
-
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-
-    if not isinstance(seg_num, int):
-        raise TypeError("seg_num must be int type.")
-
-    helper.append_op(
-        type="temporal_shift",
-        inputs={"X": x},
-        outputs={"Out": out},
-        attrs={
-            "seg_num": seg_num,
-            "shift_ratio": shift_ratio,
-            "data_format": data_format,
-        },
-    )
-    return out
+    else:
+        helper = LayerHelper("temporal_shift", **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64'], 'temporal_shift'
+        )
+        check_type(seg_num, 'seg_num', int, 'temporal_shift')
+        check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')
+
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+
+        if not isinstance(seg_num, int):
+            raise TypeError("seg_num must be int type.")
+
+        helper.append_op(
+            type="temporal_shift",
+            inputs={"X": x},
+            outputs={"Out": out},
+            attrs={
+                "seg_num": seg_num,
+                "shift_ratio": shift_ratio,
+                "data_format": data_format,
+            },
+        )
+        return out
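The public behaviour of `temporal_shift` is unchanged; only the legacy dygraph branch is removed. A small usage sketch (shapes chosen arbitrarily):

```python
import paddle
import paddle.nn.functional as F

# [N * seg_num, C, H, W]: here 2 clips, each with 2 frames
x = paddle.randn([4, 16, 14, 14])
out = F.temporal_shift(x, seg_num=2, shift_ratio=0.2)
print(out.shape)  # [4, 16, 14, 14]
```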
@@ -12,10 +12,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from paddle import _C_ops, _legacy_C_ops
+from paddle import _C_ops

 from ...fluid.data_feeder import check_variable_and_dtype
-from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode
+from ...fluid.framework import in_dygraph_mode
 from ...fluid.layer_helper import LayerHelper
 from ...static import Variable
@@ -88,35 +88,26 @@ def one_hot(x, num_classes, name=None):
     if in_dygraph_mode():
         return _C_ops.one_hot(x, num_classes)
     else:
-        if _in_legacy_dygraph():
-            return _legacy_C_ops.one_hot_v2(
-                x, 'depth', num_classes, 'allow_out_of_range', False
-            )
-        else:
-            check_variable_and_dtype(
-                x, 'input', ['int32', 'int64'], 'one_hot_v2'
-            )
-            helper = LayerHelper("one_hot_v2", **locals())
-            one_hot_out = helper.create_variable_for_type_inference(
-                dtype='float32'
-            )
-            if not isinstance(num_classes, Variable):
-                # user attribute
-                inputs = {'X': x}
-                attrs = {'depth': num_classes, 'allow_out_of_range': False}
-            else:
-                num_classes.stop_gradient = True
-                inputs = {'X': x, 'depth_tensor': num_classes}
-                attrs = {'allow_out_of_range': False}
-            helper.append_op(
-                type="one_hot_v2",
-                inputs=inputs,
-                attrs=attrs,
-                outputs={'Out': one_hot_out},
-                stop_gradient=True,
-            )
-            return one_hot_out
+        check_variable_and_dtype(x, 'input', ['int32', 'int64'], 'one_hot_v2')
+        helper = LayerHelper("one_hot_v2", **locals())
+        one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
+        if not isinstance(num_classes, Variable):
+            # user attribute
+            inputs = {'X': x}
+            attrs = {'depth': num_classes, 'allow_out_of_range': False}
+        else:
+            num_classes.stop_gradient = True
+            inputs = {'X': x, 'depth_tensor': num_classes}
+            attrs = {'allow_out_of_range': False}
+        helper.append_op(
+            type="one_hot_v2",
+            inputs=inputs,
+            attrs=attrs,
+            outputs={'Out': one_hot_out},
+            stop_gradient=True,
+        )
+        return one_hot_out


 def embedding(x, weight, padding_idx=None, sparse=False, name=None):
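`one_hot` keeps the same public contract after the cleanup; an illustrative dynamic-graph call:

```python
import paddle
import paddle.nn.functional as F

labels = paddle.to_tensor([1, 1, 3, 0], dtype='int64')
one_hot = F.one_hot(labels, num_classes=4)  # float32 tensor of shape [4, 4]
print(one_hot.numpy())
```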
@@ -212,19 +203,6 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None):

     if in_dygraph_mode():
         return _C_ops.embedding(x, weight, padding_idx, sparse)
-    elif _in_legacy_dygraph():
-        return _legacy_C_ops.lookup_table_v2(
-            weight,
-            x,
-            'is_sparse',
-            sparse,
-            'is_distributed',
-            False,
-            'remote_prefetch',
-            False,
-            'padding_idx',
-            padding_idx,
-        )
     else:
         helper = LayerHelper('embedding', **locals())
         dtype = helper.input_dtype(input_param_name='weight')
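`embedding` likewise loses only the legacy `lookup_table_v2` call. An illustrative usage with an arbitrary weight table:

```python
import paddle
import paddle.nn.functional as F

weight = paddle.rand([10, 4])                  # vocabulary of 10, embedding dim 4
ids = paddle.to_tensor([[0, 2], [3, 9]], dtype='int64')
emb = F.embedding(ids, weight)                 # gathers rows -> shape [2, 2, 4]
print(emb.shape)
```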
This diff is collapsed.
@@ -83,47 +83,33 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
         out = _C_ops.p_norm(x, float(p), axis, epsilon, True, False)
         return x / _C_ops.maximum(out, eps)
-
-    if _in_legacy_dygraph():
-        eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
-        out = _legacy_C_ops.p_norm(
-            x,
-            'axis',
-            axis,
-            'porder',
-            float(p),
-            'keepdim',
-            True,
-            'epsilon',
-            epsilon,
-        )
-        return x / _legacy_C_ops.elementwise_max(out, eps)
-
-    check_type(p, 'p', (float, int), 'normalize')
-    check_type(axis, 'axis', (int), 'normalize')
-    check_variable_and_dtype(
-        x, 'x', ['float16', 'float32', 'float64'], 'normalize'
-    )
-    if len(x.shape) == 1 and axis != 0 and axis != -1:
-        raise ValueError(
-            "Axis must be 0 or -1 when x is a 1-D tensor, but received axis = {}".format(
-                axis
-            )
-        )
-
-    attrs = {
-        'axis': axis,
-        'porder': float(p),
-        'keepdim': True,
-        'epsilon': epsilon,
-    }
-    helper = LayerHelper('p_norm', **locals())
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type='p_norm', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
-    )
-    eps = out.block.create_var(dtype=out.dtype)
-    eps = paddle.full(shape=[1], fill_value=epsilon, dtype=out.dtype)
-    return paddle.divide(x, paddle.maximum(out, eps), name=name)
+    else:
+        check_type(p, 'p', (float, int), 'normalize')
+        check_type(axis, 'axis', (int), 'normalize')
+        check_variable_and_dtype(
+            x, 'x', ['float16', 'float32', 'float64'], 'normalize'
+        )
+        if len(x.shape) == 1 and axis != 0 and axis != -1:
+            raise ValueError(
+                "Axis must be 0 or -1 when x is a 1-D tensor, but received axis = {}".format(
+                    axis
+                )
+            )
+
+        attrs = {
+            'axis': axis,
+            'porder': float(p),
+            'keepdim': True,
+            'epsilon': epsilon,
+        }
+        helper = LayerHelper('p_norm', **locals())
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type='p_norm', inputs={'X': x}, outputs={'Out': out}, attrs=attrs
+        )
+        eps = out.block.create_var(dtype=out.dtype)
+        eps = paddle.full(shape=[1], fill_value=epsilon, dtype=out.dtype)
+        return paddle.divide(x, paddle.maximum(out, eps), name=name)


 def batch_norm(
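`normalize` behaviour is unchanged by the cleanup; a quick check of the L2 case:

```python
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[3.0, 4.0], [6.0, 8.0]])
out = F.normalize(x, p=2, axis=1)  # each row scaled to unit L2 norm
print(out.numpy())                 # [[0.6, 0.8], [0.6, 0.8]]
```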
@@ -229,98 +215,62 @@ def batch_norm(
             batch_norm_out, act=None
         )

-    elif _in_legacy_dygraph():
-        # for dygraph need tuple
-        attrs = (
-            "momentum",
-            momentum,
-            "epsilon",
-            epsilon,
-            "is_test",
-            not training,
-            "data_layout",
-            data_format,
-            "use_mkldnn",
-            False,
-            "fuse_with_relu",
-            False,
-            "use_global_stats",
-            use_global_stats,
-            "trainable_statistics",
-            trainable_statistics,
-        )
-
-        batch_norm_out, _, _, _, _, _ = _legacy_C_ops.batch_norm(
-            x,
-            weight,
-            bias,
-            running_mean,
-            running_var,
-            None,
-            mean_out,
-            variance_out,
-            *attrs
-        )
-
-        return dygraph_utils._append_activation_in_dygraph(
-            batch_norm_out, act=None
-        )
-
-    check_variable_and_dtype(
-        x, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
-    )
-
-    # for static need dict
-    attrs = {
-        "momentum": momentum,
-        "epsilon": epsilon,
-        "is_test": not training,
-        "data_layout": data_format,
-        "use_mkldnn": False,
-        "fuse_with_relu": False,
-        "use_global_stats": use_global_stats,
-        "trainable_statistics": trainable_statistics,
-    }
-
-    inputs = {
-        "X": [x],
-        "Scale": [weight],
-        "Bias": [bias],
-        "Mean": [running_mean],
-        "Variance": [running_var],
-    }
-
-    helper = LayerHelper('batch_norm', **locals())
-
-    param_dtype = x.dtype if x.dtype != 'float16' else 'float32'
-    saved_mean = helper.create_variable_for_type_inference(
-        dtype=param_dtype, stop_gradient=True
-    )
-    saved_variance = helper.create_variable_for_type_inference(
-        dtype=param_dtype, stop_gradient=True
-    )
-    batch_norm_out = helper.create_variable_for_type_inference(x.dtype)
-
-    outputs = {
-        "Y": [batch_norm_out],
-        "MeanOut": [running_mean],
-        "VarianceOut": [running_var],
-        "SavedMean": [saved_mean],
-        "SavedVariance": [saved_variance],
-    }
-
-    if training or trainable_statistics:
-        # reserve_space is only used for training.
-        reserve_space = helper.create_variable_for_type_inference(
-            dtype=x.dtype, stop_gradient=True
-        )
-        outputs["ReserveSpace"] = [reserve_space]
-
-    helper.append_op(
-        type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
-    )
-
-    return helper.append_activation(batch_norm_out)
+    else:
+        check_variable_and_dtype(
+            x, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
+        )
+
+        # for static need dict
+        attrs = {
+            "momentum": momentum,
+            "epsilon": epsilon,
+            "is_test": not training,
+            "data_layout": data_format,
+            "use_mkldnn": False,
+            "fuse_with_relu": False,
+            "use_global_stats": use_global_stats,
+            "trainable_statistics": trainable_statistics,
+        }
+
+        inputs = {
+            "X": [x],
+            "Scale": [weight],
+            "Bias": [bias],
+            "Mean": [running_mean],
+            "Variance": [running_var],
+        }
+
+        helper = LayerHelper('batch_norm', **locals())
+
+        param_dtype = x.dtype if x.dtype != 'float16' else 'float32'
+        saved_mean = helper.create_variable_for_type_inference(
+            dtype=param_dtype, stop_gradient=True
+        )
+        saved_variance = helper.create_variable_for_type_inference(
+            dtype=param_dtype, stop_gradient=True
+        )
+        batch_norm_out = helper.create_variable_for_type_inference(x.dtype)
+
+        outputs = {
+            "Y": [batch_norm_out],
+            "MeanOut": [running_mean],
+            "VarianceOut": [running_var],
+            "SavedMean": [saved_mean],
+            "SavedVariance": [saved_variance],
+        }
+
+        if training or trainable_statistics:
+            # reserve_space is only used for training.
+            reserve_space = helper.create_variable_for_type_inference(
+                dtype=x.dtype, stop_gradient=True
+            )
+            outputs["ReserveSpace"] = [reserve_space]
+
+        helper.append_op(
+            type="batch_norm", inputs=inputs, outputs=outputs, attrs=attrs
+        )
+
+        return helper.append_activation(batch_norm_out)


 def layer_norm(
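After this change `batch_norm` has one dynamic-graph path (`_C_ops.batch_norm`) and one static-graph path. A usage sketch with plain tensors standing in for the layer's parameters and running statistics:

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([8, 3, 16, 16])   # NCHW
running_mean = paddle.zeros([3])
running_var = paddle.ones([3])
weight = paddle.ones([3])
bias = paddle.zeros([3])
out = F.batch_norm(x, running_mean, running_var, weight, bias, training=True)
print(out.shape)  # [8, 3, 16, 16]
```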
@@ -483,48 +433,41 @@ def instance_norm(
     if in_dygraph_mode():
         out = _C_ops.instance_norm(x, weight, bias, eps)
         return out
-    if _in_legacy_dygraph():
-        out, _, _ = _legacy_C_ops.instance_norm(
-            x,
-            weight,
-            bias,
-            "epsilon",
-            eps,
-            "momentum",
-            momentum,
-            "data_format",
-            data_format,
-        )
-        return out
-
-    check_variable_and_dtype(x, 'input', ['float32', 'float64'], "InstanceNorm")
-
-    attrs = {"epsilon": eps, "momentum": momentum, "data_format": data_format}
-
-    if weight and bias:
-        inputs = {"X": [x], "Scale": [weight], "Bias": [bias]}
-    else:
-        inputs = {"X": [x]}
-
-    helper = LayerHelper('instance_norm', **locals())
-    saved_mean = helper.create_variable_for_type_inference(
-        dtype=x.dtype, stop_gradient=True
-    )
-    saved_variance = helper.create_variable_for_type_inference(
-        dtype=x.dtype, stop_gradient=True
-    )
-    instance_norm_out = helper.create_variable_for_type_inference(x.dtype)
-
-    outputs = {
-        "Y": [instance_norm_out],
-        "SavedMean": [saved_mean],
-        "SavedVariance": [saved_variance],
-    }
-
-    helper.append_op(
-        type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs
-    )
-    return instance_norm_out
+    else:
+        check_variable_and_dtype(
+            x, 'input', ['float32', 'float64'], "InstanceNorm"
+        )
+
+        attrs = {
+            "epsilon": eps,
+            "momentum": momentum,
+            "data_format": data_format,
+        }
+
+        if weight and bias:
+            inputs = {"X": [x], "Scale": [weight], "Bias": [bias]}
+        else:
+            inputs = {"X": [x]}
+
+        helper = LayerHelper('instance_norm', **locals())
+        saved_mean = helper.create_variable_for_type_inference(
+            dtype=x.dtype, stop_gradient=True
+        )
+        saved_variance = helper.create_variable_for_type_inference(
+            dtype=x.dtype, stop_gradient=True
+        )
+        instance_norm_out = helper.create_variable_for_type_inference(x.dtype)
+
+        outputs = {
+            "Y": [instance_norm_out],
+            "SavedMean": [saved_mean],
+            "SavedVariance": [saved_variance],
+        }
+
+        helper.append_op(
+            type="instance_norm", inputs=inputs, outputs=outputs, attrs=attrs
+        )
+        return instance_norm_out


 def local_response_norm(
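`instance_norm` follows the same pattern, and `weight`/`bias` remain optional. Illustrative call:

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 3, 8, 8])
out = F.instance_norm(x)  # per-sample, per-channel normalization
print(out.shape)          # [2, 3, 8, 8]
```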
@@ -13,8 +13,7 @@
 # limitations under the License.

 from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
-from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
-from paddle.framework import _non_static_mode
+from paddle.fluid.framework import in_dygraph_mode

 from ...device import get_cudnn_version, is_compiled_with_rocm
 from ...fluid.data_feeder import check_variable_and_dtype
@@ -381,22 +380,22 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
         )
     if in_dygraph_mode():
         return _C_ops.pixel_shuffle(x, upscale_factor, data_format)
-
-    if _in_legacy_dygraph():
-        return _legacy_C_ops.pixel_shuffle(
-            x, "upscale_factor", upscale_factor, "data_format", data_format
-        )
-
-    helper = LayerHelper("pixel_shuffle", **locals())
-    check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_shuffle')
-    out = helper.create_variable_for_type_inference(dtype=x.dtype)
-    helper.append_op(
-        type="pixel_shuffle",
-        inputs={"X": x},
-        outputs={"Out": out},
-        attrs={"upscale_factor": upscale_factor, "data_format": data_format},
-    )
-    return out
+    else:
+        helper = LayerHelper("pixel_shuffle", **locals())
+        check_variable_and_dtype(
+            x, 'x', ['float32', 'float64'], 'pixel_shuffle'
+        )
+        out = helper.create_variable_for_type_inference(dtype=x.dtype)
+        helper.append_op(
+            type="pixel_shuffle",
+            inputs={"X": x},
+            outputs={"Out": out},
+            attrs={
+                "upscale_factor": upscale_factor,
+                "data_format": data_format,
+            },
+        )
+        return out


 def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None):
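For reference, a `pixel_shuffle` usage sketch (channel count must be divisible by the squared upscale factor):

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 9, 4, 4])              # 9 = 1 * 3**2 channels
out = F.pixel_shuffle(x, upscale_factor=3)  # -> [2, 1, 12, 12]
print(out.shape)
```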
@@ -442,7 +441,7 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None):
             "But recevie Attr(data_format): {} ".format(data_format)
         )

-    if _non_static_mode():
+    if in_dygraph_mode():
         return _legacy_C_ops.pixel_unshuffle(
             x, "downscale_factor", downscale_factor, "data_format", data_format
         )
@@ -516,7 +515,7 @@ def channel_shuffle(x, groups, data_format="NCHW", name=None):
             "But recevie Attr(data_format): {} ".format(data_format)
         )

-    if _non_static_mode():
+    if in_dygraph_mode():
         return _legacy_C_ops.channel_shuffle(
             x, "groups", groups, "data_format", data_format
         )
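`pixel_unshuffle` and `channel_shuffle` still go through `_legacy_C_ops` in dynamic-graph mode; only the mode check changes here. Illustrative calls:

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 1, 12, 12])
y = F.pixel_unshuffle(x, downscale_factor=3)  # -> [2, 9, 4, 4]
z = F.channel_shuffle(y, groups=3)            # same shape, channels regrouped
print(y.shape, z.shape)
```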