Unverified commit 69e51c77 authored by 姜永久, committed by GitHub

rm legacy nn part2 (#49259)

* rm legacy nn part2

* rm _non_static_mode

* modify

* modify unpool test

* modify unpool test

* modify loss

* keep legacy for layer_norm
Parent e34e634a
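
Every hunk in this PR applies the same mechanical cleanup: the old three-way dispatch (new eager `_C_ops` → legacy `_legacy_C_ops` → static graph) collapses to a two-way `if in_dygraph_mode(): ... else: ...` once the legacy dygraph branch is deleted. A minimal sketch of the resulting shape, assuming the framework helpers named in the diff below (`some_op` is a hypothetical operator name, not a real one):

```python
# Sketch only -- the real wrappers live under python/paddle/nn/functional/.
from paddle import _C_ops
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.layer_helper import LayerHelper

def some_op_wrapper(x):
    if in_dygraph_mode():
        # Eager mode: call the generated C++ op directly.
        return _C_ops.some_op(x)
    else:
        # Static graph: validate inputs, then append the op to the program.
        helper = LayerHelper('some_op', **locals())
        out = helper.create_variable_for_type_inference(dtype=x.dtype)
        helper.append_op(type='some_op', inputs={'X': x}, outputs={'Out': out})
        return out
```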
......@@ -414,6 +414,7 @@ class TestUnpoolOpAPI_st(unittest.TestCase):
pool_out_np, indices_np, [2, 2], [2, 2], [0, 0], [5, 5]
).astype("float64")
np.testing.assert_allclose(results[0], expect_res, rtol=1e-05)
paddle.disable_static()
class TestOutputSizeTensor(UnittestBase):
......
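
The test change above adds a `paddle.disable_static()` after a static-graph test body so that later eager-mode tests are unaffected; a minimal sketch of that pattern (test logic elided):

```python
import paddle

paddle.enable_static()
# ... build and run the static unpool program under test ...
paddle.disable_static()  # restore eager mode for the tests that follow
```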
......@@ -23,11 +23,7 @@ from ...fluid.data_feeder import (
check_type,
check_variable_and_dtype,
)
from ...fluid.framework import (
_in_legacy_dygraph,
_non_static_mode,
in_dygraph_mode,
)
from ...fluid.framework import in_dygraph_mode
from ...fluid.layer_helper import LayerHelper
from ...framework import convert_np_dtype_to_dtype_, core
from ...static import Variable
......@@ -325,14 +321,9 @@ def gather_tree(ids, parents):
if in_dygraph_mode():
return _C_ops.gather_tree(ids, parents)
else:
if _in_legacy_dygraph():
return _legacy_C_ops.gather_tree(ids, parents)
else:
helper = LayerHelper('gather_tree', **locals())
check_variable_and_dtype(
ids, 'ids', ['int32', 'int64'], 'gather_tree'
)
check_variable_and_dtype(ids, 'ids', ['int32', 'int64'], 'gather_tree')
check_variable_and_dtype(
parents, 'parents', ['int32', 'int64'], 'gather_tree'
)
......@@ -385,19 +376,11 @@ def temporal_shift(x, seg_num, shift_ratio=0.25, name=None, data_format="NCHW"):
)
if in_dygraph_mode():
return _C_ops.temporal_shift(x, seg_num, shift_ratio, data_format)
if _non_static_mode():
return _legacy_C_ops.temporal_shift(
x,
'seg_num',
seg_num,
'shift_ratio',
shift_ratio,
'data_format',
data_format,
)
else:
helper = LayerHelper("temporal_shift", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'temporal_shift')
check_variable_and_dtype(
x, 'x', ['float32', 'float64'], 'temporal_shift'
)
check_type(seg_num, 'seg_num', int, 'temporal_shift')
check_type(shift_ratio, 'shift_ratio', float, 'temporal_shift')
......
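
The public behavior of `gather_tree` and `temporal_shift` is unchanged by this cleanup; a quick eager-mode smoke check (the `gather_tree` values follow the API docstring, the `temporal_shift` shapes are illustrative):

```python
import paddle
import paddle.nn.functional as F

ids = paddle.to_tensor([[[2, 2], [6, 1]], [[3, 9], [6, 1]], [[0, 1], [9, 0]]])
parents = paddle.to_tensor([[[0, 0], [1, 1]], [[1, 0], [0, 0]], [[0, 0], [0, 1]]])
final = F.gather_tree(ids, parents)  # backtrace beam-search ids: [max_time, batch, beam]

x = paddle.randn([4, 4, 8, 8])       # N*T = 4, so seg_num=2 gives N=2, T=2
y = F.temporal_shift(x, seg_num=2, shift_ratio=0.2)
```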
......@@ -12,10 +12,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from paddle import _C_ops, _legacy_C_ops
from paddle import _C_ops
from ...fluid.data_feeder import check_variable_and_dtype
from ...fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from ...fluid.framework import in_dygraph_mode
from ...fluid.layer_helper import LayerHelper
from ...static import Variable
......@@ -88,19 +88,10 @@ def one_hot(x, num_classes, name=None):
if in_dygraph_mode():
return _C_ops.one_hot(x, num_classes)
else:
if _in_legacy_dygraph():
return _legacy_C_ops.one_hot_v2(
x, 'depth', num_classes, 'allow_out_of_range', False
)
else:
check_variable_and_dtype(
x, 'input', ['int32', 'int64'], 'one_hot_v2'
)
check_variable_and_dtype(x, 'input', ['int32', 'int64'], 'one_hot_v2')
helper = LayerHelper("one_hot_v2", **locals())
one_hot_out = helper.create_variable_for_type_inference(
dtype='float32'
)
one_hot_out = helper.create_variable_for_type_inference(dtype='float32')
if not isinstance(num_classes, Variable):
# user attribute
inputs = {'X': x}
......@@ -212,19 +203,6 @@ def embedding(x, weight, padding_idx=None, sparse=False, name=None):
if in_dygraph_mode():
return _C_ops.embedding(x, weight, padding_idx, sparse)
elif _in_legacy_dygraph():
return _legacy_C_ops.lookup_table_v2(
weight,
x,
'is_sparse',
sparse,
'is_distributed',
False,
'remote_prefetch',
False,
'padding_idx',
padding_idx,
)
else:
helper = LayerHelper('embedding', **locals())
dtype = helper.input_dtype(input_param_name='weight')
......
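
Likewise for `one_hot` and `embedding`; a minimal eager-mode example (vocabulary size and embedding dim are arbitrary):

```python
import paddle
import paddle.nn.functional as F

labels = paddle.to_tensor([1, 0, 3], dtype='int64')
onehot = F.one_hot(labels, num_classes=4)         # float32, shape [3, 4]

weight = paddle.randn([10, 16])                   # 10-word vocab, 16-dim embeddings
emb = F.embedding(labels, weight, padding_idx=0)  # shape [3, 16]
```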
This diff is collapsed.
......@@ -83,21 +83,7 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
out = _C_ops.p_norm(x, float(p), axis, epsilon, True, False)
return x / _C_ops.maximum(out, eps)
if _in_legacy_dygraph():
eps = fluid.dygraph.base.to_variable([epsilon], dtype=x.dtype)
out = _legacy_C_ops.p_norm(
x,
'axis',
axis,
'porder',
float(p),
'keepdim',
True,
'epsilon',
epsilon,
)
return x / _legacy_C_ops.elementwise_max(out, eps)
else:
check_type(p, 'p', (float, int), 'normalize')
check_type(axis, 'axis', (int), 'normalize')
check_variable_and_dtype(
......@@ -229,43 +215,7 @@ def batch_norm(
batch_norm_out, act=None
)
elif _in_legacy_dygraph():
# for dygraph need tuple
attrs = (
"momentum",
momentum,
"epsilon",
epsilon,
"is_test",
not training,
"data_layout",
data_format,
"use_mkldnn",
False,
"fuse_with_relu",
False,
"use_global_stats",
use_global_stats,
"trainable_statistics",
trainable_statistics,
)
batch_norm_out, _, _, _, _, _ = _legacy_C_ops.batch_norm(
x,
weight,
bias,
running_mean,
running_var,
None,
mean_out,
variance_out,
*attrs
)
return dygraph_utils._append_activation_in_dygraph(
batch_norm_out, act=None
)
else:
check_variable_and_dtype(
x, 'input', ['float16', 'float32', 'float64'], 'BatchNorm'
)
......@@ -483,23 +433,16 @@ def instance_norm(
if in_dygraph_mode():
out = _C_ops.instance_norm(x, weight, bias, eps)
return out
if _in_legacy_dygraph():
out, _, _ = _legacy_C_ops.instance_norm(
x,
weight,
bias,
"epsilon",
eps,
"momentum",
momentum,
"data_format",
data_format,
else:
check_variable_and_dtype(
x, 'input', ['float32', 'float64'], "InstanceNorm"
)
return out
check_variable_and_dtype(x, 'input', ['float32', 'float64'], "InstanceNorm")
attrs = {"epsilon": eps, "momentum": momentum, "data_format": data_format}
attrs = {
"epsilon": eps,
"momentum": momentum,
"data_format": data_format,
}
if weight and bias:
inputs = {"X": [x], "Scale": [weight], "Bias": [bias]}
......
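
The norm wrappers keep their documented signatures; a minimal eager-mode sketch (shapes and statistics are illustrative):

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 3, 8, 8])
xn = F.normalize(x, p=2, axis=1)  # L2-normalize along the channel axis

# Functional batch_norm takes running stats and affine params explicitly.
running_mean = paddle.zeros([3])
running_var = paddle.ones([3])
w = paddle.ones([3])
b = paddle.zeros([3])
y = F.batch_norm(x, running_mean, running_var, weight=w, bias=b, training=False)

z = F.instance_norm(x)            # weight/bias are optional here
```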
......@@ -13,12 +13,7 @@
# limitations under the License.
from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from paddle.fluid.framework import (
Variable,
_in_legacy_dygraph,
_non_static_mode,
in_dygraph_mode,
)
from paddle.fluid.framework import Variable, in_dygraph_mode
from ...fluid.data_feeder import check_type, check_variable_and_dtype
......@@ -266,34 +261,7 @@ def avg_pool1d(
)
return squeeze(output, [2])
if _in_legacy_dygraph():
output = _legacy_C_ops.pool2d(
x,
'pooling_type',
'avg',
'ksize',
kernel_size,
'global_pooling',
False,
'strides',
stride,
'paddings',
padding,
'padding_algorithm',
padding_algorithm,
'use_cudnn',
True,
'ceil_mode',
ceil_mode,
'use_mkldnn',
False,
'exclusive',
exclusive,
'data_format',
data_format,
)
return squeeze(output, [2])
else:
op_type = 'pool2d'
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype(input_param_name='x')
......@@ -397,7 +365,6 @@ def avg_pool2d(
padding, 2, channel_last, ceil_mode=ceil_mode
)
if _non_static_mode():
if in_dygraph_mode():
output = _C_ops.pool2d(
x,
......@@ -412,38 +379,12 @@ def avg_pool2d(
False,
padding_algorithm,
)
else:
output = _legacy_C_ops.pool2d(
x,
'pooling_type',
'avg',
'ksize',
kernel_size,
'global_pooling',
False,
'padding_algorithm',
padding_algorithm,
'strides',
stride,
'paddings',
padding,
'use_cudnn',
True,
'ceil_mode',
ceil_mode,
'use_mkldnn',
False,
'exclusive',
exclusive,
'data_format',
data_format,
)
if divisor_override is None:
return output
else:
_check_instance(divisor_override, "divisor_override")
return output * (kernel_size[0] * kernel_size[1]) / divisor_override
else:
op_type = 'pool2d'
helper = LayerHelper(op_type, **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'avg_pool2d')
......@@ -473,7 +414,9 @@ def avg_pool2d(
return pool_out
else:
_check_instance(divisor_override, "divisor_override")
return pool_out * (kernel_size[0] * kernel_size[1]) / divisor_override
return (
pool_out * (kernel_size[0] * kernel_size[1]) / divisor_override
)
def avg_pool3d(
......@@ -565,32 +508,6 @@ def avg_pool3d(
False,
padding_algorithm,
)
elif _in_legacy_dygraph():
pool_out = _legacy_C_ops.pool3d(
x,
'pooling_type',
'avg',
'ksize',
kernel_size,
'strides',
stride,
'paddings',
padding,
'global_pooling',
False,
'padding_algorithm',
padding_algorithm,
'use_cudnn',
True,
'ceil_mode',
ceil_mode,
'use_mkldnn',
False,
'exclusive',
exclusive,
'data_format',
data_format,
)
else:
op_type = "pool3d"
helper = LayerHelper(op_type, **locals())
......@@ -723,64 +640,7 @@ def max_pool1d(
)
return squeeze(pool_out, [2])
if _in_legacy_dygraph():
if return_mask:
pool_out = _legacy_C_ops.max_pool2d_with_index(
x,
'ksize',
kernel_size,
'global_pooling',
False,
'strides',
stride,
'paddings',
padding,
'padding_algorithm',
padding_algorithm,
'use_cudnn',
True,
'ceil_mode',
ceil_mode,
'use_mkldnn',
False,
'exclusive',
True,
'data_format',
data_format,
)
return (
(squeeze(pool_out[0], [2]), squeeze(pool_out[1], [2]))
if return_mask
else squeeze(pool_out[0], [2])
)
else:
pool_out = _legacy_C_ops.pool2d(
x,
'pooling_type',
'max',
'ksize',
kernel_size,
'global_pooling',
False,
'padding_algorithm',
padding_algorithm,
'strides',
stride,
'paddings',
padding,
'use_cudnn',
True,
'ceil_mode',
ceil_mode,
'use_mkldnn',
False,
'exclusive',
True,
'data_format',
data_format,
)
return squeeze(pool_out, [2])
op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
helper = LayerHelper(op_type, **locals())
dtype = helper.input_dtype(input_param_name='x')
......@@ -831,7 +691,7 @@ def _unpool_output_size(x, kernel_size, stride, padding, output_size):
if output_size is None:
return default_size
elif utils._contain_var(output_size):
if not _non_static_mode():
if not in_dygraph_mode():
has_static_var = True
output_size = utils._convert_to_tensor_list(output_size)
else:
......@@ -1366,60 +1226,7 @@ def max_pool2d(
padding_algorithm,
)
if _in_legacy_dygraph():
if return_mask:
output = _legacy_C_ops.max_pool2d_with_index(
x,
'ksize',
kernel_size,
'global_pooling',
False,
'strides',
stride,
'paddings',
padding,
'padding_algorithm',
padding_algorithm,
'use_cudnn',
True,
'ceil_mode',
ceil_mode,
'use_mkldnn',
False,
'exclusive',
True,
'data_format',
data_format,
)
return output if return_mask else output[0]
else:
output = _legacy_C_ops.pool2d(
x,
'pooling_type',
'max',
'ksize',
kernel_size,
'global_pooling',
False,
'padding_algorithm',
padding_algorithm,
'strides',
stride,
'paddings',
padding,
'use_cudnn',
True,
'ceil_mode',
ceil_mode,
'use_mkldnn',
False,
'exclusive',
True,
'data_format',
data_format,
)
return output
op_type = 'max_pool2d_with_index' if return_mask else "pool2d"
helper = LayerHelper(op_type, **locals())
check_variable_and_dtype(
......@@ -1580,62 +1387,7 @@ def max_pool3d(
padding_algorithm,
)
if _in_legacy_dygraph():
if return_mask:
output = _legacy_C_ops.max_pool3d_with_index(
x,
'pooling_type',
'max',
'ksize',
kernel_size,
'strides',
stride,
'paddings',
padding,
'global_pooling',
False,
'padding_algorithm',
padding_algorithm,
'use_cudnn',
True,
'ceil_mode',
ceil_mode,
'use_mkldnn',
False,
'exclusive',
True,
'data_format',
data_format,
)
return output if return_mask else output[0]
else:
output = _legacy_C_ops.pool3d(
x,
'pooling_type',
'max',
'ksize',
kernel_size,
'global_pooling',
False,
'padding_algorithm',
padding_algorithm,
'strides',
stride,
'paddings',
padding,
'use_cudnn',
True,
'ceil_mode',
ceil_mode,
'use_mkldnn',
False,
'exclusive',
True,
'data_format',
data_format,
)
return output
op_type = "max_pool3d_with_index" if return_mask else "pool3d"
helper = LayerHelper(op_type, **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'max_pool3d')
......@@ -1729,12 +1481,7 @@ def adaptive_avg_pool1d(x, output_size, name=None):
"EXPLICIT",
)
return squeeze(pool_out, [2])
if _in_legacy_dygraph():
pool_out = _legacy_C_ops.pool2d(
x, 'pooling_type', pool_type, 'ksize', pool_size, 'adaptive', True
)
return squeeze(pool_out, [2])
else:
l_type = "pool2d"
helper = LayerHelper(l_type, **locals())
......@@ -1841,7 +1588,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
if output_size[1] is None:
output_size[1] = in_w
if _non_static_mode():
if in_dygraph_mode():
output_size = [
item.numpy().item(0) if isinstance(item, Variable) else item
for item in output_size
......@@ -1866,21 +1613,7 @@ def adaptive_avg_pool2d(x, output_size, data_format='NCHW', name=None):
"EXPLICIT",
)
if _in_legacy_dygraph():
return _legacy_C_ops.pool2d(
x,
'pooling_type',
'avg',
'ksize',
output_size,
'global_pooling',
False,
'adaptive',
True,
'data_format',
data_format,
)
else:
l_type = 'pool2d'
helper = LayerHelper(l_type, **locals())
......@@ -2010,21 +1743,7 @@ def adaptive_avg_pool3d(x, output_size, data_format='NCDHW', name=None):
True,
"EXPLICIT",
)
elif _in_legacy_dygraph():
return _legacy_C_ops.pool3d(
x,
'pooling_type',
'avg',
'ksize',
output_size,
'global_pooling',
False,
'adaptive',
True,
'data_format',
data_format,
)
else:
l_type = 'pool3d'
helper = LayerHelper(l_type, **locals())
......@@ -2112,16 +1831,7 @@ def adaptive_max_pool1d(x, output_size, return_mask=False, name=None):
if return_mask
else squeeze(pool_out[0], [2])
)
if _in_legacy_dygraph():
pool_out = _legacy_C_ops.max_pool2d_with_index(
x, 'pooling_type', pool_type, 'ksize', pool_size, 'adaptive', True
)
return (
(squeeze(pool_out[0], [2]), squeeze(pool_out[1], [2]))
if return_mask
else squeeze(pool_out[0], [2])
)
else:
l_type = 'max_pool2d_with_index'
helper = LayerHelper(l_type, **locals())
......@@ -2211,12 +1921,7 @@ def adaptive_max_pool2d(x, output_size, return_mask=False, name=None):
x, output_size, [1, 1], [0, 0], False, True
)
return pool_out if return_mask else pool_out[0]
if _in_legacy_dygraph():
pool_out = _legacy_C_ops.max_pool2d_with_index(
x, 'pooling_type', 'max', 'ksize', output_size, 'adaptive', True
)
return pool_out if return_mask else pool_out[0]
else:
l_type = 'max_pool2d_with_index'
helper = LayerHelper(l_type, **locals())
......@@ -2304,18 +2009,13 @@ def adaptive_max_pool3d(x, output_size, return_mask=False, name=None):
if output_size[2] is None:
output_size[2] = in_w
if in_dynamic_mode():
if in_dygraph_mode():
# By default, strides is [1,1,1] and paddings is [0, 0, 0]
pool_out = _C_ops.max_pool3d_with_index(
x, output_size, [1, 1, 1], [0, 0, 0], False, True
)
elif _in_legacy_dygraph():
pool_out = _legacy_C_ops.max_pool3d_with_index(
x, 'pooling_type', 'max', 'ksize', output_size, 'adaptive', True
)
return pool_out if return_mask else pool_out[0]
else:
l_type = 'max_pool3d_with_index'
helper = LayerHelper(l_type, **locals())
......
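
The pooling APIs are similarly unchanged; a short eager-mode check of the main entry points touched above:

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([1, 3, 32, 32])
a = F.avg_pool2d(x, kernel_size=2, stride=2)      # [1, 3, 16, 16]
m, idx = F.max_pool2d(x, kernel_size=2, return_mask=True)
g = F.adaptive_avg_pool2d(x, output_size=[7, 7])  # [1, 3, 7, 7]
```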
......@@ -13,8 +13,7 @@
# limitations under the License.
from paddle import _C_ops, _legacy_C_ops, in_dynamic_mode
from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode
from paddle.framework import _non_static_mode
from paddle.fluid.framework import in_dygraph_mode
from ...device import get_cudnn_version, is_compiled_with_rocm
from ...fluid.data_feeder import check_variable_and_dtype
......@@ -381,20 +380,20 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
)
if in_dygraph_mode():
return _C_ops.pixel_shuffle(x, upscale_factor, data_format)
if _in_legacy_dygraph():
return _legacy_C_ops.pixel_shuffle(
x, "upscale_factor", upscale_factor, "data_format", data_format
)
else:
helper = LayerHelper("pixel_shuffle", **locals())
check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'pixel_shuffle')
check_variable_and_dtype(
x, 'x', ['float32', 'float64'], 'pixel_shuffle'
)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="pixel_shuffle",
inputs={"X": x},
outputs={"Out": out},
attrs={"upscale_factor": upscale_factor, "data_format": data_format},
attrs={
"upscale_factor": upscale_factor,
"data_format": data_format,
},
)
return out
......@@ -442,7 +441,7 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None):
"But recevie Attr(data_format): {} ".format(data_format)
)
if _non_static_mode():
if in_dygraph_mode():
return _legacy_C_ops.pixel_unshuffle(
x, "downscale_factor", downscale_factor, "data_format", data_format
)
......@@ -516,7 +515,7 @@ def channel_shuffle(x, groups, data_format="NCHW", name=None):
"But recevie Attr(data_format): {} ".format(data_format)
)
if _non_static_mode():
if in_dygraph_mode():
return _legacy_C_ops.channel_shuffle(
x, "groups", groups, "data_format", data_format
)
......
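
And for the vision ops; a minimal sketch (channel counts chosen so the factors divide evenly):

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([2, 8, 3, 3])                    # C = 2 * upscale_factor**2
up = F.pixel_shuffle(x, upscale_factor=2)         # [2, 2, 6, 6]
down = F.pixel_unshuffle(up, downscale_factor=2)  # back to [2, 8, 3, 3]
s = F.channel_shuffle(x, groups=4)                # groups must divide C
```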