Unverified commit a508e725, authored by zhiboniu, committed by GitHub

unset fluid api in nn.functional (#34114)
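Note: this change removes direct `paddle.fluid` layer calls from `paddle.nn.functional` and routes them through the public paddle 2.x API instead. A minimal sketch of the recurring substitution pattern (illustrative pairs only, not an exhaustive mapping):

```python
import paddle

x = paddle.rand([2, 3])

# fluid.layers.scale(x, scale=0.5)          -> paddle.scale
y = paddle.scale(x, scale=0.5)
# fluid.layers.elementwise_sub(x, y)        -> paddle.subtract
z = paddle.subtract(x, y)
# fluid.layers.uniform_random(shape, ...)   -> paddle.uniform
r = paddle.uniform([2, 3], dtype='float32', min=0., max=1.0)
```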

Parent 1c95631f
@@ -16,18 +16,21 @@ import warnings
import paddle
from ...fluid.framework import in_dygraph_mode, default_main_program
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.layers.tensor import Variable, fill_constant, zeros, concat
from paddle.fluid.layers.tensor import fill_constant
from ...tensor import concat
from ...tensor.creation import zeros
from paddle.static import Variable
from ...fluid.layers import core
from ...fluid import dygraph_utils
# TODO: define the common functions to build a neural network
from ...fluid.layers import unfold # noqa: F401
from ...fluid.layers import squeeze
from ...fluid.layers import unsqueeze
from ...tensor.manipulation import squeeze
from ...tensor.manipulation import unsqueeze
from ...tensor import clip
from ...tensor import sum
from ...tensor import sqrt
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
from ...fluid.framework import Variable, in_dygraph_mode, _varbase_creator
from ...fluid.framework import in_dygraph_mode, _varbase_creator
from ...fluid.framework import in_dygraph_mode
from ...fluid import core, dygraph_utils
@@ -927,9 +930,9 @@ def dropout(x,
keep_prob = 1 - p
if training:
if p == 1.:
return layers.scale(x, scale=0.)
return paddle.scale(x, scale=0.)
scale_input = layers.scale(
scale_input = paddle.scale(
x, scale=1 / keep_prob) if mode == 'upscale_in_train' else x
#get mask shape
@@ -947,17 +950,17 @@ def dropout(x,
mask_shape[i] = input_shape[i]
#get mask
random_tensor = layers.uniform_random(
random_tensor = paddle.uniform(
mask_shape, dtype='float32', min=0., max=1.0)
p = layers.fill_constant(shape=[1], dtype='float32', value=p)
keep_mask = layers.greater_equal(random_tensor, p)
keep_mask = paddle.greater_equal(random_tensor, p)
scale_input = layers.cast(scale_input, dtype)
keep_mask = layers.cast(keep_mask, dtype)
scale_input = paddle.cast(scale_input, dtype)
keep_mask = paddle.cast(keep_mask, dtype)
ret = paddle.multiply(scale_input, keep_mask, name=name)
return ret
else: # test
ret = layers.scale(
ret = paddle.scale(
x, scale=keep_prob) if mode == 'downscale_in_infer' else x
return ret
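Note: the training branch above now composes only public ops. A standalone dygraph sketch of the `upscale_in_train` path (the helper name and the scalar-`p` simplification are mine; the real function also handles `axis`, `mode`, and static graph):

```python
import paddle

def dropout_sketch(x, p=0.5):
    # upscale_in_train: kept activations are rescaled by 1/(1-p) at train time
    if p == 1.:
        return paddle.scale(x, scale=0.)
    scaled = paddle.scale(x, scale=1 / (1 - p))
    rand = paddle.uniform(x.shape, dtype='float32', min=0., max=1.0)
    p_t = paddle.full([1], p, dtype='float32')
    keep_mask = paddle.cast(paddle.greater_equal(rand, p_t), x.dtype)
    return paddle.multiply(scaled, keep_mask)
```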
@@ -1113,7 +1116,7 @@ def alpha_dropout(x, p=0.5, training=True, name=None):
if training:
if p == 1:
return layers.scale(x, scale=0.)
return paddle.scale(x, scale=0.)
#get transformation params
alpha = 1.6732632423543772848170429916717
scale = 1.0507009873554804934193349852946
@@ -1125,23 +1128,22 @@ def alpha_dropout(x, p=0.5, training=True, name=None):
input_shape = x.shape
#get mask
random_tensor = layers.uniform_random(
random_tensor = paddle.uniform(
input_shape, dtype='float32', min=0., max=1.0)
p = layers.fill_constant(shape=[1], dtype='float32', value=p)
keep_mask = layers.greater_equal(random_tensor, p)
keep_mask = layers.cast(keep_mask, dtype)
drop_mask = layers.elementwise_sub(
keep_mask = paddle.greater_equal(random_tensor, p)
keep_mask = paddle.cast(keep_mask, dtype)
drop_mask = paddle.subtract(
layers.fill_constant(
shape=input_shape, dtype=dtype, value=1.),
keep_mask)
#apply mask
b = layers.fill_constant(shape=[1], dtype=dtype, value=b)
y = layers.elementwise_add(
paddle.multiply(x, keep_mask),
layers.scale(
y = paddle.add(paddle.multiply(x, keep_mask),
paddle.scale(
drop_mask, scale=alpha_p))
res = layers.elementwise_add(layers.scale(y, scale=a), b, name=name)
res = paddle.add(paddle.scale(y, scale=a), b, name=name)
return res
else: # test
return x
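Note: alpha dropout replaces dropped units with `alpha_p` and then applies an affine correction `a*y + b` so the output stays zero-mean/unit-variance under SELU. A dygraph sketch; the constants are copied from the diff, while the `a`/`b` formulas follow the standard SELU derivation and are my assumption about the elided lines:

```python
import paddle

def alpha_dropout_sketch(x, p=0.5):
    alpha = 1.6732632423543772848170429916717
    scale = 1.0507009873554804934193349852946
    alpha_p = -alpha * scale                        # value dropped units take
    a = ((1 - p) * (1 + p * alpha_p ** 2)) ** -0.5  # variance correction
    b = -a * alpha_p * p                            # mean correction
    rand = paddle.uniform(x.shape, dtype='float32', min=0., max=1.0)
    keep = paddle.cast(paddle.greater_equal(rand, paddle.full([1], p)), x.dtype)
    y = paddle.multiply(x, keep) + alpha_p * (1 - keep)
    return a * y + b
```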
@@ -1277,42 +1279,42 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
if x_dim == 3:
pad = concat([zeros((4, ), dtype="int32"), pad], axis=0)
unsqueezed_dim = [3, 4]
x = unsqueeze(x, axes=unsqueezed_dim)
x = unsqueeze(x, axis=unsqueezed_dim)
elif x_dim == 4:
pad = concat([pad, zeros((2, ), dtype="int32")], axis=0)
unsqueezed_dim = [2]
x = unsqueeze(x, axes=unsqueezed_dim)
x = unsqueeze(x, axis=unsqueezed_dim)
elif data_format in ["NLC", "NHWC", "NDHWC"]:
data_format = "NDHWC"
if x_dim == 3:
pad = concat([zeros((4, ), dtype="int32"), pad], axis=0)
unsqueezed_dim = [2, 3]
x = unsqueeze(x, axes=unsqueezed_dim)
x = unsqueeze(x, axis=unsqueezed_dim)
elif x_dim == 4:
pad = concat([pad, zeros((2, ), dtype="int32")], axis=0)
unsqueezed_dim = [1]
x = unsqueeze(x, axes=unsqueezed_dim)
x = unsqueeze(x, axis=unsqueezed_dim)
else:
if data_format in ["NCL", "NCHW", "NCDHW"]:
data_format = "NCDHW"
if x_dim == 3:
pad = [0, 0, 0, 0] + pad
unsqueezed_dim = [3, 4]
x = unsqueeze(x, axes=unsqueezed_dim)
x = unsqueeze(x, axis=unsqueezed_dim)
elif x_dim == 4:
pad = pad + [0, 0]
unsqueezed_dim = [2]
x = unsqueeze(x, axes=unsqueezed_dim)
x = unsqueeze(x, axis=unsqueezed_dim)
elif data_format in ["NLC", "NHWC", "NDHWC"]:
data_format = "NDHWC"
if x_dim == 3:
pad = [0, 0, 0, 0] + pad
unsqueezed_dim = [2, 3]
x = unsqueeze(x, axes=unsqueezed_dim)
x = unsqueeze(x, axis=unsqueezed_dim)
elif x_dim == 4:
pad = pad + [0, 0]
unsqueezed_dim = [1]
x = unsqueeze(x, axes=unsqueezed_dim)
x = unsqueeze(x, axis=unsqueezed_dim)
if in_dygraph_mode():
if isinstance(pad, Variable):
@@ -1336,7 +1338,7 @@ def pad(x, pad, mode='constant', value=0, data_format="NCHW", name=None):
type='pad3d', inputs=inputs, outputs={"Out": out}, attrs=attrs)
if len(unsqueezed_dim) != 0:
out = squeeze(out, axes=unsqueezed_dim)
out = squeeze(out, axis=unsqueezed_dim)
return out
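Note: the `axes=` keyword belongs to the old `fluid.layers` squeeze/unsqueeze; the `paddle.tensor.manipulation` versions imported above take `axis=` instead. For illustration:

```python
import paddle

x = paddle.rand([2, 3])
y = paddle.unsqueeze(x, axis=[0, 3])   # shape [1, 2, 3, 1]
z = paddle.squeeze(y, axis=[0, 3])     # back to [2, 3]
```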
......
@@ -16,13 +16,17 @@ from paddle.fluid.framework import _global_flags
import numpy as np
from ...device import get_cudnn_version
from ...fluid.framework import Variable, in_dygraph_mode
from ...fluid.framework import in_dygraph_mode
from ...static import Variable
from ...fluid import core, dygraph_utils, get_flags
from ...fluid.layers import nn, utils
from ...fluid.layers.utils import convert_to_list, _is_symmetric_padding
from ...fluid.data_feeder import check_variable_and_dtype
from ...fluid.param_attr import ParamAttr
from ...framework import ParamAttr
from ...fluid.layer_helper import LayerHelper
from paddle import _C_ops
from ...tensor.manipulation import unsqueeze, squeeze
from ...tensor.math import add
from ...fluid.layers import nn
__all__ = []
@@ -69,24 +73,24 @@ def _update_padding_nd(padding, channel_last, num_dims):
padding_algorithm = "EXPLICIT"
padding = _exclude_padding_in_batch_and_channel(padding,
channel_last)
if utils._is_symmetric_padding(padding, num_dims):
if _is_symmetric_padding(padding, num_dims):
padding = padding[0::2]
# for padding like [pad_before, pad_after, pad_before, pad_after, ...]
elif len(padding) == 2 * num_dims and isinstance(padding[0], int):
padding_algorithm = "EXPLICIT"
padding = utils.convert_to_list(padding, 2 * num_dims, 'padding')
if utils._is_symmetric_padding(padding, num_dims):
padding = convert_to_list(padding, 2 * num_dims, 'padding')
if _is_symmetric_padding(padding, num_dims):
padding = padding[0::2]
# for padding like [pad_d1, pad_d2, ...]
elif len(padding) == num_dims and isinstance(padding[0], int):
padding_algorithm = "EXPLICIT"
padding = utils.convert_to_list(padding, num_dims, 'padding')
padding = convert_to_list(padding, num_dims, 'padding')
else:
raise ValueError("In valid padding: {}".format(padding))
# for integer padding
else:
padding_algorithm = "EXPLICIT"
padding = utils.convert_to_list(padding, num_dims, 'padding')
padding = convert_to_list(padding, num_dims, 'padding')
if not all([p >= 0 for p in padding]):
raise ValueError(
"Invalid padding, all value should be larger than or equal to 0, but received: {}".
@@ -323,8 +327,8 @@ def conv1d(x,
"The size of padding's dimension should be 1 or 2. But got padding={}".
format(padding))
stride = utils.convert_to_list(stride, 1, 'stride') + [1]
dilation = utils.convert_to_list(dilation, 1, 'dilation') + [1]
stride = convert_to_list(stride, 1, 'stride') + [1]
dilation = convert_to_list(dilation, 1, 'dilation') + [1]
l_type = "conv2d"
if (num_channels == groups and num_channels != 1 and
@@ -333,8 +337,8 @@ def conv1d(x,
use_cudnn = False
squeeze_aixs = -2 if channel_last else -1
x = nn.unsqueeze(input=x, axes=[squeeze_aixs])
weight = nn.unsqueeze(input=weight, axes=[-1])
x = unsqueeze(x, axis=[squeeze_aixs])
weight = unsqueeze(weight, axis=[-1])
if in_dygraph_mode():
attrs = ('strides', stride, 'paddings', padding, 'dilations', dilation,
'groups', groups, 'use_cudnn', use_cudnn, 'use_mkldnn', False,
@@ -366,7 +370,7 @@ def conv1d(x,
type=l_type, inputs=inputs, outputs=outputs, attrs=attrs)
if bias is not None:
out = nn.elementwise_add(out, bias, axis=channel_dim)
out = nn.squeeze(input=out, axes=[squeeze_aixs])
out = squeeze(out, axis=[squeeze_aixs])
return out
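Note: as the unsqueeze/squeeze pair above shows, `conv1d` is lowered to a `conv2d` over a dummy spatial dim. A rough equivalence check (shapes, the NCL layout, and the no-bias setup are mine):

```python
import paddle
import paddle.nn.functional as F

x = paddle.rand([4, 3, 16])   # N, C, L
w = paddle.rand([8, 3, 5])    # out_C, in_C, K
y1 = F.conv1d(x, w, padding=2)
y2 = paddle.squeeze(
    F.conv2d(paddle.unsqueeze(x, axis=-1),   # NCL -> NCHW with W=1
             paddle.unsqueeze(w, axis=-1),   # add a trailing kernel dim
             padding=[2, 0]),
    axis=-1)
# y1 and y2 should match up to floating-point noise
```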
@@ -530,8 +534,8 @@ def conv2d(x,
# update attrs
padding, padding_algorithm = _update_padding_nd(padding, channel_last, 2)
stride = utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
stride = convert_to_list(stride, 2, 'stride')
dilation = convert_to_list(dilation, 2, 'dilation')
l_type = "conv2d"
if (num_channels == groups and num_channels != 1 and
@@ -730,8 +734,8 @@ def conv1d_transpose(x,
"The size of padding's dimension should 1 or 2. But got padding={}".
format(padding))
stride = utils.convert_to_list(stride, 1, 'stride') + [1]
dilation = utils.convert_to_list(dilation, 1, 'dilation') + [1]
stride = convert_to_list(stride, 1, 'stride') + [1]
dilation = convert_to_list(dilation, 1, 'dilation') + [1]
if output_size is None:
output_size = []
@@ -740,8 +744,7 @@ def conv1d_transpose(x,
raise ValueError('output_padding option is mutually exclusive with '
'output_size')
if isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 1,
'output_size') + [1]
output_size = convert_to_list(output_size, 1, 'output_size') + [1]
else:
raise ValueError(
"output_size should be int, or list, tuple of ints")
@@ -749,7 +752,7 @@ def conv1d_transpose(x,
if output_padding == 0:
output_padding = []
else:
output_padding = utils.convert_to_list(output_padding, 1,
output_padding = convert_to_list(output_padding, 1,
'output_padding') + [0]
if len(output_padding) > 0 and output_padding[0] > stride[0]:
@@ -768,8 +771,8 @@ def conv1d_transpose(x,
squeeze_axis = -2 if channel_last else -1
conv2d_data_format = "NHWC" if channel_last else "NCHW"
x = nn.unsqueeze(input=x, axes=[squeeze_axis])
weight = nn.unsqueeze(input=weight, axes=[-1])
x = unsqueeze(x, axis=[squeeze_axis])
weight = unsqueeze(weight, axis=[-1])
if in_dygraph_mode():
attrs = ('output_padding', output_padding, 'output_size', output_size,
@@ -803,7 +806,7 @@ def conv1d_transpose(x,
if bias is not None:
out = nn.elementwise_add(out, bias, axis=channel_dim)
out = nn.squeeze(input=out, axes=[squeeze_axis])
out = squeeze(out, axis=[squeeze_axis])
return out
@@ -979,8 +982,8 @@ def conv2d_transpose(x,
# update attrs
padding, padding_algorithm = _update_padding_nd(padding, channel_last, 2)
stride = utils.convert_to_list(stride, 2, 'stride')
dilation = utils.convert_to_list(dilation, 2, 'dilation')
stride = convert_to_list(stride, 2, 'stride')
dilation = convert_to_list(dilation, 2, 'dilation')
if output_size is None:
output_size = []
@@ -989,7 +992,7 @@ def conv2d_transpose(x,
raise ValueError('output_padding option is mutually exclusive with '
'output_size')
if isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 2, 'output_size')
output_size = convert_to_list(output_size, 2, 'output_size')
else:
raise ValueError(
"output_size should be int, or list, tuple of ints")
@@ -997,8 +1000,7 @@ def conv2d_transpose(x,
if output_padding == 0:
output_padding = []
else:
output_padding = utils.convert_to_list(output_padding, 2,
'output_padding')
output_padding = convert_to_list(output_padding, 2, 'output_padding')
op_type = 'conv2d_transpose'
num_filters = weight.shape[1]
@@ -1187,8 +1189,8 @@ def conv3d(x,
cudnn_version is not None) else False
padding, padding_algorithm = _update_padding_nd(padding, channel_last, 3)
stride = utils.convert_to_list(stride, 3, 'stride')
dilation = utils.convert_to_list(dilation, 3, 'dilation')
stride = convert_to_list(stride, 3, 'stride')
dilation = convert_to_list(dilation, 3, 'dilation')
op_type = "conv3d"
return _conv_nd(x, weight, bias, stride, padding, padding_algorithm,
@@ -1369,8 +1371,8 @@ def conv3d_transpose(x,
groups))
padding, padding_algorithm = _update_padding_nd(padding, channel_last, 3)
stride = utils.convert_to_list(stride, 3, 'stride')
dilation = utils.convert_to_list(dilation, 3, 'dilation')
stride = convert_to_list(stride, 3, 'stride')
dilation = convert_to_list(dilation, 3, 'dilation')
if output_size is None:
output_size = []
else:
@@ -1378,7 +1380,7 @@ def conv3d_transpose(x,
raise ValueError('output_padding option is mutually exclusive with '
'output_size')
if isinstance(output_size, (list, tuple, int)):
output_size = utils.convert_to_list(output_size, 3, 'output_size')
output_size = convert_to_list(output_size, 3, 'output_size')
else:
raise ValueError(
"output_size should be int, or list, tuple of ints")
@@ -1386,8 +1388,7 @@ def conv3d_transpose(x,
if output_padding == 0:
output_padding = []
else:
output_padding = utils.convert_to_list(output_padding, 3,
'output_padding')
output_padding = convert_to_list(output_padding, 3, 'output_padding')
cudnn_version = get_cudnn_version()
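Note: `convert_to_list`, now imported at module top instead of being reached through `utils.`, normalizes an int-or-sequence argument to a fixed-length list:

```python
from paddle.fluid.layers.utils import convert_to_list

convert_to_list(3, 2, 'stride')        # -> [3, 3]
convert_to_list((2, 1), 2, 'stride')   # -> [2, 1]
```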
......
@@ -17,8 +17,9 @@
import numpy as np
from ...fluid.data_feeder import check_dtype
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import Variable, in_dygraph_mode
from ...fluid.layers.tensor import assign
from ...fluid.framework import in_dygraph_mode
from ...static import Variable
from ...tensor.creation import assign
from ...fluid import core, dygraph_utils
from ...fluid.layers.layer_function_generator import templatedoc
from ...fluid.layers.sequence_lod import sequence_mask
......
@@ -14,7 +14,8 @@
from __future__ import print_function
import warnings
from ...fluid.framework import Variable, in_dygraph_mode
from ...fluid.framework import in_dygraph_mode
from ...static import Variable
from ...fluid.layer_helper import LayerHelper
from ...fluid.layers import core
from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
......
@@ -27,7 +27,7 @@ from ...fluid.layers.nn import _elementwise_op_in_dygraph
from ...fluid.layers import dice_loss # noqa: F401
from ...fluid.layers import log_loss # noqa: F401
from ...fluid.layers import npair_loss # noqa: F401
from ...fluid.layers import reshape
from ...tensor.manipulation import reshape
from ...fluid.layers import softmax_with_cross_entropy as fluid_softmax_with_cross_entropy
from ...fluid.layers import square_error_cost # noqa: F401
@@ -36,7 +36,7 @@ from ...fluid.layers import huber_loss
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import in_dygraph_mode
from ...fluid.framework import _varbase_creator
from ...fluid.framework import Variable
from ...static import Variable
from paddle.utils import deprecated
from paddle import _C_ops
@@ -291,9 +291,7 @@ def binary_cross_entropy_with_logits(logit,
pos_weight, 'pos_weight', ['float32', 'float64'],
'binary_cross_entropy_with_logits')
log_weight = paddle.add(
paddle.multiply(
label, paddle.fluid.layers.elementwise_sub(pos_weight, one)),
one)
paddle.multiply(label, paddle.subtract(pos_weight, one)), one)
pos_weight_name = name if reduction == 'none' and weight is None else None
out = paddle.multiply(out, log_weight, name=pos_weight_name)
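Note: the rewritten expression computes `log_weight = 1 + label * (pos_weight - 1)` using public ops. A small dygraph check (the example values are mine):

```python
import paddle

label = paddle.to_tensor([0., 1., 1.])
pos_weight = paddle.to_tensor([2., 2., 2.])
one = paddle.ones_like(label)
log_weight = paddle.add(
    paddle.multiply(label, paddle.subtract(pos_weight, one)), one)
# -> [1., 2., 2.]: positive labels get weighted by pos_weight
```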
@@ -515,9 +513,9 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None):
if reduction == 'none':
return out
elif reduction == 'mean':
return fluid.layers.reduce_mean(out)
return paddle.mean(out)
elif reduction == 'sum':
return fluid.layers.reduce_sum(out)
return paddle.sum(out)
def margin_ranking_loss(input,
@@ -592,7 +590,7 @@ def margin_ranking_loss(input,
fluid.data_feeder.check_variable_and_dtype(
label, 'label', ['float32', 'float64'], 'margin_rank_loss')
out = paddle.fluid.layers.elementwise_sub(other, input)
out = paddle.subtract(other, input)
out = paddle.multiply(out, label)
if margin != 0.0:
@@ -898,11 +896,11 @@ def kl_div(input, label, reduction='mean', name=None):
if fluid.data_feeder.convert_dtype(
input.dtype) == 'float32' and fluid.data_feeder.convert_dtype(
label.dtype) == 'float64':
input = fluid.layers.cast(input, 'float64')
input = paddle.cast(input, 'float64')
elif fluid.data_feeder.convert_dtype(
input.dtype) == 'float64' and fluid.data_feeder.convert_dtype(
label.dtype) == 'float32':
label = fluid.layers.cast(label, 'float64')
label = paddle.cast(label, 'float64')
if paddle.in_dynamic_mode():
out = _C_ops.kldiv_loss(input, label, 'reduction', reduction)
@@ -988,16 +986,12 @@ def mse_loss(input, label, reduction='mean', name=None):
label, 'label', ['float32', 'float64'], 'mse_loss')
if reduction == 'none':
return paddle.fluid.layers.square(
paddle.fluid.layers.elementwise_sub(input, label), name=name)
return paddle.square(paddle.subtract(input, label), name=name)
elif reduction == 'mean':
return paddle.mean(
paddle.fluid.layers.square(
paddle.fluid.layers.elementwise_sub(input, label)),
name=name)
paddle.square(paddle.subtract(input, label)), name=name)
else:
return paddle.sum(paddle.fluid.layers.square(
paddle.fluid.layers.elementwise_sub(input, label)),
return paddle.sum(paddle.square(paddle.subtract(input, label)),
name=name)
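Note: after the rewrite the whole body of `mse_loss` reduces to public ops; a condensed sketch of the three reduction branches above:

```python
import paddle

def mse_loss_sketch(input, label, reduction='mean'):
    sq = paddle.square(paddle.subtract(input, label))
    if reduction == 'none':
        return sq
    return paddle.mean(sq) if reduction == 'mean' else paddle.sum(sq)
```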
......
@@ -19,8 +19,8 @@ from ...fluid.data_feeder import check_variable_and_dtype, check_type
from ...fluid.layer_helper import LayerHelper
from ...fluid.framework import in_dygraph_mode, core
from ...framework import create_parameter
from ...fluid.initializer import Constant
from ...fluid.param_attr import ParamAttr
from ..initializer import Constant
from ...framework import ParamAttr
from ...fluid import core, dygraph_utils
import numbers
from paddle import _C_ops
@@ -104,8 +104,7 @@ def normalize(x, p=2, axis=1, epsilon=1e-12, name=None):
type='p_norm', inputs={'X': x}, outputs={'Out': out}, attrs=attrs)
eps = out.block.create_var(dtype=out.dtype)
paddle.fluid.layers.fill_constant([1], out.dtype, epsilon, out=eps)
return paddle.fluid.layers.elementwise_div(
x, paddle.maximum(out, eps), name=name)
return paddle.divide(x, paddle.maximum(out, eps), name=name)
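Note: the tail of `normalize` clamps the computed p-norm away from zero before dividing. A dygraph sketch of the same math, assuming `paddle.norm` stands in for the static-graph `p_norm` op used above:

```python
import paddle

x = paddle.rand([4, 8])
out = paddle.norm(x, p=2, axis=1, keepdim=True)   # per-row 2-norm
eps = paddle.full([1], 1e-12, dtype=out.dtype)
y = paddle.divide(x, paddle.maximum(out, eps))    # avoid division by zero
```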
def batch_norm(x,
......
@@ -15,7 +15,8 @@
# TODO: define pooling functions
from ...fluid import core
from ...fluid.framework import in_dygraph_mode
from ...fluid.layers import utils, LayerHelper, unsqueeze, squeeze
from ...fluid.layers import utils, LayerHelper
from ...tensor.manipulation import unsqueeze, squeeze
from ...fluid.data_feeder import check_type, check_variable_and_dtype
from paddle import _C_ops
......
@@ -13,7 +13,8 @@
# limitations under the License.
from ...device import get_cudnn_version
from ...fluid.framework import core, in_dygraph_mode, Variable
from ...fluid.framework import core, in_dygraph_mode
from ...static import Variable
from ...fluid.layer_helper import LayerHelper
from ...fluid.data_feeder import check_variable_and_dtype
from ...fluid import dygraph_utils
......