Unverified commit 802be98b, authored by LoneRanger, committed by GitHub

relocate paddle/fluid/contrib/layers (#52820)

* relocate metric_op.py

* relocate nn.py

* fix bug

* fix bug

* fix bug

* fix bug

* fix bug

* fix bug

* fix variable->tensor and fix __all__

* fix ctr_metric_bundle and sparse_embedding

* fix bug of function init

* fix bug of importing sparse_embedding and ctr_metric_bundle

* fix bug

* Update __init__.py
Parent cbfd43e4
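At a glance, call sites migrate as sketched below (the new import paths are taken from the hunks in this commit; the bound names are only illustrative):

# Before this commit:
#     from paddle.fluid.contrib.layers import shuffle_batch, sparse_embedding, ctr_metric_bundle
# After this commit:
from paddle.incubate.layers.nn import shuffle_batch   # most contrib nn layers
from paddle.static.nn import sparse_embedding         # large-scale sparse embedding
from paddle.static import ctr_metric_bundle           # CTR metric bundle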
@@ -57,7 +57,7 @@ void TDMChildInner(const framework::ExecutionContext &context,
           input_data[input_ids],
           node_nums,
           platform::errors::InvalidArgument(
-              "input id of OP(fluid.contrib.layers.tdm_child) "
+              "input id of OP(paddle.incubate.layers.tdm_child) "
               "expected >= 0 and < %ld, but got %ld. Please check input "
               "value.",
               node_nums,
@@ -66,7 +66,7 @@ void TDMChildInner(const framework::ExecutionContext &context,
           0,
           input_data[input_ids],
           platform::errors::InvalidArgument(
-              "input id of OP(fluid.contrib.layers.tdm_child) "
+              "input id of OP(paddle.incubate.layers.tdm_child) "
               "expected >= 0 and < %ld, but got %ld. Please check input "
               "value.",
               node_nums,
......
@@ -22,8 +22,7 @@ from . import extend_optimizer
 from .extend_optimizer import *
 from . import model_stat
 from .model_stat import *
-from . import layers
-from .layers import *
 from . import optimizer
 from .optimizer import *
@@ -32,5 +31,4 @@ __all__ = []
 __all__ += memory_usage_calc.__all__
 __all__ += op_frequence.__all__
 __all__ += extend_optimizer.__all__
-__all__ += layers.__all__
 __all__ += optimizer.__all__
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contrib layers just related to metric.
"""
import paddle
from paddle.fluid.layer_helper import LayerHelper
__all__ = ['ctr_metric_bundle']
def ctr_metric_bundle(input, label, ins_tag_weight=None):
"""
CTR-related metric layer.
This function helps compute the CTR-related metrics: RMSE, MAE, predicted_ctr, q_value.
To compute the final values of these metrics, do the following computations using the
total instance number:
MAE = local_abserr / instance number
RMSE = sqrt(local_sqrerr / instance number)
predicted_ctr = local_prob / instance number
q = local_q / instance number
Note that in a distributed job, these metrics and the instance number should be
all-reduced first.
Args:
input(Tensor): A floating-point 2D Tensor, values are in the range
[0, 1]. Each row is sorted in descending order. This
input should be the output of topk. Typically, this
Tensor indicates the probability of each label.
label(Tensor): A 2D int Tensor indicating the label of the training
data. The height is batch size and width is always 1.
ins_tag_weight(Tensor): A 2D int Tensor indicating the ins_tag_weight of the training
data. 1 means real data, 0 means fake data.
A LoDTensor or Tensor with type float32 or float64.
Returns:
local_sqrerr(Tensor): Local sum of squared error
local_abserr(Tensor): Local sum of abs error
local_prob(Tensor): Local sum of predicted ctr
local_q(Tensor): Local sum of q value
Example 1:
.. code-block:: python
import paddle
paddle.enable_static()
data = paddle.static.data(name="data", shape=[32, 32], dtype="float32")
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int32")
predict = paddle.nn.functional.sigmoid(paddle.static.nn.fc(x=data, size=1))
auc_out = paddle.static.ctr_metric_bundle(input=predict, label=label)
Example 2:
.. code-block:: python
import paddle
paddle.enable_static()
data = paddle.static.data(name="data", shape=[32, 32], dtype="float32")
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int32")
predict = paddle.nn.functional.sigmoid(paddle.static.nn.fc(x=data, size=1))
ins_tag_weight = paddle.static.data(name='ins_tag', shape=[-1,16], lod_level=0, dtype='int64')
auc_out = paddle.static.ctr_metric_bundle(input=predict, label=label, ins_tag_weight=ins_tag_weight)
"""
if ins_tag_weight is None:
ins_tag_weight = paddle.tensor.fill_constant(
shape=[1, 1], dtype="float32", value=1.0
)
assert input.shape == label.shape
helper = LayerHelper("ctr_metric_bundle", **locals())
local_abserr = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1]
)
local_sqrerr = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1]
)
local_prob = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1]
)
local_q = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1]
)
local_pos_num = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1]
)
local_ins_num = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1]
)
tmp_res_elesub = helper.create_global_variable(
persistable=False, dtype='float32', shape=[-1]
)
tmp_res_sigmoid = helper.create_global_variable(
persistable=False, dtype='float32', shape=[-1]
)
tmp_ones = helper.create_global_variable(
persistable=False, dtype='float32', shape=[-1]
)
batch_prob = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1]
)
batch_abserr = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1]
)
batch_sqrerr = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1]
)
batch_q = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1]
)
batch_pos_num = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1]
)
batch_ins_num = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1]
)
for var in [
local_abserr,
batch_abserr,
local_sqrerr,
batch_sqrerr,
local_prob,
batch_prob,
local_q,
batch_q,
batch_pos_num,
batch_ins_num,
local_pos_num,
local_ins_num,
]:
helper.set_variable_initializer(
var,
paddle.nn.initializer.ConstantInitializer(
value=0.0, force_cpu=True
),
)
helper.append_op(
type="elementwise_sub",
inputs={"X": [input], "Y": [label]},
outputs={"Out": [tmp_res_elesub]},
)
helper.append_op(
type="squared_l2_norm",
inputs={"X": [tmp_res_elesub]},
outputs={"Out": [batch_sqrerr]},
)
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_sqrerr], "Y": [local_sqrerr]},
outputs={"Out": [local_sqrerr]},
)
helper.append_op(
type="l1_norm",
inputs={"X": [tmp_res_elesub]},
outputs={"Out": [batch_abserr]},
)
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_abserr], "Y": [local_abserr]},
outputs={"Out": [local_abserr]},
)
helper.append_op(
type="reduce_sum", inputs={"X": [input]}, outputs={"Out": [batch_prob]}
)
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_prob], "Y": [local_prob]},
outputs={"Out": [local_prob]},
)
helper.append_op(
type="sigmoid",
inputs={"X": [input]},
outputs={"Out": [tmp_res_sigmoid]},
)
helper.append_op(
type="reduce_sum",
inputs={"X": [tmp_res_sigmoid]},
outputs={"Out": [batch_q]},
)
helper.append_op(
type="reduce_sum",
inputs={"X": [label]},
outputs={"Out": [batch_pos_num]},
)
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_pos_num], "Y": [local_pos_num]},
outputs={"Out": [local_pos_num]},
)
helper.append_op(
type='fill_constant_batch_size_like',
inputs={"Input": label},
outputs={'Out': [tmp_ones]},
attrs={
'shape': [-1, 1],
'dtype': tmp_ones.dtype,
'value': float(1.0),
},
)
helper.append_op(
type="reduce_sum",
inputs={"X": [tmp_ones]},
outputs={"Out": [batch_ins_num]},
)
# if data is fake, return 0
inputs_slice = {'Input': ins_tag_weight}
attrs = {'axes': [0]}
attrs['starts'] = [0]
attrs['ends'] = [1]
helper.append_op(
type="slice",
inputs=inputs_slice,
attrs=attrs,
outputs={"Out": ins_tag_weight},
)
axis = helper.kwargs.get('axis', 0)
helper.append_op(
type="elementwise_mul",
inputs={"X": [batch_ins_num], "Y": [ins_tag_weight]},
outputs={"Out": [batch_ins_num]},
attrs={'axis': axis},
)
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_ins_num], "Y": [local_ins_num]},
outputs={"Out": [local_ins_num]},
)
helper.append_op(
type="elementwise_mul",
inputs={"X": [batch_q], "Y": [ins_tag_weight]},
outputs={"Out": [batch_q]},
attrs={'axis': axis},
)
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_q], "Y": [local_q]},
outputs={"Out": [local_q]},
)
return (
local_sqrerr,
local_abserr,
local_prob,
local_q,
local_pos_num,
local_ins_num,
)
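For reference, a minimal sketch (not part of this commit) of turning the returned accumulators into the final metrics from the docstring's formulas, assuming the six values have been fetched as NumPy scalars and, in a distributed job, all-reduced first; the helper name is illustrative:

import numpy as np

def final_ctr_metrics(sqrerr, abserr, prob, q, ins_num):
    # Combine the local sums returned by ctr_metric_bundle into the
    # final metrics, following the formulas in the docstring above.
    ins_num = float(ins_num)
    mae = float(abserr) / ins_num             # MAE = local_abserr / instance number
    rmse = np.sqrt(float(sqrerr) / ins_num)   # RMSE = sqrt(local_sqrerr / instance number)
    predicted_ctr = float(prob) / ins_num     # predicted_ctr = local_prob / instance number
    q_value = float(q) / ins_num              # q = local_q / instance number
    return mae, rmse, predicted_ctr, q_value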
@@ -197,7 +197,7 @@ def embedding(
     if is_distributed:
         is_distributed = False
         warnings.warn(
-            "is_distributed is go out of use, `fluid.contrib.layers.sparse_embedding` is your needed"
+            "is_distributed is deprecated, please use `paddle.static.nn.sparse_embedding` instead"
         )
     remote_prefetch = True if is_sparse else False
@@ -227,128 +227,6 @@ def embedding(
     return tmp
def _pull_gpups_sparse(
input, size, dtype='float32', is_distributed=False, is_sparse=False
):
r"""
**Pull GpuPS Sparse Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
GpuPS lookup table. The result of this lookup is the embedding of each ID in the
:attr:`input`.
Args:
input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
contains the IDs information.
size(int|list of int): The embedding size parameter of each input, which indicates the size of
each embedding vector respectively.
dtype(str): The dtype refers to the data type of output tensor. Only supports
float32 now.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs, whose sizes are indicated by :attr:`size` respectively.
Examples:
.. code-block:: python
import paddle
from paddle.incubate.layers.nn import _pull_gpups_sparse
slots = []
data_1 = paddle.static.data(name='sequence1', shape=[-1,1], dtype='int64', lod_level=1)
slots.append(data_1)
data_2 = paddle.static.data(name='sequence2', shape=[-1,1], dtype='int64', lod_level=1)
slots.append(data_2)
embs = _pull_gpups_sparse(input=slots, size=[11, 35])
"""
helper = LayerHelper('pull_gpups_sparse', **locals())
if dtype != 'float32':
raise ValueError(
"GpuPS only support float type embedding now, and your type is: "
+ dtype
)
helper.input_dtype()
inputs = helper.multiple_input()
outs = [
helper.create_variable_for_type_inference(dtype)
for i in range(len(inputs))
]
w = helper.create_parameter(
attr=helper.param_attr, shape=[size[0]], dtype=dtype, is_bias=False
)
helper.append_op(
type='pull_gpups_sparse',
inputs={'Ids': inputs, 'W': w},
outputs={'Out': outs},
attrs={
'size': size,
'is_distributed': is_distributed,
'is_sparse': is_sparse,
},
)
if len(outs) == 1:
return outs[0]
return outs
def _pull_box_sparse(
input, size, dtype='float32', is_distributed=False, is_sparse=False
):
r"""
**Pull Box Sparse Layer**
This layer is used to lookup embeddings of IDs, provided by :attr:`input`, in
BoxPS lookup table. The result of this lookup is the embedding of each ID in the
:attr:`input`.
Args:
input(Variable|list of Variable): Input is a Tensor<int64> Variable, which
contains the IDs information.
size(int): The embedding size parameter, which indicates the size of
each embedding vector.
dtype(str): The dtype refers to the data type of output tensor. Only supports
float32 now.
Returns:
Variable|list of Variable: The tensor variable storing the embeddings of the \
supplied inputs.
Examples:
.. code-block:: python
import paddle
from paddle.incubate.layers.nn import _pull_box_sparse
data = paddle.static.data(name='sequence', shape=[-1,1], dtype='int64', lod_level=1)
emb = _pull_box_sparse(input=data, size=11)
"""
helper = LayerHelper('pull_box_sparse', **locals())
if dtype != 'float32':
raise ValueError(
"BoxPS only support float type embedding now, and your type is: "
+ dtype
)
helper.input_dtype()
inputs = helper.multiple_input()
outs = [
helper.create_variable_for_type_inference(dtype)
for i in range(len(inputs))
]
w = helper.create_parameter(
attr=helper.param_attr, shape=[size], dtype=dtype, is_bias=False
)
helper.append_op(
type='pull_box_sparse',
inputs={'Ids': inputs, 'W': w},
outputs={'Out': outs},
attrs={
'size': size,
'is_distributed': is_distributed,
'is_sparse': is_sparse,
},
)
if len(outs) == 1:
return outs[0]
return outs
def autoincreased_step_counter(counter_name=None, begin=1, step=1):
    """
    :api_attr: Static Graph
......
@@ -96,7 +96,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
         entry = paddle.distributed.ShowClickEntry("show", "click")
         dnn_layer_dims = [128, 64, 32]
-        dnn_embedding = fluid.contrib.layers.sparse_embedding(
+        dnn_embedding = paddle.static.nn.sparse_embedding(
             input=dnn_data,
             size=[dnn_input_dim, dnn_layer_dims[0]],
             is_test=inference,
@@ -120,7 +120,7 @@ class TestDistCTR2x2(FleetDistRunnerBase):
         dnn_out = fc
         # build lr model
-        lr_embbding = fluid.contrib.layers.sparse_embedding(
+        lr_embbding = paddle.static.nn.sparse_embedding(
             input=lr_data,
             size=[lr_input_dim, 1],
             is_test=inference,
......
@@ -19,6 +19,7 @@ import numpy as np
 from eager_op_test import OpTest, paddle_static_guard
 import paddle
+from paddle.incubate.layers.nn import bilateral_slice
 class Gsz:
@@ -202,7 +203,7 @@ class TestBilateralSliceApi(unittest.TestCase):
         grid = paddle.static.data(
             name='grid', shape=[None, None, 8, 5, 3], dtype='float32'
         )
-        paddle.fluid.contrib.layers.bilateral_slice(x, guide, grid, False)
+        bilateral_slice(x, guide, grid, False)
         if not paddle.fluid.is_compiled_with_cuda():
             return
@@ -212,7 +213,7 @@ class TestBilateralSliceApi(unittest.TestCase):
         guide1 = paddle.rand([3, 50, 30])
         grid1 = paddle.rand([3, 2, 2, 5, 3])
-        paddle.fluid.contrib.bilateral_slice(x1, guide1, grid1, False)
+        bilateral_slice(x1, guide1, grid1, False)
 if __name__ == "__main__":
......
@@ -18,7 +18,7 @@ import paddle
 from paddle import fluid
 from paddle.distributed.transpiler import collective
 from paddle.fluid import core
-from paddle.fluid.layers.nn import _pull_box_sparse
+from paddle.incubate.layers.nn import _pull_box_sparse
 class TestTranspile(unittest.TestCase):
......
@@ -73,7 +73,7 @@ class TestPSMinimize(unittest.TestCase):
             name="1", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        q_emb = fluid.contrib.layers.sparse_embedding(
+        q_emb = paddle.static.nn.sparse_embedding(
             input=q,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -105,7 +105,7 @@ class TestPSMinimize(unittest.TestCase):
             name="2", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        pt_emb = fluid.contrib.layers.sparse_embedding(
+        pt_emb = paddle.static.nn.sparse_embedding(
             input=pt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -136,7 +136,7 @@ class TestPSMinimize(unittest.TestCase):
             name="3", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        nt_emb = fluid.contrib.layers.sparse_embedding(
+        nt_emb = paddle.static.nn.sparse_embedding(
             input=nt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
......
@@ -73,7 +73,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="1", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        q_emb = fluid.contrib.layers.sparse_embedding(
+        q_emb = paddle.static.nn.sparse_embedding(
             input=q,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -105,7 +105,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="2", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        pt_emb = fluid.contrib.layers.sparse_embedding(
+        pt_emb = paddle.static.nn.sparse_embedding(
             input=pt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -136,7 +136,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="3", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        nt_emb = fluid.contrib.layers.sparse_embedding(
+        nt_emb = paddle.static.nn.sparse_embedding(
             input=nt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
......
@@ -76,7 +76,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="1", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        q_emb = fluid.contrib.layers.sparse_embedding(
+        q_emb = paddle.static.nn.sparse_embedding(
             input=q,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -108,7 +108,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="2", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        pt_emb = fluid.contrib.layers.sparse_embedding(
+        pt_emb = paddle.static.nn.sparse_embedding(
             input=pt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -139,7 +139,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="3", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        nt_emb = fluid.contrib.layers.sparse_embedding(
+        nt_emb = paddle.static.nn.sparse_embedding(
             input=nt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
......
@@ -77,7 +77,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        q_emb = fluid.contrib.layers.sparse_embedding(
+        q_emb = paddle.static.nn.sparse_embedding(
             input=q,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -109,7 +109,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        pt_emb = fluid.contrib.layers.sparse_embedding(
+        pt_emb = paddle.static.nn.sparse_embedding(
             input=pt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -140,7 +140,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        nt_emb = fluid.contrib.layers.sparse_embedding(
+        nt_emb = paddle.static.nn.sparse_embedding(
             input=nt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
......
@@ -76,7 +76,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        q_emb = fluid.contrib.layers.sparse_embedding(
+        q_emb = paddle.static.nn.sparse_embedding(
             input=q,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -109,7 +109,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        pt_emb = fluid.contrib.layers.sparse_embedding(
+        pt_emb = paddle.static.nn.sparse_embedding(
             input=pt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -140,7 +140,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        nt_emb = fluid.contrib.layers.sparse_embedding(
+        nt_emb = paddle.static.nn.sparse_embedding(
             input=nt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
......
@@ -73,7 +73,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        q_emb = fluid.contrib.layers.sparse_embedding(
+        q_emb = paddle.static.nn.sparse_embedding(
             input=q,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -105,7 +105,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        pt_emb = fluid.contrib.layers.sparse_embedding(
+        pt_emb = paddle.static.nn.sparse_embedding(
             input=pt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -136,7 +136,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        nt_emb = fluid.contrib.layers.sparse_embedding(
+        nt_emb = paddle.static.nn.sparse_embedding(
             input=nt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
......
@@ -73,7 +73,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="query_ids", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        q_emb = fluid.contrib.layers.sparse_embedding(
+        q_emb = paddle.static.nn.sparse_embedding(
             input=q,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -105,7 +105,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="pos_title_ids", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        pt_emb = fluid.contrib.layers.sparse_embedding(
+        pt_emb = paddle.static.nn.sparse_embedding(
             input=pt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -136,7 +136,7 @@ class TestPSPassWithBow(unittest.TestCase):
             name="neg_title_ids", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        nt_emb = fluid.contrib.layers.sparse_embedding(
+        nt_emb = paddle.static.nn.sparse_embedding(
             input=nt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
......
@@ -214,7 +214,7 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase):
         init = paddle.nn.initializer.Uniform()
         dnn_layer_dims = [128, 64, 32]
-        dnn_embedding = fluid.contrib.layers.sparse_embedding(
+        dnn_embedding = paddle.static.nn.sparse_embedding(
             input=dnn_data,
             size=[dnn_input_dim, dnn_layer_dims[0]],
             is_test=inference,
@@ -239,7 +239,7 @@ class TestDistMnistAsync2x2WithGauss(TestFleetBase):
         dnn_out = fc
         # build lr model
-        lr_embbding = fluid.contrib.layers.sparse_embedding(
+        lr_embbding = paddle.static.nn.sparse_embedding(
             input=lr_data,
             size=[lr_input_dim, 1],
             is_test=inference,
......
@@ -71,7 +71,7 @@ class TestSPMT(unittest.TestCase):
             name="1", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        q_emb = fluid.contrib.layers.sparse_embedding(
+        q_emb = paddle.static.nn.sparse_embedding(
             input=q,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -103,7 +103,7 @@ class TestSPMT(unittest.TestCase):
             name="2", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        pt_emb = fluid.contrib.layers.sparse_embedding(
+        pt_emb = paddle.static.nn.sparse_embedding(
             input=pt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
@@ -134,7 +134,7 @@ class TestSPMT(unittest.TestCase):
             name="3", shape=[-1, 1], dtype="int64", lod_level=1
         )
         # embedding
-        nt_emb = fluid.contrib.layers.sparse_embedding(
+        nt_emb = paddle.static.nn.sparse_embedding(
             input=nt,
             size=[dict_dim, emb_dim],
             param_attr=fluid.ParamAttr(
......
@@ -23,6 +23,7 @@ from paddle.incubate.distributed.fleet.parameter_server.distribute_transpiler im
 from paddle.incubate.distributed.fleet.parameter_server.distribute_transpiler.distributed_strategy import (
     StrategyFactory,
 )
+from paddle.incubate.layers.nn import search_pyramid_hash
 class TestPyramidHashOpApi(unittest.TestCase):
@@ -33,7 +34,7 @@ class TestPyramidHashOpApi(unittest.TestCase):
         x = paddle.static.data(
             name='x', shape=x_shape, dtype='int32', lod_level=1
         )
-        hash_embd = fluid.contrib.layers.search_pyramid_hash(
+        hash_embd = search_pyramid_hash(
             input=x,
             num_emb=embed_dim,
             space_len=num_voc * embed_dim,
......
@@ -97,7 +97,7 @@ class TestFusedBnAddActAPI(unittest.TestCase):
             act=None,
             data_layout='NHWC',
         )
-        fused_bn_add_act = fluid.contrib.layers.fused_bn_add_act(
+        fused_bn_add_act = paddle.incubate.layers.nn.fused_bn_add_act(
             conv1_2,
             bn,
             param_attr=self.bn_param_attr2,
......
@@ -20,6 +20,7 @@ from eager_op_test import OpTest, paddle_static_guard, skip_check_grad_ci
 import paddle
 import paddle.version as ver
+from paddle.incubate.layers.nn import fused_embedding_seq_pool
 @skip_check_grad_ci(
@@ -114,7 +115,7 @@ class TestFusedEmbeddingSeqPoolApi(unittest.TestCase):
             name='word', shape=[-1, 1], dtype='int64', lod_level=1
         )
         padding_idx = np.random.randint(1, 10)
-        out = fluid.contrib.fused_embedding_seq_pool(
+        out = fused_embedding_seq_pool(
             input=data_t,
             size=[dict_size, 32],
             param_attr='w',
......
@@ -26,6 +26,13 @@ from paddle import fluid
 from paddle.fluid import core, layers, nets
 from paddle.fluid.dygraph import base, to_variable
 from paddle.fluid.framework import Program, default_main_program, program_guard
+from paddle.incubate.layers.nn import (
+    batch_fc,
+    partial_concat,
+    partial_sum,
+    rank_attention,
+    shuffle_batch,
+)
 from paddle.tensor import random
@@ -2145,9 +2152,9 @@ class TestBook(LayerTest):
         x = paddle.static.data(
             name='X', shape=[-1, 4, 50], dtype='float32', lod_level=0
         )
-        out1 = fluid.contrib.layers.shuffle_batch(x)
+        out1 = shuffle_batch(x)
         default_main_program().random_seed = 1000
-        out2 = fluid.contrib.layers.shuffle_batch(x)
+        out2 = shuffle_batch(x)
         self.assertIsNotNone(out1)
         self.assertIsNotNone(out2)
         return out1
@@ -2156,9 +2163,7 @@ class TestBook(LayerTest):
     with self.static_graph():
         x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
         y = paddle.static.data(name="y", shape=[None, 3], dtype="float32")
-        sum = fluid.contrib.layers.partial_sum(
-            [x, y], start_index=0, length=2
-        )
+        sum = partial_sum([x, y], start_index=0, length=2)
         return sum
 def test_batch_fc(self):
@@ -2166,7 +2171,7 @@ class TestBook(LayerTest):
     input = paddle.static.data(
         name="input", shape=[16, 2, 3], dtype="float32"
     )
-    out = fluid.contrib.layers.batch_fc(
+    out = batch_fc(
         input=input,
         param_size=[16, 3, 10],
         param_attr=fluid.ParamAttr(
@@ -2192,7 +2197,7 @@ class TestBook(LayerTest):
     rank_offset = paddle.static.data(
         name="rank_offset", shape=[None, 7], dtype="int32"
     )
-    out = fluid.contrib.layers.rank_attention(
+    out = rank_attention(
         input=input,
         rank_offset=rank_offset,
         rank_param_shape=[18, 3],
@@ -2263,12 +2268,8 @@ class TestBook(LayerTest):
     with self.static_graph():
         x = paddle.static.data(name="x", shape=[None, 3], dtype="float32")
         y = paddle.static.data(name="y", shape=[None, 3], dtype="float32")
-        concat1 = fluid.contrib.layers.partial_concat(
-            [x, y], start_index=0, length=2
-        )
-        concat2 = fluid.contrib.layers.partial_concat(
-            x, start_index=0, length=-1
-        )
+        concat1 = partial_concat([x, y], start_index=0, length=2)
+        concat2 = partial_concat(x, start_index=0, length=-1)
         return concat1, concat2
 def test_addmm(self):
......
@@ -15,7 +15,7 @@
 import unittest
 import paddle
-from paddle.fluid.contrib.layers.nn import pow2_decay_with_linear_warmup
+from paddle.incubate.layers.nn import pow2_decay_with_linear_warmup
 from paddle.optimizer.lr import LinearWarmup, PolynomialDecay
......
@@ -18,7 +18,7 @@ import numpy as np
 import paddle
 from paddle import fluid
-from paddle.fluid.layers.nn import _pull_gpups_sparse
+from paddle.incubate.layers import _pull_gpups_sparse
 paddle.enable_static()
......
@@ -18,6 +18,7 @@ import numpy as np
 import paddle
 from paddle import fluid
+from paddle.incubate.layers.nn import search_pyramid_hash
 class TestPyramidHashOpApi(unittest.TestCase):
@@ -28,7 +29,7 @@ class TestPyramidHashOpApi(unittest.TestCase):
         x = paddle.static.data(
             name='x', shape=x_shape, dtype='int32', lod_level=1
         )
-        hash_embd = fluid.contrib.search_pyramid_hash(
+        hash_embd = search_pyramid_hash(
             input=x,
             num_emb=embed_dim,
             space_len=num_voc * embed_dim,
......
@@ -19,6 +19,7 @@ from eager_op_test import OpTest, paddle_static_guard
 import paddle
 from paddle import fluid
+from paddle.incubate.layers.nn import tdm_child
 def create_tdm_tree():
@@ -147,7 +148,7 @@ class TestTDMChildShape(unittest.TestCase):
         tdm_tree_info = create_tdm_tree()
         tree_info_np = np.array(tdm_tree_info).astype('int32')
-        child, leaf_mask = fluid.contrib.layers.tdm_child(
+        child, leaf_mask = tdm_child(
             x=x,
             node_nums=26,
             child_nums=2,
......
@@ -20,6 +20,7 @@ from eager_op_test import OpTest, paddle_static_guard
 import paddle
 from paddle import fluid
 from paddle.fluid import core
+from paddle.incubate.layers.nn import tdm_sampler
 def create_tdm_travel():
@@ -284,7 +285,7 @@ class TestTDMSamplerShape(unittest.TestCase):
         neg_samples_num_list = [1, 2, 3, 4]
         leaf_node_num = 13
-        sample, label, mask = fluid.contrib.layers.tdm_sampler(
+        sample, label, mask = tdm_sampler(
             x,
             neg_samples_num_list,
             layer_node_num_list,
......
@@ -21,6 +21,7 @@ from paddle import fluid
 from paddle.fluid import core
 from paddle.fluid.backward import append_backward
 from paddle.fluid.executor import Executor
+from paddle.incubate.layers.nn import shuffle_batch
 paddle.enable_static()
@@ -145,7 +146,7 @@ class TestIgnoreVarNameInWhile(unittest.TestCase):
     def body_func(i, ten, batch_info, origin_seq):
         print(batch_info)
-        batch_info = fluid.contrib.layers.shuffle_batch(batch_info)
+        batch_info = shuffle_batch(batch_info)
         print(batch_info)
         i = i + 1
         return [i, ten, batch_info, origin_seq]
......
@@ -35,6 +35,7 @@ from . import autotune  # noqa: F401
 from . import nn  # noqa: F401
 from . import asp  # noqa: F401
 from . import multiprocessing  # noqa: F401
+from . import layers
 from .nn.loss import identity_loss
......
@@ -1387,7 +1387,7 @@ class FleetUtil:
                 label=label, curve='ROC',\
                 num_thresholds=4096)
             local_sqrerr, local_abserr, local_prob, local_q, local_pos_ins,\
-                local_total_ins = fluid.contrib.layers.ctr_metric_bundle(\
+                local_total_ins = paddle.static.ctr_metric_bundle(\
                 similarity_norm, label)
     """
@@ -1587,7 +1587,7 @@ class FleetUtil:
                 label=label, curve='ROC',\
                 num_thresholds=4096)
             local_sqrerr, local_abserr, local_prob, local_q, local_pos_ins, \
-                local_total_ins = fluid.contrib.layers.ctr_metric_bundle(\
+                local_total_ins = paddle.static.ctr_metric_bundle(\
                 similarity_norm, label)
     """
......
@@ -13,11 +13,25 @@
 # limitations under the License.
 from . import nn
-from .nn import *
-from . import metric_op
-from .metric_op import *
+from .nn import (
+    fused_embedding_seq_pool,
+    fused_seqpool_cvm,
+    multiclass_nms2,
+    search_pyramid_hash,
+    shuffle_batch,
+    partial_concat,
+    partial_sum,
+    tdm_child,
+    tdm_sampler,
+    rank_attention,
+    batch_fc,
+    _pull_box_extended_sparse,
+    bilateral_slice,
+    correlation,
+    fused_bn_add_act,
+    pow2_decay_with_linear_warmup,
+    _pull_gpups_sparse,
+    _pull_box_sparse,
+)
 __all__ = []
-__all__ += nn.__all__
-__all__ += metric_op.__all__
@@ -71,12 +71,12 @@ from ..fluid.optimizer import Optimizer  # noqa: F401
 from ..fluid.optimizer import Adam  # noqa: F401
 from ..fluid.optimizer import ExponentialMovingAverage  # noqa: F401
-from ..fluid.contrib.layers import ctr_metric_bundle  # noqa: F401
 from ..fluid.layers import exponential_decay  # noqa: F401
 from ..fluid.layers import learning_rate_scheduler  # noqa: F401
 from .nn.metric import auc  # noqa: F401
 from .nn.metric import accuracy  # noqa: F401
+from .nn.metric import ctr_metric_bundle  # noqa: F401
 __all__ = [  # noqa
     'append_backward',
......
@@ -39,7 +39,7 @@ from .common import layer_norm  # noqa: F401
 from .common import embedding  # noqa: F401
-from ...fluid.contrib.layers import sparse_embedding  # noqa: F401
+from .common import sparse_embedding  # noqa: F401
 from ...fluid.layers import StaticRNN  # noqa: F401
 from .sequence_lod import sequence_conv  # noqa: F401
......
@@ -3810,3 +3810,200 @@ def embedding(
         },
     )
     return tmp
def sparse_embedding(
input,
size,
padding_idx=None,
is_test=False,
entry=None,
table_class="MemorySparseTable",
param_attr=None,
dtype='float32',
slot=None,
):
r"""
:api_attr: Static Graph
This OP is used as the operator of the embedding-lookup layer in the large-scale
sparse training of the parameter server mode, in place of paddle.nn.functional.embedding.
The operator looks up the embedding vectors of the ids provided by :attr:`input` .
It automatically constructs a 2D embedding matrix based on the input :attr:`size`
(vocab_size, emb_size) and :attr:`dtype` .
The shape of output Tensor is generated by appending an emb_size dimension to the
last dimension of the input Tensor shape.
**Note:** The id in :attr:`input` must satisfy :math:`0 <= id < size[0]` , otherwise
the program will throw an exception and exit.
.. code-block:: text
Case 1:
input is a Tensor. padding_idx = -1
input.data = [[1, 3], [2, 4], [4, 127]]
input.shape = [3, 2]
Given size = [128, 16]
output is a Tensor:
out.shape = [3, 2, 16]
out.data = [[[0.129435295, 0.244512452, ..., 0.436322452],
[0.345421456, 0.524563927, ..., 0.144534654]],
[[0.345249859, 0.124939536, ..., 0.194353745],
[0.945345345, 0.435394634, ..., 0.435345365]],
[[0.945345345, 0.435394634, ..., 0.435345365],
[0.0, 0.0, ..., 0.0 ]]] # padding data
Since the input padding_idx is less than 0, it is automatically converted to padding_idx = -1 + 128 = 127.
It will pad all-zero data when an id is 127.
Case 2:
input is a LoDTensor with 1-level LoD. padding_idx = 0
input.lod = [[2, 3]]
input.data = [[1], [3], [2], [4], [0]]
input.shape = [5, 1]
Given size = [128, 16]
output is a LoDTensor:
out.lod = [[2, 3]]
out.shape = [5, 1, 16]
out.data = [[[0.129435295, 0.244512452, ..., 0.436322452]],
[[0.345421456, 0.524563927, ..., 0.144534654]],
[[0.345249859, 0.124939536, ..., 0.194353745]],
[[0.945345345, 0.435394634, ..., 0.435345365]],
[[0.0, 0.0, ..., 0.0 ]]] # padding data
It will pad all-zero data when an id is 0.
Args:
input(Tensor): A Tensor or LoDTensor with type int64, which contains the id
information. The value of the input id should satisfy :math:`0<= id < size[0]` .
size(tuple|list): The shape of lookup table parameter (vocab_size, emb_size). It
should have two elements which indicates the size of the dictionary of embeddings
and the size of each embedding vector respectively. The initial parameter size
is 0 in the large-scale sparse scenario, which will gradually expand with the
training. So if vocab_size is temporarily useless, its value can be any integer.
The emb_size is the dimensional configuration of the word embedding weight parameter.
padding_idx(int|long|None, optional): padding_idx needs to be in the interval [-vocab_size, vocab_size).
If :math:`padding\_idx < 0`, the :math:`padding\_idx` will automatically be converted
to :math:`vocab\_size + padding\_idx` . It will output all-zero padding data whenever
lookup encounters :math:`padding\_idx` in id. And the padding data will not be updated
while training. If set to None, it has no effect on the output. Default: None.
is_test(bool, optional): Training or prediction mode. In prediction mode (is_test=True),
uninitialized lookup results are not created; they are filled with 0 and returned. Default: False.
entry(str, optional): Entry config with parameter server whose value is ProbabilityEntry,
CountFilterEntry or None. Default: None.
table_class(str, optional): The type of the sparse table. The value can be CommonSparseTable,
SSDSparseTable or MemorySparseTable. The default is MemorySparseTable.
param_attr(ParamAttr, optional): To specify the weight parameter property. Default: None, which means the
default weight parameter property is used. In addition, user-defined or pre-trained word
vectors can be loaded with the :attr:`param_attr` parameter. The local word vector needs
to be transformed into numpy format, and the shape of local word vector should be consistent
with :attr:`size` .
dtype(str): It refers to the data type of output Tensor. It must be float32 or
float64. Default: float32.
Returns:
Tensor: Embedding Tensor or LoDTensor mapped by input. The data type is the same as :attr:`dtype` .
Examples:
.. code-block:: python
import paddle
paddle.enable_static()
sparse_feature_dim = 1024
embedding_size = 64
# Only features that appear 10 times or more participate in the training.
entry = paddle.distributed.CountFilterEntry(10)
input = paddle.static.data(name='ins', shape=[1], dtype='int64')
emb = paddle.static.nn.sparse_embedding(
input=input,
size=[sparse_feature_dim, embedding_size],
is_test=False,
entry=entry,
param_attr=paddle.ParamAttr(name="SparseFeatFactors",
initializer=paddle.nn.initializer.Uniform()))
"""
helper = LayerHelper('sparse_embedding', **locals())
check_variable_and_dtype(
input, 'input', ['int64'], 'paddle.incubate.layers.sparse_embedding'
)
check_dtype(
dtype,
'dtype',
['float32', 'float64'],
'paddle.static.nn.sparse_embedding',
)
if input.size == 0:
raise ValueError("input size should not be 0")
w = helper.create_parameter(
attr=helper.param_attr,
shape=size,
type=core.VarDesc.VarType.SELECTED_ROWS,
dtype=dtype,
is_bias=False,
)
tmp = helper.create_variable_for_type_inference(dtype)
padding_idx = (
-1
if padding_idx is None
else padding_idx
if padding_idx >= 0
else (size[0] + padding_idx)
)
if table_class not in [
"CommonSparseTable",
"SSDSparseTable",
"MemorySparseTable",
]:
raise ValueError(
"table_class must be in [CommonSparseTable, SSDSparseTable, MemorySparseTable]"
)
entry_str = "none"
if entry is not None:
if entry.__class__.__name__ not in [
"ProbabilityEntry",
"CountFilterEntry",
"ShowClickEntry",
]:
raise ValueError(
"entry must be instance in [paddle.distributed.ProbabilityEntry, paddle.distributed.CountFilterEntry, paddle.distributed.ShowClickEntry]"
)
entry_str = entry._to_attr()
if slot is None:
slot = 0
helper.append_op(
type='lookup_table',
inputs={'Ids': input, 'W': w},
outputs={'Out': tmp},
attrs={
'padding_idx': padding_idx,
'is_sparse': True,
'is_distributed': True,
'remote_prefetch': True,
'is_test': is_test,
'entry': entry_str,
'table_class': table_class,
'slot': slot,
},
)
return tmp
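The padding_idx expression above implements the conversion described in Case 1 of the docstring; a standalone sketch of the same rule (the helper name is illustrative, not part of the commit):

def normalize_padding_idx(padding_idx, vocab_size):
    # None maps to the sentinel -1; a negative index wraps around the
    # vocabulary, e.g. padding_idx=-1 with vocab_size=128 becomes 127.
    if padding_idx is None:
        return -1
    return padding_idx if padding_idx >= 0 else vocab_size + padding_idx

assert normalize_padding_idx(None, 128) == -1
assert normalize_padding_idx(-1, 128) == 127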
@@ -317,3 +317,253 @@ def auc(
         batch_auc_out,
         [batch_stat_pos, batch_stat_neg, stat_pos, stat_neg],
     )
def ctr_metric_bundle(input, label, ins_tag_weight=None):
"""
CTR-related metric layer.
This function helps compute the CTR-related metrics: RMSE, MAE, predicted_ctr, q_value.
To compute the final values of these metrics, do the following computations using the
total instance number:
MAE = local_abserr / instance number
RMSE = sqrt(local_sqrerr / instance number)
predicted_ctr = local_prob / instance number
q = local_q / instance number
Note that in a distributed job, these metrics and the instance number should be
all-reduced first.
Args:
input(Tensor): A floating-point 2D Tensor, values are in the range
[0, 1]. Each row is sorted in descending order. This
input should be the output of topk. Typically, this
Tensor indicates the probability of each label.
label(Tensor): A 2D int Tensor indicating the label of the training
data. The height is batch size and width is always 1.
ins_tag_weight(Tensor): A 2D int Tensor indicating the ins_tag_weight of the training
data. 1 means real data, 0 means fake data.
A LoDTensor or Tensor with type float32 or float64.
Returns:
local_sqrerr(Tensor): Local sum of squared error
local_abserr(Tensor): Local sum of abs error
local_prob(Tensor): Local sum of predicted ctr
local_q(Tensor): Local sum of q value
Example 1:
.. code-block:: python
import paddle
paddle.enable_static()
data = paddle.static.data(name="data", shape=[32, 32], dtype="float32")
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int32")
predict = paddle.nn.functional.sigmoid(paddle.static.nn.fc(x=data, size=1))
auc_out = paddle.static.ctr_metric_bundle(input=predict, label=label)
Example 2:
.. code-block:: python
import paddle
paddle.enable_static()
data = paddle.static.data(name="data", shape=[32, 32], dtype="float32")
label = paddle.static.data(name="label", shape=[-1, 1], dtype="int32")
predict = paddle.nn.functional.sigmoid(paddle.static.nn.fc(x=data, size=1))
ins_tag_weight = paddle.static.data(name='ins_tag', shape=[-1,16], lod_level=0, dtype='int64')
auc_out = paddle.static.ctr_metric_bundle(input=predict, label=label, ins_tag_weight=ins_tag_weight)
"""
if ins_tag_weight is None:
ins_tag_weight = paddle.tensor.fill_constant(
shape=[1, 1], dtype="float32", value=1.0
)
assert input.shape == label.shape
helper = LayerHelper("ctr_metric_bundle", **locals())
local_abserr = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1]
)
local_sqrerr = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1]
)
local_prob = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1]
)
local_q = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1]
)
local_pos_num = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1]
)
local_ins_num = helper.create_global_variable(
persistable=True, dtype='float32', shape=[1]
)
tmp_res_elesub = helper.create_global_variable(
persistable=False, dtype='float32', shape=[-1]
)
tmp_res_sigmoid = helper.create_global_variable(
persistable=False, dtype='float32', shape=[-1]
)
tmp_ones = helper.create_global_variable(
persistable=False, dtype='float32', shape=[-1]
)
batch_prob = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1]
)
batch_abserr = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1]
)
batch_sqrerr = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1]
)
batch_q = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1]
)
batch_pos_num = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1]
)
batch_ins_num = helper.create_global_variable(
persistable=False, dtype='float32', shape=[1]
)
for var in [
local_abserr,
batch_abserr,
local_sqrerr,
batch_sqrerr,
local_prob,
batch_prob,
local_q,
batch_q,
batch_pos_num,
batch_ins_num,
local_pos_num,
local_ins_num,
]:
helper.set_variable_initializer(
var,
paddle.nn.initializer.ConstantInitializer(
value=0.0, force_cpu=True
),
)
helper.append_op(
type="elementwise_sub",
inputs={"X": [input], "Y": [label]},
outputs={"Out": [tmp_res_elesub]},
)
helper.append_op(
type="squared_l2_norm",
inputs={"X": [tmp_res_elesub]},
outputs={"Out": [batch_sqrerr]},
)
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_sqrerr], "Y": [local_sqrerr]},
outputs={"Out": [local_sqrerr]},
)
helper.append_op(
type="l1_norm",
inputs={"X": [tmp_res_elesub]},
outputs={"Out": [batch_abserr]},
)
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_abserr], "Y": [local_abserr]},
outputs={"Out": [local_abserr]},
)
helper.append_op(
type="reduce_sum", inputs={"X": [input]}, outputs={"Out": [batch_prob]}
)
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_prob], "Y": [local_prob]},
outputs={"Out": [local_prob]},
)
helper.append_op(
type="sigmoid",
inputs={"X": [input]},
outputs={"Out": [tmp_res_sigmoid]},
)
helper.append_op(
type="reduce_sum",
inputs={"X": [tmp_res_sigmoid]},
outputs={"Out": [batch_q]},
)
helper.append_op(
type="reduce_sum",
inputs={"X": [label]},
outputs={"Out": [batch_pos_num]},
)
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_pos_num], "Y": [local_pos_num]},
outputs={"Out": [local_pos_num]},
)
helper.append_op(
type='fill_constant_batch_size_like',
inputs={"Input": label},
outputs={'Out': [tmp_ones]},
attrs={
'shape': [-1, 1],
'dtype': tmp_ones.dtype,
'value': float(1.0),
},
)
helper.append_op(
type="reduce_sum",
inputs={"X": [tmp_ones]},
outputs={"Out": [batch_ins_num]},
)
# if data is fake, return 0
inputs_slice = {'Input': ins_tag_weight}
attrs = {'axes': [0]}
attrs['starts'] = [0]
attrs['ends'] = [1]
helper.append_op(
type="slice",
inputs=inputs_slice,
attrs=attrs,
outputs={"Out": ins_tag_weight},
)
axis = helper.kwargs.get('axis', 0)
helper.append_op(
type="elementwise_mul",
inputs={"X": [batch_ins_num], "Y": [ins_tag_weight]},
outputs={"Out": [batch_ins_num]},
attrs={'axis': axis},
)
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_ins_num], "Y": [local_ins_num]},
outputs={"Out": [local_ins_num]},
)
helper.append_op(
type="elementwise_mul",
inputs={"X": [batch_q], "Y": [ins_tag_weight]},
outputs={"Out": [batch_q]},
attrs={'axis': axis},
)
helper.append_op(
type="elementwise_add",
inputs={"X": [batch_q], "Y": [local_q]},
outputs={"Out": [local_q]},
)
return (
local_sqrerr,
local_abserr,
local_prob,
local_q,
local_pos_num,
local_ins_num,
)
@@ -448,7 +448,6 @@ packages=['paddle',
           'paddle.fluid.dataloader',
           'paddle.fluid.contrib',
           'paddle.fluid.contrib.extend_optimizer',
-          'paddle.fluid.contrib.layers',
           'paddle.fluid.incubate',
           'paddle.incubate.distributed.fleet',
           'paddle.fluid.incubate.checkpoint',
@@ -483,6 +482,7 @@ packages=['paddle',
           'paddle.incubate.distributed.fleet.parameter_server.distribute_transpiler',
           'paddle.incubate.distributed.fleet.parameter_server.pslib',
           'paddle.incubate.distributed.fleet.parameter_server.ir',
+          'paddle.incubate.layers',
           'paddle.quantization',
           'paddle.quantization.quanters',
           'paddle.quantization.observers',
......
@@ -1424,7 +1424,6 @@ def get_setup_parameters():
         'paddle.fluid.dataloader',
         'paddle.fluid.contrib',
         'paddle.fluid.contrib.extend_optimizer',
-        'paddle.fluid.contrib.layers',
         'paddle.fluid.incubate',
         'paddle.incubate.distributed.fleet',
         'paddle.fluid.incubate.checkpoint',
@@ -1459,6 +1458,7 @@ def get_setup_parameters():
         'paddle.incubate.distributed.fleet.parameter_server.distribute_transpiler',
         'paddle.incubate.distributed.fleet.parameter_server.ir',
         'paddle.incubate.distributed.fleet.parameter_server.pslib',
+        'paddle.incubate.layers',
         'paddle.quantization',
         'paddle.quantization.quanters',
         'paddle.quantization.observers',
......
@@ -114,7 +114,7 @@ class TestCorrelationOp(unittest.TestCase):
             stride2=1,
         )
-        out = fluid.contrib.correlation(
+        out = paddle.incubate.layers.correlation(
             x1,
             x2,
             pad_size=4,
@@ -142,7 +142,7 @@ class Net(paddle.nn.Layer):
         super().__init__(name_scope)
     def forward(self, x1, x2):
-        y = fluid.contrib.correlation(
+        y = paddle.incubate.layers.correlation(
             x1,
             x2,
             pad_size=4,
......
@@ -171,10 +171,10 @@ class TestMulticlassNMS2(unittest.TestCase):
         scores = paddle.static.data(
             name='scores', shape=[-1, 10], dtype='float32'
         )
-        output = fluid.contrib.multiclass_nms2(
+        output = paddle.incubate.layers.multiclass_nms2(
             bboxes, scores, 0.3, 400, 200, 0.7
         )
-        output2, index = fluid.contrib.multiclass_nms2(
+        output2, index = paddle.incubate.layers.multiclass_nms2(
             bboxes, scores, 0.3, 400, 200, 0.7, return_index=True
         )
         self.assertIsNotNone(output)
......
@@ -75,7 +75,7 @@ class DNNLayer(nn.Layer):
         sparse_embs = []
         for s_input in sparse_inputs:
             if self.sync_mode == "gpubox":
-                emb = paddle.fluid.contrib.sparse_embedding(
+                emb = paddle.static.nn.sparse_embedding(
                     input=s_input,
                     size=[self.sparse_feature_number, self.sparse_feature_dim],
                     param_attr=paddle.ParamAttr(name="embedding"),
......
@@ -20,7 +20,7 @@ sys.path.append('../../python/paddle/fluid/tests/unittests')
 from get_test_cover_info import record_op_test
 import paddle
-from paddle.fluid.contrib.layers.nn import pow2_decay_with_linear_warmup
+from paddle.incubate.layers.nn import pow2_decay_with_linear_warmup
 from paddle.optimizer.lr import LinearWarmup, PolynomialDecay
......