Unverified commit 121b7429 authored by wangzhen38, committed by GitHub

[api move] cvm (#48989)

* [api move] cvm

* [api move] cvm

* [api move] cvm

* [api move] cvm

* [api move] cvm

* [api move] cvm

* [api move] cvm

* [api move] ci test

* [api move] ci test

* [api move] ci test
上级 b6aa9f53
...@@ -182,6 +182,46 @@ class TestLayer(LayerTest): ...@@ -182,6 +182,46 @@ class TestLayer(LayerTest):
self.assertRaises(TypeError, test_type) self.assertRaises(TypeError, test_type)
def test_cvm(self):
    """Exercise ``paddle.static.nn.continuous_value_model`` on both branches.

    With ``use_cvm=True`` the output keeps the input width (the leading
    show/click pair is log-transformed); with ``use_cvm=False`` the two
    show/click columns are dropped, so the output is two columns narrower.
    """
    batch = np.ones([10, 10], dtype='float32')
    show_click = np.ones([10, 2], dtype='float32')
    # Reference for use_cvm=True: transformed show/click pair followed by
    # the untouched remaining eight columns, repeated for every row.
    expected_with_cvm = np.array(
        [[0.6931472, -1.904654e-09] + [1] * 8] * 10, dtype='float32'
    )
    # Reference for use_cvm=False: show/click removed -> width 8.
    expected_without_cvm = np.ones([10, 8], dtype='float32')

    def run_cvm(use_cvm):
        # Build a fresh static graph and fetch the op's single output.
        with self.static_graph():
            feats = paddle.static.data(
                name='data',
                shape=[10, 10],
                dtype='float32',
            )
            clicks = paddle.static.data(
                name='show_click',
                shape=[10, 2],
                dtype='float32',
            )
            result = paddle.static.nn.continuous_value_model(
                feats, clicks, use_cvm
            )
            return self.get_static_graph_result(
                feed={'data': batch, 'show_click': show_click},
                fetch_list=[result],
            )[0]

    ret_with_cvm = run_cvm(True)
    ret_without_cvm = run_cvm(False)
    np.testing.assert_allclose(
        ret_with_cvm, expected_with_cvm, rtol=1e-5, atol=1e-06
    )
    np.testing.assert_allclose(
        ret_without_cvm, expected_without_cvm, rtol=1e-5, atol=1e-06
    )
def test_Flatten(self): def test_Flatten(self):
inp = np.ones([3, 4, 4, 5], dtype='float32') inp = np.ones([3, 4, 4, 5], dtype='float32')
with self.static_graph(): with self.static_graph():
......
...@@ -16,6 +16,7 @@ from .common import fc # noqa: F401 ...@@ -16,6 +16,7 @@ from .common import fc # noqa: F401
from .common import batch_norm # noqa: F401 from .common import batch_norm # noqa: F401
from .common import instance_norm # noqa: F401 from .common import instance_norm # noqa: F401
from .common import data_norm # noqa: F401 from .common import data_norm # noqa: F401
from .common import continuous_value_model # noqa: F401
from .common import group_norm # noqa: F401 from .common import group_norm # noqa: F401
from .common import deform_conv2d # noqa: F401 from .common import deform_conv2d # noqa: F401
from .common import conv3d # noqa: F401 from .common import conv3d # noqa: F401
......
...@@ -321,6 +321,55 @@ def instance_norm( ...@@ -321,6 +321,55 @@ def instance_norm(
return instance_norm_out return instance_norm_out
@static_only
def continuous_value_model(input, cvm, use_cvm=True):
    r"""
    **continuous_value_model layers**

    This OP is used in CTR projects to remove or transform the show and
    click values carried in :attr:`input`. :attr:`input` is an embedding
    vector including show and click value, whose shape is :math:`[N, D]`
    (N is batch size, D is `2 + embedding dim`). Show and click occupy the
    first two dims of the embedding vector D.

    If :attr:`use_cvm` is True, it will calculate :math:`log(show)` and
    :math:`log(click)`, and the output shape is :math:`[N, D]`.
    If :attr:`use_cvm` is False, it will remove show and click from
    :attr:`input`, and the output shape is :math:`[N, D - 2]`.
    :attr:`cvm` is the show_click info, whose shape is :math:`[N, 2]`.

    Args:
        input (Variable): The input variable. A 2-D LoDTensor with shape
            :math:`[N, D]`, where N is the batch size and D is
            `2 + the embedding dim`. `lod level = 1`.
            A Tensor with type float16, float32 or float64.
        cvm (Variable): Show and click variable. A 2-D Tensor with shape
            :math:`[N, 2]`, where N is the batch size and 2 is show and
            click. A Tensor with type float32 or float64.
        use_cvm (bool): Whether to use show_click. If True, the output dim
            is the same as the input dim; if False, the output dim is
            `input dim - 2` (show and click removed). Default: True.

    Returns:
        Variable: A 2-D LodTensor with shape :math:`[N, M]`. If
        :attr:`use_cvm` is True, M equals the input dim D; otherwise M
        equals `D - 2`. A Tensor with the same type as the input.

    Examples:
        .. code-block:: python

            import paddle

            input = paddle.static.data(name="input", shape=[64, 1], dtype="int64")
            label = paddle.static.data(name="label", shape=[64, 1], dtype="int64")
            w0 = paddle.full(shape=(100, 1), fill_value=2).astype(paddle.float32)
            embed = paddle.nn.functional.embedding(
                input,
                w0)
            ones = paddle.full_like(label, 1, dtype="int64")
            show_clk = paddle.cast(paddle.concat([ones, label], axis=1), dtype='float32')
            show_clk.stop_gradient = True
            input_with_cvm = paddle.static.nn.continuous_value_model(embed, show_clk, True)
    """
    helper = LayerHelper('cvm', **locals())
    # Validate the input dtype before allocating the output variable so a
    # bad dtype fails fast without creating a dangling variable.
    check_variable_and_dtype(
        input, 'input', ['float16', 'float32', 'float64'], 'cvm'
    )
    out = helper.create_variable(dtype=input.dtype)
    helper.append_op(
        type='cvm',
        inputs={'X': [input], 'CVM': [cvm]},
        outputs={'Y': [out]},
        attrs={"use_cvm": use_cvm},
    )
    return out
@static_only @static_only
def data_norm( def data_norm(
input, input,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册