未验证 提交 55e5ab82 编写于 作者: W wanghuancoder 提交者: GitHub

[Eager] bilinear_tensor_product yaml (#44459)

* bilinear_tensor_product yaml
上级 78b5c103
...@@ -280,6 +280,16 @@ ...@@ -280,6 +280,16 @@
func : bce_loss func : bce_loss
backward : bce_loss_grad backward : bce_loss_grad
# Forward op spec for bilinear_tensor_product (Phi op yaml entry).
# infer_meta/kernel name the C++ functions that resolve output shape and compute the result.
- api : bilinear_tensor_product
args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
output : Tensor
infer_meta :
func : BilinearTensorProductInferMeta
kernel :
func : bilinear_tensor_product
# bias may be omitted by callers
optional : bias
# paired gradient op declared in the backward yaml
backward : bilinear_tensor_product_grad
# bitwise_and # bitwise_and
- api : bitwise_and - api : bitwise_and
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
......
...@@ -251,6 +251,15 @@ ...@@ -251,6 +251,15 @@
func : bce_loss_grad func : bce_loss_grad
inplace : (out_grad -> input_grad) inplace : (out_grad -> input_grad)
# Backward op spec for bilinear_tensor_product (Phi backward yaml entry).
# `forward` restates the forward signature so the code generator can map
# forward inputs/outputs to the gradient op's arguments.
- backward_api : bilinear_tensor_product_grad
forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad)
# gradients are produced for all four forward inputs, including bias
output : Tensor(x_grad), Tensor(y_grad), Tensor(weight_grad), Tensor(bias_grad)
infer_meta :
func : BilinearTensorProductGradInferMeta
kernel :
func : bilinear_tensor_product_grad
- backward_api : brelu_grad - backward_api : brelu_grad
forward : brelu (Tensor x, float t_min, float t_max) -> Tensor(out) forward : brelu (Tensor x, float t_min, float t_max) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float t_min, float t_max) args : (Tensor x, Tensor out_grad, float t_min, float t_max)
......
...@@ -18,6 +18,7 @@ import unittest ...@@ -18,6 +18,7 @@ import unittest
import numpy as np import numpy as np
import paddle.fluid as fluid import paddle.fluid as fluid
from op_test import OpTest from op_test import OpTest
import paddle
class TestDygraphBilinearTensorProductAPIError(unittest.TestCase): class TestDygraphBilinearTensorProductAPIError(unittest.TestCase):
...@@ -41,6 +42,7 @@ class TestBilinearTensorProductOp(OpTest): ...@@ -41,6 +42,7 @@ class TestBilinearTensorProductOp(OpTest):
def setUp(self): def setUp(self):
self.op_type = "bilinear_tensor_product" self.op_type = "bilinear_tensor_product"
self.python_api = paddle.nn.functional.bilinear
batch_size = 6 batch_size = 6
size0 = 5 size0 = 5
size1 = 4 size1 = 4
...@@ -63,10 +65,10 @@ class TestBilinearTensorProductOp(OpTest): ...@@ -63,10 +65,10 @@ class TestBilinearTensorProductOp(OpTest):
self.outputs = {'Out': output + bias} self.outputs = {'Out': output + bias}
def test_check_output(self): def test_check_output(self):
self.check_output() self.check_output(check_eager=True)
def test_check_grad_normal(self): def test_check_grad_normal(self):
self.check_grad(['X', 'Y', 'Weight', 'Bias'], 'Out') self.check_grad(['X', 'Y', 'Weight', 'Bias'], 'Out', check_eager=True)
if __name__ == "__main__": if __name__ == "__main__":
......
...@@ -850,7 +850,9 @@ def bilinear(x1, x2, weight, bias=None, name=None): ...@@ -850,7 +850,9 @@ def bilinear(x1, x2, weight, bias=None, name=None):
""" """
if in_dynamic_mode(): if in_dygraph_mode():
return _C_ops.final_state_bilinear_tensor_product(x1, x2, weight, bias)
elif _non_static_mode():
return _C_ops.bilinear_tensor_product(x1, x2, weight, bias) return _C_ops.bilinear_tensor_product(x1, x2, weight, bias)
check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear') check_variable_and_dtype(x1, 'x1', ['float32', 'float64'], 'bilinear')
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册