Unverified commit 6f4bd0ea authored by YuanRisheng and committed by GitHub

[Phi]Add graph_send_recv yaml file (#41206)

* add graph_send_recv yaml

* deal with conflict

* fix compile bugs
Parent: 0c968b9d
......@@ -118,12 +118,12 @@ void GraphSendRecvGradOpKernelLaunchHelper(
template <typename T, typename Context>
void GraphSendRecvGradKernel(const Context& ctx,
                             const DenseTensor& out_grad,
                             const DenseTensor& x,
                             paddle::optional<const DenseTensor&> out,
                             const DenseTensor& src_index,
                             const DenseTensor& dst_index,
                             paddle::optional<const DenseTensor&> out,
                             paddle::optional<const DenseTensor&> dst_count,
                             const DenseTensor& out_grad,
                             const std::string& pool_type,
                             DenseTensor* x_grad) {
  auto index_type = src_index.dtype();
......
......@@ -102,12 +102,12 @@ void GraphSendRecvGradOpCUDAKernelLaunchHelper(
template <typename T, typename Context>
void GraphSendRecvGradKernel(const Context& ctx,
                             const DenseTensor& out_grad,
                             const DenseTensor& x,
                             paddle::optional<const DenseTensor&> out,
                             const DenseTensor& src_index,
                             const DenseTensor& dst_index,
                             paddle::optional<const DenseTensor&> out,
                             paddle::optional<const DenseTensor&> dst_count,
                             const DenseTensor& out_grad,
                             const std::string& pool_type,
                             DenseTensor* x_grad) {
  auto index_type = src_index.dtype();
......
......@@ -22,12 +22,12 @@ namespace phi {
template <typename T, typename Context>
void GraphSendRecvGradKernel(const Context& ctx,
                             const DenseTensor& out_grad,
                             const DenseTensor& x,
                             paddle::optional<const DenseTensor&> out,
                             const DenseTensor& src_index,
                             const DenseTensor& dst_index,
                             paddle::optional<const DenseTensor&> out,
                             paddle::optional<const DenseTensor&> dst_count,
                             const DenseTensor& out_grad,
                             const std::string& pool_type,
                             DenseTensor* x_grad);
} // namespace phi
......@@ -28,7 +28,7 @@ KernelSignature GraphSendRecvGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature(
      "graph_send_recv_grad",
      {GradVarName("Out"), "X", "Out", "Src_index", "Dst_index", "Dst_count"},
      {"X", "Src_index", "Dst_index", "Out", "Dst_count", GradVarName("Out")},
      {"pool_type"},
      {GradVarName("X")});
}
......
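Note that the signature change above is purely a reordering: GradVarName("Out") resolves to the legacy input name Out@GRAD, and the new input order (X, Src_index, Dst_index, Out, Dst_count, Out@GRAD) mirrors the argument list of the graph_send_recv_grad entry added to backward.yaml further down. A minimal sketch of what the remapping does, with hypothetical string stand-ins in place of real DenseTensors:

# Illustration only: the same named inputs, handed to the grad kernel in the
# new order expected by the yaml-generated backward API.
legacy_order = ["Out@GRAD", "X", "Out", "Src_index", "Dst_index", "Dst_count"]
new_order = ["X", "Src_index", "Dst_index", "Out", "Dst_count", "Out@GRAD"]
inputs = {name: "tensor_" + name.lower() for name in legacy_order}
kernel_args = [inputs[name] for name in new_order]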
......@@ -22,6 +22,14 @@ from paddle.fluid import framework
from paddle import _C_ops
final_state_name_mapping = {
    "graph_send_recv": {
        "final_op_name": "final_state_graph_send_recv",
        "x": "X",
        "src_index": "Src_index",
        "dst_index": "Dst_index",
        "out": "Out",
        "dst_count": "Dst_count"
    },
    "matmul_v2": {
        "final_op_name": "final_state_matmul",
        "transpose_x": "trans_x",
......
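The new graph_send_recv entry registers the op with the eager name-mapping table, so code that still builds legacy attribute/variable names can be redirected to final_state_graph_send_recv. A hedged sketch of how such a table can be consumed; the actual dispatch logic lives elsewhere in the framework and is not part of this diff:

# Illustration only: translate legacy-named arguments through the mapping.
entry = {
    "final_op_name": "final_state_graph_send_recv",
    "x": "X",
    "src_index": "Src_index",
    "dst_index": "Dst_index",
}
legacy_args = {"X": "x_tensor", "Src_index": "src", "Dst_index": "dst"}
final_args = {new: legacy_args[old]
              for new, old in entry.items() if old in legacy_args}
# final_args == {"x": "x_tensor", "src_index": "src", "dst_index": "dst"}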
......@@ -17,13 +17,26 @@ import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard
from op_test import OpTest
def graph_send_recv_wrapper(x,
                            src_index,
                            dst_index,
                            pool_type="sum",
                            out_size=None,
                            name=None):
    return paddle.incubate.graph_send_recv(x, src_index, dst_index,
                                           pool_type.lower(), out_size, name)


class TestGraphSendRecvMaxOp(OpTest):
    def setUp(self):
        paddle.enable_static()
        self.python_api = graph_send_recv_wrapper
        self.python_out_sig = ["Out"]
        self.op_type = "graph_send_recv"
        x = np.random.random((10, 20)).astype("float64")
        index = np.random.randint(0, 10, (15, 2)).astype(np.int64)
......@@ -39,15 +52,18 @@ class TestGraphSendRecvMaxOp(OpTest):
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', user_defined_grads=[self.gradient])
        self.check_grad(
            ['X'], 'Out', user_defined_grads=[self.gradient], check_eager=True)


class TestGraphSendRecvMinOp(OpTest):
    def setUp(self):
        paddle.enable_static()
        self.python_api = graph_send_recv_wrapper
        self.python_out_sig = ["Out"]
        self.op_type = "graph_send_recv"
        x = np.random.random((10, 20)).astype("float64")
        index = np.random.randint(0, 10, (15, 2)).astype(np.int64)
......@@ -64,15 +80,18 @@ class TestGraphSendRecvMinOp(OpTest):
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', user_defined_grads=[self.gradient])
        self.check_grad(
            ['X'], 'Out', user_defined_grads=[self.gradient], check_eager=True)


class TestGraphSendRecvSumOp(OpTest):
    def setUp(self):
        paddle.enable_static()
        self.python_api = graph_send_recv_wrapper
        self.python_out_sig = ["Out"]
        self.op_type = "graph_send_recv"
        x = np.random.random((10, 20)).astype("float64")
        index = np.random.randint(0, 10, (15, 2)).astype(np.int64)
......@@ -88,15 +107,17 @@ class TestGraphSendRecvSumOp(OpTest):
        self.outputs = {'Out': out}

    def test_check_output(self):
        self.check_output()
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
        self.check_grad(['X'], 'Out', check_eager=True)


class TestGraphSendRecvMeanOp(OpTest):
    def setUp(self):
        paddle.enable_static()
        self.python_api = graph_send_recv_wrapper
        self.python_out_sig = ["Out"]
        self.op_type = "graph_send_recv"
        x = np.random.random((10, 20)).astype("float64")
        index = np.random.randint(0, 10, (15, 2)).astype(np.int64)
......@@ -113,10 +134,10 @@ class TestGraphSendRecvMeanOp(OpTest):
        self.outputs = {'Out': out, 'Dst_count': dst_count}

    def test_check_output(self):
        self.check_output()
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out')
        self.check_grad(['X'], 'Out', check_eager=True)


def compute_graph_send_recv_for_sum_mean(inputs, attributes):
......@@ -333,6 +354,12 @@ class API_GraphSendRecvOpTest(unittest.TestCase):
                {}\n{}, check diff!"
            .format(np_res_set_outsize, res_set_outsize))

    def test_api_eager_dygraph(self):
        with _test_eager_guard():
            self.test_dygraph()
            self.test_int32_input()
            self.test_set_outsize_gpu()
if __name__ == '__main__':
    unittest.main()
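The check_eager=True runs above compare the eager kernels against the same numpy references the legacy paths use (compute_graph_send_recv_for_sum_mean and friends, collapsed above). For orientation, here is a small self-contained numpy sketch of the forward semantics for "sum" pooling; send_recv_sum_ref is a hypothetical name, not part of the test file:

import numpy as np

def send_recv_sum_ref(x, src_index, dst_index):
    # out[d] accumulates x[s] over every edge (s, d); untouched rows stay zero.
    out = np.zeros_like(x)
    for s, d in zip(src_index, dst_index):
        out[d] += x[s]
    return out

x = np.random.random((10, 20)).astype("float64")
index = np.random.randint(0, 10, (15, 2)).astype(np.int64)
ref = send_recv_sum_ref(x, index[:, 0], index[:, 1])  # same shape as x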
......@@ -13,7 +13,7 @@
# limitations under the License.
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.framework import _non_static_mode
from paddle.fluid.framework import _non_static_mode, _in_legacy_dygraph, in_dygraph_mode
from paddle.fluid.data_feeder import check_variable_and_dtype
from paddle.fluid import core
from paddle import _C_ops
......@@ -109,15 +109,30 @@ def graph_send_recv(x,
# TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1.
    if _non_static_mode():
        if out_size is None or out_size <= 0:
    if out_size is None or out_size <= 0:
        if _in_legacy_dygraph():
            out, tmp = _C_ops.graph_send_recv(x, src_index, dst_index,
                                              'pool_type', pool_type.upper())
        else:
            return out
        if in_dygraph_mode():
            return _C_ops.final_state_graph_send_recv(x, src_index, dst_index,
                                                      pool_type.upper(), 0)
    else:
        if _in_legacy_dygraph():
            out, tmp = _C_ops.graph_send_recv(
                x, src_index, dst_index, 'pool_type',
                pool_type.upper(), 'out_size', out_size)
        return out
            return out
        if in_dygraph_mode():
            if isinstance(out_size, core.eager.Tensor):
                if (out_size.size < 1):
                    raise ValueError(
                        "out_size should be long type, but received Tensor type."
                    )
                out_size = out_size.numpy()[0]
            return _C_ops.final_state_graph_send_recv(x, src_index, dst_index,
                                                      pool_type.upper(),
                                                      out_size)

    check_variable_and_dtype(x, "X", ("float32", "float64", "int32", "int64"),
                             "graph_send_recv")
......
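Taken together, the new branches keep the legacy _C_ops.graph_send_recv path under _in_legacy_dygraph() and route eager mode to the yaml-generated final-state op, with out_size forwarded (or 0 when unset). A minimal dygraph usage sketch, with tensor values borrowed from this API's documentation example:

import paddle

x = paddle.to_tensor([[0., 2., 3.], [1., 4., 5.], [2., 6., 7.]])
src_index = paddle.to_tensor([0, 1, 2, 0], dtype="int32")
dst_index = paddle.to_tensor([1, 2, 1, 0], dtype="int32")

# Gather x[src_index] and sum-reduce into the dst_index positions:
# out[0] = x[0], out[1] = x[0] + x[2], out[2] = x[1]
out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum")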
......@@ -756,6 +756,17 @@
    func : gelu
  backward : gelu_grad

- api : graph_send_recv
  args : (Tensor x, Tensor src_index, Tensor dst_index, str pool_type = "SUM", int64_t out_size = 0)
  output : Tensor(out), Tensor(dst_count)
  infer_meta :
    func : GraphSendRecvInferMeta
  kernel :
    func : graph_send_recv
    data_type : x
  intermediate : dst_count
  backward : graph_send_recv_grad

- api : greater_equal
  args : (Tensor x, Tensor y, int axis = -1)
  output : Tensor
......@@ -1162,7 +1173,7 @@
  kernel :
    func : mean_all
  backward : mean_all_grad

- api : meshgrid
  args : (Tensor[] inputs)
  output : Tensor[]
......
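This api.yaml entry is what generates the final_state_graph_send_recv binding called from the Python changes above. Declaring dst_count as intermediate keeps it out of the user-facing return value while still feeding it to the backward kernel. A sketch of invoking the generated op directly under the eager test guard, mirroring the out_size-unset branch:

import paddle
from paddle import _C_ops
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    x = paddle.to_tensor([[0., 2., 3.], [1., 4., 5.], [2., 6., 7.]])
    src_index = paddle.to_tensor([0, 1, 2, 0], dtype="int32")
    dst_index = paddle.to_tensor([1, 2, 1, 0], dtype="int32")
    # out_size = 0 matches the "out_size is None or out_size <= 0" branch.
    out = _C_ops.final_state_graph_send_recv(x, src_index, dst_index, "SUM", 0)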
......@@ -537,6 +537,17 @@
  kernel :
    func : gelu_grad

- backward_api : graph_send_recv_grad
  forward : graph_send_recv (Tensor x, Tensor src_index, Tensor dst_index, str pool_type = "SUM", int64_t out_size = 0) -> Tensor(out), Tensor(dst_count)
  args : (Tensor x, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str pool_type = "SUM")
  output : Tensor(x_grad)
  infer_meta :
    func : GeneralUnaryGradInferMeta
    param : [x]
  kernel :
    func : graph_send_recv_grad
  optional: out, dst_count

- backward_api : hard_shrink_grad
  forward : hard_shrink (Tensor x, float threshold) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float threshold)
......
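With out and dst_count marked optional and out_grad wired in, the generated backward matches the reordered GraphSendRecvGradKernel signature above. A quick dygraph check of the gradient path; for "sum" pooling each row's gradient simply counts its outgoing edges:

import paddle

x = paddle.to_tensor(
    [[0., 2., 3.], [1., 4., 5.], [2., 6., 7.]], stop_gradient=False)
src_index = paddle.to_tensor([0, 1, 2, 0], dtype="int32")
dst_index = paddle.to_tensor([1, 2, 1, 0], dtype="int32")

out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum")
out.sum().backward()
# Node 0 feeds two edges, nodes 1 and 2 one each, so x.grad rows are
# [2., 2., 2.], [1., 1., 1.], [1., 1., 1.].
print(x.grad)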