Unverified commit a0b0a32f, authored by YuanRisheng, committed by GitHub


[Cherry-Pick]Add hard_swish/kron/linspace/logit/graph_send_recv/multi_dot/maxout/multiplex op yaml file  (#41566)

* [Phi]Add graph_send_recv yaml file (#41206)

* add graph_send_recv yaml

* deal with conflict

* fix compile bugs

* cherry-pick pr 41298

* cherry-pick pr41550

* fix compile bugs
Parent 883d5be3
@@ -67,7 +67,7 @@ class LinspaceOpMaker : public framework::OpProtoAndCheckerMaker {
 namespace ops = paddle::operators;
 DECLARE_INFER_SHAPE_FUNCTOR(linspace, LinspaceInferShapeFunctor,
-                            PD_INFER_META(phi::LinspaceInferMeta));
+                            PD_INFER_META(phi::LinspaceRawInferMeta));
 REGISTER_OPERATOR(
     linspace, ops::LinspaceOp, ops::LinspaceOpMaker,
     paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
......
@@ -276,10 +276,10 @@ void LerpInferMeta(const MetaTensor& x,
   out->share_lod(x);
 }

-void LinspaceInferMeta(const MetaTensor& start,
+void LinspaceRawInferMeta(const MetaTensor& start,
                        const MetaTensor& stop,
                        const MetaTensor& number,
                        MetaTensor* out) {
   auto s_dims = start.dims();
   PADDLE_ENFORCE_EQ(
       (s_dims.size() == 1) && (s_dims[0] == 1),
@@ -305,6 +305,14 @@ void LinspaceInferMeta(const MetaTensor& start,
   out->set_dtype(start.dtype());
 }

+void LinspaceInferMeta(const MetaTensor& start,
+                       const MetaTensor& stop,
+                       const MetaTensor& number,
+                       DataType dtype,
+                       MetaTensor* out) {
+  LinspaceRawInferMeta(start, stop, number, out);
+}
+
 void NllLossRawInferMeta(const MetaTensor& input,
                          const MetaTensor& label,
                          paddle::optional<const MetaTensor&> weight,
......
@@ -65,9 +65,15 @@ void LerpInferMeta(const MetaTensor& x,
                    const MetaTensor& weight,
                    MetaTensor* out);

+void LinspaceRawInferMeta(const MetaTensor& start,
+                          const MetaTensor& stop,
+                          const MetaTensor& number,
+                          MetaTensor* out);
+
 void LinspaceInferMeta(const MetaTensor& start,
                        const MetaTensor& stop,
                        const MetaTensor& number,
+                       DataType dtype,
                        MetaTensor* out);

 void NllLossRawInferMeta(const MetaTensor& input,
......
@@ -197,6 +197,7 @@ DECLARE_ACT_GRAD_KERNEL_WITH_ONE_ATTRS_DEPX(ThresholdedRelu, threshold);
 DECLARE_ACT_GRAD_KERNEL_WITH_ONE_ATTRS_DEPX(SoftShrink, lambda);
 DECLARE_ACT_GRAD_KERNEL_WITH_ONE_ATTRS_DEPX(HardShrink, threshold);
 DECLARE_ACT_GRAD_KERNEL_WITH_ONE_ATTRS_DEPX(Swish, beta);
+DECLARE_ACT_GRAD_KERNEL_WITH_ONE_ATTRS_DEPX(Logit, eps);

 DECLARE_ACT_GRAD_KERNEL_WITH_TWO_ATTRS_DEPX(BRelu, t_min, t_max);
......
@@ -118,12 +118,12 @@ void GraphSendRecvGradOpKernelLaunchHelper(

 template <typename T, typename Context>
 void GraphSendRecvGradKernel(const Context& ctx,
-                             const DenseTensor& out_grad,
                              const DenseTensor& x,
-                             paddle::optional<const DenseTensor&> out,
                              const DenseTensor& src_index,
                              const DenseTensor& dst_index,
+                             paddle::optional<const DenseTensor&> out,
                              paddle::optional<const DenseTensor&> dst_count,
+                             const DenseTensor& out_grad,
                              const std::string& pool_type,
                              DenseTensor* x_grad) {
   auto index_type = src_index.dtype();
......
@@ -102,12 +102,12 @@ void GraphSendRecvGradOpCUDAKernelLaunchHelper(

 template <typename T, typename Context>
 void GraphSendRecvGradKernel(const Context& ctx,
-                             const DenseTensor& out_grad,
                              const DenseTensor& x,
-                             paddle::optional<const DenseTensor&> out,
                              const DenseTensor& src_index,
                              const DenseTensor& dst_index,
+                             paddle::optional<const DenseTensor&> out,
                              paddle::optional<const DenseTensor&> dst_count,
+                             const DenseTensor& out_grad,
                              const std::string& pool_type,
                              DenseTensor* x_grad) {
   auto index_type = src_index.dtype();
......
@@ -22,12 +22,12 @@ namespace phi {

 template <typename T, typename Context>
 void GraphSendRecvGradKernel(const Context& ctx,
-                             const DenseTensor& out_grad,
                              const DenseTensor& x,
-                             paddle::optional<const DenseTensor&> out,
                              const DenseTensor& src_index,
                              const DenseTensor& dst_index,
+                             paddle::optional<const DenseTensor&> out,
                              paddle::optional<const DenseTensor&> dst_count,
+                             const DenseTensor& out_grad,
                              const std::string& pool_type,
                              DenseTensor* x_grad);
 }  // namespace phi
@@ -28,7 +28,7 @@ KernelSignature GraphSendRecvGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
       "graph_send_recv_grad",
-      {GradVarName("Out"), "X", "Out", "Src_index", "Dst_index", "Dst_count"},
+      {"X", "Src_index", "Dst_index", "Out", "Dst_count", GradVarName("Out")},
       {"pool_type"},
       {GradVarName("X")});
 }
......
@@ -22,6 +22,14 @@ from paddle.fluid import framework
 from paddle import _C_ops

 final_state_name_mapping = {
+    "graph_send_recv": {
+        "final_op_name": "final_state_graph_send_recv",
+        "x": "X",
+        "src_index": "Src_index",
+        "dst_index": "Dst_index",
+        "out": "Out",
+        "dst_count": "Dst_count"
+    },
     "matmul_v2": {
         "final_op_name": "final_state_matmul",
         "transpose_x": "trans_x",
......
@@ -1562,10 +1562,12 @@ def linspace(start, stop, num, dtype=None, name=None):
     if not isinstance(num, Variable):
         with device_guard("cpu"):
             tensor_num = fill_constant([1], 'int32', num)
-    if _non_static_mode():
+    if _in_legacy_dygraph():
         return _C_ops.linspace(tensor_start, tensor_stop, tensor_num, 'dtype',
                                dtype)
+    if in_dygraph_mode():
+        return _C_ops.final_state_linspace(tensor_start, tensor_stop,
+                                           tensor_num, dtype)

     helper = LayerHelper("linspace", **locals())
     start_dtype = convert_dtype(tensor_start.dtype)
......
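For reference, the eager branch added above is exactly what a plain `paddle.linspace` call exercises in dygraph mode. A minimal sketch (not part of the diff), reusing the values of TestLinspaceOpCommonCase further down:

    import paddle

    # 11 evenly spaced values from 0 to 10; in eager mode this now routes
    # through _C_ops.final_state_linspace via the branch added above.
    out = paddle.linspace(0, 10, 11, dtype='float32')
    print(out.numpy())  # [ 0.  1.  2. ... 10.]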
@@ -1753,7 +1753,7 @@ class TestHardSwish(TestActivation):
     def setUp(self):
         self.op_type = 'hard_swish'
         self.init_dtype()
+        self.python_api = paddle.nn.functional.hardswish
         skip_check_grad_ci(reason="not implemented yet")
         np.random.seed(1024)
@@ -1775,7 +1775,10 @@ class TestHardSwish(TestActivation):
             return
         return  # not implemented yet
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
+
+    def test_check_output(self):
+        self.check_output(check_eager=True)


 class TestHardswishAPI(unittest.TestCase):
@@ -1836,6 +1839,11 @@ class TestHardswishAPI(unittest.TestCase):
                 name='x_fp16', shape=[12, 10], dtype='float16')
             F.hardswish(x_fp16)

+    def test_api_eager_dygraph(self):
+        with _test_eager_guard():
+            self.test_dygraph_api()
+            self.test_errors()
+

 class TestSoftRelu(TestActivation):
     def setUp(self):
......
@@ -17,13 +17,26 @@ import unittest
 import numpy as np

 import paddle
 import paddle.fluid as fluid
+from paddle.fluid.framework import _test_eager_guard

 from op_test import OpTest


+def graph_send_recv_wrapper(x,
+                            src_index,
+                            dst_index,
+                            pool_type="sum",
+                            out_size=None,
+                            name=None):
+    return paddle.incubate.graph_send_recv(x, src_index, dst_index,
+                                           pool_type.lower(), out_size, name)
+
+
 class TestGraphSendRecvMaxOp(OpTest):
     def setUp(self):
         paddle.enable_static()
+        self.python_api = graph_send_recv_wrapper
+        self.python_out_sig = ["Out"]
         self.op_type = "graph_send_recv"
         x = np.random.random((10, 20)).astype("float64")
         index = np.random.randint(0, 10, (15, 2)).astype(np.int64)
@@ -39,15 +52,18 @@ class TestGraphSendRecvMaxOp(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=[self.gradient])
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=[self.gradient], check_eager=True)


 class TestGraphSendRecvMinOp(OpTest):
     def setUp(self):
         paddle.enable_static()
+        self.python_api = graph_send_recv_wrapper
+        self.python_out_sig = ["Out"]
         self.op_type = "graph_send_recv"
         x = np.random.random((10, 20)).astype("float64")
         index = np.random.randint(0, 10, (15, 2)).astype(np.int64)
@@ -64,15 +80,18 @@ class TestGraphSendRecvMinOp(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=[self.gradient])
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=[self.gradient], check_eager=True)


 class TestGraphSendRecvSumOp(OpTest):
     def setUp(self):
         paddle.enable_static()
+        self.python_api = graph_send_recv_wrapper
+        self.python_out_sig = ["Out"]
         self.op_type = "graph_send_recv"
         x = np.random.random((10, 20)).astype("float64")
         index = np.random.randint(0, 10, (15, 2)).astype(np.int64)
@@ -88,15 +107,17 @@ class TestGraphSendRecvSumOp(OpTest):
         self.outputs = {'Out': out}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestGraphSendRecvMeanOp(OpTest):
     def setUp(self):
         paddle.enable_static()
+        self.python_api = graph_send_recv_wrapper
+        self.python_out_sig = ["Out"]
         self.op_type = "graph_send_recv"
         x = np.random.random((10, 20)).astype("float64")
         index = np.random.randint(0, 10, (15, 2)).astype(np.int64)
@@ -113,10 +134,10 @@ class TestGraphSendRecvMeanOp(OpTest):
         self.outputs = {'Out': out, 'Dst_count': dst_count}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 def compute_graph_send_recv_for_sum_mean(inputs, attributes):
@@ -333,6 +354,12 @@ class API_GraphSendRecvOpTest(unittest.TestCase):
                          {}\n{}, check diff!"
                          .format(np_res_set_outsize, res_set_outsize))

+    def test_api_eager_dygraph(self):
+        with _test_eager_guard():
+            self.test_dygraph()
+            self.test_int32_input()
+            self.test_set_outsize_gpu()
+

 if __name__ == '__main__':
     unittest.main()
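The `graph_send_recv_wrapper` above simply forwards the OpTest `python_api` to the public incubate API with a lower-cased pool type. As an illustration of what the "sum" pool computes (a hedged sketch; `send_recv_sum_ref` is not a helper from this patch), the reference these tests compare against can be reproduced with plain numpy:

    import numpy as np

    def send_recv_sum_ref(x, src_index, dst_index):
        # Gather rows of x at src_index and scatter-add them into the
        # dst_index positions of an all-zero output of the same shape.
        out = np.zeros_like(x)
        for s, d in zip(src_index, dst_index):
            out[d] += x[s]
        return out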
@@ -21,11 +21,13 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 import paddle.fluid.dygraph as dg
+from paddle.fluid.framework import _test_eager_guard


 class TestKronOp(OpTest):
     def setUp(self):
         self.op_type = "kron"
+        self.python_api = paddle.kron
         self.dtype = self._init_dtype()
         x = np.random.uniform(size=(10, 10)).astype(self.dtype)
         y = np.random.uniform(size=(10, 10)).astype(self.dtype)
@@ -37,21 +39,22 @@ class TestKronOp(OpTest):
         return "float64"

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X', 'Y'], 'Out')
+        self.check_grad(['X', 'Y'], 'Out', check_eager=True)

     def test_check_grad_ignore_x(self):
-        self.check_grad(['Y'], 'Out', no_grad_set=set('X'))
+        self.check_grad(['Y'], 'Out', no_grad_set=set('X'), check_eager=True)

     def test_check_grad_ignore_y(self):
-        self.check_grad(['X'], 'Out', no_grad_set=set('Y'))
+        self.check_grad(['X'], 'Out', no_grad_set=set('Y'), check_eager=True)


 class TestKronOp2(TestKronOp):
     def setUp(self):
         self.op_type = "kron"
+        self.python_api = paddle.kron
         self.dtype = self._init_dtype()
         x = np.random.uniform(size=(5, 5, 4)).astype(self.dtype)
         y = np.random.uniform(size=(10, 10)).astype(self.dtype)
@@ -63,6 +66,7 @@ class TestKronOp2(TestKronOp):
 class TestKronOp3(TestKronOp):
     def setUp(self):
         self.op_type = "kron"
+        self.python_api = paddle.kron
         self.dtype = self._init_dtype()
         x = np.random.uniform(size=(10, 10)).astype(self.dtype)
         y = np.random.uniform(size=(5, 5, 4)).astype(self.dtype)
@@ -101,10 +105,16 @@ class TestKronLayer(unittest.TestCase):
             c, = exe.run(main, feed={'a': a, 'b': b}, fetch_list=[out_var])
             np.testing.assert_allclose(c, np.kron(a, b))

+    def test_api_eager_dygraph(self):
+        with _test_eager_guard():
+            self.test_case()
+            self.test_case_with_output()
+

 class TestComplexKronOp(OpTest):
     def setUp(self):
         self.op_type = "kron"
+        self.python_api = paddle.kron
         self.x_shape = np.array([10, 10])
         self.y_shape = np.array([3, 35])
         self.out_shape = self.x_shape * self.y_shape
@@ -160,14 +170,15 @@ class TestComplexKronOp(OpTest):
         return grad_y

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad_normal(self):
         self.check_grad(
             ['X', 'Y'],
             'Out',
             user_defined_grads=[self.grad_x, self.grad_y],
-            user_defined_grad_outputs=[self.grad_out])
+            user_defined_grad_outputs=[self.grad_out],
+            check_eager=True)

     def test_check_grad_ingore_x(self):
         self.check_grad(
@@ -175,7 +186,8 @@ class TestComplexKronOp(OpTest):
             'Out',
             no_grad_set=set("X"),
             user_defined_grads=[self.grad_y],
-            user_defined_grad_outputs=[self.grad_out])
+            user_defined_grad_outputs=[self.grad_out],
+            check_eager=True)

     def test_check_grad_ingore_y(self):
         self.check_grad(
@@ -183,7 +195,8 @@ class TestComplexKronOp(OpTest):
             'Out',
             no_grad_set=set('Y'),
             user_defined_grads=[self.grad_x],
-            user_defined_grad_outputs=[self.grad_out])
+            user_defined_grad_outputs=[self.grad_out],
+            check_eager=True)


 class TestKronOpTypePromotion(TestComplexKronOp):
......
@@ -21,11 +21,13 @@ import paddle
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
 from paddle.fluid import core
+from paddle.fluid.framework import _test_eager_guard


 class TestLinspaceOpCommonCase(OpTest):
     def setUp(self):
         self.op_type = "linspace"
+        self.python_api = paddle.linspace
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([0]).astype(dtype),
@@ -37,12 +39,13 @@ class TestLinspaceOpCommonCase(OpTest):
         self.outputs = {'Out': np.arange(0, 11).astype(dtype)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestLinspaceOpReverseCase(OpTest):
     def setUp(self):
         self.op_type = "linspace"
+        self.python_api = paddle.linspace
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([10]).astype(dtype),
@@ -54,12 +57,13 @@ class TestLinspaceOpReverseCase(OpTest):
         self.outputs = {'Out': np.arange(10, -1, -1).astype(dtype)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestLinspaceOpNumOneCase(OpTest):
     def setUp(self):
         self.op_type = "linspace"
+        self.python_api = paddle.linspace
         dtype = 'float32'
         self.inputs = {
             'Start': np.array([10]).astype(dtype),
@@ -71,7 +75,7 @@ class TestLinspaceOpNumOneCase(OpTest):
         self.outputs = {'Out': np.array(10, dtype=dtype)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)


 class TestLinspaceAPI(unittest.TestCase):
@@ -123,6 +127,11 @@ class TestLinspaceAPI(unittest.TestCase):
         self.assertEqual((out2.numpy() == np_out2).all(), True)
         self.assertEqual((out3.numpy() == np_out3).all(), True)

+    def test_api_eager_dygraph(self):
+        with _test_eager_guard():
+            self.test_variable_input2()
+            self.test_imperative()
+

 class TestLinspaceOpError(unittest.TestCase):
     def test_errors(self):
......
@@ -16,6 +16,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle
+from paddle.fluid.framework import _test_eager_guard

 np.random.seed(10)
@@ -37,6 +38,7 @@ def logit_grad(x, eps=1e-8):
 class TestLogitOp(OpTest):
     def setUp(self):
         self.op_type = 'logit'
+        self.python_api = paddle.logit
         self.dtype = np.float64
         self.shape = [120]
         self.eps = 1e-8
@@ -52,10 +54,11 @@ class TestLogitOp(OpTest):
         pass

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], ['Out'], user_defined_grads=[self.x_grad])
+        self.check_grad(
+            ['X'], ['Out'], user_defined_grads=[self.x_grad], check_eager=True)


 class TestLogitShape(TestLogitOp):
@@ -106,6 +109,11 @@ class TestLogitAPI(unittest.TestCase):
             x = paddle.fluid.data(name='X2', shape=[100], dtype='float32')
             self.assertRaises(TypeError, paddle.logit, x, dtype='int32')

+    def test_api_eager_dygraph(self):
+        with _test_eager_guard():
+            self.test_check_api()
+            self.test_errors()
+

 if __name__ == "__main__":
     unittest.main()
@@ -19,6 +19,7 @@ import numpy as np
 from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
+from paddle.fluid.framework import _test_eager_guard


 class TestMultiplexOp(OpTest):
@@ -102,6 +103,30 @@ class TestMultiplexODygrap(unittest.TestCase):
             res = paddle.multiplex(inputs, index)
         paddle.enable_static()

+    def test_dygraph_final_state_api(self):
+        with fluid.dygraph.guard():
+            img1 = np.array([[1, 2], [3, 4]]).astype(np.float32)
+            img2 = np.array([[5, 6], [7, 8]]).astype(np.float32)
+            inputs = [paddle.to_tensor(img1), paddle.to_tensor(img2)]
+            index = paddle.to_tensor(np.array([[1], [0]]).astype(np.int32))
+            inputs[0].stop_gradient = False
+            inputs[1].stop_gradient = False
+            res = paddle.multiplex(inputs, index)
+            res.backward()
+            with _test_eager_guard():
+                inputs_eager = [paddle.to_tensor(img1), paddle.to_tensor(img2)]
+                index_eager = paddle.to_tensor(
+                    np.array([[1], [0]]).astype(np.int32))
+                inputs_eager[0].stop_gradient = False
+                inputs_eager[1].stop_gradient = False
+                res_eager = paddle.multiplex(inputs_eager, index_eager)
+                res_eager.backward()
+                self.assertEqual((res.numpy() == res_eager.numpy()).all(), True)
+                self.assertEqual((inputs[0].grad.numpy() ==
+                                  inputs_eager[0].grad.numpy()).all(), True)
+                self.assertEqual((inputs[1].grad.numpy() ==
+                                  inputs_eager[1].grad.numpy()).all(), True)
+

 if __name__ == '__main__':
     unittest.main()
@@ -13,7 +13,7 @@
 # limitations under the License.

 from paddle.fluid.layer_helper import LayerHelper
-from paddle.fluid.framework import _non_static_mode
+from paddle.fluid.framework import _non_static_mode, _in_legacy_dygraph, in_dygraph_mode
 from paddle.fluid.data_feeder import check_variable_and_dtype
 from paddle.fluid import core
 from paddle import _C_ops
@@ -109,15 +109,30 @@ def graph_send_recv(x,
     # TODO(daisiming): Should we add judgement for out_size: max(dst_index) + 1.

-    if _non_static_mode():
-        if out_size is None or out_size <= 0:
+    if out_size is None or out_size <= 0:
+        if _in_legacy_dygraph():
             out, tmp = _C_ops.graph_send_recv(x, src_index, dst_index,
                                               'pool_type', pool_type.upper())
-        else:
+            return out
+        if in_dygraph_mode():
+            return _C_ops.final_state_graph_send_recv(x, src_index, dst_index,
+                                                      pool_type.upper(), 0)
+    else:
+        if _in_legacy_dygraph():
             out, tmp = _C_ops.graph_send_recv(
                 x, src_index, dst_index, 'pool_type',
                 pool_type.upper(), 'out_size', out_size)
-        return out
+            return out
+        if in_dygraph_mode():
+            if isinstance(out_size, core.eager.Tensor):
+                if (out_size.size < 1):
+                    raise ValueError(
+                        "out_size should be long type, but received Tensor type."
+                    )
+                out_size = out_size.numpy()[0]
+            return _C_ops.final_state_graph_send_recv(x, src_index, dst_index,
+                                                      pool_type.upper(),
+                                                      out_size)

     check_variable_and_dtype(x, "X", ("float32", "float64", "int32", "int64"),
                              "graph_send_recv")
......
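For context, a minimal eager-mode call of the API refactored above (a sketch, not from the patch; the tensor values are illustrative and the src/dst indices are plain integer tensors):

    import paddle

    x = paddle.to_tensor([[0., 2., 3.], [1., 4., 5.], [2., 6., 7.]])
    src_index = paddle.to_tensor([0, 1, 2, 0], dtype="int32")
    dst_index = paddle.to_tensor([1, 2, 1, 0], dtype="int32")
    # Rows gathered at src_index are sum-reduced into the dst_index slots.
    # In eager mode with no out_size this now follows the branch above and
    # calls final_state_graph_send_recv(..., "SUM", 0).
    out = paddle.incubate.graph_send_recv(x, src_index, dst_index, pool_type="sum")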
@@ -28,6 +28,7 @@ from ...fluid.data_feeder import check_variable_and_dtype, check_dtype
 import paddle
 from paddle import _C_ops, in_dynamic_mode
 from paddle.framework import core
+from paddle.fluid.framework import _in_legacy_dygraph, in_dygraph_mode

 __all__ = []
@@ -386,8 +387,10 @@ def hardswish(x, name=None):
             out = F.hardswish(x) # [0., 5., 0.666667]
     """

-    if in_dynamic_mode():
+    if _in_legacy_dygraph():
         return _C_ops.hard_swish(x)
+    if in_dygraph_mode():
+        return _C_ops.final_state_hard_swish(x, 6, 6, 3)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'],
                              'hardswish')
......
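The three literals passed to `final_state_hard_swish` are the op's default attributes (threshold=6, scale=6, offset=3), which correspond to the usual hard-swish definition, roughly out = x * min(max(0, x + offset), threshold) / scale. A minimal call (a sketch, reusing the docstring's example values):

    import paddle
    import paddle.nn.functional as F

    x = paddle.to_tensor([-4., 5., 1.])
    out = F.hardswish(x)  # [0., 5., 0.666667], matching the docstring above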
@@ -2674,9 +2674,10 @@ ${comment}
             #         [12, 15, 18, 16, 20, 24],
             #         [21, 24, 27, 28, 32, 36]])
     """
-    if paddle.in_dynamic_mode():
+    if _in_legacy_dygraph():
         return _C_ops.kron(x, y)
+    if in_dygraph_mode():
+        return _C_ops.final_state_kron(x, y)

     helper = LayerHelper('kron', **locals())
     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
     check_variable_and_dtype(y, 'y', ['float16', 'float32', 'float64', 'int32', 'int64'], 'kron')
@@ -3525,9 +3526,10 @@ def logit(x, eps=None, name=None):
     if eps == None:
         eps = 0.0
-    if paddle.in_dynamic_mode():
+    if _in_legacy_dygraph():
         return _C_ops.logit(x, 'eps', eps)
+    if in_dygraph_mode():
+        return _C_ops.final_state_logit(x, eps)

     check_variable_and_dtype(x, 'x', ['float16', 'float32', 'float64'], 'logit')
     helper = LayerHelper("logit", **locals())
     out = helper.create_variable_for_type_inference(x.dtype)
......
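Both functions touched above now take the eager fast path in dygraph mode. A short sketch exercising them (values are illustrative only):

    import paddle

    a = paddle.to_tensor([[1., 2.], [3., 4.]])
    b = paddle.to_tensor([[0., 1.], [1., 0.]])
    k = paddle.kron(a, b)  # Kronecker product, shape [4, 4]

    p = paddle.to_tensor([0.1, 0.5, 0.9])
    # logit(p) = log(p / (1 - p)); eps is used to clip the input away from
    # 0 and 1 (the Python wrapper defaults it to 0.0, the yaml entry to 1e-6f).
    l = paddle.logit(p, eps=1e-6)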
@@ -749,6 +749,17 @@
   func : gelu
   backward : gelu_grad

+- api : graph_send_recv
+  args : (Tensor x, Tensor src_index, Tensor dst_index, str pool_type = "SUM", int64_t out_size = 0)
+  output : Tensor(out), Tensor(dst_count)
+  infer_meta :
+    func : GraphSendRecvInferMeta
+  kernel :
+    func : graph_send_recv
+    data_type : x
+  intermediate : dst_count
+  backward : graph_send_recv_grad
+
 - api : greater_equal
   args : (Tensor x, Tensor y, int axis = -1)
   output : Tensor
@@ -796,6 +807,16 @@
     func : hard_sigmoid
   backward : hard_sigmoid_grad

+- api : hard_swish
+  args : (Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : hard_swish
+  backward : hard_swish_grad
+
 # histogram
 - api : histogram
   args : (Tensor x, int64_t bins, int min, int max)
@@ -907,6 +928,15 @@
     data_type : x
   backward : kldiv_loss_grad

+- api : kron
+  args : (Tensor x, Tensor y)
+  output : Tensor
+  infer_meta :
+    func : KronInferMeta
+  kernel :
+    func : kron
+  backward : kron_grad
+
 - api : kthvalue
   args : (Tensor x, int k, int axis, bool keepdim)
   output : Tensor(out), Tensor(indices)
@@ -974,6 +1004,15 @@
     func : lgamma
   backward : lgamma_grad

+- api : linspace
+  args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
+  output : Tensor
+  infer_meta :
+    func : LinspaceInferMeta
+  kernel :
+    func : linspace
+    data_type : dtype
+
 - api : log
   args : (Tensor x)
   output : Tensor
@@ -1065,6 +1104,17 @@
   kernel :
     func : logical_xor

+# logit
+- api : logit
+  args : (Tensor x, float eps = 1e-6f)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : logit
+  backward : logit_grad
+
 # logsigmoid
 - api : logsigmoid
   args : (Tensor x)
@@ -1168,7 +1218,7 @@
   kernel :
     func : mean_all
   backward : mean_all_grad

 - api : meshgrid
   args : (Tensor[] inputs)
   output : Tensor[]
......
@@ -548,6 +548,16 @@
   kernel :
     func : hard_sigmoid_grad

+- backward_api : hard_swish_grad
+  forward : hard_swish (Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float threshold, float scale, float offset)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : hard_swish_grad
+
 - backward_api : huber_loss_grad
   forward : huber_loss (Tensor input, Tensor label, float delta) -> Tensor(out), Tensor(residual)
   args : (Tensor residual, Tensor out_grad, float delta)
@@ -597,6 +607,17 @@
   kernel :
     func : kldiv_loss_grad

+- backward_api : kron_grad
+  forward : kron (Tensor x, Tensor y) -> Tensor(out)
+  args : (Tensor x, Tensor y, Tensor out_grad)
+  output : Tensor(x_grad), Tensor(y_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param : [x, y]
+  kernel :
+    func : kron_grad
+    data_type : out_grad
+
 - backward_api : kthvalue_grad
   forward : kthvalue(Tensor x, int k, int axis, bool keepdim) -> Tensor(out), Tensor(indices)
   args : (Tensor x, Tensor indices, Tensor out_grad, int k, int axis, bool keepdim)
@@ -708,6 +729,16 @@
   kernel :
     func : log_softmax_grad

+- backward_api : logit_grad
+  forward : logit (Tensor x, float eps = 1e-6f) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float eps)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : logit_grad
+
 - backward_api : logsigmoid_grad
   forward : logsigmoid (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
......