Commit 36d76840 (unverified)
Authored on April 11, 2022 by YuanRisheng; committed via GitHub on April 11, 2022.
[Phi]Add multi_dot/maxout/multiplex op yaml (#41550)
* add multi_dot, maxout, multiplex yaml
* add code coverage
Parent: 89bfa964
Showing 16 changed files with 298 additions and 28 deletions (+298, -28).
paddle/phi/api/lib/api_custom_impl.cc (+130, -0)
paddle/phi/api/lib/api_custom_impl.h (+9, -1)
paddle/phi/infermeta/backward.cc (+32, -0)
paddle/phi/infermeta/backward.h (+8, -0)
paddle/phi/kernels/impl/multi_dot_kernel_impl.h (+1, -1)
paddle/phi/kernels/multi_dot_grad_kernel.h (+1, -1)
paddle/phi/ops/compat/multi_dot_sig.cc (+1, -1)
python/paddle/fluid/layers/nn.py (+4, -1)
python/paddle/fluid/tests/unittests/test_maxout_op.py (+8, -2)
python/paddle/fluid/tests/unittests/test_multi_dot_op.py (+22, -16)
python/paddle/fluid/tests/unittests/test_multiplex_op.py (+25, -0)
python/paddle/nn/functional/activation.py (+3, -3)
python/paddle/tensor/linalg.py (+3, -1)
python/paddle/utils/code_gen/api.yaml (+28, -0)
python/paddle/utils/code_gen/api_base.py (+1, -1)
python/paddle/utils/code_gen/backward.yaml (+22, -0)
paddle/phi/api/lib/api_custom_impl.cc

```diff
@@ -1014,5 +1014,135 @@ std::vector<Tensor> meshgrid_grad_impl(
   return api_output;
 }
 
+std::vector<Tensor> multi_dot_grad_impl(const std::vector<Tensor>& x,
+                                        const Tensor& out_grad) {
+  Backend kernel_backend = Backend::UNDEFINED;
+  DataLayout kernel_layout = DataLayout::UNDEFINED;
+  DataType kernel_data_type = DataType::UNDEFINED;
+
+  if (kernel_backend == Backend::UNDEFINED ||
+      kernel_layout == DataLayout::UNDEFINED ||
+      kernel_data_type == DataType::UNDEFINED) {
+    auto kernel_key_set = ParseKernelKeyByInputArgs(x, out_grad);
+    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
+    if (kernel_backend == Backend::UNDEFINED) {
+      kernel_backend = kernel_key.backend();
+    }
+    if (kernel_layout == DataLayout::UNDEFINED) {
+      kernel_layout = kernel_key.layout();
+    }
+    if (kernel_data_type == DataType::UNDEFINED) {
+      kernel_data_type = kernel_key.dtype();
+    }
+  }
+
+  VLOG(6) << "multi_dot_grad API kernel key: [" << kernel_backend << ", "
+          << kernel_layout << ", " << kernel_data_type << "]";
+  const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
+      "multi_dot_grad", {kernel_backend, kernel_layout, kernel_data_type});
+  VLOG(6) << "multi_dot_grad API kernel: " << kernel;
+
+  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);
+
+  auto input_x_vec = PrepareData(x, kernel.InputAt(0), {});
+  std::vector<const phi::DenseTensor*> input_x(input_x_vec->size());
+  for (size_t i = 0; i < input_x.size(); ++i) {
+    input_x[i] = &input_x_vec->at(i);
+  }
+  auto input_out_grad = PrepareData(out_grad, kernel.InputAt(1), {});
+
+  size_t out_number = input_x.size();
+  std::vector<Tensor> api_output;
+  auto kernel_out = SetKernelOutput(out_number, kernel_backend, &api_output);
+
+  auto x_meta_vec = MakeMetaTensor(input_x);
+  std::vector<phi::MetaTensor*> x_metas(x_meta_vec.size());
+  for (size_t i = 0; i < x_meta_vec.size(); ++i) {
+    x_metas[i] = &x_meta_vec[i];
+  }
+
+  std::vector<phi::MetaTensor> meta_outs;
+  meta_outs.reserve(out_number);
+  std::vector<phi::MetaTensor*> meta_out_ptrs;
+  meta_out_ptrs.reserve(out_number);
+  for (size_t i = 0; i < out_number; ++i) {
+    meta_outs.push_back(kernel_out[i]);
+    meta_out_ptrs.push_back(&meta_outs.back());
+  }
+
+  phi::MultiDotGradInferMeta(
+      x_metas, MakeMetaTensor(*input_out_grad), meta_out_ptrs);
+
+  using kernel_signature =
+      void (*)(const platform::DeviceContext&,
+               const std::vector<const phi::DenseTensor*>&,
+               const phi::DenseTensor&,
+               std::vector<phi::DenseTensor*>&);
+  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
+
+  (*kernel_fn)(*dev_ctx, input_x, *input_out_grad, kernel_out);
+
+  return api_output;
+}
+
+std::vector<Tensor> multiplex_grad_impl(const std::vector<Tensor>& inputs,
+                                        const Tensor& ids,
+                                        const Tensor& out_grad) {
+  Backend kernel_backend = Backend::UNDEFINED;
+  DataLayout kernel_layout = DataLayout::UNDEFINED;
+  DataType kernel_data_type = DataType::UNDEFINED;
+
+  if (kernel_backend == Backend::UNDEFINED ||
+      kernel_layout == DataLayout::UNDEFINED ||
+      kernel_data_type == DataType::UNDEFINED) {
+    auto kernel_key_set = ParseKernelKeyByInputArgs(out_grad);
+    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
+    if (kernel_backend == Backend::UNDEFINED) {
+      kernel_backend = kernel_key.backend();
+    }
+    if (kernel_layout == DataLayout::UNDEFINED) {
+      kernel_layout = kernel_key.layout();
+    }
+    if (kernel_data_type == DataType::UNDEFINED) {
+      kernel_data_type = kernel_key.dtype();
+    }
+  }
+
+  VLOG(6) << "multiplex_grad API kernel key: [" << kernel_backend << ", "
+          << kernel_layout << ", " << kernel_data_type << "]";
+  const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
+      "multiplex_grad", {kernel_backend, kernel_layout, kernel_data_type});
+  VLOG(6) << "multiplex_grad API kernel: " << kernel;
+
+  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);
+
+  auto input_ids = PrepareData(ids, kernel.InputAt(0), {});
+  auto input_out_grad = PrepareData(out_grad, kernel.InputAt(1), {});
+
+  auto out_number = inputs.size();
+  std::vector<Tensor> api_output;
+  auto kernel_out = SetKernelOutput(out_number, kernel_backend, &api_output);
+
+  std::vector<phi::MetaTensor> meta_outs;
+  meta_outs.reserve(out_number);
+  std::vector<phi::MetaTensor*> meta_out_ptrs;
+  meta_out_ptrs.reserve(out_number);
+  for (size_t i = 0; i < out_number; ++i) {
+    meta_outs.push_back(kernel_out[i]);
+    meta_out_ptrs.push_back(&meta_outs.back());
+  }
+
+  phi::MultiplexGradInferMeta(MakeMetaTensor(*input_ids),
+                              MakeMetaTensor(*input_out_grad),
+                              meta_out_ptrs);
+
+  using kernel_signature = void (*)(const platform::DeviceContext&,
+                                    const phi::DenseTensor&,
+                                    const phi::DenseTensor&,
+                                    std::vector<phi::DenseTensor*>&);
+  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
+
+  (*kernel_fn)(*dev_ctx, *input_ids, *input_out_grad, kernel_out);
+
+  return api_output;
+}
+
 }  // namespace experimental
 }  // namespace paddle
```
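Note: these two implementations are hand-written rather than generated, presumably because their gradient outputs are variable-length `Tensor[]` lists; `backward.yaml` (further below) dispatches to them via `invoke`. As a reminder of the math `multi_dot_grad` ultimately computes, here is a minimal numpy sketch for the two-matrix case; the names are illustrative, not Paddle internals:

```python
# Gradient of out = A @ B with respect to both operands.
import numpy as np

A = np.random.rand(3, 4)
B = np.random.rand(4, 5)
dout = np.ones((3, 5))   # upstream gradient, d(loss)/d(out)

dA = dout @ B.T          # gradient w.r.t. the first operand
dB = A.T @ dout          # gradient w.r.t. the second operand

assert dA.shape == A.shape and dB.shape == B.shape
```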
paddle/phi/api/lib/api_custom_impl.h

```diff
@@ -62,6 +62,8 @@ std::vector<Tensor> split_impl(const Tensor& x,
                                const IntArray& num_or_sections,
                                const Scalar& axis);
 
+std::vector<Tensor> meshgrid_impl(const std::vector<Tensor>& inputs);
+
 std::tuple<Tensor, Tensor, Tensor> momentum_impl(
     const Tensor& param,
     const Tensor& grad,
@@ -109,9 +111,15 @@ Tensor real_grad_impl(const Tensor& x);
 std::vector<Tensor> stack_grad_impl(const std::vector<Tensor>& x,
                                     const Tensor& out_grad,
                                     int axis);
-std::vector<Tensor> meshgrid_impl(const std::vector<Tensor>& inputs);
 std::vector<Tensor> meshgrid_grad_impl(const std::vector<Tensor>& inputs,
                                        const std::vector<Tensor>& outputs_grad);
 
+std::vector<Tensor> multi_dot_grad_impl(const std::vector<Tensor>& x,
+                                        const Tensor& out_grad);
+
+std::vector<Tensor> multiplex_grad_impl(const std::vector<Tensor>& inputs,
+                                        const Tensor& ids,
+                                        const Tensor& out_grad);
+
 }  // namespace experimental
 }  // namespace paddle
```
paddle/phi/infermeta/backward.cc

```diff
@@ -308,6 +308,38 @@ void MeshgridGradInferMeta(const std::vector<MetaTensor*>& inputs,
   }
 }
 
+void MultiDotGradInferMeta(const std::vector<MetaTensor*>& x,
+                           const MetaTensor& out_grad,
+                           std::vector<MetaTensor*> x_grad) {
+  PADDLE_ENFORCE_EQ(
+      x.size(),
+      x_grad.size(),
+      errors::InvalidArgument(
+          "Number of Inputs(X) should be equal with Outputs(X@Grad)."
+          "But received Inputs(X)' size = %d , Outputs(X@Grad)' size = %d.",
+          x.size(),
+          x_grad.size()));
+
+  for (size_t i = 0; i < x.size(); i++) {
+    if (x_grad[i] != nullptr) {
+      x_grad[i]->set_dims(x[i]->dims());
+      x_grad[i]->share_lod(*x[i]);
+    }
+  }
+}
+
+void MultiplexGradInferMeta(const MetaTensor& ids,
+                            const MetaTensor& out_grad,
+                            std::vector<MetaTensor*> ins_grad) {
+  PADDLE_ENFORCE_NE(
+      ins_grad.empty(),
+      true,
+      errors::InvalidArgument("Output(X@Grad) should not be null."));
+  auto dout_dim = out_grad.dims();
+  for (auto in_grad : ins_grad) {
+    in_grad->set_dims(dout_dim);
+  }
+}
+
 void NllLossGradInferMeta(const MetaTensor& x,
                           const MetaTensor& label,
                           paddle::optional<const MetaTensor&> weight,
```
paddle/phi/infermeta/backward.h

```diff
@@ -139,6 +139,14 @@ void MeshgridGradInferMeta(const std::vector<MetaTensor*>& inputs,
                            const std::vector<MetaTensor*>& outputs_grad,
                            std::vector<MetaTensor*> inputs_grad);
 
+void MultiDotGradInferMeta(const std::vector<MetaTensor*>& x,
+                           const MetaTensor& out_grad,
+                           std::vector<MetaTensor*> x_grad);
+
+void MultiplexGradInferMeta(const MetaTensor& ids,
+                            const MetaTensor& out_grad,
+                            std::vector<MetaTensor*> ins_grad);
+
 void NllLossGradInferMeta(const MetaTensor& input,
                           const MetaTensor& label,
                           paddle::optional<const MetaTensor&> weight,
```
paddle/phi/kernels/impl/multi_dot_kernel_impl.h

```diff
@@ -339,8 +339,8 @@ void MultiDotGradMatChainOrder(const Context& ctx,
 template <typename T, typename Context>
 void MultiDotGradKernel(const Context& ctx,
-                        const DenseTensor& out_grad,
                         const std::vector<const DenseTensor*>& x,
+                        const DenseTensor& out_grad,
                         std::vector<DenseTensor*> x_grad) {
   auto ins = x;
   auto dout = out_grad;
```
paddle/phi/kernels/multi_dot_grad_kernel.h

```diff
@@ -20,8 +20,8 @@ namespace phi {
 template <typename T, typename Context>
 void MultiDotGradKernel(const Context& ctx,
-                        const DenseTensor& out_grad,
                         const std::vector<const DenseTensor*>& x,
+                        const DenseTensor& out_grad,
                         std::vector<DenseTensor*> x_grad);
 
 }  // namespace phi
```
paddle/phi/ops/compat/multi_dot_sig.cc

```diff
@@ -19,7 +19,7 @@ namespace phi {
 KernelSignature MultiDotGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "multi_dot_grad", {GradVarName("Out"), "X"}, {}, {GradVarName("X")});
+      "multi_dot_grad", {"X", GradVarName("Out")}, {}, {GradVarName("X")});
 }
 
 }  // namespace phi
```
python/paddle/fluid/layers/nn.py

```diff
@@ -5970,8 +5970,11 @@ def multiplex(inputs, index, name=None):
             print(res) # [array([[5., 6.], [3., 4.]], dtype=float32)]
     """
-    if _non_static_mode():
+    if _in_legacy_dygraph():
         return _C_ops.multiplex(index, inputs)
+    if in_dygraph_mode():
+        return _C_ops.final_state_multiplex(inputs, index)
+
     helper = LayerHelper('multiplex', **locals())
     check_type(inputs, 'inputs', (list), 'multiplex')
```
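The dispatch change is internal; the public behavior of `paddle.multiplex` is unchanged. A usage sketch matching the docstring example in this hunk: `index` selects, per row, which input tensor supplies that row of the output.

```python
import numpy as np
import paddle

img1 = paddle.to_tensor(np.array([[1, 2], [3, 4]], dtype=np.float32))
img2 = paddle.to_tensor(np.array([[5, 6], [7, 8]], dtype=np.float32))
index = paddle.to_tensor(np.array([[1], [0]], dtype=np.int32))

res = paddle.multiplex([img1, img2], index)
print(res.numpy())  # [[5. 6.] [3. 4.]] -- row 0 from img2, row 1 from img1
```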
python/paddle/fluid/tests/unittests/test_maxout_op.py

```diff
@@ -21,6 +21,7 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.nn.functional as F
 from op_test import OpTest
+from paddle.fluid.framework import _test_eager_guard
 
 paddle.enable_static()
 np.random.seed(1)
@@ -38,6 +39,7 @@ def maxout_forward_naive(x, groups, channel_axis):
 class TestMaxOutOp(OpTest):
     def setUp(self):
         self.op_type = "maxout"
+        self.python_api = paddle.nn.functional.maxout
         self.dtype = 'float64'
         self.shape = [3, 6, 2, 4]
         self.groups = 2
@@ -55,10 +57,10 @@ class TestMaxOutOp(OpTest):
         pass
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestMaxOutOpAxis0(TestMaxOutOp):
@@ -144,6 +146,10 @@ class TestMaxoutAPI(unittest.TestCase):
         x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8])
         self.assertRaises(ValueError, F.maxout, x_float32, 2, 2)
 
+    def test_dygraph_final_state_api(self):
+        with _test_eager_guard():
+            self.test_dygraph_api()
+
 
 if __name__ == '__main__':
     unittest.main()
```
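For reference, the `maxout_forward_naive` helper these tests compare against implements the standard channel-grouping maxout. A self-contained numpy sketch, assuming NCHW input and `axis=1` as in `TestMaxOutOp`:

```python
import numpy as np

def maxout_naive(x, groups, axis=1):
    # Reshape [N, C, H, W] -> [N, C // groups, groups, H, W] and
    # take the max within each group of channels.
    s = list(x.shape)
    s[axis] = s[axis] // groups
    s.insert(axis + 1, groups)
    return x.reshape(s).max(axis=axis + 1)

x = np.random.rand(3, 6, 2, 4)      # matches the test's self.shape
out = maxout_naive(x, groups=2)
assert out.shape == (3, 3, 2, 4)    # 6 channels / 2 groups = 3
```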
python/paddle/fluid/tests/unittests/test_multi_dot_op.py

```diff
@@ -18,6 +18,7 @@ from op_test import OpTest, skip_check_grad_ci
 from numpy.linalg import multi_dot
 from op_test import OpTest
 import paddle
+from paddle.fluid.framework import _test_eager_guard
 
 paddle.enable_static()
@@ -27,6 +28,7 @@ paddle.enable_static()
 class TestMultiDotOp(OpTest):
     def setUp(self):
         self.op_type = "multi_dot"
+        self.python_api = paddle.linalg.multi_dot
         self.dtype = self.get_dtype()
         self.get_inputs_and_outputs()
@@ -40,11 +42,11 @@ class TestMultiDotOp(OpTest):
         self.outputs = {'Out': multi_dot([self.A, self.B])}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out')
-        self.check_grad(['x1'], 'Out')
+        self.check_grad(['x0'], 'Out', check_eager=True)
+        self.check_grad(['x1'], 'Out', check_eager=True)
 
 
 #(A*B)*C
@@ -57,9 +59,9 @@ class TestMultiDotOp3Mat(TestMultiDotOp):
         self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
 
     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out')
-        self.check_grad(['x1'], 'Out')
-        self.check_grad(['x2'], 'Out')
+        self.check_grad(['x0'], 'Out', check_eager=True)
+        self.check_grad(['x1'], 'Out', check_eager=True)
+        self.check_grad(['x2'], 'Out', check_eager=True)
 
 
 #A*(B*C)
@@ -72,9 +74,9 @@ class TestMultiDotOp3Mat2(TestMultiDotOp):
         self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
 
     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out')
-        self.check_grad(['x1'], 'Out')
-        self.check_grad(['x2'], 'Out')
+        self.check_grad(['x0'], 'Out', check_eager=True)
+        self.check_grad(['x1'], 'Out', check_eager=True)
+        self.check_grad(['x2'], 'Out', check_eager=True)
 
 
 class TestMultiDotOp4Mat(TestMultiDotOp):
@@ -90,10 +92,10 @@ class TestMultiDotOp4Mat(TestMultiDotOp):
         self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])}
 
     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out')
-        self.check_grad(['x1'], 'Out')
-        self.check_grad(['x2'], 'Out')
-        self.check_grad(['x3'], 'Out')
+        self.check_grad(['x0'], 'Out', check_eager=True)
+        self.check_grad(['x1'], 'Out', check_eager=True)
+        self.check_grad(['x2'], 'Out', check_eager=True)
+        self.check_grad(['x3'], 'Out', check_eager=True)
 
 
 class TestMultiDotOpFirst1D(TestMultiDotOp):
@@ -143,9 +145,9 @@ class TestMultiDotOp3MatLast1D(TestMultiDotOp3Mat):
         self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
 
     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out')
-        self.check_grad(['x1'], 'Out')
-        self.check_grad(['x2'], 'Out')
+        self.check_grad(['x0'], 'Out', check_eager=True)
+        self.check_grad(['x1'], 'Out', check_eager=True)
+        self.check_grad(['x2'], 'Out', check_eager=True)
 
 
 class TestMultiDotOp4MatLast1D(TestMultiDotOp4Mat):
@@ -260,6 +262,10 @@ class APITestMultiDot(unittest.TestCase):
         expected_result = np.linalg.multi_dot([input_array1, input_array2])
         self.assertTrue(np.allclose(expected_result, out.numpy()))
 
+    def test_dygraph_final_state_api(self):
+        with _test_eager_guard():
+            self.test_dygraph_without_out()
+
 
 if __name__ == "__main__":
     unittest.main()
```
python/paddle/fluid/tests/unittests/test_multiplex_op.py

```diff
@@ -19,6 +19,7 @@ import numpy as np
 from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
+from paddle.fluid.framework import _test_eager_guard
 
 
 class TestMultiplexOp(OpTest):
@@ -102,6 +103,30 @@ class TestMultiplexODygrap(unittest.TestCase):
             res = paddle.multiplex(inputs, index)
         paddle.enable_static()
 
+    def test_dygraph_final_state_api(self):
+        with fluid.dygraph.guard():
+            img1 = np.array([[1, 2], [3, 4]]).astype(np.float32)
+            img2 = np.array([[5, 6], [7, 8]]).astype(np.float32)
+            inputs = [paddle.to_tensor(img1), paddle.to_tensor(img2)]
+            index = paddle.to_tensor(np.array([[1], [0]]).astype(np.int32))
+            inputs[0].stop_gradient = False
+            inputs[1].stop_gradient = False
+            res = paddle.multiplex(inputs, index)
+            res.backward()
+            with _test_eager_guard():
+                inputs_eager = [paddle.to_tensor(img1), paddle.to_tensor(img2)]
+                index_eager = paddle.to_tensor(
+                    np.array([[1], [0]]).astype(np.int32))
+                inputs_eager[0].stop_gradient = False
+                inputs_eager[1].stop_gradient = False
+                res_eager = paddle.multiplex(inputs_eager, index_eager)
+                res_eager.backward()
+                self.assertEqual((res.numpy() == res_eager.numpy()).all(), True)
+                self.assertEqual((inputs[0].grad.numpy() ==
+                                  inputs_eager[0].grad.numpy()).all(), True)
+                self.assertEqual((inputs[1].grad.numpy() ==
+                                  inputs_eager[1].grad.numpy()).all(), True)
+
 
 if __name__ == '__main__':
     unittest.main()
```
python/paddle/nn/functional/activation.py

```diff
@@ -684,10 +684,10 @@ def maxout(x, groups, axis=1, name=None):
             #    [0.95313174 0.6228939  0.7129065  0.7087491 ]
             #    [0.7142536  0.88725346 0.61093384 0.38833922]]]]
     """
-    if in_dynamic_mode():
+    if _in_legacy_dygraph():
         return _C_ops.maxout(x, 'groups', groups, 'axis', axis)
+    if in_dygraph_mode():
+        return _C_ops.final_state_maxout(x, groups, axis)
 
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
     if axis not in [1, -1, 3]:
         raise ValueError(
```
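A usage sketch of the public API whose dispatch changed here; with `groups=2`, a 4-channel input yields 2 output channels:

```python
import paddle
import paddle.nn.functional as F

paddle.disable_static()
x = paddle.rand([1, 4, 3, 3])
out = F.maxout(x, groups=2, axis=1)  # max over each pair of channels
print(out.shape)                     # [1, 2, 3, 3]
```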
python/paddle/tensor/linalg.py

```diff
@@ -2273,8 +2273,10 @@ def multi_dot(x, name=None):
         # [10, 7]
     """
-    if paddle.in_dynamic_mode():
+    if _in_legacy_dygraph():
         return _C_ops.multi_dot(x)
+    if in_dygraph_mode():
+        return _C_ops.final_state_multi_dot(x)
 
     check_type(x, 'x', (list, tuple), 'multi_dot')
     for id, item in enumerate(x):
```
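A usage sketch checking `paddle.linalg.multi_dot` against its numpy counterpart, the same reference the unit tests use:

```python
import numpy as np
import paddle

paddle.disable_static()
A = paddle.rand([3, 4])
B = paddle.rand([4, 5])
C = paddle.rand([5, 2])

out = paddle.linalg.multi_dot([A, B, C])  # chained matmul, optimal order
print(out.shape)                          # [3, 2]

ref = np.linalg.multi_dot([A.numpy(), B.numpy(), C.numpy()])
assert np.allclose(out.numpy(), ref)
```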
python/paddle/utils/code_gen/api.yaml

```diff
@@ -1261,6 +1261,15 @@
     func : maximum
   backward : maximum_grad
 
+- api : maxout
+  args : (Tensor x, int groups, int axis)
+  output : Tensor(out)
+  infer_meta :
+    func : MaxOutInferMeta
+  kernel :
+    func : maxout
+  backward : maxout_grad
+
 - api : mean
   args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
   output : Tensor(out)
@@ -1337,6 +1346,15 @@
   invoke : momentum_impl(param, grad, velocity, learning_rate, master_param, mu, use_nesterov, regularization_method, regularization_coeff, multi_precision, rescale_grad)
   optional : master_param
 
+- api : multi_dot
+  args : (Tensor[] x)
+  output : Tensor
+  infer_meta :
+    func : MultiDotInferMeta
+  kernel :
+    func : multi_dot
+  backward : multi_dot_grad
+
 # multinomial
 - api : multinomial
   args : (Tensor x, int num_samples, bool replacement)
@@ -1346,6 +1364,16 @@
   kernel :
     func : multinomial
 
+- api : multiplex
+  args : (Tensor[] ins, Tensor ids)
+  output : Tensor
+  infer_meta :
+    func : MultiplexInferMeta
+  kernel :
+    func : multiplex
+    data_type : ins
+  backward : multiplex_grad
+
 - api : multiply
   args : (Tensor x, Tensor y)
   output : Tensor
```
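Each new yaml entry drives generation of a `final_state_*` eager op that the Python front ends above now call. A sketch of exercising the generated path the way the updated tests do, under `_test_eager_guard`:

```python
import paddle
from paddle.fluid.framework import _test_eager_guard

paddle.disable_static()
with _test_eager_guard():  # routes through the yaml-generated final-state ops
    x = paddle.rand([2, 4, 3, 3])
    print(paddle.nn.functional.maxout(x, groups=2).shape)  # [2, 2, 3, 3]
    mats = [paddle.rand([3, 4]), paddle.rand([4, 5])]
    print(paddle.linalg.multi_dot(mats).shape)             # [3, 5]
```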
python/paddle/utils/code_gen/api_base.py

```diff
@@ -600,7 +600,7 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
                 if self.inputs['input_info'][param] == "const Tensor&":
                     kernel_args = kernel_args + "*" + PREFIX_TENSOR_NAME + param + ", "
                 elif self.inputs['input_info'][
-                        input_name] == "const std::vector<Tensor>&":
+                        param] == "const std::vector<Tensor>&":
                     kernel_args = kernel_args + PREFIX_TENSOR_NAME + param + ", "
                 else:
                     # do nothing
```
python/paddle/utils/code_gen/backward.yaml

```diff
@@ -902,6 +902,16 @@
   kernel :
     func : maximum_grad
 
+- backward_api : maxout_grad
+  forward : maxout(Tensor x, int groups, int axis) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, int groups, int axis)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : GeneralUnaryGradInferMeta
+    param: [x]
+  kernel :
+    func : maxout_grad
+
 - backward_api : mean_all_grad
   forward : mean_all(Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
@@ -979,6 +989,18 @@
     func : modulo_grad
   no_need_buffer : x, y
 
+- backward_api : multi_dot_grad
+  forward : multi_dot (Tensor[] x) -> Tensor(out)
+  args : (Tensor[] x, Tensor out_grad)
+  output : Tensor[](x_grad)
+  invoke : multi_dot_grad_impl(x, out_grad)
+
+- backward_api : multiplex_grad
+  forward : multiplex (Tensor[] ins, Tensor ids) -> Tensor(out)
+  args : (Tensor[] ins, Tensor ids, Tensor out_grad)
+  output : Tensor[](ins_grad)
+  invoke : multiplex_grad_impl(ins, ids, out_grad)
+
 - backward_api : multiply_grad
   forward : multiply (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
```
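Because `multi_dot_grad` and `multiplex_grad` return `Tensor[]`, they dispatch through `invoke` to the hand-written impls added in `api_custom_impl.cc` rather than a generated kernel call. A dygraph sketch of the backward path, mirroring the new multiplex test:

```python
import numpy as np
import paddle

paddle.disable_static()
inputs = [paddle.to_tensor(np.ones((2, 2), dtype=np.float32)) for _ in range(2)]
for t in inputs:
    t.stop_gradient = False
index = paddle.to_tensor(np.array([[1], [0]], dtype=np.int32))

res = paddle.multiplex(inputs, index)
res.backward()               # implicit all-ones upstream grad, as in the test
print(inputs[0].grad.shape)  # [2, 2] -- matches out_grad's shape, per
                             # MultiplexGradInferMeta above
```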