Commit 3449a34e (unverified)
Authored on Apr 15, 2022 by YuanRisheng; committed via GitHub on Apr 15, 2022
[Phi]Add multi_dot/maxout/multiplex op yaml (#41550) (#41818)
* add multi_dot,maxout,multiplex yaml
* add code coverage
Parent: 6c067e09
Showing 15 changed files with 273 additions and 28 deletions (+273 −28)
paddle/phi/api/lib/api_custom_impl.cc  +130 −0
paddle/phi/api/lib/api_custom_impl.h  +9 −1
paddle/phi/infermeta/backward.cc  +32 −0
paddle/phi/infermeta/backward.h  +8 −0
paddle/phi/kernels/impl/multi_dot_kernel_impl.h  +1 −1
paddle/phi/kernels/multi_dot_grad_kernel.h  +1 −1
paddle/phi/ops/compat/multi_dot_sig.cc  +1 −1
python/paddle/fluid/layers/nn.py  +4 −1
python/paddle/fluid/tests/unittests/test_maxout_op.py  +8 −2
python/paddle/fluid/tests/unittests/test_multi_dot_op.py  +22 −16
python/paddle/nn/functional/activation.py  +3 −3
python/paddle/tensor/linalg.py  +3 −1
python/paddle/utils/code_gen/api.yaml  +28 −0
python/paddle/utils/code_gen/api_base.py  +1 −1
python/paddle/utils/code_gen/backward.yaml  +22 −0
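For orientation, a minimal usage sketch of the three operators this commit wires into the new yaml-based API (not part of the diff; the shapes and values below are illustrative assumptions):

import paddle
import paddle.nn.functional as F

# multi_dot: chained matrix product over a list of tensors
mats = [paddle.rand([3, 4]), paddle.rand([4, 5]), paddle.rand([5, 2])]
print(paddle.linalg.multi_dot(mats).shape)        # [3, 2]

# maxout: element-wise max over `groups` slices of the channel axis
img = paddle.rand([1, 4, 8, 8])
print(F.maxout(img, groups=2, axis=1).shape)      # [1, 2, 8, 8]

# multiplex: per-row selection among candidate tensors by index
ins = [paddle.to_tensor([[1., 2.], [3., 4.]]),
       paddle.to_tensor([[5., 6.], [7., 8.]])]
ids = paddle.to_tensor([[1], [0]], dtype='int32')
print(paddle.multiplex(ins, ids))                 # [[5., 6.], [3., 4.]]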
paddle/phi/api/lib/api_custom_impl.cc
...
@@ -1014,5 +1014,135 @@ std::vector<Tensor> meshgrid_grad_impl(
   return api_output;
 }
 
+std::vector<Tensor> multi_dot_grad_impl(const std::vector<Tensor>& x,
+                                        const Tensor& out_grad) {
+  Backend kernel_backend = Backend::UNDEFINED;
+  DataLayout kernel_layout = DataLayout::UNDEFINED;
+  DataType kernel_data_type = DataType::UNDEFINED;
+
+  if (kernel_backend == Backend::UNDEFINED ||
+      kernel_layout == DataLayout::UNDEFINED ||
+      kernel_data_type == DataType::UNDEFINED) {
+    auto kernel_key_set = ParseKernelKeyByInputArgs(x, out_grad);
+    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
+    if (kernel_backend == Backend::UNDEFINED) {
+      kernel_backend = kernel_key.backend();
+    }
+    if (kernel_layout == DataLayout::UNDEFINED) {
+      kernel_layout = kernel_key.layout();
+    }
+    if (kernel_data_type == DataType::UNDEFINED) {
+      kernel_data_type = kernel_key.dtype();
+    }
+  }
+
+  VLOG(6) << "multi_dot_grad API kernel key: [" << kernel_backend << ", "
+          << kernel_layout << ", " << kernel_data_type << "]";
+  const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
+      "multi_dot_grad", {kernel_backend, kernel_layout, kernel_data_type});
+  VLOG(6) << "multi_dot_grad API kernel: " << kernel;
+
+  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);
+
+  auto input_x_vec = PrepareData(x, kernel.InputAt(0), {});
+  std::vector<const phi::DenseTensor*> input_x(input_x_vec->size());
+  for (size_t i = 0; i < input_x.size(); ++i) {
+    input_x[i] = &input_x_vec->at(i);
+  }
+  auto input_out_grad = PrepareData(out_grad, kernel.InputAt(1), {});
+
+  size_t out_number = input_x.size();
+  std::vector<Tensor> api_output;
+  auto kernel_out = SetKernelOutput(out_number, kernel_backend, &api_output);
+
+  auto x_meta_vec = MakeMetaTensor(input_x);
+  std::vector<phi::MetaTensor*> x_metas(x_meta_vec.size());
+  for (size_t i = 0; i < x_meta_vec.size(); ++i) {
+    x_metas[i] = &x_meta_vec[i];
+  }
+
+  std::vector<phi::MetaTensor> meta_outs;
+  meta_outs.reserve(out_number);
+  std::vector<phi::MetaTensor*> meta_out_ptrs;
+  meta_out_ptrs.reserve(out_number);
+
+  for (size_t i = 0; i < out_number; ++i) {
+    meta_outs.push_back(kernel_out[i]);
+    meta_out_ptrs.push_back(&meta_outs.back());
+  }
+
+  phi::MultiDotGradInferMeta(
+      x_metas, MakeMetaTensor(*input_out_grad), meta_out_ptrs);
+
+  using kernel_signature =
+      void (*)(const platform::DeviceContext&,
+               const std::vector<const phi::DenseTensor*>&,
+               const phi::DenseTensor&,
+               std::vector<phi::DenseTensor*>&);
+  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
+
+  (*kernel_fn)(*dev_ctx, input_x, *input_out_grad, kernel_out);
+
+  return api_output;
+}
+
+std::vector<Tensor> multiplex_grad_impl(const std::vector<Tensor>& inputs,
+                                        const Tensor& ids,
+                                        const Tensor& out_grad) {
+  Backend kernel_backend = Backend::UNDEFINED;
+  DataLayout kernel_layout = DataLayout::UNDEFINED;
+  DataType kernel_data_type = DataType::UNDEFINED;
+
+  if (kernel_backend == Backend::UNDEFINED ||
+      kernel_layout == DataLayout::UNDEFINED ||
+      kernel_data_type == DataType::UNDEFINED) {
+    auto kernel_key_set = ParseKernelKeyByInputArgs(out_grad);
+    auto kernel_key = kernel_key_set.GetHighestPriorityKernelKey();
+    if (kernel_backend == Backend::UNDEFINED) {
+      kernel_backend = kernel_key.backend();
+    }
+    if (kernel_layout == DataLayout::UNDEFINED) {
+      kernel_layout = kernel_key.layout();
+    }
+    if (kernel_data_type == DataType::UNDEFINED) {
+      kernel_data_type = kernel_key.dtype();
+    }
+  }
+
+  VLOG(6) << "multiplex_grad API kernel key: [" << kernel_backend << ", "
+          << kernel_layout << ", " << kernel_data_type << "]";
+  const auto& kernel = phi::KernelFactory::Instance().SelectKernelOrThrowError(
+      "multiplex_grad", {kernel_backend, kernel_layout, kernel_data_type});
+  VLOG(6) << "multiplex_grad API kernel: " << kernel;
+
+  auto* dev_ctx = GetDeviceContextByBackend(kernel_backend);
+
+  auto input_ids = PrepareData(ids, kernel.InputAt(0), {});
+  auto input_out_grad = PrepareData(out_grad, kernel.InputAt(1), {});
+
+  auto out_number = inputs.size();
+  std::vector<Tensor> api_output;
+  auto kernel_out = SetKernelOutput(out_number, kernel_backend, &api_output);
+
+  std::vector<phi::MetaTensor> meta_outs;
+  meta_outs.reserve(out_number);
+  std::vector<phi::MetaTensor*> meta_out_ptrs;
+  meta_out_ptrs.reserve(out_number);
+  for (size_t i = 0; i < out_number; ++i) {
+    meta_outs.push_back(kernel_out[i]);
+    meta_out_ptrs.push_back(&meta_outs.back());
+  }
+
+  phi::MultiplexGradInferMeta(MakeMetaTensor(*input_ids),
+                              MakeMetaTensor(*input_out_grad),
+                              meta_out_ptrs);
+
+  using kernel_signature = void (*)(const platform::DeviceContext&,
+                                    const phi::DenseTensor&,
+                                    const phi::DenseTensor&,
+                                    std::vector<phi::DenseTensor*>&);
+  auto* kernel_fn = kernel.GetVariadicKernelFn<kernel_signature>();
+
+  (*kernel_fn)(*dev_ctx, *input_ids, *input_out_grad, kernel_out);
+
+  return api_output;
+}
+
 }  // namespace experimental
 }  // namespace paddle
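A hedged dygraph sketch of what the manual impls above enable (assumes an eager-mode build so the final-state API and multi_dot_grad are dispatched; not part of the diff):

import paddle

a = paddle.rand([3, 4])
b = paddle.rand([4, 5])
a.stop_gradient = False
b.stop_gradient = False

out = paddle.linalg.multi_dot([a, b])
# differentiating a scalar loss pulls gradients back through multi_dot_grad
ga, gb = paddle.grad(out.sum(), [a, b])
print(ga.shape, gb.shape)   # [3, 4] [4, 5]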
paddle/phi/api/lib/api_custom_impl.h
...
@@ -62,6 +62,8 @@ std::vector<Tensor> split_impl(const Tensor& x,
                                const IntArray& num_or_sections,
                                const Scalar& axis);
 
+std::vector<Tensor> meshgrid_impl(const std::vector<Tensor>& inputs);
+
 std::tuple<Tensor, Tensor, Tensor> momentum_impl(
     const Tensor& param,
     const Tensor& grad,
...
@@ -109,9 +111,15 @@ Tensor real_grad_impl(const Tensor& x);
 std::vector<Tensor> stack_grad_impl(const std::vector<Tensor>& x,
                                     const Tensor& out_grad,
                                     int axis);
-std::vector<Tensor> meshgrid_impl(const std::vector<Tensor>& inputs);
 std::vector<Tensor> meshgrid_grad_impl(const std::vector<Tensor>& inputs,
                                        const std::vector<Tensor>& outputs_grad);
+
+std::vector<Tensor> multi_dot_grad_impl(const std::vector<Tensor>& x,
+                                        const Tensor& out_grad);
+
+std::vector<Tensor> multiplex_grad_impl(const std::vector<Tensor>& inputs,
+                                        const Tensor& ids,
+                                        const Tensor& out_grad);
 
 }  // namespace experimental
 }  // namespace paddle
paddle/phi/infermeta/backward.cc
...
@@ -329,6 +329,38 @@ void MeshgridGradInferMeta(const std::vector<MetaTensor*>& inputs,
   }
 }
 
+void MultiDotGradInferMeta(const std::vector<MetaTensor*>& x,
+                           const MetaTensor& out_grad,
+                           std::vector<MetaTensor*> x_grad) {
+  PADDLE_ENFORCE_EQ(
+      x.size(),
+      x_grad.size(),
+      errors::InvalidArgument(
+          "Number of Inputs(X) should be equal with Outputs(X@Grad)."
+          "But received Inputs(X)' size = %d , Outputs(X@Grad)' size = %d.",
+          x.size(),
+          x_grad.size()));
+  for (size_t i = 0; i < x.size(); i++) {
+    if (x_grad[i] != nullptr) {
+      x_grad[i]->set_dims(x[i]->dims());
+      x_grad[i]->share_lod(*x[i]);
+    }
+  }
+}
+
+void MultiplexGradInferMeta(const MetaTensor& ids,
+                            const MetaTensor& out_grad,
+                            std::vector<MetaTensor*> ins_grad) {
+  PADDLE_ENFORCE_NE(
+      ins_grad.empty(),
+      true,
+      errors::InvalidArgument("Output(X@Grad) should not be null."));
+  auto dout_dim = out_grad.dims();
+  for (auto in_grad : ins_grad) {
+    in_grad->set_dims(dout_dim);
+  }
+}
+
 void NllLossGradInferMeta(const MetaTensor& x,
                           const MetaTensor& label,
                           paddle::optional<const MetaTensor&> weight,
...
paddle/phi/infermeta/backward.h
...
@@ -155,6 +155,14 @@ void MeshgridGradInferMeta(const std::vector<MetaTensor*>& inputs,
                            const std::vector<MetaTensor*>& outputs_grad,
                            std::vector<MetaTensor*> inputs_grad);
 
+void MultiDotGradInferMeta(const std::vector<MetaTensor*>& x,
+                           const MetaTensor& out_grad,
+                           std::vector<MetaTensor*> x_grad);
+
+void MultiplexGradInferMeta(const MetaTensor& ids,
+                            const MetaTensor& out_grad,
+                            std::vector<MetaTensor*> ins_grad);
+
 void NllLossGradInferMeta(const MetaTensor& input,
                           const MetaTensor& label,
                           paddle::optional<const MetaTensor&> weight,
...
paddle/phi/kernels/impl/multi_dot_kernel_impl.h
...
@@ -339,8 +339,8 @@ void MultiDotGradMatChainOrder(const Context& ctx,
 
 template <typename T, typename Context>
 void MultiDotGradKernel(const Context& ctx,
-                        const DenseTensor& out_grad,
                         const std::vector<const DenseTensor*>& x,
+                        const DenseTensor& out_grad,
                         std::vector<DenseTensor*> x_grad) {
   auto ins = x;
   auto dout = out_grad;
...
paddle/phi/kernels/multi_dot_grad_kernel.h
...
@@ -20,8 +20,8 @@ namespace phi {
 
 template <typename T, typename Context>
 void MultiDotGradKernel(const Context& ctx,
-                        const DenseTensor& out_grad,
                         const std::vector<const DenseTensor*>& x,
+                        const DenseTensor& out_grad,
                         std::vector<DenseTensor*> x_grad);
 
 }  // namespace phi
paddle/phi/ops/compat/multi_dot_sig.cc
...
@@ -19,7 +19,7 @@ namespace phi {
 KernelSignature MultiDotGradOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
   return KernelSignature(
-      "multi_dot_grad", {GradVarName("Out"), "X"}, {}, {GradVarName("X")});
+      "multi_dot_grad", {"X", GradVarName("Out")}, {}, {GradVarName("X")});
 }
 
 }  // namespace phi
...
python/paddle/fluid/layers/nn.py
...
@@ -5971,8 +5971,11 @@ def multiplex(inputs, index, name=None):
             print(res) # [array([[5., 6.], [3., 4.]], dtype=float32)]
     """
-    if _non_static_mode():
+    if _in_legacy_dygraph():
         return _C_ops.multiplex(index, inputs)
+    if in_dygraph_mode():
+        return _C_ops.final_state_multiplex(inputs, index)
+
     helper = LayerHelper('multiplex', **locals())
     check_type(inputs, 'inputs', (list), 'multiplex')
...
python/paddle/fluid/tests/unittests/test_maxout_op.py
...
@@ -21,6 +21,7 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 import paddle.nn.functional as F
 from op_test import OpTest
+from paddle.fluid.framework import _test_eager_guard
 
 paddle.enable_static()
 np.random.seed(1)
...
@@ -38,6 +39,7 @@ def maxout_forward_naive(x, groups, channel_axis):
 class TestMaxOutOp(OpTest):
     def setUp(self):
         self.op_type = "maxout"
+        self.python_api = paddle.nn.functional.maxout
         self.dtype = 'float64'
         self.shape = [3, 6, 2, 4]
         self.groups = 2
...
@@ -55,10 +57,10 @@ class TestMaxOutOp(OpTest):
         pass
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)
 
 
 class TestMaxOutOpAxis0(TestMaxOutOp):
...
@@ -144,6 +146,10 @@ class TestMaxoutAPI(unittest.TestCase):
             x_float32 = paddle.fluid.data(name='x_float32', shape=[2, 4, 6, 8])
             self.assertRaises(ValueError, F.maxout, x_float32, 2, 2)
 
+    def test_dygraph_final_state_api(self):
+        with _test_eager_guard():
+            self.test_dygraph_api()
+
 
 if __name__ == '__main__':
     unittest.main()
python/paddle/fluid/tests/unittests/test_multi_dot_op.py
...
@@ -18,6 +18,7 @@ from op_test import OpTest, skip_check_grad_ci
 from numpy.linalg import multi_dot
 from op_test import OpTest
 import paddle
+from paddle.fluid.framework import _test_eager_guard
 
 paddle.enable_static()
...
@@ -27,6 +28,7 @@ paddle.enable_static()
 class TestMultiDotOp(OpTest):
     def setUp(self):
         self.op_type = "multi_dot"
+        self.python_api = paddle.linalg.multi_dot
         self.dtype = self.get_dtype()
         self.get_inputs_and_outputs()
...
@@ -40,11 +42,11 @@ class TestMultiDotOp(OpTest):
         self.outputs = {'Out': multi_dot([self.A, self.B])}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)
 
     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out')
-        self.check_grad(['x1'], 'Out')
+        self.check_grad(['x0'], 'Out', check_eager=True)
+        self.check_grad(['x1'], 'Out', check_eager=True)
 
 
 #(A*B)*C
...
@@ -57,9 +59,9 @@ class TestMultiDotOp3Mat(TestMultiDotOp):
         self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
 
     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out')
-        self.check_grad(['x1'], 'Out')
-        self.check_grad(['x2'], 'Out')
+        self.check_grad(['x0'], 'Out', check_eager=True)
+        self.check_grad(['x1'], 'Out', check_eager=True)
+        self.check_grad(['x2'], 'Out', check_eager=True)
 
 
 #A*(B*C)
...
@@ -72,9 +74,9 @@ class TestMultiDotOp3Mat2(TestMultiDotOp):
         self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
 
     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out')
-        self.check_grad(['x1'], 'Out')
-        self.check_grad(['x2'], 'Out')
+        self.check_grad(['x0'], 'Out', check_eager=True)
+        self.check_grad(['x1'], 'Out', check_eager=True)
+        self.check_grad(['x2'], 'Out', check_eager=True)
 
 
 class TestMultiDotOp4Mat(TestMultiDotOp):
...
@@ -90,10 +92,10 @@ class TestMultiDotOp4Mat(TestMultiDotOp):
         self.outputs = {'Out': multi_dot([self.A, self.B, self.C, self.D])}
 
     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out')
-        self.check_grad(['x1'], 'Out')
-        self.check_grad(['x2'], 'Out')
-        self.check_grad(['x3'], 'Out')
+        self.check_grad(['x0'], 'Out', check_eager=True)
+        self.check_grad(['x1'], 'Out', check_eager=True)
+        self.check_grad(['x2'], 'Out', check_eager=True)
+        self.check_grad(['x3'], 'Out', check_eager=True)
 
 
 class TestMultiDotOpFirst1D(TestMultiDotOp):
...
@@ -143,9 +145,9 @@ class TestMultiDotOp3MatLast1D(TestMultiDotOp3Mat):
         self.outputs = {'Out': multi_dot([self.A, self.B, self.C])}
 
     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out')
-        self.check_grad(['x1'], 'Out')
-        self.check_grad(['x2'], 'Out')
+        self.check_grad(['x0'], 'Out', check_eager=True)
+        self.check_grad(['x1'], 'Out', check_eager=True)
+        self.check_grad(['x2'], 'Out', check_eager=True)
 
 
 class TestMultiDotOp4MatLast1D(TestMultiDotOp4Mat):
...
@@ -260,6 +262,10 @@ class APITestMultiDot(unittest.TestCase):
         expected_result = np.linalg.multi_dot([input_array1, input_array2])
         self.assertTrue(np.allclose(expected_result, out.numpy()))
 
+    def test_dygraph_final_state_api(self):
+        with _test_eager_guard():
+            self.test_dygraph_without_out()
+
 
 if __name__ == "__main__":
     unittest.main()
python/paddle/nn/functional/activation.py
...
@@ -684,10 +684,10 @@ def maxout(x, groups, axis=1, name=None):
             #    [0.95313174 0.6228939  0.7129065  0.7087491 ]
             #    [0.7142536  0.88725346 0.61093384 0.38833922]]]]
     """
-    if in_dynamic_mode():
+    if _in_legacy_dygraph():
         return _C_ops.maxout(x, 'groups', groups, 'axis', axis)
+    if in_dygraph_mode():
+        return _C_ops.final_state_maxout(x, groups, axis)
 
     check_variable_and_dtype(x, 'x', ['float32', 'float64'], 'maxout')
     if axis not in [1, -1, 3]:
         raise ValueError(
...
python/paddle/tensor/linalg.py
...
@@ -2274,8 +2274,10 @@ def multi_dot(x, name=None):
         #     [10, 7]
     """
-    if paddle.in_dynamic_mode():
+    if _in_legacy_dygraph():
         return _C_ops.multi_dot(x)
+    if in_dygraph_mode():
+        return _C_ops.final_state_multi_dot(x)
 
     check_type(x, 'x', (list, tuple), 'multi_dot')
     for id, item in enumerate(x):
...
python/paddle/utils/code_gen/api.yaml
...
@@ -1283,6 +1283,15 @@
     func : maximum
   backward : maximum_grad
 
+- api : maxout
+  args : (Tensor x, int groups, int axis)
+  output : Tensor(out)
+  infer_meta :
+    func : MaxOutInferMeta
+  kernel :
+    func : maxout
+  backward : maxout_grad
+
 - api : mean
   args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
   output : Tensor(out)
...
@@ -1359,6 +1368,15 @@
   invoke : momentum_impl(param, grad, velocity, learning_rate, master_param, mu, use_nesterov, regularization_method, regularization_coeff, multi_precision, rescale_grad)
   optional : master_param
 
+- api : multi_dot
+  args : (Tensor[] x)
+  output : Tensor
+  infer_meta :
+    func : MultiDotInferMeta
+  kernel :
+    func : multi_dot
+  backward : multi_dot_grad
+
 # multinomial
 - api : multinomial
   args : (Tensor x, int num_samples, bool replacement)
...
@@ -1368,6 +1386,16 @@
   kernel :
     func : multinomial
 
+- api : multiplex
+  args : (Tensor[] ins, Tensor ids)
+  output : Tensor
+  infer_meta :
+    func : MultiplexInferMeta
+  kernel :
+    func : multiplex
+    data_type : ins
+  backward : multiplex_grad
+
 - api : multiply
   args : (Tensor x, Tensor y)
   output : Tensor
...
python/paddle/utils/code_gen/api_base.py
...
@@ -600,7 +600,7 @@ PADDLE_API {self.gene_return_type_code()} {self.get_api_func_name() + '_'}({self
                 if self.inputs['input_info'][param] == "const Tensor&":
                     kernel_args = kernel_args + "*" + PREFIX_TENSOR_NAME + param + ", "
                 elif self.inputs['input_info'][
-                        input_name] == "const std::vector<Tensor>&":
+                        param] == "const std::vector<Tensor>&":
                     kernel_args = kernel_args + PREFIX_TENSOR_NAME + param + ", "
                 else:
                     # do nothing
...
python/paddle/utils/code_gen/backward.yaml
...
@@ -920,6 +920,16 @@
   kernel :
     func : maximum_grad
 
+- backward_api : maxout_grad
+  forward : maxout(Tensor x, int groups, int axis) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, int groups, int axis)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : GeneralUnaryGradInferMeta
+    param : [x]
+  kernel :
+    func : maxout_grad
+
 - backward_api : mean_all_grad
   forward : mean_all(Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
...
@@ -998,6 +1008,18 @@
     func : modulo_grad
   no_need_buffer : x, y
 
+- backward_api : multi_dot_grad
+  forward : multi_dot (Tensor[] x) -> Tensor(out)
+  args : (Tensor[] x, Tensor out_grad)
+  output : Tensor[](x_grad)
+  invoke : multi_dot_grad_impl(x, out_grad)
+
+- backward_api : multiplex_grad
+  forward : multiplex (Tensor[] ins, Tensor ids) -> Tensor(out)
+  args : (Tensor[] ins, Tensor ids, Tensor out_grad)
+  output : Tensor[](ins_grad)
+  invoke : multiplex_grad_impl(ins, ids, out_grad)
+
 - backward_api : multiply_grad
   forward : multiply (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
...
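A small hedged sketch of the backward path these backward.yaml entries register (dygraph; assumes eager mode so maxout_grad is reached through the generated API; not part of the diff):

import paddle
import paddle.nn.functional as F

x = paddle.rand([1, 4, 8, 8])
x.stop_gradient = False
y = F.maxout(x, groups=2, axis=1)
# the gradient of the maxout output flows back through maxout_grad
(dx,) = paddle.grad(y.sum(), [x])
print(dx.shape)   # [1, 4, 8, 8]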