Commit 30a627aa (unverified), BaiXuePrincess/Paddle (forked from PaddlePaddle/Paddle)
Authored Mar 15, 2021 by Chen Weihang; committed via GitHub on Mar 15, 2021
Normalized function parameter writing (#31588)
Parent: cac9635a
Showing 4 changed files with 316 additions and 161 deletions (+316, -161)
paddle/fluid/extension/include/ext_op_meta_info.h (+144, -107)
python/paddle/fluid/tests/custom_op/attr_test_op.cc (+142, -39)
python/paddle/fluid/tests/custom_op/custom_concat_op.cc (+4, -3)
python/paddle/fluid/tests/custom_op/test_custom_attrs_jit.py (+26, -12)
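In short, this commit rewrites the custom-operator plumbing so that tensor inputs and type-erased attributes are passed by const reference instead of by value, and adds tests for attributes declared as const references. The KernelFunc alias from the first hunk of ext_op_meta_info.h shows the pattern (excerpted here for orientation; the same change is applied to InferShapeFunc and InferDtypeFunc):

// Before: parameters passed by value
using KernelFunc = std::vector<Tensor> (*)(std::vector<Tensor> inputs,
                                           std::vector<std::vector<Tensor>> vec_inputs,
                                           std::vector<boost::any> attrs);

// After: parameters passed by const reference
using KernelFunc =
    std::vector<Tensor> (*)(const std::vector<Tensor>& inputs,
                            const std::vector<std::vector<Tensor>>& vec_inputs,
                            const std::vector<boost::any>& attrs);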
paddle/fluid/extension/include/ext_op_meta_info.h
@@ -80,30 +80,31 @@ inline std::string Vec(const std::string& t_name) {
////////////////////// Kernel Function (PD_KERNEL) ////////////////////////
// Record Op kernel core function
using KernelFunc = std::vector<Tensor> (*)(std::vector<Tensor> inputs,
                                           std::vector<std::vector<Tensor>> vec_inputs,
                                           std::vector<boost::any> attrs);
#define PD_SPECIALIZE_ComputeCallHelper(attr_type) \
template <typename... Tail> \
struct ComputeCallHelper<attr_type, Tail...> { \
template <int in_idx, int vec_in_idx, int attr_idx, \
typename... PreviousArgs> \
static Return Compute(std::vector<Tensor> inputs, \
std::vector<std::vector<Tensor>> vec_inputs, \
std::vector<boost::any> attrs, \
const PreviousArgs&... pargs) { \
try { \
attr_type arg = boost::any_cast<attr_type>(attrs[attr_idx]); \
return ComputeCallHelper<Tail...>::template Compute< \
in_idx, vec_in_idx, attr_idx + 1>(inputs, vec_inputs, attrs, \
pargs..., arg); \
} catch (boost::bad_any_cast&) { \
PD_THROW( \
"Attribute cast error in custom operator. Expected " #attr_type \
" value."); \
} \
} \
using KernelFunc =
    std::vector<Tensor> (*)(const std::vector<Tensor>& inputs,
                            const std::vector<std::vector<Tensor>>& vec_inputs,
                            const std::vector<boost::any>& attrs);
#define PD_SPECIALIZE_ComputeCallHelper(attr_type) \
template <typename... Tail> \
struct ComputeCallHelper<attr_type, Tail...> { \
template <int in_idx, int vec_in_idx, int attr_idx, \
typename... PreviousArgs> \
static Return Compute(const std::vector<Tensor>& inputs, \
const std::vector<std::vector<Tensor>>& vec_inputs, \
const std::vector<boost::any>& attrs, \
const PreviousArgs&... pargs) { \
try { \
attr_type arg = boost::any_cast<attr_type>(attrs[attr_idx]); \
return ComputeCallHelper<Tail...>::template Compute< \
in_idx, vec_in_idx, attr_idx + 1>(inputs, vec_inputs, attrs, \
pargs..., arg); \
} catch (boost::bad_any_cast&) { \
PD_THROW( \
"Attribute cast error in custom operator. Expected " #attr_type \
" value."); \
} \
} \
  }

template <typename T>
@@ -114,9 +115,9 @@ struct KernelFuncImpl;
template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
  static Return Compute(std::vector<Tensor> inputs,
                        std::vector<std::vector<Tensor>> vec_inputs,
                        std::vector<boost::any> attrs) {
  static Return Compute(const std::vector<Tensor>& inputs,
                        const std::vector<std::vector<Tensor>>& vec_inputs,
                        const std::vector<boost::any>& attrs) {
    return ComputeCallHelper<Args..., TypeTag<int>>::template Compute<0, 0, 0>(
        inputs, vec_inputs, attrs);
  }
@@ -125,14 +126,13 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
  template <typename... RemainingArgs>
  struct ComputeCallHelper;
// for Tensor input
  template <typename... Tail>
  struct ComputeCallHelper<const Tensor&, Tail...> {
    template <int in_idx, int vec_in_idx, int attr_idx,
              typename... PreviousArgs>
    static Return Compute(std::vector<Tensor> inputs,
                          std::vector<std::vector<Tensor>> vec_inputs,
                          std::vector<boost::any> attrs,
    static Return Compute(const std::vector<Tensor>& inputs,
                          const std::vector<std::vector<Tensor>>& vec_inputs,
                          const std::vector<boost::any>& attrs,
                          const PreviousArgs&... pargs) {
      const Tensor& arg = inputs[in_idx];
      return ComputeCallHelper<Tail...>::template Compute<in_idx + 1,
@@ -141,14 +141,13 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
}
};
// for std::vector<Tensor> input
  template <typename... Tail>
  struct ComputeCallHelper<const std::vector<Tensor>&, Tail...> {
    template <int in_idx, int vec_in_idx, int attr_idx,
              typename... PreviousArgs>
    static Return Compute(std::vector<Tensor> inputs,
                          std::vector<std::vector<Tensor>> vec_inputs,
                          std::vector<boost::any> attrs,
    static Return Compute(const std::vector<Tensor>& inputs,
                          const std::vector<std::vector<Tensor>>& vec_inputs,
                          const std::vector<boost::any>& attrs,
                          const PreviousArgs&... pargs) {
      const std::vector<Tensor>& arg = vec_inputs[vec_in_idx];
      return ComputeCallHelper<Tail...>::template Compute<
@@ -157,6 +156,23 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
}
};
  PD_SPECIALIZE_ComputeCallHelper(const bool&);
  PD_SPECIALIZE_ComputeCallHelper(const int&);
  PD_SPECIALIZE_ComputeCallHelper(const float&);
  PD_SPECIALIZE_ComputeCallHelper(const int64_t&);
  PD_SPECIALIZE_ComputeCallHelper(const std::string&);
  PD_SPECIALIZE_ComputeCallHelper(const std::vector<int>&);
  PD_SPECIALIZE_ComputeCallHelper(const std::vector<float>&);
  PD_SPECIALIZE_ComputeCallHelper(const std::vector<int64_t>&);
  PD_SPECIALIZE_ComputeCallHelper(const std::vector<std::string>&);
// TODO(chenweihang): support other attribute type if needed.
// Why not support other attribute type here?
// - boost::blank, std::vector<bool> and std::vector<double>
// are not used in op
// - BlockDesc* and std::vector<BlockDesc*> are used in framework
// NOTE(chenweihang): Used to be compatible with the 2.0.1 released
// interface, and will be deprecated in the future
  PD_SPECIALIZE_ComputeCallHelper(bool);
  PD_SPECIALIZE_ComputeCallHelper(int);
  PD_SPECIALIZE_ComputeCallHelper(float);
@@ -166,18 +182,15 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
  PD_SPECIALIZE_ComputeCallHelper(std::vector<float>);
  PD_SPECIALIZE_ComputeCallHelper(std::vector<int64_t>);
  PD_SPECIALIZE_ComputeCallHelper(std::vector<std::string>);
// TODO(chenweihang): support other attribute type if needed.
// Why not support other attribute type here?
// - boost::blank, std::vector<bool> and std::vector<double>
// are not used in op
// - BlockDesc* and std::vector<BlockDesc*> are used in framework
// end: base template
  template <typename T>
  struct ComputeCallHelper<TypeTag<T>> {
    template <int in_idx, int vec_in_idx, int attr_idx>
    static Return Compute(std::vector<Tensor> inputs,
                          std::vector<std::vector<Tensor>> vec_inputs,
                          std::vector<boost::any> attrs,
                          const Args&... args) {
    static Return Compute(const std::vector<Tensor>& inputs,
                          const std::vector<std::vector<Tensor>>& vec_inputs,
                          const std::vector<boost::any>& attrs,
                          const Args&... args) {
      return impl_fn(args...);
    }
  };
@@ -190,8 +203,40 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
// Record Op infershape core function
using InferShapeFunc = std::vector<std::vector<int64_t>> (*)(
    std::vector<std::vector<int64_t>> input_shapes,
    std::vector<std::vector<std::vector<int64_t>>> vec_input_shapes);
    const std::vector<std::vector<int64_t>>& input_shapes,
    const std::vector<std::vector<std::vector<int64_t>>>& vec_input_shapes);
#define PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPE(input_type) \
template <typename... Tail> \
struct InferShapeCallHelper<input_type, Tail...> { \
template <int in_idx, int vec_in_idx, typename... PreviousArgs> \
static Return InferShape( \
const std::vector<std::vector<int64_t>>& input_shapes, \
const std::vector<std::vector<std::vector<int64_t>>>& \
vec_input_shapes, \
const PreviousArgs&... pargs) { \
input_type arg = input_shapes[in_idx]; \
return InferShapeCallHelper<Tail...>::template InferShape<in_idx + 1, \
vec_in_idx>( \
input_shapes, vec_input_shapes, pargs..., arg); \
} \
}
#define PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPES(input_type) \
template <typename... Tail> \
struct InferShapeCallHelper<input_type, Tail...> { \
template <int in_idx, int vec_in_idx, typename... PreviousArgs> \
static Return InferShape( \
const std::vector<std::vector<int64_t>>& input_shapes, \
const std::vector<std::vector<std::vector<int64_t>>>& \
vec_input_shapes, \
const PreviousArgs&... pargs) { \
input_type arg = vec_input_shapes[vec_in_idx]; \
return InferShapeCallHelper<Tail...>::template InferShape< \
in_idx, vec_in_idx + 1>(input_shapes, vec_input_shapes, pargs..., \
arg); \
} \
}
template <typename F, F f>
struct InferShapeFuncImpl;
@@ -199,8 +244,8 @@ struct InferShapeFuncImpl;
template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct InferShapeFuncImpl<Return (*)(Args...), impl_fn> {
  static Return InferShape(
      std::vector<std::vector<int64_t>> input_shapes,
      std::vector<std::vector<std::vector<int64_t>>> vec_input_shapes) {
      const std::vector<std::vector<int64_t>>& input_shapes,
      const std::vector<std::vector<std::vector<int64_t>>>& vec_input_shapes) {
    return InferShapeCallHelper<Args..., TypeTag<int>>::template InferShape<0, 0>(
        input_shapes, vec_input_shapes);
@@ -210,41 +255,23 @@ struct InferShapeFuncImpl<Return (*)(Args...), impl_fn> {
  template <typename... RemainingArgs>
  struct InferShapeCallHelper;

  template <typename... Tail>
  struct InferShapeCallHelper<std::vector<int64_t>, Tail...> {
    template <int in_idx, int vec_in_idx, typename... PreviousArgs>
    static Return InferShape(
        std::vector<std::vector<int64_t>> input_shapes,
        std::vector<std::vector<std::vector<int64_t>>> vec_input_shapes,
        const PreviousArgs&... pargs) {
      std::vector<int64_t> arg = input_shapes[in_idx];
      return InferShapeCallHelper<Tail...>::template InferShape<in_idx + 1,
                                                                vec_in_idx>(
          input_shapes, vec_input_shapes, pargs..., arg);
    }
};
  PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPE(const std::vector<int64_t>&);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPES(const std::vector<std::vector<int64_t>>&);
  template <typename... Tail>
  struct InferShapeCallHelper<std::vector<std::vector<int64_t>>, Tail...> {
    template <int in_idx, int vec_in_idx, typename... PreviousArgs>
    static Return InferShape(
        std::vector<std::vector<int64_t>> input_shapes,
        std::vector<std::vector<std::vector<int64_t>>> vec_input_shapes,
        const PreviousArgs&... pargs) {
      std::vector<std::vector<int64_t>> arg = vec_input_shapes[vec_in_idx];
      return InferShapeCallHelper<Tail...>::template InferShape<
          in_idx, vec_in_idx + 1>(input_shapes, vec_input_shapes, pargs..., arg);
    }
};
// NOTE(chenweihang): Used to be compatible with the 2.0.1 released
// interface, and will be deprecated in the future
  PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPE(std::vector<int64_t>);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPES(std::vector<std::vector<int64_t>>);
// end: base template
  template <typename T>
  struct InferShapeCallHelper<TypeTag<T>> {
    template <int in_idx, int vec_in_idx>
    static Return InferShape(
        std::vector<std::vector<int64_t>> input_shapes,
        std::vector<std::vector<std::vector<int64_t>>> vec_input_shapes,
        const std::vector<std::vector<int64_t>>& input_shapes,
        const std::vector<std::vector<std::vector<int64_t>>>& vec_input_shapes,
        const Args&... args) {
      return impl_fn(args...);
    }
@@ -258,8 +285,38 @@ struct InferShapeFuncImpl<Return (*)(Args...), impl_fn> {
// Record Op Infer dtype core function
using InferDtypeFunc = std::vector<DataType> (*)(
    std::vector<DataType> input_dtypes,
    std::vector<std::vector<DataType>> vec_input_dtypes);
    const std::vector<DataType>& input_dtypes,
    const std::vector<std::vector<DataType>>& vec_input_dtypes);
#define PD_SPECIALIZE_InferDtypeCallHelper_TO_DTYPE(input_type) \
template <typename... Tail> \
struct InferDtypeCallHelper<input_type, Tail...> { \
template <int in_idx, int vec_in_idx, typename... PreviousArgs> \
static Return InferDtype( \
const std::vector<DataType>& input_dtypes, \
const std::vector<std::vector<DataType>>& vec_input_dtypes, \
const PreviousArgs&... pargs) { \
input_type arg = input_dtypes[in_idx]; \
return InferDtypeCallHelper<Tail...>::template InferDtype<in_idx + 1, \
vec_in_idx>( \
input_dtypes, vec_input_dtypes, pargs..., arg); \
} \
}
#define PD_SPECIALIZE_InferDtypeCallHelper_FOR_DTYPES(input_type) \
template <typename... Tail> \
struct InferDtypeCallHelper<input_type, Tail...> { \
template <int in_idx, int vec_in_idx, typename... PreviousArgs> \
static Return InferDtype( \
const std::vector<DataType>& input_dtypes, \
const std::vector<std::vector<DataType>>& vec_input_dtypes, \
const PreviousArgs&... pargs) { \
input_type arg = vec_input_dtypes[vec_in_idx]; \
return InferDtypeCallHelper<Tail...>::template InferDtype< \
in_idx, vec_in_idx + 1>(input_dtypes, vec_input_dtypes, pargs..., \
arg); \
} \
}
template <typename F, F f>
struct InferDtypeFuncImpl;
@@ -267,8 +324,8 @@ struct InferDtypeFuncImpl;
template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct InferDtypeFuncImpl<Return (*)(Args...), impl_fn> {
  static Return InferDtype(
      std::vector<DataType> input_dtypes,
      std::vector<std::vector<DataType>> vec_input_dtypes) {
      const std::vector<DataType>& input_dtypes,
      const std::vector<std::vector<DataType>>& vec_input_dtypes) {
    return InferDtypeCallHelper<Args..., TypeTag<int>>::template InferDtype<0, 0>(
        input_dtypes, vec_input_dtypes);
@@ -278,41 +335,21 @@ struct InferDtypeFuncImpl<Return (*)(Args...), impl_fn> {
  template <typename... RemainingArgs>
  struct InferDtypeCallHelper;

  template <typename... Tail>
  struct InferDtypeCallHelper<DataType, Tail...> {
    template <int in_idx, int vec_in_idx, typename... PreviousArgs>
    static Return InferDtype(std::vector<DataType> input_dtypes,
                             std::vector<std::vector<DataType>> vec_input_dtypes,
                             const PreviousArgs&... pargs) {
      DataType arg = input_dtypes[in_idx];
      return InferDtypeCallHelper<Tail...>::template InferDtype<in_idx + 1,
                                                                vec_in_idx>(
          input_dtypes, vec_input_dtypes, pargs..., arg);
    }
};
  PD_SPECIALIZE_InferDtypeCallHelper_TO_DTYPE(const DataType&);
  PD_SPECIALIZE_InferDtypeCallHelper_FOR_DTYPES(const std::vector<DataType>&);
  template <typename... Tail>
  struct InferDtypeCallHelper<std::vector<DataType>, Tail...> {
    template <int in_idx, int vec_in_idx, typename... PreviousArgs>
    static Return InferDtype(std::vector<DataType> input_dtypes,
                             std::vector<std::vector<DataType>> vec_input_dtypes,
                             const PreviousArgs&... pargs) {
      std::vector<DataType> arg = vec_input_dtypes[vec_in_idx];
      return InferDtypeCallHelper<Tail...>::template InferDtype<
          in_idx, vec_in_idx + 1>(input_dtypes, vec_input_dtypes, pargs..., arg);
    }
};
// NOTE(chenweihang): Used to be compatible with the 2.0.1 released
// interface, and will be deprecated in the future
  PD_SPECIALIZE_InferDtypeCallHelper_TO_DTYPE(DataType);
  PD_SPECIALIZE_InferDtypeCallHelper_FOR_DTYPES(std::vector<DataType>);
// end: base template
  template <typename T>
  struct InferDtypeCallHelper<TypeTag<T>> {
    template <int in_idx, int vec_in_idx>
    static Return InferDtype(std::vector<DataType> input_dtypes,
                             std::vector<std::vector<DataType>> vec_input_dtypes,
                             const std::vector<DataType>& input_dtypes,
                             const std::vector<std::vector<DataType>>& vec_input_dtypes,
                             const Args&... args) {
      return impl_fn(args...);
    }
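The call-helper templates above peel one expected parameter type per step, pull the matching entry out of the type-erased attribute vector, and finally invoke the user's kernel once every argument has been materialized. The following self-contained sketch reproduces that dispatch pattern in miniature so the recursion is easier to follow; it substitutes std::any for boost::any, and DemoFuncImpl, CallHelper, and my_kernel are illustrative names, not Paddle APIs.

#include <any>
#include <iostream>
#include <string>
#include <vector>

template <typename T>
struct TypeTag {};

template <typename F, F f>
struct DemoFuncImpl;

template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct DemoFuncImpl<Return (*)(Args...), impl_fn> {
  // Entry point: attributes arrive type-erased and, as of this commit,
  // by const reference.
  static Return Compute(const std::vector<std::any>& attrs) {
    return CallHelper<Args..., TypeTag<int>>::template Compute<0>(attrs);
  }

 private:
  template <typename... RemainingArgs>
  struct CallHelper;

  // Recursive step: cast attrs[attr_idx] to the next expected parameter type
  // and append it to the argument pack collected so far.
  template <typename AttrType, typename... Tail>
  struct CallHelper<AttrType, Tail...> {
    template <int attr_idx, typename... PreviousArgs>
    static Return Compute(const std::vector<std::any>& attrs,
                          const PreviousArgs&... pargs) {
      AttrType arg = std::any_cast<AttrType>(attrs[attr_idx]);
      return CallHelper<Tail...>::template Compute<attr_idx + 1>(attrs,
                                                                 pargs..., arg);
    }
  };

  // End of recursion: every expected argument has been produced, so call the
  // real kernel.
  template <typename T>
  struct CallHelper<TypeTag<T>> {
    template <int attr_idx, typename... PreviousArgs>
    static Return Compute(const std::vector<std::any>& attrs,
                          const PreviousArgs&... pargs) {
      return impl_fn(pargs...);
    }
  };
};

// A toy kernel written in the new const-reference attribute style.
int my_kernel(const int& int_attr, const std::string& str_attr) {
  std::cout << str_attr << " = " << int_attr << std::endl;
  return int_attr;
}

int main() {
  std::vector<std::any> attrs{std::any(10), std::any(std::string("StrAttr"))};
  DemoFuncImpl<decltype(&my_kernel), &my_kernel>::Compute(attrs);
  return 0;
}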
python/paddle/fluid/tests/custom_op/attr_test_op.cc
@@ -27,27 +27,15 @@ void assign_cpu_kernel(const data_t* x_data,
}
}
std::vector<paddle::Tensor> AttrTestForward(
    const paddle::Tensor& x,
    bool bool_attr,
    int int_attr,
    float float_attr,
    int64_t int64_attr,
    std::string str_attr,
    std::vector<int> int_vec_attr,
    std::vector<float> float_vec_attr,
    std::vector<int64_t> int64_vec_attr,
    std::vector<std::string> str_vec_attr) {
  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
  out.reshape(x.shape());

  PD_DISPATCH_FLOATING_TYPES(
      x.type(), "assign_cpu_kernel", ([&] {
        assign_cpu_kernel<data_t>(
            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
      }));
// Check attrs value
void CheckAllForwardAttrs(const bool& bool_attr,
                          const int& int_attr,
                          const float& float_attr,
                          const int64_t& int64_attr,
                          const std::string& str_attr,
                          const std::vector<int>& int_vec_attr,
                          const std::vector<float>& float_vec_attr,
                          const std::vector<int64_t>& int64_vec_attr,
                          const std::vector<std::string>& str_vec_attr) {
  if (bool_attr != true) {
    throw std::runtime_error("bool_attr value error.");
  }
@@ -103,26 +91,11 @@ std::vector<paddle::Tensor> AttrTestForward(
}
}
}
  return {out};
}
// The attrs of backward op must be the subset of attrs of forward op
std::vector<paddle::Tensor> AttrTestBackward(
    const paddle::Tensor& grad_out,
    int int_attr,
    std::vector<float> float_vec_attr,
    std::vector<std::string> str_vec_attr) {
  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU);
  grad_x.reshape(grad_out.shape());

  PD_DISPATCH_FLOATING_TYPES(
      grad_out.type(), "assign_cpu_kernel", ([&] {
        assign_cpu_kernel<data_t>(
            grad_out.data<data_t>(), grad_x.mutable_data<data_t>(), grad_out.size());
      }));

void CheckAllBackwardAttrs(const int& int_attr,
                           const std::vector<float>& float_vec_attr,
                           const std::vector<std::string>& str_vec_attr) {
  if (int_attr != 10) {
    throw std::runtime_error("int_attr value error.");
  }
@@ -146,6 +119,114 @@ std::vector<paddle::Tensor> AttrTestBackward(
}
}
}
}
std::vector<paddle::Tensor> AttrTestForward(
    const paddle::Tensor& x,
    bool bool_attr,
    int int_attr,
    float float_attr,
    int64_t int64_attr,
    std::string str_attr,
    std::vector<int> int_vec_attr,
    std::vector<float> float_vec_attr,
    std::vector<int64_t> int64_vec_attr,
    std::vector<std::string> str_vec_attr) {
  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
  out.reshape(x.shape());

  PD_DISPATCH_FLOATING_TYPES(
      x.type(), "assign_cpu_kernel", ([&] {
        assign_cpu_kernel<data_t>(
            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
      }));
// Check attrs value
  CheckAllForwardAttrs(bool_attr,
                       int_attr,
                       float_attr,
                       int64_attr,
                       str_attr,
                       int_vec_attr,
                       float_vec_attr,
                       int64_vec_attr,
                       str_vec_attr);

  return {out};
}
// The attrs of backward op must be the subset of attrs of forward op
std::vector<paddle::Tensor> AttrTestBackward(
    const paddle::Tensor& grad_out,
    int int_attr,
    std::vector<float> float_vec_attr,
    std::vector<std::string> str_vec_attr) {
  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU);
  grad_x.reshape(grad_out.shape());

  PD_DISPATCH_FLOATING_TYPES(
      grad_out.type(), "assign_cpu_kernel", ([&] {
        assign_cpu_kernel<data_t>(
            grad_out.data<data_t>(), grad_x.mutable_data<data_t>(), grad_out.size());
      }));

  CheckAllBackwardAttrs(int_attr, float_vec_attr, str_vec_attr);

  return {grad_x};
}
std::vector<paddle::Tensor> ConstAttrTestForward(
    const paddle::Tensor& x,
    const bool& bool_attr,
    const int& int_attr,
    const float& float_attr,
    const int64_t& int64_attr,
    const std::string& str_attr,
    const std::vector<int>& int_vec_attr,
    const std::vector<float>& float_vec_attr,
    const std::vector<int64_t>& int64_vec_attr,
    const std::vector<std::string>& str_vec_attr) {
  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
  out.reshape(x.shape());

  PD_DISPATCH_FLOATING_TYPES(
      x.type(), "assign_cpu_kernel", ([&] {
        assign_cpu_kernel<data_t>(
            x.data<data_t>(), out.mutable_data<data_t>(), x.size());
      }));

  // Check attrs value
  CheckAllForwardAttrs(bool_attr,
                       int_attr,
                       float_attr,
                       int64_attr,
                       str_attr,
                       int_vec_attr,
                       float_vec_attr,
                       int64_vec_attr,
                       str_vec_attr);

  return {out};
}
// The attrs of backward op must be the subset of attrs of forward op
std::vector<paddle::Tensor> ConstAttrTestBackward(
    const paddle::Tensor& grad_out,
    const int& int_attr,
    const std::vector<float>& float_vec_attr,
    const std::vector<std::string>& str_vec_attr) {
  auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU);
  grad_x.reshape(grad_out.shape());

  PD_DISPATCH_FLOATING_TYPES(
      grad_out.type(), "assign_cpu_kernel", ([&] {
        assign_cpu_kernel<data_t>(
            grad_out.data<data_t>(), grad_x.mutable_data<data_t>(), grad_out.size());
      }));

  CheckAllBackwardAttrs(int_attr, float_vec_attr, str_vec_attr);

  return {grad_x};
}
@@ -171,3 +252,25 @@ PD_BUILD_GRAD_OP(attr_test)
"float_vec_attr: std::vector<float>"
,
"str_vec_attr: std::vector<std::string>"
})
.
SetKernelFn
(
PD_KERNEL
(
AttrTestBackward
));
PD_BUILD_OP(const_attr_test)
    .Inputs({"X"})
    .Outputs({"Out"})
    .Attrs({"bool_attr: bool",
            "int_attr: int",
            "float_attr: float",
            "int64_attr: int64_t",
            "str_attr: std::string",
            "int_vec_attr: std::vector<int>",
            "float_vec_attr: std::vector<float>",
            "int64_vec_attr: std::vector<int64_t>",
            "str_vec_attr: std::vector<std::string>"})
    .SetKernelFn(PD_KERNEL(AttrTestForward));

PD_BUILD_GRAD_OP(const_attr_test)
    .Inputs({paddle::Grad("Out")})
    .Outputs({paddle::Grad("X")})
    .Attrs({"int_attr: int",
            "float_vec_attr: std::vector<float>",
            "str_vec_attr: std::vector<std::string>"})
    .SetKernelFn(PD_KERNEL(AttrTestBackward));
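The registrations above illustrate the general recipe: each string given to .Attrs() pairs an attribute name with the C++ type the kernel receives, and after this commit that parameter may be declared either by value (2.0.1-compatible) or by const reference. A minimal sketch of the same recipe for a hypothetical scale_test op follows; the op name, the scale kernel, and the paddle/extension.h include are assumptions for illustration, while the Tensor calls and the registration chain mirror the code in this diff.

#include <vector>

#include "paddle/extension.h"  // assumed custom-op umbrella header

template <typename data_t>
void scale_cpu_kernel(const data_t* x_data,
                      data_t* out_data,
                      int64_t numel,
                      float scale) {
  for (int64_t i = 0; i < numel; ++i) {
    out_data[i] = x_data[i] * static_cast<data_t>(scale);
  }
}

// Attribute taken by const reference, the style this commit normalizes on.
std::vector<paddle::Tensor> ScaleForward(const paddle::Tensor& x,
                                         const float& scale) {
  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
  out.reshape(x.shape());
  PD_DISPATCH_FLOATING_TYPES(
      x.type(), "scale_cpu_kernel", ([&] {
        scale_cpu_kernel<data_t>(
            x.data<data_t>(), out.mutable_data<data_t>(), x.size(), scale);
      }));
  return {out};
}

PD_BUILD_OP(scale_test)
    .Inputs({"X"})
    .Outputs({"Out"})
    .Attrs({"scale: float"})
    .SetKernelFn(PD_KERNEL(ScaleForward));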
python/paddle/fluid/tests/custom_op/custom_concat_op.cc
@@ -122,13 +122,14 @@ std::vector<paddle::Tensor> ConcatBackwardDynamicAxis(
}
std::vector<std::vector<int64_t>> ConcatInferShapeDynamicAxis(
    std::vector<std::vector<int64_t>> input_shapes,
    std::vector<int64_t> axis_shape) {
    const std::vector<std::vector<int64_t>>& input_shapes,
    const std::vector<int64_t>& axis_shape) {
  return {std::vector<int64_t>(input_shapes[0].size(), -1)};
}

std::vector<paddle::DataType> ConcatInferDtypeDynamicAxis(
    std::vector<paddle::DataType> input_dtypes, paddle::DataType axis_dtype) {
    const std::vector<paddle::DataType>& input_dtypes,
    const paddle::DataType& axis_dtype) {
  return {input_dtypes[0]};
}
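ConcatInferShapeDynamicAxis and ConcatInferDtypeDynamicAxis are the op's shape and dtype inference hooks, and the change here is again only pass-by-value to const reference. For completeness, a hedged sketch of how such hooks could be attached to the hypothetical scale_test op from the earlier sketch is shown below; SetInferShapeFn/SetInferDtypeFn and the PD_INFER_SHAPE/PD_INFER_DTYPE wrappers are the customary registration points and are assumed here, since this diff does not show them.

// Inference functions written in the new const-reference style; for an
// elementwise scale the output simply mirrors the input.
std::vector<std::vector<int64_t>> ScaleInferShape(
    const std::vector<int64_t>& x_shape) {
  return {x_shape};
}

std::vector<paddle::DataType> ScaleInferDtype(const paddle::DataType& x_dtype) {
  return {x_dtype};
}

// Extending the earlier (hypothetical) registration with the two hooks.
PD_BUILD_OP(scale_test)
    .Inputs({"X"})
    .Outputs({"Out"})
    .Attrs({"scale: float"})
    .SetKernelFn(PD_KERNEL(ScaleForward))
    .SetInferShapeFn(PD_INFER_SHAPE(ScaleInferShape))
    .SetInferDtypeFn(PD_INFER_DTYPE(ScaleInferDtype));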
python/paddle/fluid/tests/custom_op/test_custom_attrs_jit.py
@@ -40,24 +40,38 @@ custom_attrs = load(
class TestJitCustomAttrs(unittest.TestCase):
    def test_attr_value(self):
    def setUp(self):
        paddle.set_device('cpu')
# prepare test value
        bool_attr = True
        int_attr = 10
        float_attr = 3.14
        int64_attr = 10000000000
        str_attr = "StrAttr"
        int_vec_attr = [10, 10, 10]
        float_vec_attr = [3.14, 3.14, 3.14]
        int64_vec_attr = [10000000000, 10000000000, 10000000000]
        str_vec_attr = ["StrAttr", "StrAttr", "StrAttr"]
        self.bool_attr = True
        self.int_attr = 10
        self.float_attr = 3.14
        self.int64_attr = 10000000000
        self.str_attr = "StrAttr"
        self.int_vec_attr = [10, 10, 10]
        self.float_vec_attr = [3.14, 3.14, 3.14]
        self.int64_vec_attr = [10000000000, 10000000000, 10000000000]
        self.str_vec_attr = ["StrAttr", "StrAttr", "StrAttr"]
    def test_attr_value(self):
        x = paddle.ones([2, 2], dtype='float32')
        x.stop_gradient = False
        out = custom_attrs.attr_test(
            x, bool_attr, int_attr, float_attr, int64_attr, str_attr,
            int_vec_attr, float_vec_attr, int64_vec_attr, str_vec_attr)
            x, self.bool_attr, self.int_attr, self.float_attr,
            self.int64_attr, self.str_attr, self.int_vec_attr,
            self.float_vec_attr, self.int64_vec_attr, self.str_vec_attr)
        out.stop_gradient = False
        out.backward()

        self.assertTrue(np.array_equal(x.numpy(), out.numpy()))
    def test_const_attr_value(self):
        x = paddle.ones([2, 2], dtype='float32')
        x.stop_gradient = False
        out = custom_attrs.const_attr_test(
            x, self.bool_attr, self.int_attr, self.float_attr,
            self.int64_attr, self.str_attr, self.int_vec_attr,
            self.float_vec_attr, self.int64_vec_attr, self.str_vec_attr)
        out.stop_gradient = False
        out.backward()