Unverified commit 30a627aa, authored by Chen Weihang, committed by GitHub

Normalized function parameter writing (#31588)

Parent cac9635a
@@ -80,30 +80,31 @@ inline std::string Vec(const std::string& t_name) {

////////////////////// Kernel Function (PD_KERNEL) ////////////////////////

// Record Op kernel core function
using KernelFunc =
    std::vector<Tensor> (*)(const std::vector<Tensor>& inputs,
                            const std::vector<std::vector<Tensor>>& vec_inputs,
                            const std::vector<boost::any>& attrs);

#define PD_SPECIALIZE_ComputeCallHelper(attr_type)                            \
  template <typename... Tail>                                                  \
  struct ComputeCallHelper<attr_type, Tail...> {                               \
    template <int in_idx, int vec_in_idx, int attr_idx,                        \
              typename... PreviousArgs>                                        \
    static Return Compute(const std::vector<Tensor>& inputs,                   \
                          const std::vector<std::vector<Tensor>>& vec_inputs,  \
                          const std::vector<boost::any>& attrs,                \
                          const PreviousArgs&... pargs) {                      \
      try {                                                                    \
        attr_type arg = boost::any_cast<attr_type>(attrs[attr_idx]);           \
        return ComputeCallHelper<Tail...>::template Compute<                   \
            in_idx, vec_in_idx, attr_idx + 1>(inputs, vec_inputs, attrs,       \
                                              pargs..., arg);                  \
      } catch (boost::bad_any_cast&) {                                         \
        PD_THROW(                                                              \
            "Attribute cast error in custom operator. Expected " #attr_type    \
            " value.");                                                        \
      }                                                                        \
    }                                                                          \
  }

template <typename T>

@@ -114,9 +115,9 @@ struct KernelFuncImpl;

template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
  static Return Compute(const std::vector<Tensor>& inputs,
                        const std::vector<std::vector<Tensor>>& vec_inputs,
                        const std::vector<boost::any>& attrs) {
    return ComputeCallHelper<Args..., TypeTag<int>>::template Compute<0, 0, 0>(
        inputs, vec_inputs, attrs);
  }
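
Compute above is only the entry point; the real work happens in a recursive variadic-template "call helper". Each ComputeCallHelper specialization consumes exactly one parameter of the user kernel's signature, fetches the matching value from inputs, vec_inputs, or attrs via a compile-time index, appends it to the argument pack, and recurses; the TypeTag<int> sentinel ends the recursion and calls impl_fn with the assembled pack. The following is a minimal, self-contained sketch of that pattern, not Paddle code: the Dispatcher/Helper/Add names and the int-only argument source are purely illustrative.

#include <iostream>
#include <vector>

template <typename T>
struct TypeTag {};  // sentinel marking the end of the parameter list

template <typename Return, typename... Args>
struct Dispatcher {
  static Return Run(const std::vector<int>& ints, Return (*fn)(Args...)) {
    // Start the recursion at index 0; TypeTag<int> terminates it.
    return Helper<Args..., TypeTag<int>>::template Call<0>(ints, fn);
  }

  template <typename... RemainingArgs>
  struct Helper;

  // One kernel parameter of type `const int&`: read ints[idx], append it to
  // the collected pack, and recurse with idx + 1.
  template <typename... Tail>
  struct Helper<const int&, Tail...> {
    template <int idx, typename... Previous>
    static Return Call(const std::vector<int>& ints, Return (*fn)(Args...),
                       const Previous&... prev) {
      return Helper<Tail...>::template Call<idx + 1>(ints, fn, prev...,
                                                     ints[idx]);
    }
  };

  // End of the parameter list: every argument has been collected, call fn.
  template <typename T>
  struct Helper<TypeTag<T>> {
    template <int idx>
    static Return Call(const std::vector<int>&, Return (*fn)(Args...),
                       const Args&... args) {
      return fn(args...);
    }
  };
};

int Add(const int& a, const int& b) { return a + b; }

int main() {
  // The helper pulls 3 and 4 out of the vector and forwards them to Add.
  std::cout << Dispatcher<int, const int&, const int&>::Run({3, 4}, &Add)
            << std::endl;  // prints 7
}

The Paddle helpers below follow the same shape, but track three indices (in_idx, vec_in_idx, attr_idx) and provide one specialization per supported parameter type.
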
@@ -125,14 +126,13 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {

  template <typename... RemainingArgs>
  struct ComputeCallHelper;

  template <typename... Tail>
  struct ComputeCallHelper<const Tensor&, Tail...> {
    template <int in_idx, int vec_in_idx, int attr_idx,
              typename... PreviousArgs>
    static Return Compute(const std::vector<Tensor>& inputs,
                          const std::vector<std::vector<Tensor>>& vec_inputs,
                          const std::vector<boost::any>& attrs,
                          const PreviousArgs&... pargs) {
      const Tensor& arg = inputs[in_idx];
      return ComputeCallHelper<Tail...>::template Compute<in_idx + 1,

@@ -141,14 +141,13 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
    }
  };

  template <typename... Tail>
  struct ComputeCallHelper<const std::vector<Tensor>&, Tail...> {
    template <int in_idx, int vec_in_idx, int attr_idx,
              typename... PreviousArgs>
    static Return Compute(const std::vector<Tensor>& inputs,
                          const std::vector<std::vector<Tensor>>& vec_inputs,
                          const std::vector<boost::any>& attrs,
                          const PreviousArgs&... pargs) {
      const std::vector<Tensor>& arg = vec_inputs[vec_in_idx];
      return ComputeCallHelper<Tail...>::template Compute<

@@ -157,6 +156,23 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
    }
  };

  PD_SPECIALIZE_ComputeCallHelper(const bool&);
  PD_SPECIALIZE_ComputeCallHelper(const int&);
  PD_SPECIALIZE_ComputeCallHelper(const float&);
  PD_SPECIALIZE_ComputeCallHelper(const int64_t&);
  PD_SPECIALIZE_ComputeCallHelper(const std::string&);
  PD_SPECIALIZE_ComputeCallHelper(const std::vector<int>&);
  PD_SPECIALIZE_ComputeCallHelper(const std::vector<float>&);
  PD_SPECIALIZE_ComputeCallHelper(const std::vector<int64_t>&);
  PD_SPECIALIZE_ComputeCallHelper(const std::vector<std::string>&);
  // TODO(chenweihang): support other attribute types if needed.
  // Why are other attribute types not supported here?
  // - boost::blank, std::vector<bool> and std::vector<double>
  //   are not used in ops
  // - BlockDesc* and std::vector<BlockDesc*> are only used inside the framework

  // NOTE(chenweihang): Kept to stay compatible with the interface released
  // in 2.0.1; these by-value forms will be deprecated in the future
  PD_SPECIALIZE_ComputeCallHelper(bool);
  PD_SPECIALIZE_ComputeCallHelper(int);
  PD_SPECIALIZE_ComputeCallHelper(float);

@@ -166,18 +182,15 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
  PD_SPECIALIZE_ComputeCallHelper(std::vector<float>);
  PD_SPECIALIZE_ComputeCallHelper(std::vector<int64_t>);
  PD_SPECIALIZE_ComputeCallHelper(std::vector<std::string>);

  // end: base template
  template <typename T>
  struct ComputeCallHelper<TypeTag<T>> {
    template <int in_idx, int vec_in_idx, int attr_idx>
    static Return Compute(const std::vector<Tensor>& inputs,
                          const std::vector<std::vector<Tensor>>& vec_inputs,
                          const std::vector<boost::any>& attrs,
                          const Args&... args) {
      return impl_fn(args...);
    }
  };
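
Taken together, these specializations let a custom operator kernel declare each attribute either by const reference (the normalized style this PR introduces) or by value (the style shipped in 2.0.1); PD_KERNEL matches both. A condensed sketch, not part of this diff — the op name scale_test, the kernel body, and the scale_attr attribute are illustrative, and it assumes the same custom-op API (paddle::Tensor, PD_DISPATCH_FLOATING_TYPES, PD_BUILD_OP) used by the test files later in this commit:

#include <vector>

#include "paddle/extension.h"  // assumed header exposing the custom-op API

// Normalized style: the attribute is taken by const reference.
std::vector<paddle::Tensor> ScaleForward(const paddle::Tensor& x,
                                         const float& scale_attr) {
  auto out = paddle::Tensor(paddle::PlaceType::kCPU);
  out.reshape(x.shape());
  PD_DISPATCH_FLOATING_TYPES(
      x.type(), "scale_cpu_kernel", ([&] {
        const data_t* x_data = x.data<data_t>();
        data_t* out_data = out.mutable_data<data_t>();
        for (int64_t i = 0; i < x.size(); ++i) {
          out_data[i] = x_data[i] * static_cast<data_t>(scale_attr);
        }
      }));
  return {out};
}

// Legacy 2.0.1 style: the attribute is taken by value. Still accepted through
// the by-value specializations kept above, and could equally be registered
// with PD_KERNEL.
std::vector<paddle::Tensor> ScaleForwardLegacy(const paddle::Tensor& x,
                                               float scale_attr) {
  return ScaleForward(x, scale_attr);
}

PD_BUILD_OP(scale_test)
    .Inputs({"X"})
    .Outputs({"Out"})
    .Attrs({"scale_attr: float"})
    .SetKernelFn(PD_KERNEL(ScaleForward));
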
@@ -190,8 +203,40 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {

// Record Op infershape core function
using InferShapeFunc = std::vector<std::vector<int64_t>> (*)(
    const std::vector<std::vector<int64_t>>& input_shapes,
    const std::vector<std::vector<std::vector<int64_t>>>& vec_input_shapes);

#define PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPE(input_type) \
template <typename... Tail> \
struct InferShapeCallHelper<input_type, Tail...> { \
template <int in_idx, int vec_in_idx, typename... PreviousArgs> \
static Return InferShape( \
const std::vector<std::vector<int64_t>>& input_shapes, \
const std::vector<std::vector<std::vector<int64_t>>>& \
vec_input_shapes, \
const PreviousArgs&... pargs) { \
input_type arg = input_shapes[in_idx]; \
return InferShapeCallHelper<Tail...>::template InferShape<in_idx + 1, \
vec_in_idx>( \
input_shapes, vec_input_shapes, pargs..., arg); \
} \
}
#define PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPES(input_type) \
template <typename... Tail> \
struct InferShapeCallHelper<input_type, Tail...> { \
template <int in_idx, int vec_in_idx, typename... PreviousArgs> \
static Return InferShape( \
const std::vector<std::vector<int64_t>>& input_shapes, \
const std::vector<std::vector<std::vector<int64_t>>>& \
vec_input_shapes, \
const PreviousArgs&... pargs) { \
input_type arg = vec_input_shapes[vec_in_idx]; \
return InferShapeCallHelper<Tail...>::template InferShape< \
in_idx, vec_in_idx + 1>(input_shapes, vec_input_shapes, pargs..., \
arg); \
} \
}

template <typename F, F f>
struct InferShapeFuncImpl;

@@ -199,8 +244,8 @@ struct InferShapeFuncImpl;

template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct InferShapeFuncImpl<Return (*)(Args...), impl_fn> {
  static Return InferShape(
      const std::vector<std::vector<int64_t>>& input_shapes,
      const std::vector<std::vector<std::vector<int64_t>>>& vec_input_shapes) {
    return InferShapeCallHelper<Args..., TypeTag<int>>::template InferShape<0,
                                                                            0>(
        input_shapes, vec_input_shapes);

@@ -210,41 +255,23 @@ struct InferShapeFuncImpl<Return (*)(Args...), impl_fn> {

  template <typename... RemainingArgs>
  struct InferShapeCallHelper;

  PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPE(const std::vector<int64_t>&);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPES(
      const std::vector<std::vector<int64_t>>&);

  // NOTE(chenweihang): Kept to stay compatible with the interface released
  // in 2.0.1; these by-value forms will be deprecated in the future
  PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPE(std::vector<int64_t>);
  PD_SPECIALIZE_InferShapeCallHelper_FOR_SHAPES(
      std::vector<std::vector<int64_t>>);

  // end: base template
  template <typename T>
  struct InferShapeCallHelper<TypeTag<T>> {
    template <int in_idx, int vec_in_idx>
    static Return InferShape(
        const std::vector<std::vector<int64_t>>& input_shapes,
        const std::vector<std::vector<std::vector<int64_t>>>& vec_input_shapes,
        const Args&... args) {
      return impl_fn(args...);
    }
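
As with kernels, an InferShape function may now receive each input shape by const reference, while the by-value signatures from 2.0.1 keep working (ConcatInferShapeDynamicAxis later in this diff uses the const-reference form). A minimal sketch, not part of this commit — the function names and the pass-through logic are illustrative:

#include <cstdint>
#include <vector>

// Normalized style: shapes received by const reference.
std::vector<std::vector<int64_t>> IdentityInferShape(
    const std::vector<int64_t>& x_shape,
    const std::vector<std::vector<int64_t>>& extra_shapes) {
  (void)extra_shapes;  // unused here; would hold the shapes of a Vec(...) input
  return {x_shape};    // one output with the same shape as the first input
}

// Legacy 2.0.1 style: shapes received by value, matched by the compatibility
// specializations kept above.
std::vector<std::vector<int64_t>> IdentityInferShapeLegacy(
    std::vector<int64_t> x_shape) {
  return {x_shape};
}
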
@@ -258,8 +285,38 @@ struct InferShapeFuncImpl<Return (*)(Args...), impl_fn> {

// Record Op Infer dtype core function
using InferDtypeFunc = std::vector<DataType> (*)(
    const std::vector<DataType>& input_dtypes,
    const std::vector<std::vector<DataType>>& vec_input_dtypes);

#define PD_SPECIALIZE_InferDtypeCallHelper_TO_DTYPE(input_type) \
template <typename... Tail> \
struct InferDtypeCallHelper<input_type, Tail...> { \
template <int in_idx, int vec_in_idx, typename... PreviousArgs> \
static Return InferDtype( \
const std::vector<DataType>& input_dtypes, \
const std::vector<std::vector<DataType>>& vec_input_dtypes, \
const PreviousArgs&... pargs) { \
input_type arg = input_dtypes[in_idx]; \
return InferDtypeCallHelper<Tail...>::template InferDtype<in_idx + 1, \
vec_in_idx>( \
input_dtypes, vec_input_dtypes, pargs..., arg); \
} \
}
#define PD_SPECIALIZE_InferDtypeCallHelper_FOR_DTYPES(input_type) \
template <typename... Tail> \
struct InferDtypeCallHelper<input_type, Tail...> { \
template <int in_idx, int vec_in_idx, typename... PreviousArgs> \
static Return InferDtype( \
const std::vector<DataType>& input_dtypes, \
const std::vector<std::vector<DataType>>& vec_input_dtypes, \
const PreviousArgs&... pargs) { \
input_type arg = vec_input_dtypes[vec_in_idx]; \
return InferDtypeCallHelper<Tail...>::template InferDtype< \
in_idx, vec_in_idx + 1>(input_dtypes, vec_input_dtypes, pargs..., \
arg); \
} \
}

template <typename F, F f>
struct InferDtypeFuncImpl;

@@ -267,8 +324,8 @@ struct InferDtypeFuncImpl;

template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct InferDtypeFuncImpl<Return (*)(Args...), impl_fn> {
  static Return InferDtype(
      const std::vector<DataType>& input_dtypes,
      const std::vector<std::vector<DataType>>& vec_input_dtypes) {
    return InferDtypeCallHelper<Args..., TypeTag<int>>::template InferDtype<0,
                                                                            0>(
        input_dtypes, vec_input_dtypes);

@@ -278,41 +335,21 @@ struct InferDtypeFuncImpl<Return (*)(Args...), impl_fn> {

  template <typename... RemainingArgs>
  struct InferDtypeCallHelper;

  PD_SPECIALIZE_InferDtypeCallHelper_TO_DTYPE(const DataType&);
  PD_SPECIALIZE_InferDtypeCallHelper_FOR_DTYPES(const std::vector<DataType>&);

  // NOTE(chenweihang): Kept to stay compatible with the interface released
  // in 2.0.1; these by-value forms will be deprecated in the future
  PD_SPECIALIZE_InferDtypeCallHelper_TO_DTYPE(DataType);
  PD_SPECIALIZE_InferDtypeCallHelper_FOR_DTYPES(std::vector<DataType>);

  // end: base template
  template <typename T>
  struct InferDtypeCallHelper<TypeTag<T>> {
    template <int in_idx, int vec_in_idx>
    static Return InferDtype(
        const std::vector<DataType>& input_dtypes,
        const std::vector<std::vector<DataType>>& vec_input_dtypes,
        const Args&... args) {
      return impl_fn(args...);
    }
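
Dtype inference gets the same treatment: both the const-reference signature (see ConcatInferDtypeDynamicAxis below) and the legacy by-value signature continue to compile. A short sketch, not part of this commit — the function names are illustrative and the paddle/extension.h include is assumed:

#include <vector>

#include "paddle/extension.h"  // assumed header providing paddle::DataType

// Normalized style: dtypes received by const reference.
std::vector<paddle::DataType> IdentityInferDtype(
    const paddle::DataType& x_dtype,
    const std::vector<paddle::DataType>& extra_dtypes) {
  (void)extra_dtypes;
  return {x_dtype};
}

// Legacy 2.0.1 style: dtypes received by value, matched by the compatibility
// specializations kept above.
std::vector<paddle::DataType> IdentityInferDtypeLegacy(
    paddle::DataType x_dtype) {
  return {x_dtype};
}
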
...

@@ -27,27 +27,15 @@ void assign_cpu_kernel(const data_t* x_data,
  }
}

void CheckAllForwardAttrs(const bool& bool_attr,
                          const int& int_attr,
                          const float& float_attr,
                          const int64_t& int64_attr,
                          const std::string& str_attr,
                          const std::vector<int>& int_vec_attr,
                          const std::vector<float>& float_vec_attr,
                          const std::vector<int64_t>& int64_vec_attr,
                          const std::vector<std::string>& str_vec_attr) {
  if (bool_attr != true) {
    throw std::runtime_error("bool_attr value error.");
  }

@@ -103,26 +91,11 @@ std::vector<paddle::Tensor> AttrTestForward(
      }
    }
  }
}

void CheckAllBackwardAttrs(const int& int_attr,
                           const std::vector<float>& float_vec_attr,
                           const std::vector<std::string>& str_vec_attr) {
  if (int_attr != 10) {
    throw std::runtime_error("int_attr value error.");
  }

@@ -146,6 +119,114 @@ std::vector<paddle::Tensor> AttrTestBackward(
      }
    }
  }
}

std::vector<paddle::Tensor> AttrTestForward(
const paddle::Tensor& x,
bool bool_attr,
int int_attr,
float float_attr,
int64_t int64_attr,
std::string str_attr,
std::vector<int> int_vec_attr,
std::vector<float> float_vec_attr,
std::vector<int64_t> int64_vec_attr,
std::vector<std::string> str_vec_attr) {
auto out = paddle::Tensor(paddle::PlaceType::kCPU);
out.reshape(x.shape());
PD_DISPATCH_FLOATING_TYPES(
x.type(), "assign_cpu_kernel", ([&] {
assign_cpu_kernel<data_t>(
x.data<data_t>(), out.mutable_data<data_t>(), x.size());
}));
// Check attrs value
CheckAllForwardAttrs(bool_attr,
int_attr,
float_attr,
int64_attr,
str_attr,
int_vec_attr,
float_vec_attr,
int64_vec_attr,
str_vec_attr);
return {out};
}
// The attrs of backward op must be the subset of attrs of forward op
std::vector<paddle::Tensor> AttrTestBackward(
const paddle::Tensor& grad_out,
int int_attr,
std::vector<float> float_vec_attr,
std::vector<std::string> str_vec_attr) {
auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU);
grad_x.reshape(grad_out.shape());
PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
assign_cpu_kernel<data_t>(
grad_out.data<data_t>(),
grad_x.mutable_data<data_t>(),
grad_out.size());
}));
CheckAllBackwardAttrs(int_attr, float_vec_attr, str_vec_attr);
return {grad_x};
}
std::vector<paddle::Tensor> ConstAttrTestForward(
const paddle::Tensor& x,
const bool& bool_attr,
const int& int_attr,
const float& float_attr,
const int64_t& int64_attr,
const std::string& str_attr,
const std::vector<int>& int_vec_attr,
const std::vector<float>& float_vec_attr,
const std::vector<int64_t>& int64_vec_attr,
const std::vector<std::string>& str_vec_attr) {
auto out = paddle::Tensor(paddle::PlaceType::kCPU);
out.reshape(x.shape());
PD_DISPATCH_FLOATING_TYPES(
x.type(), "assign_cpu_kernel", ([&] {
assign_cpu_kernel<data_t>(
x.data<data_t>(), out.mutable_data<data_t>(), x.size());
}));
// Check attrs value
CheckAllForwardAttrs(bool_attr,
int_attr,
float_attr,
int64_attr,
str_attr,
int_vec_attr,
float_vec_attr,
int64_vec_attr,
str_vec_attr);
return {out};
}
// The attrs of backward op must be the subset of attrs of forward op
std::vector<paddle::Tensor> ConstAttrTestBackward(
const paddle::Tensor& grad_out,
const int& int_attr,
const std::vector<float>& float_vec_attr,
const std::vector<std::string>& str_vec_attr) {
auto grad_x = paddle::Tensor(paddle::PlaceType::kCPU);
grad_x.reshape(grad_out.shape());
PD_DISPATCH_FLOATING_TYPES(grad_out.type(), "assign_cpu_kernel", ([&] {
assign_cpu_kernel<data_t>(
grad_out.data<data_t>(),
grad_x.mutable_data<data_t>(),
grad_out.size());
}));
CheckAllBackwardAttrs(int_attr, float_vec_attr, str_vec_attr);
  return {grad_x};
}

@@ -171,3 +252,25 @@ PD_BUILD_GRAD_OP(attr_test)
            "float_vec_attr: std::vector<float>",
            "str_vec_attr: std::vector<std::string>"})
    .SetKernelFn(PD_KERNEL(AttrTestBackward));

PD_BUILD_OP(const_attr_test)
.Inputs({"X"})
.Outputs({"Out"})
.Attrs({"bool_attr: bool",
"int_attr: int",
"float_attr: float",
"int64_attr: int64_t",
"str_attr: std::string",
"int_vec_attr: std::vector<int>",
"float_vec_attr: std::vector<float>",
"int64_vec_attr: std::vector<int64_t>",
"str_vec_attr: std::vector<std::string>"})
.SetKernelFn(PD_KERNEL(AttrTestForward));
PD_BUILD_GRAD_OP(const_attr_test)
.Inputs({paddle::Grad("Out")})
.Outputs({paddle::Grad("X")})
.Attrs({"int_attr: int",
"float_vec_attr: std::vector<float>",
"str_vec_attr: std::vector<std::string>"})
.SetKernelFn(PD_KERNEL(AttrTestBackward));

@@ -122,13 +122,14 @@ std::vector<paddle::Tensor> ConcatBackwardDynamicAxis(
}

std::vector<std::vector<int64_t>> ConcatInferShapeDynamicAxis(
    const std::vector<std::vector<int64_t>>& input_shapes,
    const std::vector<int64_t>& axis_shape) {
  return {std::vector<int64_t>(input_shapes[0].size(), -1)};
}

std::vector<paddle::DataType> ConcatInferDtypeDynamicAxis(
    const std::vector<paddle::DataType>& input_dtypes,
    const paddle::DataType& axis_dtype) {
  return {input_dtypes[0]};
}
...

@@ -40,24 +40,38 @@ custom_attrs = load(

class TestJitCustomAttrs(unittest.TestCase):
    def setUp(self):
        paddle.set_device('cpu')
        # prepare test value
        self.bool_attr = True
        self.int_attr = 10
        self.float_attr = 3.14
        self.int64_attr = 10000000000
        self.str_attr = "StrAttr"
        self.int_vec_attr = [10, 10, 10]
        self.float_vec_attr = [3.14, 3.14, 3.14]
        self.int64_vec_attr = [10000000000, 10000000000, 10000000000]
        self.str_vec_attr = ["StrAttr", "StrAttr", "StrAttr"]

    def test_attr_value(self):
        x = paddle.ones([2, 2], dtype='float32')
        x.stop_gradient = False
        out = custom_attrs.attr_test(
            x, self.bool_attr, self.int_attr, self.float_attr, self.int64_attr,
            self.str_attr, self.int_vec_attr, self.float_vec_attr,
            self.int64_vec_attr, self.str_vec_attr)
        out.stop_gradient = False
        out.backward()

        self.assertTrue(np.array_equal(x.numpy(), out.numpy()))

    def test_const_attr_value(self):
        x = paddle.ones([2, 2], dtype='float32')
        x.stop_gradient = False
        out = custom_attrs.const_attr_test(
            x, self.bool_attr, self.int_attr, self.float_attr, self.int64_attr,
            self.str_attr, self.int_vec_attr, self.float_vec_attr,
            self.int64_vec_attr, self.str_vec_attr)
        out.stop_gradient = False
        out.backward()
...