Unverified commit 59c9d75e, authored by: H HongyuJia, committed by: GitHub

[CustomOP Optional Inplace] Custom operator supports inplace optional vector Tensor input (#52421)

* [CustomOP Optional Inplace] Custom operator supports inplace optional vector Tensor input

* uncomment unittest codes
Parent 0b60f28c
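In dynamic graph mode, the new capability can be exercised roughly as follows. This is a minimal usage sketch, assuming the `custom_optional_inplace_add_vec` op defined in the test file below has been compiled and loaded with `paddle.utils.cpp_extension.load`; the module name and source file name here are hypothetical.

```python
import paddle
from paddle.utils.cpp_extension import load

# Hypothetical build of the test extension shipped with this PR.
custom_ops = load(name="custom_optional_ops",
                  sources=["custom_optional.cc"])

x = paddle.ones([3], dtype="float32")
y = [paddle.ones([3], dtype="float32") for _ in range(2)]

# With the optional vector input: outX = 2 * x + y[0] + y[1], and each
# y[i] is updated in place (y[i] += x); OutY aliases y via the inplace map.
custom_ops.custom_optional_inplace_add_vec(x, y)

# Without it: outX = 2 * x and the inplace optional output OutY is None.
custom_ops.custom_optional_inplace_add_vec(x, None)
```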
......@@ -833,7 +833,11 @@ static void RunInferDtypeFunc(
auto in_name = inplace_reverse_map.at(out_name);
// make sure ctx has valid inplace optional outputs
if (ctx->HasOutput(out_name)) {
ctx->SetOutputDataTypes(out_name, ctx->GetInputDataTypes(in_name));
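// Propagate dtypes element by element so that each Tensor in a
// (possibly optional) vector input maps onto the matching inplace output.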
size_t size = ctx->InputSize(in_name);
for (size_t i = 0; i < size; ++i) {
auto dtype = ctx->GetInputDataType(in_name, i);
ctx->SetOutputDataType(out_name, dtype, i);
}
} else {
PADDLE_ENFORCE(
detail::IsOptionalVar(out_name),
......
......@@ -120,6 +120,8 @@ class PADDLE_API CustomOpKernelContext {
std::vector<Tensor> InputsBetween(size_t start, size_t end) const;
Tensor& MutableInputAt(size_t idx);
paddle::optional<Tensor> OptionalInputAt(size_t idx);
paddle::optional<std::vector<Tensor>> OptionalInputsBetween(size_t start,
size_t end);
const std::vector<paddle::any>& Attrs() const { return attrs_; }
const std::vector<std::pair<size_t, size_t>>& InputRange() {
return input_range_;
......@@ -294,6 +296,34 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
}
};
// Handle args for inplace vector<Tensor> case
template <typename... Tail>
struct ComputeCallHelper<std::vector<Tensor>&, Tail...> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
auto& range = ctx->InputRangeAt(in_idx);
auto arg = ctx->InputsBetween(range.first, range.second);
ComputeCallHelper<
Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(ctx,
pargs...,
arg);
}
};
// Handle args for optional inplace vector<Tensor> case
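// If the optional vector input was not provided, OptionalInputsBetween
// returns paddle::none and the kernel receives a disengaged paddle::optional.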
template <typename... Tail>
struct ComputeCallHelper<paddle::optional<std::vector<Tensor>>&, Tail...> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
auto& range = ctx->InputRangeAt(in_idx);
auto arg = ctx->OptionalInputsBetween(range.first, range.second);
ComputeCallHelper<
Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(ctx,
pargs...,
arg);
}
};
PD_SPECIALIZE_ComputeCallHelper(bool);
PD_SPECIALIZE_ComputeCallHelper(int);
PD_SPECIALIZE_ComputeCallHelper(float);
......@@ -358,20 +388,6 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
}
};
// Handle args for inplace vector<Tensor> case
template <typename... Tail>
struct ComputeCallHelper<std::vector<Tensor>&, Tail...> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
auto& range = ctx->InputRangeAt(in_idx);
auto arg = ctx->InputsBetween(range.first, range.second);
ComputeCallHelper<
Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(ctx,
pargs...,
arg);
}
};
template <int out_idx, typename T>
struct ComputeReturnHelper;
......
......@@ -110,6 +110,18 @@ paddle::optional<Tensor> CustomOpKernelContext::OptionalInputAt(size_t idx) {
return paddle::make_optional<paddle::Tensor>(inputs_.at(idx));
}
paddle::optional<std::vector<Tensor>>
CustomOpKernelContext::OptionalInputsBetween(size_t start, size_t end) {
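// All-or-nothing: if any Tensor in [start, end) is uninitialized, the
// whole optional vector input is treated as not provided.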
std::vector<Tensor> rlt;
for (size_t i = start; i < end; ++i) {
if (!inputs_.at(i).is_initialized()) {
return paddle::none;
}
rlt.emplace_back(inputs_.at(i));
}
return paddle::make_optional<std::vector<Tensor>>(rlt);
}
Tensor* CustomOpKernelContext::MutableOutputAt(size_t idx) {
return &(outputs_.at(idx));
}
......
......@@ -1057,6 +1057,22 @@ def _gen_output_content(
if out_idx in inplace_reverse_idx:
in_idx = inplace_reverse_idx[out_idx]
if (
in_idx != -1
and "@VECTOR" in in_names[in_idx]
and "@OPTIONAL" in in_names[in_idx]
):
# inplace optional vector<Tensor> output case
lower_in_names = in_names[in_idx].split("@")[0].lower()
dynamic_content += f"""
{indent}if {lower_in_names} is not None:
{indent} outs['{out_name}'] = [core.eager.Tensor() for _ in range(len({lower_in_names}))]
{indent}else:
{indent} outs['{out_name}'] = core.eager.Tensor()
{indent}ctx.add_outputs(outs['{out_name}'])"""
static_content += f"""
{indent}if {lower_in_names} is not None:
{indent} outs['{out_name}'] = [helper.create_variable(dtype='float32') for _ in range(len({lower_in_names}))]"""
elif (
in_idx != -1 and "@VECTOR" in in_names[in_idx]
): # inplace vector<Tensor> output case
lower_in_names = in_names[in_idx].split("@")[0].lower()
......
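For concreteness, here is a sketch of the dynamic-mode code the branch above emits when an output `OutY` is inplace-mapped to an optional vector input `Y` (names borrowed from the test op later in this commit; indentation illustrative):

```python
if y is not None:
    outs['OutY'] = [core.eager.Tensor() for _ in range(len(y))]
else:
    outs['OutY'] = core.eager.Tensor()
ctx.add_outputs(outs['OutY'])
```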
......@@ -300,3 +300,116 @@ PD_BUILD_GRAD_OP(custom_optional_inplace_add)
paddle::Grad(paddle::Optional("Y"))}})
.SetKernelFn(PD_KERNEL(AddOptionalInplaceBackward))
.SetInferShapeFn(PD_INFER_SHAPE(AddOptionalInplaceBackwardInferShape));
/*
if (y) {
outX = 2 * x + y[1] + ... + y[n];
outY[i] = x + y[i];
} else {
outX = 2 * x;
outY = None;
}
*/
std::vector<paddle::Tensor> AddOptionalInplaceVectorForward(
const paddle::Tensor& x,
paddle::optional<std::vector<paddle::Tensor>>& y) { // NOLINT
PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
paddle::Tensor outX = paddle::zeros(x.shape(), x.dtype(), x.place());
PD_DISPATCH_FLOATING_TYPES(
x.type(), "AddOptionalInplaceVectorForward", ([&] {
add_two_pointers<data_t>(
x.data<data_t>(), x.data<data_t>(), outX.data<data_t>(), x.size());
if (y) {
for (size_t i = 0; i < y->size(); ++i) {
add_one_pointer<data_t>(
y->at(i).data<data_t>(), outX.data<data_t>(), outX.size());
add_one_pointer<data_t>(
x.data<data_t>(), y->at(i).data<data_t>(), x.size());
}
}
}));
// No need to return y, because it is set as an inplace input.
return {outX};
}
std::vector<paddle::DataType> AddOptionalInplaceVectorInferDtype(
const paddle::DataType& x_dtype,
const paddle::optional<std::vector<paddle::DataType>>& y_dtype) {
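// Only OutX's dtype must be inferred; the inplace-mapped optional OutY
// reuses Y's dtypes via RunInferDtypeFunc's element-wise propagation above.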
return {x_dtype};
}
std::vector<std::vector<int64_t>> AddOptionalInplaceVectorInferShape(
const std::vector<int64_t>& x_shape,
const paddle::optional<std::vector<std::vector<int64_t>>>& y_shape) {
return {x_shape};
}
/*
if (outY_grad) {
x_grad = outX_grad * 2 + outY_grad[1] + ... + outY_grad[n];
y_grad[i] = outX_grad + outY_grad[i];
} else {
x_grad = outX_grad * 2;
y_grad = None;
}
*/
std::vector<paddle::Tensor> AddOptionalInplaceVectorBackward(
const paddle::Tensor& x,
const paddle::optional<std::vector<paddle::Tensor>>& y,
const paddle::Tensor& outx_grad,
paddle::optional<std::vector<paddle::Tensor>>& outy_grad) { // NOLINT
PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
paddle::Tensor x_grad = paddle::zeros(x.shape(), x.dtype(), x.place());
PD_DISPATCH_FLOATING_TYPES(
outx_grad.type(), "AddOptionalInplaceVectorBackward", ([&] {
add_two_pointers<data_t>(outx_grad.data<data_t>(),
outx_grad.data<data_t>(),
x_grad.data<data_t>(),
x_grad.size());
if (outy_grad) {
for (size_t i = 0; i < outy_grad->size(); ++i) {
add_one_pointer<data_t>(outy_grad->at(i).data<data_t>(),
x_grad.data<data_t>(),
x_grad.size());
add_one_pointer<data_t>(outx_grad.data<data_t>(),
outy_grad->at(i).data<data_t>(),
outx_grad.size());
}
}
}));
return {x_grad};
}
std::vector<std::vector<int64_t>> AddOptionalInplaceVectorBackwardInferShape(
const std::vector<int64_t>& x_shape,
const paddle::optional<std::vector<std::vector<int64_t>>>& y_shape,
const std::vector<int64_t>& x_grad_shape,
const paddle::optional<std::vector<std::vector<int64_t>>>& y_grad_shape) {
return {x_shape};
}
PD_BUILD_OP(custom_optional_inplace_add_vec)
.Inputs({"X", paddle::Optional(paddle::Vec("Y"))})
.Outputs({"OutX", paddle::Optional(paddle::Vec("OutY"))})
.SetInplaceMap({{paddle::Optional(paddle::Vec("Y")),
paddle::Optional(paddle::Vec("OutY"))}})
.SetKernelFn(PD_KERNEL(AddOptionalInplaceVectorForward))
.SetInferShapeFn(PD_INFER_SHAPE(AddOptionalInplaceVectorInferShape))
.SetInferDtypeFn(PD_INFER_DTYPE(AddOptionalInplaceVectorInferDtype));
PD_BUILD_GRAD_OP(custom_optional_inplace_add_vec)
.Inputs({"X",
paddle::Optional(paddle::Vec("Y")),
paddle::Grad("OutX"),
paddle::Grad(paddle::Optional(paddle::Vec("OutY")))})
.Outputs({paddle::Grad("X"),
paddle::Grad(paddle::Optional(paddle::Vec("Y")))})
.SetInplaceMap({{paddle::Grad(paddle::Optional(paddle::Vec("OutY"))),
paddle::Grad(paddle::Optional(paddle::Vec("Y")))}})
.SetKernelFn(PD_KERNEL(AddOptionalInplaceVectorBackward))
.SetInferShapeFn(
PD_INFER_SHAPE(AddOptionalInplaceVectorBackwardInferShape));