Unverified · Commit fcd77346 authored by HongyuJia, committed by GitHub

[CustomOP Optional Inplace] Custom op supports inplace optional tensor (#52216)

* [CustomOP Inplace] Automap inplace dtype and shape, prepare for vector<Tensor> output

* delete custom_inplace_setup.py

* [CustomOP Optional Inplace] Custom operator supports inplace optional Tensor input

* fix bug for vector<Tensor> inplace test
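
For context, a minimal sketch of the user-side pattern this change enables, modeled on the `custom_optional_inplace_add` test operator added in this PR (the op and kernel names below are illustrative placeholders, not part of the framework):

```cpp
// Kernel: the optional inplace input arrives as a mutable
// paddle::optional<paddle::Tensor>&. When the caller passes None,
// the optional is empty and the inplace output simply stays None.
std::vector<paddle::Tensor> MyForward(
    const paddle::Tensor& x,
    paddle::optional<paddle::Tensor>& y) {  // NOLINT
  paddle::Tensor out = paddle::zeros(x.shape(), x.dtype(), x.place());
  // ... fill `out` from x; update *y in place only if y has a value ...
  return {out};  // y is not returned: it is declared as an inplace output below
}

PD_BUILD_OP(my_optional_inplace_op)
    .Inputs({"X", paddle::Optional("Y")})
    .Outputs({"Out", paddle::Optional("OutY")})
    .SetInplaceMap({{paddle::Optional("Y"), paddle::Optional("OutY")}})
    .SetKernelFn(PD_KERNEL(MyForward));
```

With the generated wrappers in this PR, dynamic mode returns None for `OutY` when `y` is None, and static mode simply does not create the dispensable output variable.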
Parent 404162ba
@@ -240,6 +240,26 @@ RunCustomOpNode::operator()(paddle::small_vector<std::vector<paddle::Tensor>,
(*paddle::OpMetaInfoHelper::GetKernelFn(kernel_map.at(op_type_)[1]))(&ctx);
ctx.AssignInplaceOutputs();
// handle optional None output when construct backward graph
for (size_t i = 0; i < ctx.OutputRange().size(); i++) {
if (ctx.OutputRangeAt(i).first + 1 == ctx.OutputRangeAt(i).second) {
size_t idx = ctx.OutputRangeAt(i).first;
paddle::Tensor* out_tensor = ctx.MutableOutputAt(idx);
if (!out_tensor->initialized()) {
PADDLE_ENFORCE(grad_outputs_names.at(idx).find(
paddle::kOptionalSuffix) != std::string::npos,
phi::errors::InvalidArgument(
"Custom operator's %d-th output is not initialized. "
"Please check your implementation again. If you are "
"using inplace optional outputs, then you must use "
"`paddle::Optional` to decorate this output",
idx));
// We can also consider using `autograd_meta` to tolerate nullptr.
out_tensor->set_autograd_meta(std::make_shared<egr::AutogradMeta>());
}
}
}
VLOG(7) << "Get AutogradMeta for inputs and outputs for Custom Op";
std::vector<std::vector<egr::AutogradMeta*>> ins_auto_grad_metas;
std::vector<std::vector<egr::AutogradMeta*>> outs_auto_grad_metas;
...
@@ -148,7 +148,7 @@ static void RunKernelFunc(
VLOG(3) << "Custom Operator: input name - " << in_name;
if (detail::IsDuplicableVar(in_name)) {  // inputs vector<Tensor>
std::vector<paddle::Tensor> custom_vec_in;
if (ctx.HasInputs(in_name)) {  // general vector<Tensor> inputs
// return const std::vector<const phi::DenseTensor*>
auto vec_x = ctx.MultiInput<phi::DenseTensor>(in_name);
PADDLE_ENFORCE_NE(vec_x.empty(),
@@ -174,7 +174,7 @@ static void RunKernelFunc(
custom_t.set_impl(std::make_shared<phi::DenseTensor>(*x));
custom_vec_in.emplace_back(custom_t);
}
} else {  // optional vector<Tensor> inputs.
PADDLE_ENFORCE(
detail::IsOptionalVar(in_name),
phi::errors::NotFound("Your custom operator's KernelFunc cannot "
@@ -191,7 +191,7 @@ static void RunKernelFunc(
}
kernel_ctx.EmplaceBackInputs(std::move(custom_vec_in));
} else {  // inputs Tensor
if (ctx.HasInput(in_name)) {  // general Tensor inputs
auto* x = ctx.Input<phi::DenseTensor>(in_name);
PADDLE_ENFORCE_NOT_NULL(x,
platform::errors::NotFound(
@@ -215,7 +215,7 @@ static void RunKernelFunc(
#else
kernel_ctx.EmplaceBackInput(std::move(custom_in));
#endif
} else {  // optional Tensor inputs
PADDLE_ENFORCE(
detail::IsOptionalVar(in_name),
phi::errors::NotFound("Your custom operator's KernelFunc cannot "
@@ -267,17 +267,34 @@ static void RunKernelFunc(
std::vector<phi::DenseTensor*> true_out_ptrs;
for (size_t i = 0; i < outputs.size(); ++i) {
auto out_name = outputs[i];
if (detail::IsDuplicableVar(
out_name)) {  // general/inplace vector<Tensor> outputs
PADDLE_ENFORCE(
!inplace_map.empty() || (i == 0UL && outputs.size() == 1UL),
phi::errors::PreconditionNotMet(
"If custom operator's outputs contains `paddle::Vec()` type "
"without setting InplaceMap, it only can hold one output."));
auto vec_out = ctx.MultiOutput<phi::DenseTensor>(out_name);
// handle inplace optional outputs = None case
if (vec_out.empty()) {
PADDLE_ENFORCE(
detail::IsOptionalVar(out_name) && !inplace_map.empty(),
phi::errors::InvalidArgument(
"Custom operator couldn't find custom output for name %s. If "
"you "
"are using inplace optional inputs & outputs, please check "
"your "
"InplaceMap and `Outputs` again and make sure %s is wrapped by "
"`paddle::Optional`",
out_name,
out_name));
VLOG(3) << "Custom Operator: InferDtype - inplace optional outputs : "
<< out_name << " is None.";
true_out_ptrs.emplace_back(nullptr);
kernel_ctx.EmplaceBackOutput(std::move(paddle::Tensor()));
continue;
}
// general/inplace vector<Tensor> outputs
std::vector<paddle::Tensor> custom_vec_out;
for (size_t j = 0; j < vec_out.size(); ++j) {
auto* out = vec_out[j];
@@ -295,6 +312,26 @@ static void RunKernelFunc(
}
kernel_ctx.EmplaceBackOutputs(std::move(custom_vec_out));
} else {
// handle inplace optional outputs = None case
if (!ctx.HasOutput(out_name)) {
PADDLE_ENFORCE(
detail::IsOptionalVar(out_name) && !inplace_map.empty(),
phi::errors::InvalidArgument(
"Custom operator couldn't find custom output for name %s. If "
"you "
"are using inplace optional inputs & outputs, please check "
"your "
"InplaceMap and `Outputs` again and make sure %s is wrapped by "
"`paddle::Optional`",
out_name,
out_name));
VLOG(3) << "Custom Operator: InferDtype - inplace optional outputs : "
<< out_name << " is None.";
true_out_ptrs.emplace_back(nullptr);
kernel_ctx.EmplaceBackOutput(std::move(paddle::Tensor()));
continue;
}
// general/inplace Tensor outputs
auto* out = ctx.Output<phi::DenseTensor>(out_name);
PADDLE_ENFORCE_NOT_NULL(out,
platform::errors::NotFound(
@@ -335,6 +372,15 @@ static void RunKernelFunc(
calc_outs->size()));
for (size_t i = 0; i < true_out_ptrs.size(); ++i) {
auto* true_out = true_out_ptrs.at(i);
// handle optional inplace outputs = None case
if (true_out == nullptr && !calc_outs->at(i).defined()) {
continue;
}
PADDLE_ENFORCE(
true_out != nullptr && calc_outs->at(i).defined(),
platform::errors::InvalidArgument(
"The returned Tensor is not defined in the KernelFn or custom "
"operator passes wrong output in static mode."));
auto calc_out =
std::dynamic_pointer_cast<phi::DenseTensor>(calc_outs->at(i).impl());
// assign meta info
@@ -404,9 +450,41 @@ static void RunDefaultInferShapeFunc(
inplace_map.size()));
for (auto const& pair : inplace_map) {
if (detail::IsDuplicableVar(pair.first)) {
// make sure ctx has valid inplace optional outputs
if (!ctx->HasOutputs(pair.second)) {
PADDLE_ENFORCE(
detail::IsOptionalVar(pair.second),
phi::errors::InvalidArgument(
"Custom operator couldn't find custom output name for %s. If "
"you are using inplace optional inputs & outputs, please "
"check "
"your InplaceMap and `Outputs` again and make sure %s is "
"wrapped by `paddle::Optional`",
pair.second,
pair.second));
VLOG(3) << "Custom Operator: InferDtype - inplace optional outputs : "
<< pair.second << " is None.";
} else {
ctx->SetOutputsDim(pair.second, ctx->GetInputsDim(pair.first));
}
} else {
// make sure ctx has valid inplace optional outputs
if (!ctx->HasOutput(pair.second)) {
PADDLE_ENFORCE(
detail::IsOptionalVar(pair.second),
phi::errors::InvalidArgument(
"Custom operator couldn't find custom output name for %s. If "
"you are using inplace optional inputs & outputs, please "
"check "
"your InplaceMap and `Outputs` again and make sure %s is "
"wrapped by `paddle::Optional`",
pair.second,
pair.second));
VLOG(3) << "Custom Operator: InferDtype - inplace optional outputs : "
<< pair.second << " is None.";
} else {
ctx->ShareDim(pair.first, pair.second);
}
}
}
}
@@ -544,12 +622,42 @@ static void RunInferShapeFunc(
"cannot support `paddle::Vec(...)` output without setting "
"InplaceMap. If you have to use `paddle::Vec(...)` output, "
"please indicate it by setting InplaceMap manully."));
// make sure ctx has valid inplace optional outputs
if (ctx->HasOutputs(out_name)) {
auto in_name = inplace_reverse_map.at(out_name);
ctx->SetOutputsDim(out_name, ctx->GetInputsDim(in_name));
} else {
PADDLE_ENFORCE(
detail::IsOptionalVar(out_name),
phi::errors::InvalidArgument(
"Custom operator couldn't find custom output name for %s. If "
"you are using inplace optional inputs & outputs, please check "
"your InplaceMap and `Outputs` again and make sure %s is "
"wrapped by `paddle::Optional`",
out_name,
out_name));
VLOG(3) << "Custom Operator: InferDtype - inplace optional outputs : "
<< out_name << " is None.";
}
} else {
if (inplace_reverse_map.find(out_name) != inplace_reverse_map.end()) {
// make sure ctx has valid inplace optional outputs
if (ctx->HasOutput(out_name)) {
// Share dims between inplace inputs and outputs
ctx->ShareDim(inplace_reverse_map.at(out_name), out_name);
} else {
PADDLE_ENFORCE(
detail::IsOptionalVar(out_name),
phi::errors::InvalidArgument(
"Custom operator couldn't find custom output name for %s. If "
"you are using inplace optional inputs & outputs, please "
"check your InplaceMap and `Outputs` again and make sure %s "
"is wrapped by `paddle::Optional`",
out_name,
out_name));
VLOG(3) << "Custom Operator: InferDtype - inplace optional outputs : "
<< out_name << " is None.";
}
} else {
// Set output dims by the output of InferShapeFn
ctx->SetOutputDim(out_name,
@@ -606,6 +714,21 @@ static void RunDefaultInferDtypeFunc(
for (auto const& pair : inplace_map) {
VLOG(3) << "Custom Operator: InferDtype - inplace dtype: " << pair.first
<< "->" << pair.second;
// make sure ctx has valid inplace optional outputs
if (!ctx->HasOutput(pair.second)) {
PADDLE_ENFORCE(
detail::IsOptionalVar(pair.second),
phi::errors::InvalidArgument(
"Custom operator couldn't find custom output name for %s. If "
"you are using inplace optional inputs & outputs, please check "
"your InplaceMap and `Outputs` again and make sure %s is "
"wrapped by `paddle::Optional`",
pair.second,
pair.second));
VLOG(3) << "Custom Operator: InferDtype - inplace optional outputs : "
<< pair.second << " is None.";
continue;
}
if (detail::IsDuplicableVar(pair.first)) {
size_t size = ctx->InputSize(pair.first);
for (size_t i = 0; i < size; ++i) {
@@ -708,12 +831,42 @@ static void RunInferDtypeFunc(
"InplaceMap. If you have to use `paddle::Vec(...)` output, "
"please indicate it by setting InplaceMap manully."));
auto in_name = inplace_reverse_map.at(out_name);
// make sure ctx has valid inplace optional outputs
if (ctx->HasOutput(out_name)) {
ctx->SetOutputDataTypes(out_name, ctx->GetInputDataTypes(in_name));
} else {
PADDLE_ENFORCE(
detail::IsOptionalVar(out_name),
phi::errors::InvalidArgument(
"Custom operator couldn't find custom output name for %s. If "
"you are using inplace optional inputs & outputs, please check "
"your InplaceMap and `Outputs` again and make sure %s is "
"wrapped by `paddle::Optional`",
out_name,
out_name));
VLOG(3) << "Custom Operator: InferDtype - inplace optional outputs : "
<< out_name << " is None.";
}
} else {
if (inplace_reverse_map.find(out_name) != inplace_reverse_map.end()) {
// make sure ctx has valid inplace optional outputs
if (ctx->HasOutput(out_name)) {
auto in_name = inplace_reverse_map.at(out_name);
// Share dtype between inplace inputs and outputs
ctx->SetOutputDataType(out_name, ctx->GetInputDataType(in_name));
} else {
PADDLE_ENFORCE(
out_name.find(paddle::kOptionalSuffix) != std::string::npos,
phi::errors::InvalidArgument(
"Custom operator couldn't find custom output name for %s. If "
"you are using inplace optional inputs & outputs, please "
"check your InplaceMap and `Outputs` again and make sure %s "
"is wrapped by `paddle::Optional`",
out_name,
out_name));
VLOG(3) << "Custom Operator: InferDtype - inplace optional outputs : "
<< out_name << " is None.";
}
} else {
// Set output dtype by the output of InferDtypeFn
ctx->SetOutputDataType(out_name,
@@ -785,11 +938,13 @@ class CustomOpMaker : public OpProtoAndCheckerMaker {
}
}
for (auto& out_name : outputs_) {
auto output_var_builder =
AddOutput(out_name, "The output " + out_name + "of Custom Operator.");
if (detail::IsDuplicableVar(out_name)) {
output_var_builder.AsDuplicable();
}
if (detail::IsOptionalVar(out_name)) {
output_var_builder.AsDispensable();
}
}
for (auto& attr : attrs_) {
@@ -896,10 +1051,40 @@ class CustomGradOpMaker<OpDesc> : public SingleGradOpMaker<OpDesc> {
in_name));
}
} else {
if (this->HasOutput(detail::NoGrad(in_name))) {
grad_op->SetInput(in_name, this->OutputGrad(detail::NoGrad(in_name)));
} else {
// Maybe visit here! handle inplace optional case
PADDLE_ENFORCE(
in_name.find(paddle::kOptionalSuffix) != std::string::npos,
phi::errors::InvalidArgument(
"Custom operator couldn't find grad operator input name for "
"%s. If you are using inplace optional inputs & outputs, "
"please check your InplaceMap and `Outputs` again and make "
"sure %s is wrapped by `paddle::Optional`",
in_name,
in_name));
VLOG(3) << "Custom Operator: GradOpDescMaker - handle unfound input: "
<< in_name;
}
}
}
for (auto& out_name : outputs_) {
// Handle inplace optional case
if (!this->HasInput(detail::NoGrad(out_name, is_double_grad_))) {
PADDLE_ENFORCE(
out_name.find(paddle::kOptionalSuffix) != std::string::npos,
phi::errors::InvalidArgument(
"Custom operator couldn't find grad operator output name for "
"%s. If you are using inplace optional inputs & outputs, "
"please check your InplaceMap and `Outputs` again and make "
"sure %s is wrapped by `paddle::Optional`",
out_name,
out_name));
VLOG(3) << "Custom Operator: GradOpDescMaker - handle unfound output: "
<< out_name;
continue;
}
VLOG(3) << "Custom Operator: GradOpDescMaker - output: " << out_name; VLOG(3) << "Custom Operator: GradOpDescMaker - output: " << out_name;
if (detail::IsDuplicableVar(out_name)) { if (detail::IsDuplicableVar(out_name)) {
grad_op->SetOutput( grad_op->SetOutput(
...@@ -969,10 +1154,40 @@ class CustomGradOpMaker<imperative::OpBase> ...@@ -969,10 +1154,40 @@ class CustomGradOpMaker<imperative::OpBase>
in_name)); in_name));
} }
} else { } else {
grad_op->SetInput(in_name, this->OutputGrad(detail::NoGrad(in_name))); // Handle inplace optional case
if (this->HasOutput(detail::NoGrad(in_name))) {
grad_op->SetInput(in_name, this->OutputGrad(detail::NoGrad(in_name)));
} else {
PADDLE_ENFORCE(
in_name.find(paddle::kOptionalSuffix) != std::string::npos,
phi::errors::InvalidArgument(
"Custom operator couldn't find grad operator input name for "
"%s. If you are using inplace optional inputs & outputs, "
"please check your InplaceMap and `Outputs` again and make "
"sure %s is wrapped by `paddle::Optional`",
in_name,
in_name));
VLOG(3) << "Custom Operator: GradOpBaseMaker - handle unfound input: "
<< in_name;
}
}
}
for (auto& out_name : outputs_) {
// Handle inplace optional case
if (!this->HasInput(detail::NoGrad(out_name, is_double_grad_))) {
PADDLE_ENFORCE(
out_name.find(paddle::kOptionalSuffix) != std::string::npos,
phi::errors::InvalidArgument(
"Custom operator couldn't find grad operator output name for "
"%s. If you are using inplace optional inputs & outputs, "
"please check your InplaceMap and `Outputs` again and make "
"sure %s is wrapped by `paddle::Optional`",
out_name,
out_name));
VLOG(3) << "Custom Operator: GradOpBaseMaker - handle unfound output: "
<< out_name;
continue;
}
VLOG(3) << "Custom Operator: GradOpBaseMaker - output: " << out_name; VLOG(3) << "Custom Operator: GradOpBaseMaker - output: " << out_name;
grad_op->SetOutput( grad_op->SetOutput(
out_name, this->InputGrad(detail::NoGrad(out_name, is_double_grad_))); out_name, this->InputGrad(detail::NoGrad(out_name, is_double_grad_)));
......
@@ -581,6 +581,27 @@ static PyObject* eager_api_run_custom_op(PyObject* self,
(*paddle::OpMetaInfoHelper::GetKernelFn(vec_map[0]))(&ctx);
ctx.AssignInplaceOutputs();
// handle optional None output when construct backward graph
for (size_t i = 0; i < ctx.OutputRange().size(); i++) {
if (ctx.OutputRangeAt(i).first + 1 == ctx.OutputRangeAt(i).second) {
size_t idx = ctx.OutputRangeAt(i).first;
paddle::Tensor* out_tensor = ctx.MutableOutputAt(idx);
if (!out_tensor->initialized()) {
PADDLE_ENFORCE(
outputs.at(idx).find(paddle::kOptionalSuffix) !=
std::string::npos,
phi::errors::InvalidArgument(
"Custom operator's %d-th output is not initialized. "
"Please check your implementation again. If you are "
"using inplace optional output, then you must use "
"`paddle::Optional` to decorate this output",
idx));
// We can also consider using `autograd_meta` to tolerate nullptr.
out_tensor->set_autograd_meta(std::make_shared<egr::AutogradMeta>());
}
}
}
VLOG(7) << "Get AutogradMeta for inputs and outputs for Custom Op";
std::vector<std::vector<egr::AutogradMeta*>> ins_auto_grad_metas;
std::vector<std::vector<egr::AutogradMeta*>> outs_auto_grad_metas;
@@ -616,11 +637,13 @@ static PyObject* eager_api_run_custom_op(PyObject* self,
egr::EagerUtils::CheckInplace(ctx.InputAt(start_idx + j),
ins_auto_grad_metas[i][j],
require_any_grad);
if (ctx.MutableInputAt(start_idx + j).defined()) {
// Bump Inplace Version
ctx.MutableInputAt(start_idx + j).bump_inplace_version();
VLOG(3) << "Custom operator: Tensor("
<< ctx.InputAt(start_idx + j).name()
<< ") uses Inplace Strategy.";
}
}
}
}
...
@@ -119,6 +119,7 @@ class PADDLE_API CustomOpKernelContext {
const Tensor& InputAt(size_t idx) const;
std::vector<Tensor> InputsBetween(size_t start, size_t end) const;
Tensor& MutableInputAt(size_t idx);
paddle::optional<Tensor> OptionalInputAt(size_t idx);
const std::vector<paddle::any>& Attrs() const { return attrs_; }
const std::vector<std::pair<size_t, size_t>>& InputRange() {
return input_range_;
@@ -230,6 +231,34 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
}
};
// Handle args for inplace Tensor case
template <typename... Tail>
struct ComputeCallHelper<Tensor&, Tail...> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
auto& range = ctx->InputRangeAt(in_idx);
auto& arg = ctx->MutableInputAt(range.first);
ComputeCallHelper<
Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(ctx,
pargs...,
arg);
}
};
// Handle args for optional inplace Tensor input case
template <typename... Tail>
struct ComputeCallHelper<paddle::optional<paddle::Tensor>&, Tail...> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
auto& range = ctx->InputRangeAt(in_idx);
auto arg = ctx->OptionalInputAt(range.first);
ComputeCallHelper<
Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(ctx,
pargs...,
arg);
}
};
// Handle args for general vector<Tensor> input case
template <typename... Tail>
struct ComputeCallHelper<const std::vector<Tensor>&, Tail...> {
@@ -329,20 +358,6 @@ struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
}
};
// Handle args for inplace Tensor case
template <typename... Tail>
struct ComputeCallHelper<Tensor&, Tail...> {
template <int in_idx, int attr_idx, int out_idx, typename... PreviousArgs>
static void Compute(CustomOpKernelContext* ctx, PreviousArgs&... pargs) {
auto& range = ctx->InputRangeAt(in_idx);
auto& arg = ctx->MutableInputAt(range.first);
ComputeCallHelper<
Tail...>::template Compute<in_idx + 1, attr_idx, out_idx>(ctx,
pargs...,
arg);
}
};
// Handle args for inplace vector<Tensor> case
template <typename... Tail>
struct ComputeCallHelper<std::vector<Tensor>&, Tail...> {
...
@@ -25,6 +25,11 @@ limitations under the License. */
namespace paddle {
PADDLE_API void AssignTensorImpl(const Tensor& src, Tensor* dst) {
if (!src.initialized() || !dst->defined()) {
VLOG(3) << "Custom operator assigns non-initialized tensor, this only "
"happens when handling inplace optional inputs & outputs.";
return;
}
PADDLE_ENFORCE_EQ(src.is_dense_tensor() && dst->is_dense_tensor(),
true,
phi::errors::Unavailable(
@@ -98,6 +103,13 @@ Tensor& CustomOpKernelContext::MutableInputAt(size_t idx) {
return inputs_.at(idx);
}
paddle::optional<Tensor> CustomOpKernelContext::OptionalInputAt(size_t idx) {
if (!inputs_.at(idx).is_initialized()) {
return paddle::none;
}
return paddle::make_optional<paddle::Tensor>(inputs_.at(idx));
}
Tensor* CustomOpKernelContext::MutableOutputAt(size_t idx) {
return &(outputs_.at(idx));
}
@@ -187,8 +199,9 @@ void CustomOpKernelContext::AssignInplaceOutputs() {
for (size_t i = 0; i < assign_tensor_size; ++i) {
AssignTensorImpl(inputs_[in_start_idx + i], &outputs_[out_start_idx + i]);
}
VLOG(4) << "Custom operator update inplace input-output tensor "
"successfully. Update map size = "
<< inplace_tensor_map_.size();
}
}
std::vector<Tensor*>* CustomOpKernelContext::AllMutablePlainOutput() {
...
@@ -1036,29 +1036,62 @@ def _generate_python_module(
return custom_module
def _gen_output_content(
op_name, in_names, out_names, ins_map, attrs_map, inplace_reverse_idx
):
# ' ' * tab space * tab number
indent = ' ' * 4 * 2
inplace_idx = {v: k for k, v in inplace_reverse_idx.items()}
dynamic_content = ""
static_content = f"""
{indent}ins = {{}}
{indent}ins_map = {ins_map}
{indent}for key, value in ins_map.items():
{indent} # handle optional inputs
{indent} if value is not None:
{indent} ins[key] = value
{indent}helper = LayerHelper("{op_name}", **locals())
"""
for out_idx, out_name in enumerate(out_names):
in_idx = -1
if out_idx in inplace_reverse_idx:
in_idx = inplace_reverse_idx[out_idx]
if (
in_idx != -1 and "@VECTOR" in in_names[in_idx]
): # inplace vector<Tensor> output case
lower_in_names = in_names[in_idx].split("@")[0].lower()
dynamic_content += f"""
{indent}outs['{out_name}'] = [core.eager.Tensor() for _ in range(len({lower_in_names}))]
{indent}ctx.add_outputs(outs['{out_name}'])"""
static_content += f"""
{indent}outs['{out_name}'] = [helper.create_variable(dtype='float32') for _ in range(len({lower_in_names}))]"""
elif (
in_idx != -1 and "@OPTIONAL" in in_names[in_idx]
): # inplace optional Tensor output case, handle inplace None input
lower_in_names = in_names[in_idx].split("@")[0].lower()
dynamic_content += f"""
{indent}outs['{out_name}'] = core.eager.Tensor()
{indent}ctx.add_outputs(outs['{out_name}'])"""
static_content += f"""
{indent}if {lower_in_names} is not None:
{indent} outs['{out_name}'] = helper.create_variable(dtype='float32')"""
else: # general/inplace Tensor output case
dynamic_content += f"""
{indent}outs['{out_name}'] = core.eager.Tensor()
{indent}ctx.add_outputs(outs['{out_name}'])"""
static_content += f"""
{indent}outs['{out_name}'] = helper.create_variable(dtype='float32')"""
dynamic_content += f"""
{indent}core.eager._run_custom_op(ctx, "{op_name}", True)
{indent}res = [outs[out_name] if isinstance(outs[out_name], list) or outs[out_name]._is_initialized() else None for out_name in outs_list]
{indent}return res[0] if len(res)==1 else res"""
static_content += f"""
{indent}helper.append_op(type="{op_name}", inputs=ins, outputs=outs, attrs={attrs_map})
{indent}res = [outs[out_name] if out_name in outs.keys() else None for out_name in outs_list]
{indent}return res[0] if len(res)==1 else res"""
return dynamic_content, static_content
@@ -1074,7 +1107,12 @@ def _custom_api_content(op_name):
inplace_reverse_idx,
) = _get_api_inputs_str(op_name)
dynamic_content, static_content = _gen_output_content(
op_name,
in_names,
out_names,
ins_map,
attrs_map,
inplace_reverse_idx,
)
lower_in_list = [p.split("@")[0].lower() for p in in_names]
API_TEMPLATE = textwrap.dedent(
@@ -1098,20 +1136,8 @@ def _custom_api_content(op_name):
for j in {attr_names}:
ctx.add_attr(j)
{dynamic_content}
core.eager._run_custom_op(ctx, "{op_name}", True)
else:
ins = {{}}
for key, value in dict({ins_map}).items():
# handle optional inputs
if value is not None:
ins[key] = value
helper = LayerHelper("{op_name}", **locals())
{static_content}
helper.append_op(type="{op_name}", inputs=ins, outputs=outs, attrs={attrs_map})
res = [outs[out_name] for out_name in outs_list]
return res[0] if len(res)==1 else res
"""
).lstrip()
...
@@ -208,3 +208,108 @@ PD_BUILD_GRAD_OP(custom_add_vec)
.Inputs({"X", paddle::Optional(paddle::Vec("Y")), paddle::Grad("Out")})
.Outputs({paddle::Grad("X")})
.SetKernelFn(PD_KERNEL(AddVectorBackward));
/*
if (y) {
outX = 2 * x + y;
outY = x + y;
} else {
outX = 2 * x;
outY = None;
}
*/
std::vector<paddle::Tensor> AddOptionalInplaceForward(
const paddle::Tensor& x,
paddle::optional<paddle::Tensor>& y) { // NOLINT
PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
paddle::Tensor outX = paddle::zeros(x.shape(), x.dtype(), x.place());
PD_DISPATCH_FLOATING_TYPES(
x.type(), "AddOptionalInplaceForward", ([&] {
add_two_pointers<data_t>(
x.data<data_t>(), x.data<data_t>(), outX.data<data_t>(), x.size());
if (y) {
add_one_pointer<data_t>(
y->data<data_t>(), outX.data<data_t>(), outX.size());
add_one_pointer<data_t>(
x.data<data_t>(), y->data<data_t>(), x.size());
}
}));
// No need to return y, because we set it as inplace input.
return {outX};
}
std::vector<paddle::DataType> AddOptionalInplaceInferDtype(
const paddle::DataType& x_dtype,
const paddle::optional<paddle::DataType>& y_dtype) {
return {x_dtype};
}
std::vector<std::vector<int64_t>> AddOptionalInplaceInferShape(
const std::vector<int64_t>& x_shape,
const paddle::optional<std::vector<int64_t>>& y_shape) {
return {x_shape};
}
/*
if (y) {
x_grad = outX_grad * 2 + outY_grad;
y_grad = outX_grad + outY_grad;
} else {
x_grad = outX_grad * 2;
y_grad = None;
}
*/
std::vector<paddle::Tensor> AddOptionalInplaceBackward(
const paddle::Tensor& x,
const paddle::optional<paddle::Tensor>& y,
const paddle::Tensor& outx_grad,
paddle::optional<paddle::Tensor>& outy_grad) { // NOLINT
PD_CHECK(x.place() == paddle::PlaceType::kCPU, "x must be a CPU Tensor.");
paddle::Tensor x_grad = paddle::zeros(x.shape(), x.dtype(), x.place());
PD_DISPATCH_FLOATING_TYPES(
outx_grad.type(), "AddOptionalInplaceBackward", ([&] {
add_two_pointers<data_t>(outx_grad.data<data_t>(),
outx_grad.data<data_t>(),
x_grad.data<data_t>(),
x_grad.size());
if (outy_grad) {
add_one_pointer<data_t>(
outy_grad->data<data_t>(), x_grad.data<data_t>(), x_grad.size());
add_one_pointer<data_t>(outx_grad.data<data_t>(),
outy_grad->data<data_t>(),
outx_grad.size());
}
}));
return {x_grad};
}
std::vector<std::vector<int64_t>> AddOptionalInplaceBackwardInferShape(
const std::vector<int64_t>& x_shape,
const paddle::optional<std::vector<int64_t>>& y_shape,
const std::vector<int64_t>& x_grad_shape,
const paddle::optional<std::vector<int64_t>>& y_grad_shape) {
return {x_shape};
}
PD_BUILD_OP(custom_optional_inplace_add)
.Inputs({"X", paddle::Optional("Y")})
.Outputs({"OutX", paddle::Optional("OutY")})
.SetInplaceMap({{paddle::Optional("Y"), paddle::Optional("OutY")}})
.SetKernelFn(PD_KERNEL(AddOptionalInplaceForward))
.SetInferShapeFn(PD_INFER_SHAPE(AddOptionalInplaceInferShape))
.SetInferDtypeFn(PD_INFER_DTYPE(AddOptionalInplaceInferDtype));
PD_BUILD_GRAD_OP(custom_optional_inplace_add)
.Inputs({"X",
paddle::Optional("Y"),
paddle::Grad("OutX"),
paddle::Grad(paddle::Optional("OutY"))})
.Outputs({paddle::Grad("X"), paddle::Grad(paddle::Optional("Y"))})
.SetInplaceMap({{paddle::Grad(paddle::Optional("OutY")),
paddle::Grad(paddle::Optional("Y"))}})
.SetKernelFn(PD_KERNEL(AddOptionalInplaceBackward))
.SetInferShapeFn(PD_INFER_SHAPE(AddOptionalInplaceBackwardInferShape));
@@ -105,6 +105,124 @@ def optional_static_add(phi_func, device, dtype, np_x, np_y):
return x_v, out_v, x_grad_v
'''
if (y) {
outX = 2 * x + y;
outY = x + y;
} else {
outX = 2 * x;
outY = None;
}
'''
def optional_inplace_dynamic_add(phi_func, device, dtype, np_x, np_y):
paddle.set_device(device)
x = paddle.to_tensor(np_x, dtype=dtype, stop_gradient=False)
if np_y is not None:
y = paddle.to_tensor(np_y, dtype=dtype, stop_gradient=True)
if phi_func:
outx, outy = custom_optional.custom_optional_inplace_add(x, y)
else:
# We need to accumulate y's grad here.
y.stop_gradient = False
outx = 2 * x + y
# Inplace leaf Tensor's stop_gradient should be True
y.stop_gradient = True
outy = y.add_(x)
else:
y = None
if phi_func:
outx, outy = custom_optional.custom_optional_inplace_add(x, y)
else:
outx = 2 * x
outy = None
assert (
outy is None
), "The output `outy` of optional_inplace_dynamic_add should be None"
out = outx + outy if outy is not None else outx
out.backward()
return (
x.numpy(),
outx.numpy(),
y.numpy() if y is not None else None,
outy.numpy() if outy is not None else None,
out.numpy(),
x.grad.numpy(),
y.grad.numpy() if y is not None and y.grad is not None else None,
)
def optional_inplace_static_add(phi_func, device, dtype, np_x, np_y):
paddle.enable_static()
paddle.set_device(device)
with static.scope_guard(static.Scope()):
with static.program_guard(static.Program()):
x = static.data(name="x", shape=[None, np_x.shape[1]], dtype=dtype)
x.stop_gradient = False
if np_y is not None:
y = static.data(
name="y", shape=[None, np_x.shape[1]], dtype=dtype
)
y.stop_gradient = False
feed_dict = {
"x": np_x.astype(dtype),
"y": np_y.astype(dtype),
}
if phi_func:
outx, outy = custom_optional.custom_optional_inplace_add(
x, y
)
else:
outx = 2 * x + y
outy = x + y
else:
feed_dict = {
"x": np_x.astype(dtype),
}
if phi_func:
outx, outy = custom_optional.custom_optional_inplace_add(
x, None
)
else:
outx = 2 * x
outy = None
out = outx + outy if outy is not None else outx
mean_out = paddle.mean(out)
static.append_backward(mean_out)
exe = static.Executor()
exe.run(static.default_startup_program())
if np_y is not None:
x_v, out_v, x_grad_v, y_grad_v = exe.run(
static.default_main_program(),
feed=feed_dict,
fetch_list=[
x.name,
out.name,
x.name + "@GRAD",
y.name + "@GRAD",
],
)
paddle.disable_static()
return [x_v, out_v, x_grad_v, y_grad_v]
else:
x_v, out_v, x_grad_v = exe.run(
static.default_main_program(),
feed=feed_dict,
fetch_list=[
x.name,
out.name,
x.name + "@GRAD",
],
)
paddle.disable_static()
return [x_v, out_v, x_grad_v]
def optional_vector_dynamic_add(phi_func, device, dtype, np_x, np_inputs):
paddle.set_device(device)
x = paddle.to_tensor(np_x, dtype=dtype, stop_gradient=False)
@@ -195,6 +313,10 @@ class TestCustomOptionalJit(unittest.TestCase):
]
def check_output(self, out, pd_out, name):
if out is None and pd_out is None:
return
assert out is not None, "out value of " + name + " is None"
assert pd_out is not None, "pd_out value of " + name + " is None"
np.testing.assert_array_equal(
out,
pd_out,
@@ -204,6 +326,10 @@ class TestCustomOptionalJit(unittest.TestCase):
)
def check_output_allclose(self, out, pd_out, name):
if out is None and pd_out is None:
return
assert out is not None, "out value of " + name + " is None"
assert pd_out is not None, "pd_out value of " + name + " is None"
np.testing.assert_allclose(
out,
pd_out,
@@ -260,6 +386,77 @@ class TestCustomOptionalJit(unittest.TestCase):
self.check_output(phi_out, pd_out, "out")
self.check_output(phi_x_grad, pd_x_grad, "x_grad")
def test_optional_inplace_static_add(self):
for device in self.devices:
for dtype in self.dtypes:
for np_y in [None, self.np_y]:
pd_tuple = optional_inplace_static_add(
False,
device,
dtype,
self.np_x,
np_y,
)
phi_tuple = optional_inplace_static_add(
True,
device,
dtype,
self.np_x,
np_y,
)
self.check_output(phi_tuple[0], pd_tuple[0], "x")
self.check_output(phi_tuple[1], pd_tuple[1], "out")
self.check_output(phi_tuple[2], pd_tuple[2], "x_grad")
if len(phi_tuple) > 3:
self.check_output(phi_tuple[3], pd_tuple[3], "y_grad")
def test_optional_inplace_dynamic_add(self):
for device in self.devices:
for dtype in self.dtypes:
for np_y in [None, self.np_y]:
(
pd_x,
pd_outx,
pd_y,
pd_outy,
pd_out,
pd_x_grad,
pd_y_grad,
) = optional_inplace_dynamic_add(
False,
device,
dtype,
self.np_x,
np_y,
)
(
phi_x,
phi_outx,
phi_y,
phi_outy,
phi_out,
phi_x_grad,
phi_y_grad,
) = optional_inplace_dynamic_add(
True,
device,
dtype,
self.np_x,
np_y,
)
self.check_output(pd_y, pd_outy, "inplace_pd_y")
self.check_output(phi_y, phi_outy, "inplace_phi_y")
self.check_output(phi_x, pd_x, "x")
self.check_output(phi_outx, pd_outx, "outx")
self.check_output(phi_y, pd_y, "y")
self.check_output(phi_outy, pd_outy, "outy")
self.check_output(phi_out, pd_out, "out")
self.check_output(phi_x_grad, pd_x_grad, "x_grad")
self.check_output(phi_y_grad, pd_y_grad, "y_grad")
def test_optional_vector_static_add(self):
for device in self.devices:
for dtype in self.dtypes:
...