Unverified · Commit 11d7026b authored by HappyHeavyRain, committed by GitHub

Generate static graph code of some ops (#49092)

* generate static graph code of some ops

* change the default value of 'num' of 'unstack'

* revert the pow

* fix the 'real'/'imag' op errors caused by the 'complex' op

* fix the code according to review
Parent d5366c47
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/ternary.h"
namespace paddle {
namespace operators {
using framework::OpKernelType;
class AddMMOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return framework::OpKernelType(input_data_type, ctx.GetPlace());
}
};
class AddMMOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("Input", "(Tensor), tensor to be added to the final result.");
AddInput("X", "(Tensor), The first input tensor for mul.");
AddInput("Y", "(Tensor), The second input tensor for mul.");
AddOutput("Out", "(Tensor), The output tensor of addmm op.");
AddAttr<float>("Alpha", "coefficient of x*y.").SetDefault(1.0f);
AddAttr<float>("Beta", "coefficient of input.").SetDefault(1.0f);
AddComment(R"DOC(
AddMM Operator.
This operator performs matrix multiplication of $x$ and $y$, scaled by the coefficient $alpha$;
$input$, scaled by the coefficient $beta$, is added to the final result.
The equation is:
$$Out = alpha * x * y + beta * input$$
$x$ and $y$ must be two-dimensional, and $input$ must be broadcastable to the shape of $x * y$.
)DOC");
}
};
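// A quick numeric check of the documented equation (illustrative values only,
// independent of the actual phi kernel):
//   x = [[1, 2], [3, 4]], y = [[5, 6], [7, 8]], input = [[1, 1], [1, 1]]
//   x * y = [[19, 22], [43, 50]]
//   out = 1.0 * (x * y) + 1.0 * input = [[20, 23], [44, 51]]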
class AddMMGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE_EQ(
ctx->HasInput("Input"),
true,
platform::errors::NotFound("Input(Input) should not be null"));
PADDLE_ENFORCE_EQ(
ctx->HasInput("X"),
true,
platform::errors::NotFound("Input(X) should not be null"));
PADDLE_ENFORCE_EQ(
ctx->HasInput("Y"),
true,
platform::errors::NotFound("Input(Y) should not be null"));
PADDLE_ENFORCE_EQ(
ctx->HasInput(framework::GradVarName("Out")),
true,
platform::errors::NotFound("Input(Out@GRAD) should not be null"));
const auto& input_dims = ctx->GetInputDim("Input");
const auto& x_dims = ctx->GetInputDim("X");
const auto& y_dims = ctx->GetInputDim("Y");
auto input_grad_name = framework::GradVarName("Input");
auto x_grad_name = framework::GradVarName("X");
auto y_grad_name = framework::GradVarName("Y");
if (ctx->HasOutput(input_grad_name)) {
ctx->SetOutputDim(input_grad_name, input_dims);
}
if (ctx->HasOutput(x_grad_name)) {
ctx->SetOutputDim(x_grad_name, x_dims);
}
if (ctx->HasOutput(y_grad_name)) {
ctx->SetOutputDim(y_grad_name, y_dims);
}
}
};
template <typename T>
class AddMMOpGradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> retv) const override {
retv->SetType("addmm_grad");
retv->SetInput("Input", this->Input("Input"));
retv->SetInput("X", this->Input("X"));
retv->SetInput("Y", this->Input("Y"));
retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
retv->SetOutput(framework::GradVarName("Input"), this->InputGrad("Input"));
retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
retv->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
retv->SetAttrMap(this->Attrs());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(addmm,
AddmmInferShapeFunctor,
PD_INFER_META(phi::AddmmInferMeta));
REGISTER_OPERATOR(addmm,
ops::AddMMOp,
ops::AddMMOpMaker,
ops::AddMMOpGradMaker<paddle::framework::OpDesc>,
ops::AddMMOpGradMaker<paddle::imperative::OpBase>,
AddmmInferShapeFunctor);
REGISTER_OPERATOR(addmm_grad, ops::AddMMGradOp);
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/elementwise/elementwise_op_function.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/binary.h"
namespace paddle {
namespace operators {
class ComplexOpMaker : public framework::OpProtoAndCheckerMaker {
protected:
void Make() override {
AddInput("X", "(Tensor), real part of complex_op");
AddInput("Y", "(Tensor), image part of complex_op");
AddOutput("Out", "(Tensor), output of complex_op");
AddComment(R"DOC(
Complex Operator.
Return a complex tensor given the real and imaginary part tensors.
)DOC");
}
};
template <typename T>
class ComplexGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("complex_grad");
op->SetInput("X", this->Input("X"));
op->SetInput("Y", this->Input("Y"));
// op->SetInput("Out", this->Output("Out"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
op->SetAttrMap(this->Attrs());
}
};
class ComplexOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return framework::OpKernelType(data_type, ctx.GetPlace());
}
};
class ComplexGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
auto out_grad_name = framework::GradVarName("Out");
auto computation_dtype = framework::ToRealType(
OperatorWithKernel::IndicateVarDataType(ctx, out_grad_name));
return framework::OpKernelType(computation_dtype, ctx.GetPlace());
}
};
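// Note on ToRealType above: the forward op maps two real tensors to one
// complex tensor, so Out@GRAD is complex while X@GRAD and Y@GRAD are real.
// A minimal sketch of the element-wise backward, assuming out = x + i * y:
//   x_grad = real(out_grad)
//   y_grad = imag(out_grad)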
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(complex,
ComplexInferShapeFunctor,
PD_INFER_META(phi::ComplexInferMeta));
REGISTER_OPERATOR(complex,
ops::ComplexOp,
ops::ComplexOpMaker,
ops::ComplexGradOpMaker<paddle::framework::OpDesc>,
ops::ComplexGradOpMaker<paddle::imperative::OpBase>,
ComplexInferShapeFunctor);
DECLARE_INFER_SHAPE_FUNCTOR(complex_grad,
ComplexGradInferShapeFunctor,
PD_INFER_META(phi::ComplexGradInferMeta));
REGISTER_OPERATOR(complex_grad,
ops::ComplexGradOp,
ComplexGradInferShapeFunctor);
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <memory>
#include <vector>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/op_version_registry.h"
#include "paddle/fluid/operators/utils.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
class RollOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return framework::OpKernelType(data_type, ctx.device_context());
}
};
class RollGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")),
true,
platform::errors::InvalidArgument(
"Input(Out@GRAD) should be not null."));
PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")),
true,
platform::errors::InvalidArgument(
"Output(X@GRAD) should be not null."));
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
ctx, framework::GradVarName("Out")),
ctx.device_context());
}
};
class RollOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "(Tensor) the input tensor.");
AddOutput("Out", "(Tensor), the output tensor.");
AddAttr<std::vector<int64_t>>("shifts",
"The number of places by which the elements "
"of the tensor are shifted.")
.SetDefault({});
AddInput("ShiftsTensor",
"The number of places by which the elements of the tensor "
"are shifted.")
.AsDispensable();
AddAttr<std::vector<int64_t>>(
"axis",
"Axis along which to roll. It must have the same size "
"as shifts, or size == 0")
.SetDefault({});
AddComment(R"DOC(
Roll the tensor along the given dimension(s).
Elements that are shifted beyond the last position
are re-introduced at the first position. If a dimension
is not specified, the tensor will be flattened before
rolling and then restored to the original shape.
)DOC");
}
};
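// Worked example of the wrap-around semantics (1-D, illustrative):
//   x = [1, 2, 3, 4, 5], shifts = [2], axis = [0]
//   out = [4, 5, 1, 2, 3]
// i.e. out[i] = x[(i - shift) mod n]; for a plain std::vector this is the
// same shift as std::rotate(x.begin(), x.end() - 2, x.end()).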
template <typename T>
class RollGradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("roll_grad");
op->SetInput("X", this->Input("X"));
if (this->HasInput("ShiftsTensor")) {
op->SetInput("ShiftsTensor", this->Input("ShiftsTensor"));
}
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetAttrMap(this->Attrs());
}
};
DECLARE_NO_NEED_BUFFER_VARS_INFERER(RollGradNoNeedBufferVarsInferer, "X");
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(roll,
RollInferShapeFunctor,
PD_INFER_META(phi::RollInferMeta));
REGISTER_OPERATOR(roll,
ops::RollOp,
ops::RollOpMaker,
ops::RollGradMaker<paddle::framework::OpDesc>,
ops::RollGradMaker<paddle::imperative::OpBase>,
RollInferShapeFunctor);
REGISTER_OPERATOR(roll_grad,
ops::RollGradOp,
ops::RollGradNoNeedBufferVarsInferer);
REGISTER_OP_VERSION(roll)
.AddCheckpoint(
R"ROC(
Upgrade roll add 1 attribute [axis], delete 1 attribute [dims].
)ROC",
paddle::framework::compatible::OpVersionDesc()
.NewAttr("axis",
"(std::vector<int64_t>) Axis along which to roll. "
"It must have the same size with shifts, or size = 0.",
std::vector<int64_t>())
.DeleteAttr("dims",
"(std::vector<int64_t>) Dims along which to roll. "
"It must have the same size with shifts, or size = 0."))
.AddCheckpoint(
R"ROC(Upgrade roll add a dispensable input "ShiftsTensor".)ROC",
paddle::framework::compatible::OpVersionDesc().NewInput(
"ShiftsTensor",
"The number of places by which the elements of"
"the tensor are shifted."));
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/for_range.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
class UnStackOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};
class UnStackOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "The input of unstack op.");
AddOutput("Y", "The output of unstack op.").AsDuplicable();
AddAttr<int>("axis", "The axis along which Input(X) should be unstacked.")
.SetDefault(0);
AddAttr<int>("num", "The number of outputs(Y).").GreaterThan(0);
AddComment(R"DOC(
UnStack Operator.
UnStack Input(X) into several tensors along Attr(axis).
)DOC");
}
};
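// Worked example (illustrative): unstacking a [2, 3] input along axis = 0
// with num = 2 yields two [3] outputs:
//   X = [[1, 2, 3], [4, 5, 6]]  ->  Y = { [1, 2, 3], [4, 5, 6] }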
template <typename T>
class UnStackGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("unstack_grad");
op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetAttrMap(this->Attrs());
}
};
class UnStackGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};
} // namespace operators
} // namespace paddle
namespace plat = paddle::platform;
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(unstack,
UnStackInferMetaFunctor,
PD_INFER_META(phi::UnStackInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(unstack_grad,
UnStackGradInferMetaFunctor,
PD_INFER_META(phi::UnStackGradInferMeta));
REGISTER_OPERATOR(unstack,
ops::UnStackOp,
ops::UnStackOpMaker,
ops::UnStackGradOpMaker<paddle::framework::OpDesc>,
ops::UnStackGradOpMaker<paddle::imperative::OpBase>,
UnStackInferMetaFunctor);
REGISTER_OPERATOR(unstack_grad,
ops::UnStackGradOp,
UnStackGradInferMetaFunctor);
@@ -20,6 +20,16 @@
func : acosh_grad
inplace : (out_grad -> x_grad)
- backward_op : addmm_grad
forward : addmm (Tensor input, Tensor x, Tensor y, float beta=1.0, float alpha=1.0) -> Tensor(out)
args : (Tensor input, Tensor x, Tensor y, Tensor out_grad, float alpha, float beta)
output : Tensor(input_grad), Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralTernaryGradInferMeta
param : [input, x, y]
kernel :
func : addmm_grad
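# For reference, the gradients behind addmm_grad follow the forward equation
# out = alpha * x * y + beta * input (a sketch; input_grad is additionally
# reduced over any broadcast dims of input):
#   input_grad = beta * out_grad
#   x_grad     = alpha * out_grad * y^T
#   y_grad     = alpha * x^T * out_grad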
- backward_op : angle_grad
forward : angle (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
@@ -172,6 +182,16 @@
kernel :
func : cholesky_solve_grad
- backward_op : complex_grad
forward : complex (Tensor real, Tensor imag) -> Tensor(out)
args : (Tensor real, Tensor imag, Tensor out_grad)
output : Tensor(real_grad), Tensor(imag_grad)
infer_meta :
func : ComplexGradInferMeta
kernel :
func : complex_grad
data_type : real
- backward_op : conj_grad
forward : conj (Tensor x) -> Tensor(out)
args : (Tensor out_grad)
@@ -853,6 +873,18 @@
kernel :
func : renorm_grad
- backward_op : roll_grad
forward : roll(Tensor x, IntArray shifts, int64_t[] axis) -> Tensor(out)
args : (Tensor x, Tensor out_grad, IntArray shifts, int64_t[] axis)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : roll_grad
data_type : x
no_need_buffer : x
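# The backward of roll is itself a roll with negated shifts, i.e.
# x_grad = roll(out_grad, -shifts, axis); x appears under no_need_buffer
# because only its shape is consumed (via UnchangedInferMeta).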
- backward_op : round_grad
forward : round(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
@@ -1250,6 +1282,15 @@
data_type : out_grad
no_need_buffer : x
- backward_op : unstack_grad
forward : unstack (Tensor x, int axis=0, int num=0) -> Tensor[](out)
args : (Tensor[] out_grad, int axis)
output : Tensor(x_grad)
infer_meta :
func : UnStackGradInferMeta
kernel :
func : unstack_grad
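# The backward of unstack is a stack of the output gradients, i.e.
# x_grad = stack(out_grad, axis); the forward's num attribute is not needed
# here since it equals the length of out_grad.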
- backward_op : where_grad
forward : where (Tensor condition, Tensor x, Tensor y) -> Tensor(out)
args : (Tensor condition, Tensor x, Tensor y, Tensor out_grad)
......
@@ -56,16 +56,6 @@
func : add_triple_grad
inplace : (grad_grad_out_grad -> grad_grad_x_grad)
- backward_op : addmm_grad
forward : addmm (Tensor input, Tensor x, Tensor y, float beta, float alpha) -> Tensor(out)
args : (Tensor input, Tensor x, Tensor y, Tensor out_grad, float alpha, float beta)
output : Tensor(input_grad), Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralTernaryGradInferMeta
param : [input, x, y]
kernel :
func : addmm_grad
- backward_op : affine_grid_grad
forward : affine_grid (Tensor input, IntArray outputShape, bool align_corners=true) -> Tensor(output)
args : (Tensor input, Tensor output_grad, IntArray outputShape, bool align_corners=true)
@@ -225,16 +215,6 @@
backward : clip_double_grad
inplace : (out_grad -> x_grad)
- backward_op : complex_grad
forward : complex (Tensor real, Tensor imag) -> Tensor(out)
args : (Tensor real, Tensor imag, Tensor out_grad)
output : Tensor(real_grad), Tensor(imag_grad)
infer_meta :
func : ComplexGradInferMeta
kernel :
func : complex_grad
data_type : real
- backward_op : concat_double_grad
forward : concat_grad (Tensor[] x, Tensor grad_out, Scalar axis) -> Tensor[](grad_x)
args : (Tensor[] grad_x_grad, Scalar axis = 0)
@@ -1294,18 +1274,6 @@
data_type : x
optional : boxes_num
- backward_op : roll_grad
forward : roll(Tensor x, IntArray shifts, int64_t[] axis) -> Tensor(out)
args : (Tensor x, Tensor out_grad, IntArray shifts, int64_t[] axis)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : roll_grad
data_type : x
no_need_buffer : x
- backward_op : scale_grad
forward : scale (Tensor x, Scalar scale, float bias, bool bias_after_scale) -> Tensor(out)
args : (Tensor out_grad, Scalar scale=1.0, bool bias_after_scale=true)
@@ -1665,16 +1633,6 @@
inplace : (out_grad -> x_grad)
backward : unsqueeze_double_grad
- backward_op : unstack_grad
forward : unstack (Tensor x, int axis, int num) -> Tensor[](out)
args : (Tensor[] out_grad, int axis)
output : Tensor(x_grad)
infer_meta :
func : UnStackGradInferMeta
param : [out_grad, axis]
kernel :
func : unstack_grad
- backward_op : warpctc_grad
forward : warpctc (Tensor logits, Tensor label, Tensor logits_length, Tensor labels_length, int blank, bool norm_by_times) -> Tensor(loss), Tensor(warpctcgrad)
args : (Tensor logits, Tensor logits_length, Tensor warpctcgrad, Tensor loss_grad, int blank, bool norm_by_times)
......
@@ -87,15 +87,6 @@
invoke : add_n_impl(inputs)
backward : add_n_grad
- op : addmm
args : (Tensor input, Tensor x, Tensor y, float beta, float alpha)
output : Tensor
infer_meta :
func : AddmmInferMeta
kernel :
func : addmm
backward : addmm_grad
- op : affine_grid
args : (Tensor input, IntArray outputShape, bool align_corners=true)
output : Tensor
@@ -404,15 +395,6 @@
func : coalesce_tensor
data_type : dtype
- op : complex
args : (Tensor real, Tensor imag)
output : Tensor
infer_meta :
func : ComplexInferMeta
kernel :
func : complex
backward : complex_grad
- op : concat
args : (Tensor[] x, Scalar(int64_t) axis)
output : Tensor
@@ -1507,6 +1489,7 @@
param: [x]
kernel :
func : pow
data_type : x
backward : pow_grad
- op : prelu
@@ -1687,15 +1670,6 @@
intermediate : arg_max
backward : roi_pool_grad
- op : roll
args : (Tensor x, IntArray shifts, int64_t[] axis)
output : Tensor(out)
infer_meta :
func : RollInferMeta
kernel :
func : roll
backward : roll_grad
- op : scale
args : (Tensor x, Scalar scale, float bias, bool bias_after_scale)
output : Tensor(out)
@@ -2106,15 +2080,6 @@
intermediate : xshape
backward : unsqueeze_grad
- op : unstack
args : (Tensor x, int axis, int num)
output : Tensor[]{num}
infer_meta :
func : UnStackInferMeta
kernel :
func : unstack
backward : unstack_grad
- op : update_loss_scaling_
args : (Tensor[] x, Tensor found_infinite, Tensor prev_loss_scaling, Tensor in_good_steps, Tensor in_bad_steps, int incr_every_n_steps, int decr_every_n_nan_or_inf, float incr_ratio, float decr_ratio, Scalar stop_update)
output : Tensor[](out){x.size()}, Tensor(loss_scaling), Tensor(out_good_steps), Tensor(out_bad_steps)
......
@@ -46,6 +46,12 @@
- op : addmm
backward : addmm_grad
inputs :
{input : Input, x : X, y : Y}
outputs :
out : Out
attrs :
{alpha : Alpha, beta : Beta}
extra :
attrs : [bool use_mkldnn = false]
@@ -178,6 +184,13 @@
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
- op : complex
backward : complex_grad
inputs :
{real : X, imag : Y}
outputs :
out : Out
- op : concat
backward : concat_grad
extra :
@@ -1023,6 +1036,17 @@
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : roll
backward : roll_grad
inputs :
x : X
outputs :
out : Out
int_array :
shifts :
data_type : int64_t
tensor_name : ShiftsTensor
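# When ShiftsTensor is fed at runtime it takes precedence over the shifts
# attribute (see RollOpArgumentMapping further below); the int_array block
# binds that tensor to the IntArray shifts argument.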
- op : round
backward : round_grad
inputs :
@@ -1324,6 +1348,13 @@
outputs :
out : Y
- op : unstack
backward : unstack_grad
inputs :
x : X
outputs :
out : Y
- op : viterbi_decode
inputs :
{potentials : Input, transition_params : Transition, lengths : Length}
......
@@ -16,6 +16,20 @@
comment : In order to specify interpolation mode
default : std::string("bilinear")
- op : roll
version :
- checkpoint : Upgrade roll add 1 attribute [axis], delete 1 attribute [dims].
action :
- add_attr : axis
comment : Axis along which to roll. It must have the same size as shifts, or size = 0.
default : std::vector<float>()
- delete_attr : dims
comment : Dims along which to roll. It must have the same size as shifts, or size = 0.
- checkpoint : Upgrade roll add a dispensable input "ShiftsTensor"
action :
- add_input : ShiftsTensor
comment : The number of places by which the elements of the tensor are shifted.
- op : trace
version :
- checkpoint : Upgrade trace add a new attribute [axis2]
......
@@ -16,6 +16,16 @@
func : acosh
backward : acosh_grad
- op : addmm
args : (Tensor input, Tensor x, Tensor y, float beta=1.0, float alpha=1.0)
output : Tensor
infer_meta :
func : AddmmInferMeta
kernel :
func : addmm
data_type : x
backward : addmm_grad
- op : angle
args : (Tensor x)
output : Tensor
@@ -152,6 +162,16 @@
func : cholesky_solve
backward : cholesky_solve_grad
- op : complex
args : (Tensor real, Tensor imag)
output : Tensor
infer_meta :
func : ComplexInferMeta
kernel :
func : complex
data_type : real
backward : complex_grad
- op : conj
args : (Tensor x)
output : Tensor (out)
@@ -801,6 +821,16 @@
func : renorm
backward : renorm_grad
- op : roll
args : (Tensor x, IntArray shifts={}, int64_t[] axis={})
output : Tensor(out)
infer_meta :
func : RollInferMeta
kernel :
func : roll
data_type : x
backward : roll_grad
- op : round
args : (Tensor x)
output : Tensor(out)
@@ -1071,6 +1101,15 @@
func : unfold
backward : unfold_grad
- op : unstack
args : (Tensor x, int axis=0, int num=0)
output : Tensor[](out){num}
infer_meta :
func : UnStackInferMeta
kernel :
func : unstack
backward : unstack_grad
- op : viterbi_decode
args : (Tensor potentials, Tensor transition_params, Tensor lengths, bool include_bos_eos_tag = true)
output : Tensor(scores), Tensor(path)
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature AddmmOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature(
"addmm", {"Input", "X", "Y"}, {"Beta", "Alpha"}, {"Out"});
}
KernelSignature AddmmGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("addmm_grad",
{"Input", "X", "Y", "Out@GRAD"},
{"Alpha", "Beta"},
{"Input@GRAD", "X@GRAD", "Y@GRAD"});
}
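// Note the attribute order: the forward mapping passes (Beta, Alpha) and the
// grad mapping passes (Alpha, Beta), mirroring the generated signatures
// addmm(..., float beta, float alpha) and addmm_grad(..., float alpha,
// float beta) in the YAML above.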
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(addmm, phi::AddmmOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(addmm_grad, phi::AddmmGradOpArgumentMapping);
@@ -24,14 +24,7 @@ KernelSignature ImagGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("imag_grad", {"Out@GRAD"}, {}, {"X@GRAD"});
}
KernelSignature ComplexGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"complex_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(real_grad, phi::RealGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(imag_grad, phi::ImagGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(complex_grad, phi::ComplexGradOpArgumentMapping);
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature RollOpArgumentMapping(const ArgumentMappingContext& ctx) {
if (ctx.HasInput("ShiftsTensor")) {
return KernelSignature("roll", {"X"}, {"ShiftsTensor", "axis"}, {"Out"});
}
return KernelSignature("roll", {"X"}, {"shifts", "axis"}, {"Out"});
}
KernelSignature RollGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature(
"roll_grad", {"X", "Out@GRAD"}, {"shifts", "axis"}, {"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(roll, phi::RollOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(roll_grad, phi::RollGradOpArgumentMapping);
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature UnStackGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("unstack_grad", {"Y@GRAD"}, {"axis"}, {"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(unstack_grad, phi::UnStackGradOpArgumentMapping);