Unverified commit 28864137, authored by HappyHeavyRain, committed by GitHub

Support the 'data_transform' for generating static graph ops (#49772)

* support the 'data_transform' for generating static graph ops

* reset 'pow' code

* change the 'GetKernelTypeForVar'
Parent 1885d55a
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_proto_maker.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
class SlogDeterminantOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
OP_INOUT_CHECK(ctx->HasInput("Input"), "Input", "Input", "determinant");
OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out", "determinant");
}
};
class SlogDeterminantOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("Input", "(Tensor) The input tensor of SlogDeterminant.");
AddOutput("Out",
"(Tensor) The output tensor containing the sign of the"
"determinant and the natural logarithm"
"of the absolute value of determinant,");
AddComment(R"DOC(
SlogDeterminant Operator.)DOC");
}
};
class SlogDeterminantGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
OP_INOUT_CHECK(
ctx->HasInput("Input"), "Input", "Input", "SlogDeterminantGradOp");
OP_INOUT_CHECK(
ctx->HasInput("Out"), "Input", "Out", "SlogDeterminantGradOp");
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
"Input",
framework::GradVarName("Out"),
"SlogDeterminantGradOp");
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("Input")),
"Output",
framework::GradVarName("Input"),
"SlogDeterminantGradOp");
ctx->SetOutputDim(framework::GradVarName("Input"),
ctx->GetInputDim("Input"));
}
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(
ctx, framework::GradVarName("Out")),
ctx.GetPlace());
}
};
template <typename T>
class SlogDeterminantGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> grad_op) const override {
grad_op->SetType("slogdeterminant_grad");
grad_op->SetInput("Input", this->Input("Input"));
grad_op->SetInput("Out", this->Output("Out"));
grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
grad_op->SetOutput(framework::GradVarName("Input"),
this->InputGrad("Input"));
grad_op->SetAttrMap(this->Attrs());
}
};
DECLARE_NO_NEED_BUFFER_VARS_INFERER(SlogDeterminantGradNoNeedBufferVarsInferer,
"Input");
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
DECLARE_INFER_SHAPE_FUNCTOR(slogdeterminant,
SlogDeterminantInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));
REGISTER_OPERATOR(slogdeterminant,
ops::SlogDeterminantOp,
ops::SlogDeterminantOpMaker,
ops::SlogDeterminantGradOpMaker<paddle::framework::OpDesc>,
ops::SlogDeterminantGradOpMaker<paddle::imperative::OpBase>,
SlogDeterminantInferShapeFunctor);
DECLARE_INFER_SHAPE_FUNCTOR(slogdeterminant_grad,
SlogDeterminantGradInferShapeFunctor,
PD_INFER_META(phi::GeneralUnaryGradInferMeta));
REGISTER_OPERATOR(slogdeterminant_grad,
ops::SlogDeterminantGradOp,
SlogDeterminantGradInferShapeFunctor);  // reuse det grad op
@@ -256,6 +256,16 @@ def replace_compat_name(op_fluid_map_list, forward_op_dict, backward_op_dict):
            op_item['no_need_buffer'] = get_param_list_alias(
                op_item['no_need_buffer'], args_map
            )
        if 'data_transform' in op_item and op_item['data_transform']:
            data_trans_item = op_item['data_transform']
            # Map the YAML argument names to their fluid (compat) aliases.
            if 'skip_transform' in data_trans_item:
                data_trans_item['skip_transform'] = get_param_list_alias(
                    data_trans_item['skip_transform'], args_map
                )
            if 'support_trans_dtype' in data_trans_item:
                data_trans_item['support_trans_dtype'] = get_param_list_alias(
                    data_trans_item['support_trans_dtype'], args_map
                )
        process_scalar(op_item, scalar_configs)
        process_int_array(op_item, int_array_configs)
......
@@ -427,7 +427,41 @@ def parse_op_entry(op_entry: Dict[str, Any], name_field="op"):
    else:
        no_buffer_args = None

    # Add a data_transform tag to every input. The YAML format is
    # {data_transform : {skip_transform : [x, z], support_trans_dtype : y}}.
    for input in inputs:
        input["data_transform"] = {}
    if "data_transform" in op_entry:
        skip_trans_args = []
        support_trans_args = []
        data_trans = op_entry["data_transform"]
        if "skip_transform" in data_trans:
            skip_trans_args = parse_plain_list(data_trans["skip_transform"])
            for name in skip_trans_args:
                assert (
                    name in input_names
                ), f"{op_name} has a skip_transform entry '{name}' that is not one of its inputs."
            data_trans["skip_transform"] = skip_trans_args
        if "support_trans_dtype" in data_trans:
            support_trans_args = parse_plain_list(
                data_trans["support_trans_dtype"]
            )
            for name in support_trans_args:
                assert (
                    name in input_names
                ), f"{op_name} has a support_trans_dtype entry '{name}' that is not one of its inputs."
            data_trans["support_trans_dtype"] = support_trans_args
        # Tag each input with booleans so the code generator can branch on them.
        for input in inputs:
            input["data_transform"]["skip_trans_args"] = (
                input["name"] in skip_trans_args
            )
            input["data_transform"]["support_trans_dtype"] = (
                input["name"] in support_trans_args
            )
    else:
        data_trans = None

    op = {
        "name": op_name,
@@ -435,6 +469,7 @@ def parse_op_entry(op_entry: Dict[str, Any], name_field="op"):
"attrs": attrs,
"outputs": outputs,
"no_need_buffer": no_buffer_args,
"data_transform": data_trans,
}
# invokes another op ?
......
@@ -117,6 +117,15 @@ static_cast<int>(phi::Place({{"phi::" if not default_value is initializer_list}}
{# --------------------------------------- name mapping ---------------------------------------------- #}
{% macro name_map(op) %}
/*
******************************************************************
NOTE: The following code is for 'get_compat_kernel_signature.py'
All possible KernelSignatures returned by {{op["name"] | to_pascal_case }}OpArgumentMapping:
{{op | cartesian_prod_mapping}}
******************************************************************
*/
KernelSignature {{op["op_name"] | to_pascal_case }}OpArgumentMapping(const ArgumentMappingContext& ctx) {
{% set kernel_args = op["kernel"]["param"] %}
{{get_input_list(op["inputs"], kernel_args)}};
@@ -136,15 +145,6 @@ KernelSignature {{op["op_name"] | to_pascal_case }}OpArgumentMapping(const Argum
return sig;
{%endif%}
}
/*
******************************************************************
NOTE: The following codes are for 'get_compat_kernel_signature.py'
All possible KernelSignatures returned by {{op["name"] | to_pascal_case }}OpArgumentMapping:
{{op | cartesian_prod_mapping}}
******************************************************************
*/
{% endmacro %}
{% macro get_kernel_dispatch(inputs, kernel_config) %}{# inline #}
@@ -172,6 +172,15 @@ ctx.IsSparseCsrTensorInput("{{input["name"]}}"){{" && " if not loop.last}}
{%- endmacro %}
{% macro sparse_op_name_map(op) %}
/*
******************************************************************
NOTE: The following code is for 'get_compat_kernel_signature.py'
All possible KernelSignatures returned by {{op["name"] | to_pascal_case }}OpArgumentMapping:
{{op | cartesian_prod_mapping}}
******************************************************************
*/
KernelSignature {{op["op_name"] | to_pascal_case }}OpArgumentMapping(const ArgumentMappingContext& ctx) {
{% set kernel_args = op["kernel"]["param"] %}
{{get_input_list(op["inputs"], kernel_args)}};
@@ -188,15 +197,6 @@ KernelSignature {{op["op_name"] | to_pascal_case }}OpArgumentMapping(const Argum
KernelSignature sig (kernel_name, std::move(inputs), std::move(attrs), std::move(outputs));
return sig;
}
/*
******************************************************************
NOTE: The following codes are for 'get_compat_kernel_signature.py'
All possible KernelSignatures returned by {{op["name"] | to_pascal_case }}OpArgumentMapping:
{{op | cartesian_prod_mapping}}
******************************************************************
*/
{% endmacro %}
{% macro register_base_kernel_name(op) %}
@@ -284,6 +284,32 @@ phi::KernelKey GetExpectedKernelType(
}
{% endmacro %}
{% macro get_kernel_for_var(op) %} {# only for data_transform #}
{% set skip_args = op["data_transform"]["skip_transform"] %}
phi::KernelKey GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
const phi::KernelKey& expected_kernel_type) const override {
if (
{%- for skip_arg in skip_args -%}
var_name == "{{ skip_arg }}"{{" || " if not loop.last}}
{%- endfor -%}
) {
return phi::KernelKey(phi::Backend::ALL_BACKEND,
expected_kernel_type.layout(),
expected_kernel_type.dtype());
} else {
return phi::KernelKey(
tensor.place(), tensor.layout(), expected_kernel_type.dtype());
}
}
{% endmacro %}
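For reference, a sketch of what this macro expands to for an op declaring skip_transform on a single input whose fluid-compat name is "X" (a hypothetical name; the real expansion uses the op's registered argument names). Compare the hand-written GetKernelTypeForVar in size_op.cc further below, which skips the transform for its only input:

// Hypothetical expansion of get_kernel_for_var for
// data_transform : skip_transform : x  (input registered as "X").
phi::KernelKey GetKernelTypeForVar(
    const std::string& var_name,
    const phi::DenseTensor& tensor,
    const phi::KernelKey& expected_kernel_type) const override {
  if (var_name == "X") {
    // Skip the transform: accept the tensor wherever it already lives.
    return phi::KernelKey(phi::Backend::ALL_BACKEND,
                          expected_kernel_type.layout(),
                          expected_kernel_type.dtype());
  } else {
    // Transform as usual, based on the tensor's actual place and layout.
    return phi::KernelKey(
        tensor.place(), tensor.layout(), expected_kernel_type.dtype());
  }
}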
{# --------------------------------------- operator ---------------------------------------------- #}
{% macro operator(op) %}
class {{op["op_name"] | to_pascal_case}}Op : public framework::OperatorWithKernel {
@@ -293,9 +319,17 @@ class {{op["op_name"] | to_pascal_case}}Op : public framework::OperatorWithKerne
{% set kernel = op["kernel"] %}
{% if kernel["data_type"] is not none %}
protected:
{% filter indent(2, True)%}
{{get_expected_kernel(op)}}
{% endfilter %}
{%- if "data_transform" in op and op["data_transform"] is not none -%}
{%- if "skip_transform" in op["data_transform"] -%}
{% filter indent(2, True) %}
{{get_kernel_for_var(op)}}
{% endfilter %}
{%- endif %}
{%- endif -%}
{# TODO(lizhiyu): add the 'support_trans_dtype' #}
{% endif %}
};
......
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/ternary.h"
namespace paddle {
namespace operators {
class GraphSendRecvOP : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(ctx, "X"),
ctx.device_context().GetPlace());
}
};
class GraphSendRecvGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
auto in_dims = ctx->GetInputDim("X");
ctx->SetOutputDim(framework::GradVarName("X"), in_dims);
}
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(
ctx, framework::GradVarName("Out")),
ctx.device_context().GetPlace());
}
};
class GraphSendRecvOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"The input tensor with data type float32, float64, int32, int64.");
AddInput("Src_index", "The source index tensor.");
AddInput("Dst_index", "The destination index tensor.");
AddInput("Out_size",
"(Tensor<int>, optional). The 0th dimension of the output."
"It has a higher priority than Attr(out_size).")
.AsDispensable();
AddOutput("Out", "Output tensor of graph_send_recv op.");
AddOutput("Dst_count",
"Count tensor of Dst_index, mainly for MEAN reduce_op.")
.AsIntermediate();
AddAttr<std::string>("reduce_op",
"(string, default 'SUM')"
"Define different pool types to receive the result "
"tensors of Dst_index.")
.SetDefault("SUM")
.InEnum({"SUM", "MEAN", "MIN", "MAX"});
AddAttr<std::vector<int64_t>>(
"out_size",
"(vector<int64_t>, default {0}) "
"Define the first dimension of the output tensor. "
"If set to the default {0}, the shape of Out is the same as that of X.")
.SetDefault({0});
AddComment(R"DOC(
Graph Learning Send_Recv combine operator.
$Out = Recv(Send(X, Src_index), Dst_index, reduce_op)$
This operator is mainly used in Graph Learning domain, and the main purpose is to reduce
intermediate memory consumption in the process of message passing.
Take `x` as the input tensor, we first use `src_index` to gather corresponding data,
and then use `dst_index` to update the corresponding position of output tensor in different
pooling types, like sum, mean, max, or min.
)DOC");
}
};
template <typename T>
class GraphSendRecvGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("graph_send_recv_grad");
op->SetInput("Src_index", this->Input("Src_index"));
op->SetInput("Dst_index", this->Input("Dst_index"));
op->SetInput("X", this->Input("X"));
if (PADDLE_GET_CONST(std::string, this->GetAttr("reduce_op")) == "MEAN") {
op->SetInput("Dst_count", this->Output("Dst_count"));
}
if (PADDLE_GET_CONST(std::string, this->GetAttr("reduce_op")) == "MIN" ||
PADDLE_GET_CONST(std::string, this->GetAttr("reduce_op")) == "MAX") {
op->SetInput("Out", this->Output("Out"));
}
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetAttrMap(this->Attrs());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(graph_send_recv,
GraphSendRecvInferShapeFunctor,
PD_INFER_META(phi::SendURecvInferMeta));
REGISTER_OPERATOR(graph_send_recv,
ops::GraphSendRecvOP,
ops::GraphSendRecvOpMaker,
ops::GraphSendRecvGradOpMaker<paddle::framework::OpDesc>,
ops::GraphSendRecvGradOpMaker<paddle::imperative::OpBase>,
GraphSendRecvInferShapeFunctor);
REGISTER_OPERATOR(graph_send_recv_grad, ops::GraphSendRecvGradOp);
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/multiary.h"
namespace paddle {
namespace operators {
class GraphSendUERecvOP : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(ctx, "X"),
ctx.device_context().GetPlace());
}
};
class GraphSendUERecvGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
auto in_dims = ctx->GetInputDim("X");
ctx->SetOutputDim(framework::GradVarName("X"), in_dims);
auto y_dims = ctx->GetInputDim("Y");
ctx->SetOutputDim(framework::GradVarName("Y"), y_dims);
}
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(
ctx, framework::GradVarName("Out")),
ctx.device_context().GetPlace());
}
};
class GraphSendUERecvOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"The input tensor with data type float32, float64, int32, int64.");
AddInput("Y",
"The input edge weight tensor, data type should be same with X");
AddInput("Src_index", "The source index tensor.");
AddInput("Dst_index", "The destination index tensor.");
AddInput("Out_size",
"(Tensor<int>, optional). The 0th dimension of the output."
"It has a higher priority than Attr(out_size).")
.AsDispensable();
AddOutput("Out", "Output tensor of graph_send_ue_recv op.");
AddOutput("Dst_count",
"Count tensor of Dst_index, mainly for MEAN reduce_op.")
.AsIntermediate();
AddAttr<std::string>("message_op",
"(string, default 'ADD')"
"Define differenct computation types between X and E.")
.SetDefault("ADD")
.InEnum({"ADD", "MUL"});
AddAttr<std::string>("reduce_op",
"(string, default 'SUM')"
"Define different pool types to receive the result "
"tensors of Dst_index.")
.SetDefault("SUM")
.InEnum({"SUM", "MEAN", "MIN", "MAX"});
AddAttr<std::vector<int64_t>>(
"out_size",
"(vector<int64_t>, default {0}) "
"Define the first dimension of the output tensor. "
"If set to the default {0}, the shape of Out is the same as that of X.")
.SetDefault({0});
AddComment(R"DOC(
Graph Learning Send_UE_Recv combine operator.
$Out = Recv(Compute(Send(X, Src_index), Y, message_op), Dst_index, reduce_op)$
This operator is mainly used in Graph Learning domain, and the main purpose is to reduce
intermediate memory consumption in the process of message passing.
Take `X` as the input tensor, we first use `src_index` to gather corresponding data.
Then the gather data should compute with `Y` in different message_ops, like add, sub, mul, and div,
and get the computation result. Then, use `dst_index` to update the corresponding position of output
tensor in different pooling types, like sum, mean, max, or min.
)DOC");
}
};
template <typename T>
class GraphSendUERecvGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("graph_send_ue_recv_grad");
op->SetInput("X", this->Input("X"));
op->SetInput("Y", this->Input("Y"));
op->SetInput("Src_index", this->Input("Src_index"));
op->SetInput("Dst_index", this->Input("Dst_index"));
if (PADDLE_GET_CONST(std::string, this->GetAttr("reduce_op")) == "MEAN") {
op->SetInput("Dst_count", this->Output("Dst_count"));
}
if (PADDLE_GET_CONST(std::string, this->GetAttr("reduce_op")) == "MIN" ||
PADDLE_GET_CONST(std::string, this->GetAttr("reduce_op")) == "MAX") {
op->SetInput("Out", this->Output("Out"));
}
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
op->SetAttrMap(this->Attrs());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(graph_send_ue_recv,
GraphSendUERecvInferShapeFunctor,
PD_INFER_META(phi::SendUERecvInferMeta));
REGISTER_OPERATOR(graph_send_ue_recv,
ops::GraphSendUERecvOP,
ops::GraphSendUERecvOpMaker,
ops::GraphSendUERecvGradOpMaker<paddle::framework::OpDesc>,
ops::GraphSendUERecvGradOpMaker<paddle::imperative::OpBase>,
GraphSendUERecvInferShapeFunctor);
REGISTER_OPERATOR(graph_send_ue_recv_grad, ops::GraphSendUERecvGradOp);
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
class SizeOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
auto dtype = framework::proto::VarType::FP32; // dtype is not important
return phi::KernelKey(dtype, ctx.GetPlace());
}
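// Size only inspects metadata, so its input never needs a transform:
// returning ALL_BACKEND accepts the tensor on whatever backend it lives.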
phi::KernelKey GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
const phi::KernelKey& expected_kernel_type) const override {
return phi::KernelKey(phi::Backend::ALL_BACKEND,
expected_kernel_type.layout(),
expected_kernel_type.dtype());
}
};
class SizeOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("Input", "The input tensor.");
AddOutput("Out",
"The returned tensor, the data type "
"is int64_t, will be on the same device with the input Tensor.");
AddComment(R"DOC(
Size Operator.
Return the number of elements in the input.
)DOC");
}
};
DECLARE_NO_NEED_BUFFER_VARS_INFERER(SizeOpNoNeedBufferVarInferer, "Input");
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(size,
SizeInferShapeFunctor,
PD_INFER_META(phi::NumelInferMeta));
REGISTER_OPERATOR(
size,
ops::SizeOp,
ops::SizeOpMaker,
paddle::framework::EmptyGradOpMaker<paddle::framework::OpDesc>,
paddle::framework::EmptyGradOpMaker<paddle::imperative::OpBase>,
SizeInferShapeFunctor,
ops::SizeOpNoNeedBufferVarInferer);
@@ -1085,6 +1085,30 @@
func : selu_grad
data_type : out
- backward_op : send_u_recv_grad
forward : send_u_recv (Tensor x, Tensor src_index, Tensor dst_index, str reduce_op = "SUM", IntArray out_size = {0}) -> Tensor(out), Tensor(dst_count)
args : (Tensor x, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str reduce_op = "SUM")
output : Tensor(x_grad)
infer_meta :
func : GeneralUnaryGradInferMeta
param : [x]
kernel :
func : send_u_recv_grad
data_type : out_grad
optional: out, dst_count
- backward_op : send_ue_recv_grad
forward : send_ue_recv (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op="ADD", str reduce_op="SUM", IntArray out_size={0}) -> Tensor(out), Tensor(dst_count)
args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str message_op, str reduce_op)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : send_ue_recv_grad
data_type : out_grad
optional: out, dst_count
- backward_op : send_uv_grad
forward : send_uv (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op = "ADD") -> Tensor(out)
args: (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, Tensor out_grad, str message_op = "ADD")
@@ -1191,6 +1215,17 @@
func : sinh_grad
inplace : (out_grad -> x_grad)
- backward_op : slogdet_grad
forward : slogdet (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : GeneralUnaryGradInferMeta
param : [x]
kernel :
func : slogdet_grad
data_type : out_grad
- backward_op : softplus_grad
forward : softplus (Tensor x, float beta, float threshold) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float beta, float threshold)
......
@@ -1201,30 +1201,6 @@
data_type : x
optional : summed_ids
- backward_op : send_u_recv_grad
forward : send_u_recv (Tensor x, Tensor src_index, Tensor dst_index, str reduce_op = "SUM", IntArray out_size = {0}) -> Tensor(out), Tensor(dst_count)
args : (Tensor x, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str reduce_op = "SUM")
output : Tensor(x_grad)
infer_meta :
func : GeneralUnaryGradInferMeta
param : [x]
kernel :
func : send_u_recv_grad
data_type : out_grad
optional: out, dst_count
- backward_op : send_ue_recv_grad
forward : send_ue_recv (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op, str reduce_op, IntArray out_size) -> Tensor(out), Tensor(dst_count)
args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str message_op, str reduce_op)
output : Tensor(x_grad), Tensor(y_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param : [x, y]
kernel :
func : send_ue_recv_grad
data_type : out_grad
optional: out, dst_count
- backward_op : sigmoid_cross_entropy_with_logits_grad
forward : sigmoid_cross_entropy_with_logits (Tensor x, Tensor label, bool normalize, int ignore_index) -> Tensor(out)
args : (Tensor x, Tensor label, Tensor out_grad, bool normalize, int ignore_index)
@@ -1260,16 +1236,6 @@
backward : slice_double_grad
no_need_buffer : input
- backward_op : slogdet_grad
forward : slogdet (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out, Tensor out_grad)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : slogdet_grad
- backward_op : softmax_grad
forward : softmax (Tensor x, int axis) -> Tensor(out)
args : (Tensor out, Tensor out_grad, int axis)
......
@@ -1298,16 +1298,6 @@
kernel :
func : not_equal
- op : numel
args : (Tensor x)
output : Tensor(size)
infer_meta :
func : NumelInferMeta
kernel :
func : numel
data_transform:
skip_transform : x
- op : one_hot
args : (Tensor x, Scalar(int) num_classes)
output : Tensor(out)
@@ -1588,28 +1578,6 @@
data_type : x
backward : segment_pool_grad
- op : send_u_recv
args : (Tensor x, Tensor src_index, Tensor dst_index, str reduce_op = "SUM", IntArray out_size = {0})
output : Tensor(out), Tensor(dst_count)
infer_meta :
func : SendURecvInferMeta
kernel :
func : send_u_recv
data_type : x
intermediate : dst_count
backward : send_u_recv_grad
- op : send_ue_recv
args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op, str reduce_op, IntArray out_size)
output : Tensor(out), Tensor(dst_count)
infer_meta :
func : SendUERecvInferMeta
kernel :
func : send_ue_recv
data_type : x
intermediate : dst_count
backward : send_ue_recv_grad
- op : sgd_
args : (Tensor param, Tensor learning_rate, Tensor grad, Tensor master_param, bool multi_precision)
output : Tensor(param_out), Tensor(master_param_out)
@@ -1663,15 +1631,6 @@
func : slice
backward : slice_grad
- op : slogdet
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : slogdet
backward : slogdet_grad
- op : softmax
args : (Tensor x, int axis)
output : Tensor(out)
......
@@ -993,6 +993,12 @@
outputs :
{out : Out, total_weight : Total_weight}
- op : numel(size)
inputs :
x : Input
outputs :
size : Out
- op : overlap_add
backward : overlap_add_grad
inputs :
@@ -1215,6 +1221,28 @@
outputs :
out : Out
- op : send_u_recv(graph_send_recv)
backward : send_u_recv_grad(graph_send_recv_grad)
inputs :
{x : X, src_index : Src_index, dst_index : Dst_index}
outputs :
{out : Out, dst_count : Dst_count}
int_array :
out_size:
data_type : int64_t
tensor_name : Out_size
- op : send_ue_recv(graph_send_ue_recv)
backward : send_ue_recv_grad(graph_send_ue_recv_grad)
inputs :
{x : X, y : Y, src_index : Src_index, dst_index : Dst_index}
outputs :
{out : Out, dst_count : Dst_count}
int_array :
out_size:
data_type : int64_t
tensor_name : Out_size
- op : send_uv (graph_send_uv)
backward : send_uv_grad (graph_send_uv_grad)
@@ -1286,6 +1314,13 @@
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
- op : slogdet(slogdeterminant)
backward : slogdet_grad(slogdeterminant_grad)
inputs :
x : Input
outputs :
out : Out
- op : softmax
backward : softmax_grad
inputs :
......
@@ -871,6 +871,18 @@
kernel :
func : npu_identity
- op : numel
args : (Tensor x)
output : Tensor(size)
infer_meta :
func : NumelInferMeta
kernel :
func : numel
data_type : x
data_transform:
skip_transform : x
no_need_buffer : x
- op : overlap_add
args: (Tensor x, int hop_length, int axis=-1)
output: Tensor
@@ -1029,6 +1041,28 @@
func : selu
backward : selu_grad
- op : send_u_recv
args : (Tensor x, Tensor src_index, Tensor dst_index, str reduce_op = "SUM", IntArray out_size = {0})
output : Tensor(out), Tensor(dst_count)
infer_meta :
func : SendURecvInferMeta
kernel :
func : send_u_recv
data_type : x
intermediate : dst_count
backward : send_u_recv_grad
- op : send_ue_recv
args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op="ADD", str reduce_op="SUM", IntArray out_size={0})
output : Tensor(out), Tensor(dst_count)
infer_meta :
func : SendUERecvInferMeta
kernel :
func : send_ue_recv
data_type : x
intermediate : dst_count
backward : send_ue_recv_grad
- op : send_uv
args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op = "ADD")
output : Tensor(out)
@@ -1083,6 +1117,15 @@
func : sinh
backward : sinh_grad
- op : slogdet
args : (Tensor x)
output : Tensor
infer_meta :
func : UnchangedInferMeta
kernel :
func : slogdet
backward : slogdet_grad
- op : softplus
args : (Tensor x, float beta = 1.0, float threshold = 20.0f)
output : Tensor
......
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature GraphSendRecvOpArgumentMapping(
const ArgumentMappingContext& ctx) {
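// The Out_size input tensor takes priority over the out_size attribute.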
if (ctx.HasInput("Out_size")) {
return KernelSignature("send_u_recv",
{"X", "Src_index", "Dst_index"},
{"reduce_op", "Out_size"},
{"Out", "Dst_count"});
} else {
return KernelSignature("send_u_recv",
{"X", "Src_index", "Dst_index"},
{"reduce_op", "out_size"},
{"Out", "Dst_count"});
}
}
KernelSignature GraphSendRecvGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"send_u_recv_grad",
{"X", "Src_index", "Dst_index", "Out", "Dst_count", "Out@GRAD"},
{"reduce_op"},
{"X@GRAD"});
}
} // namespace phi
PD_REGISTER_BASE_KERNEL_NAME(graph_send_recv, send_u_recv);
PD_REGISTER_BASE_KERNEL_NAME(graph_send_recv_grad, send_u_recv_grad);
PD_REGISTER_ARG_MAPPING_FN(graph_send_recv,
phi::GraphSendRecvOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(graph_send_recv_grad,
phi::GraphSendRecvGradOpArgumentMapping);
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature GraphSendUERecvOpArgumentMapping(
const ArgumentMappingContext& ctx) {
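// Same priority rule: prefer the Out_size tensor when it is provided.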
if (ctx.HasInput("Out_size")) {
return KernelSignature("send_ue_recv",
{"X", "Y", "Src_index", "Dst_index"},
{"message_op", "reduce_op", "Out_size"},
{"Out", "Dst_count"});
} else {
return KernelSignature("send_ue_recv",
{"X", "Y", "Src_index", "Dst_index"},
{"message_op", "reduce_op", "out_size"},
{"Out", "Dst_count"});
}
}
KernelSignature GraphSendUERecvGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"send_ue_recv_grad",
{"X", "Y", "Src_index", "Dst_index", "Out", "Dst_count", "Out@GRAD"},
{"message_op", "reduce_op"},
{"X@GRAD", "Y@GRAD"});
}
} // namespace phi
PD_REGISTER_BASE_KERNEL_NAME(graph_send_ue_recv, send_ue_recv);
PD_REGISTER_BASE_KERNEL_NAME(graph_send_ue_recv_grad, send_ue_recv_grad);
PD_REGISTER_ARG_MAPPING_FN(graph_send_ue_recv,
phi::GraphSendUERecvOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(graph_send_ue_recv_grad,
phi::GraphSendUERecvGradOpArgumentMapping);
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
PD_REGISTER_BASE_KERNEL_NAME(size, numel);
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature SlogDeterminantGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"slogdet_grad", {"Input", "Out", "Out@GRAD"}, {}, {"Input@GRAD"});
}
} // namespace phi
PD_REGISTER_BASE_KERNEL_NAME(slogdeterminant, slogdet);
PD_REGISTER_BASE_KERNEL_NAME(slogdeterminant_grad, slogdet_grad);
PD_REGISTER_ARG_MAPPING_FN(slogdeterminant_grad,
phi::SlogDeterminantGradOpArgumentMapping);