diff --git a/paddle/fluid/operators/cumsum_op.cc b/paddle/fluid/operators/cumsum_op.cc
index 7c80917a71369e81dbab855a2dc9e0f6c35777e0..11633fb0b870327f14e4454b3f94a43940a9df53 100644
--- a/paddle/fluid/operators/cumsum_op.cc
+++ b/paddle/fluid/operators/cumsum_op.cc
@@ -12,8 +12,11 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
 #include "paddle/fluid/framework/op_version_registry.h"
+#include "paddle/phi/core/infermeta_utils.h"
+#include "paddle/phi/infermeta/unary.h"
 
 namespace paddle {
 namespace operators {
 
@@ -21,17 +24,6 @@ namespace operators {
 class CumOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext *ctx) const override {
-    if (ctx->Attrs().Get<bool>("flatten")) {
-      ctx->SetOutputDim("Out",
-                        phi::make_ddim({phi::product(ctx->GetInputDim("X"))}));
-    } else {
-      ctx->SetOutputDim("Out", ctx->GetInputDim("X"));
-    }
-
-    ctx->ShareLoD("X", /*->*/ "Out");
-  }
 };
 
 class CumsumOpMaker : public framework::OpProtoAndCheckerMaker {
@@ -87,10 +79,12 @@ class CumsumGradMaker : public framework::SingleGradOpMaker<T> {
 
 namespace ops = paddle::operators;
 using CPU = paddle::platform::CPUDeviceContext;
-
+DECLARE_INFER_SHAPE_FUNCTOR(cumsum, CumsumInferShapeFunctor,
+                            PD_INFER_META(phi::CumsumInferMeta));
 REGISTER_OPERATOR(cumsum, ops::CumOp, ops::CumsumOpMaker,
                   ops::CumsumGradMaker<paddle::framework::OpDesc>,
-                  ops::CumsumGradMaker<paddle::imperative::OpBase>);
+                  ops::CumsumGradMaker<paddle::imperative::OpBase>,
+                  CumsumInferShapeFunctor);
 
 REGISTER_OP_VERSION(cumsum)
     .AddCheckpoint(
diff --git a/paddle/fluid/operators/log_loss_op.cc b/paddle/fluid/operators/log_loss_op.cc
index 2e596ff3e625735e5ff644560c9866f4f15a044a..883e3597d8a31138a6ff1e4cfcb05a165eafc4a6 100644
--- a/paddle/fluid/operators/log_loss_op.cc
+++ b/paddle/fluid/operators/log_loss_op.cc
@@ -13,7 +13,10 @@ See the License for the specific language governing permissions and
 limitations under the License. */
*/ #include +#include "paddle/fluid/framework/infershape_utils.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/phi/core/infermeta_utils.h" +#include "paddle/phi/infermeta/binary.h" namespace paddle { namespace operators { @@ -21,43 +24,6 @@ namespace operators { class LogLossOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - - void InferShape(framework::InferShapeContext* ctx) const override { - OP_INOUT_CHECK(ctx->HasInput("Predicted"), "Input", "Predicted", "LogLoss"); - OP_INOUT_CHECK(ctx->HasInput("Labels"), "Input", "Labels", "LogLoss"); - - auto pred_dims = ctx->GetInputDim("Predicted"); - auto label_dims = ctx->GetInputDim("Labels"); - - if (ctx->IsRuntime() || - (phi::product(pred_dims) > 0 && phi::product(label_dims) > 0)) { - PADDLE_ENFORCE_EQ( - pred_dims, label_dims, - platform::errors::InvalidArgument( - "The dimensions of Input(Predicted) must be equal to the" - "dimensions of Input(Labels), but received dimensions of " - "Input(Predicted)" - "is [%s], received dimensions of Input(Labels) is [%s].", - pred_dims, label_dims)); - } - PADDLE_ENFORCE_EQ(pred_dims.size(), 2, - platform::errors::InvalidArgument( - "The dimensions of Input(Predicted) must be 2," - "But received dimensions of Input(Predicted)" - "is [%d]", - pred_dims.size())); - if (ctx->IsRuntime()) { - PADDLE_ENFORCE_EQ( - pred_dims[1], 1, - platform::errors::InvalidArgument( - "Each row of Input(Predicted) contains a real value, " - "so the 2nd dimension of Input(X) must be 1," - "But got [%d]", - pred_dims[1])); - } - ctx->SetOutputDim("Loss", {pred_dims[0], 1}); - ctx->ShareLoD("Predicted", "Loss"); - } }; template @@ -145,7 +111,10 @@ class LogLossGradMaker : public framework::SingleGradOpMaker { } // namespace paddle namespace ops = paddle::operators; +DECLARE_INFER_SHAPE_FUNCTOR(log_loss, LogLossInferShapeFunctor, + PD_INFER_META(phi::LogLossInferMeta)); REGISTER_OPERATOR(log_loss, ops::LogLossOp, ops::LogLossOpMaker, ops::LogLossGradMaker, - ops::LogLossGradMaker); + ops::LogLossGradMaker, + LogLossInferShapeFunctor); REGISTER_OPERATOR(log_loss_grad, ops::LogLossGradOp); diff --git a/paddle/fluid/operators/metrics/auc_op.cc b/paddle/fluid/operators/metrics/auc_op.cc index 54ecba08a82dcea9482314cc0a26b3ce2d07ec4f..f3ed98c3f4d1e47a8b7dff81a998c7574859baa2 100644 --- a/paddle/fluid/operators/metrics/auc_op.cc +++ b/paddle/fluid/operators/metrics/auc_op.cc @@ -12,7 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "paddle/fluid/framework/infershape_utils.h" #include "paddle/fluid/framework/op_registry.h" +#include "paddle/phi/core/infermeta_utils.h" +#include "paddle/phi/infermeta/multiary.h" namespace paddle { namespace operators { @@ -21,70 +24,6 @@ class AucOp : public framework::OperatorWithKernel { public: using framework::OperatorWithKernel::OperatorWithKernel; - protected: - void InferShape(framework::InferShapeContext *ctx) const override { - OP_INOUT_CHECK(ctx->HasInput("Predict"), "Input", "Predict", "Auc"); - OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label", "Auc"); - auto predict_dims = ctx->GetInputDim("Predict"); - auto label_dims = ctx->GetInputDim("Label"); - PADDLE_ENFORCE_GE( - predict_dims.size(), 2, - platform::errors::InvalidArgument( - "The Input(Predict) has not been initialized properly. 
The " - "shape of Input(Predict) = [%s], the shape size must be " - "greater_equal 2.", - predict_dims)); - auto predict_width = predict_dims[1]; - PADDLE_ENFORCE_NE( - phi::product(predict_dims), 0, - platform::errors::InvalidArgument( - "The Input(Predict) has not been initialized properly. The " - "shape of Input(Predict) = [%s], the shape can not involes 0.", - predict_dims)); - PADDLE_ENFORCE_NE( - phi::product(label_dims), 0, - platform::errors::InvalidArgument( - "The Input(Label) has not been initialized properly. The " - "shape of Input(Label) = [%s], the shape can not involes 0.", - label_dims)); - if (ctx->IsRuntime()) { - PADDLE_ENFORCE_LE(predict_width, 2, - platform::errors::InvalidArgument( - "Only support binary classification," - "prediction dims[1] should be 1 or 2")); - } - auto predict_height = ctx->GetInputDim("Predict")[0]; - auto label_height = ctx->GetInputDim("Label")[0]; - - if (ctx->IsRuntime()) { - PADDLE_ENFORCE_EQ(predict_height, label_height, - platform::errors::InvalidArgument( - "Out and Label should have same height.")); - } - - int num_pred_buckets = ctx->Attrs().Get("num_thresholds") + 1; - int slide_steps = ctx->Attrs().Get("slide_steps"); - - PADDLE_ENFORCE_GE( - num_pred_buckets, 1, - platform::errors::InvalidArgument("num_thresholds must larger than 1")); - PADDLE_ENFORCE_GE(slide_steps, 0, - platform::errors::InvalidArgument( - "slide_steps must be natural number")); - - ctx->SetOutputDim("AUC", {1}); - - if (slide_steps) { - ctx->SetOutputDim("StatPosOut", - {(1 + slide_steps) * num_pred_buckets + 1}); - ctx->SetOutputDim("StatNegOut", - {(1 + slide_steps) * num_pred_buckets + 1}); - } else { - ctx->SetOutputDim("StatPosOut", {1, num_pred_buckets}); - ctx->SetOutputDim("StatNegOut", {1, num_pred_buckets}); - } - } - protected: framework::OpKernelType GetExpectedKernelType( const framework::ExecutionContext &ctx) const override { @@ -145,4 +84,7 @@ There are two types of possible curves: } // namespace paddle namespace ops = paddle::operators; -REGISTER_OP_WITHOUT_GRADIENT(auc, ops::AucOp, ops::AucOpMaker); +DECLARE_INFER_SHAPE_FUNCTOR(auc, AucInferShapeFunctor, + PD_INFER_META(phi::AucInferMeta)); +REGISTER_OP_WITHOUT_GRADIENT(auc, ops::AucOp, ops::AucOpMaker, + AucInferShapeFunctor); diff --git a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc index 8e502fc04dbdb06386839d1ebe63c91dc392a2d0..016ff54645b02e9b3ddfb67595d830ccf5dcfd94 100644 --- a/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc +++ b/paddle/fluid/operators/sigmoid_cross_entropy_with_logits_op.cc @@ -15,7 +15,10 @@ limitations under the License. 
 #include <memory>
 #include <string>
 #include <vector>
+#include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/phi/core/infermeta_utils.h"
+#include "paddle/phi/infermeta/binary.h"
 
 namespace paddle {
 namespace operators {
@@ -26,46 +29,6 @@ const int kIgnoreIndex = -100;
 class SigmoidCrossEntropyWithLogitsOp : public framework::OperatorWithKernel {
  public:
   using framework::OperatorWithKernel::OperatorWithKernel;
-
-  void InferShape(framework::InferShapeContext* ctx) const override {
-    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X",
-                   "SigmoidCrossEntropyWithLogitsOp");
-    OP_INOUT_CHECK(ctx->HasInput("Label"), "Input", "Label",
-                   "SigmoidCrossEntropyWithLogitsOp");
-    OP_INOUT_CHECK(ctx->HasOutput("Out"), "Output", "Out",
-                   "SigmoidCrossEntropyWithLogitsOp");
-
-    auto x_dims = ctx->GetInputDim("X");
-    auto labels_dims = ctx->GetInputDim("Label");
-
-    int rank = x_dims.size();
-    PADDLE_ENFORCE_EQ(rank, labels_dims.size(),
-                      platform::errors::InvalidArgument(
-                          "Input(X) and Input(Label) shall have the same rank."
-                          "But received: the rank of Input(X) is [%d], "
-                          "the rank of Input(Label) is [%d].",
-                          rank, labels_dims.size()));
-
-    bool check = true;
-    if ((!ctx->IsRuntime()) &&
-        (phi::product(x_dims) <= 0 || phi::product(labels_dims) <= 0)) {
-      check = false;
-    }
-
-    if (check) {
-      PADDLE_ENFORCE_EQ(
-          phi::slice_ddim(x_dims, 0, rank),
-          phi::slice_ddim(labels_dims, 0, rank),
-          platform::errors::InvalidArgument(
-              "Input(X) and Input(Label) shall have the same shape "
-              "except the last dimension. But received: the shape of "
-              "Input(X) is [%s], the shape of Input(Label) is [%s].",
-              x_dims, labels_dims));
-    }
-
-    ctx->ShareDim("X", /*->*/ "Out");
-    ctx->ShareLoD("X", /*->*/ "Out");
-  }
 };
 
 class SigmoidCrossEntropyWithLogitsGradOp
@@ -201,12 +164,17 @@ DECLARE_INPLACE_OP_INFERER(SigmoidCrossEntropyWithLogitsGradInplaceInferer,
 }  // namespace paddle
 
 namespace ops = paddle::operators;
+DECLARE_INFER_SHAPE_FUNCTOR(
+    sigmoid_cross_entropy_with_logits,
+    SigmoidCrossEntropyWithLogitsInferShapeFunctor,
+    PD_INFER_META(phi::SigmoidCrossEntropyWithLogitsInferMeta));
 REGISTER_OPERATOR(
     sigmoid_cross_entropy_with_logits, ops::SigmoidCrossEntropyWithLogitsOp,
     ops::SigmoidCrossEntropyWithLogitsOpMaker,
     ops::SigmoidCrossEntropyWithLogitsGradOpMaker<paddle::framework::OpDesc>,
    ops::SigmoidCrossEntropyWithLogitsGradOpMaker<paddle::imperative::OpBase>,
-    ops::SigmoidCrossEntropyWithLogitsInplaceInferer);
+    ops::SigmoidCrossEntropyWithLogitsInplaceInferer,
+    SigmoidCrossEntropyWithLogitsInferShapeFunctor);
 REGISTER_OPERATOR(sigmoid_cross_entropy_with_logits_grad,
                   ops::SigmoidCrossEntropyWithLogitsGradOp,
                   ops::SigmoidCrossEntropyWithLogitsGradInplaceInferer);
diff --git a/paddle/phi/infermeta/binary.cc b/paddle/phi/infermeta/binary.cc
index 55230aa8d0516543dfc734d3c05c5955bc48248d..b17405990fb722981964560fb74c05e8f091446e 100644
--- a/paddle/phi/infermeta/binary.cc
+++ b/paddle/phi/infermeta/binary.cc
@@ -575,6 +575,48 @@ void GatherTreeMeta(const MetaTensor& ids,
   out->set_dims(ids_dims);
 }
 
+void LogLossInferMeta(const MetaTensor& input,
+                      const MetaTensor& label,
+                      float epsilon,
+                      MetaTensor* out,
+                      MetaConfig config) {
+  auto pred_dims = input.dims();
+  auto label_dims = label.dims();
+
+  if (config.is_runtime ||
+      (phi::product(pred_dims) > 0 && phi::product(label_dims) > 0)) {
+    PADDLE_ENFORCE_EQ(
+        pred_dims,
+        label_dims,
+        phi::errors::InvalidArgument(
+            "The dimensions of Input(Predicted) must be equal to the "
+            "dimensions of Input(Labels), but received dimensions of "
+            "Input(Predicted) "
+            "is [%s], received dimensions of Input(Labels) is [%s].",
+            pred_dims,
+            label_dims));
+  }
+  PADDLE_ENFORCE_EQ(pred_dims.size(),
+                    2,
+                    phi::errors::InvalidArgument(
+                        "The dimensions of Input(Predicted) must be 2, "
+                        "but received dimensions of Input(Predicted) "
+                        "is [%d].",
+                        pred_dims.size()));
+  if (config.is_runtime) {
+    PADDLE_ENFORCE_EQ(pred_dims[1],
+                      1,
+                      phi::errors::InvalidArgument(
+                          "Each row of Input(Predicted) contains a real value, "
+                          "so the 2nd dimension of Input(Predicted) must be 1, "
+                          "but got [%d].",
+                          pred_dims[1]));
+  }
+  out->set_dims({pred_dims[0], 1});
+  out->set_dtype(input.dtype());
+  out->share_lod(input);
+}
+
 void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out) {
   auto dim_x = x.dims();
   auto dim_vec = vec.dims();
@@ -605,4 +647,45 @@
   out->share_lod(x);
 }
 
+void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
+                                            const MetaTensor& label,
+                                            bool normalize,
+                                            int ignore_index,
+                                            MetaTensor* out,
+                                            MetaConfig config) {
+  auto x_dims = x.dims();
+  auto labels_dims = label.dims();
+  int rank = x_dims.size();
+  PADDLE_ENFORCE_EQ(rank,
+                    labels_dims.size(),
+                    phi::errors::InvalidArgument(
+                        "Input(X) and Input(Label) shall have the same rank. "
+                        "But received: the rank of Input(X) is [%d], "
+                        "the rank of Input(Label) is [%d].",
+                        rank,
+                        labels_dims.size()));
+
+  bool check = true;
+  if ((!config.is_runtime) &&
+      (phi::product(x_dims) <= 0 || phi::product(labels_dims) <= 0)) {
+    check = false;
+  }
+
+  if (check) {
+    PADDLE_ENFORCE_EQ(
+        phi::slice_ddim(x_dims, 0, rank),
+        phi::slice_ddim(labels_dims, 0, rank),
+        phi::errors::InvalidArgument(
+            "Input(X) and Input(Label) shall have the same shape "
+            "except the last dimension. But received: the shape of "
+            "Input(X) is [%s], the shape of Input(Label) is [%s].",
+            x_dims,
+            labels_dims));
+  }
+
+  out->set_dims(x_dims);
+  out->set_dtype(x.dtype());
+  out->share_lod(x);
+}
+
 }  // namespace phi
diff --git a/paddle/phi/infermeta/binary.h b/paddle/phi/infermeta/binary.h
index 106c22f7548bd4c73df373141a7e292411e0dc43..934ed688bf2df3be185d527ef258b9a175fe780d 100644
--- a/paddle/phi/infermeta/binary.h
+++ b/paddle/phi/infermeta/binary.h
@@ -89,6 +89,7 @@ void BincountInferMeta(const MetaTensor& x,
                        const paddle::optional<const MetaTensor&> weights,
                        int minlength,
                        MetaTensor* out);
+
 void DistInferMeta(const MetaTensor& x,
                    const MetaTensor& y,
                    float p,
@@ -102,6 +103,19 @@ void GatherTreeMeta(const MetaTensor& ids,
                     const MetaTensor& parents,
                     MetaTensor* out);
 
+void LogLossInferMeta(const MetaTensor& input,
+                      const MetaTensor& label,
+                      float epsilon,
+                      MetaTensor* out,
+                      MetaConfig config = MetaConfig());
+
 void MvInferMeta(const MetaTensor& x, const MetaTensor& vec, MetaTensor* out);
 
+void SigmoidCrossEntropyWithLogitsInferMeta(const MetaTensor& x,
+                                            const MetaTensor& label,
+                                            bool normalize,
+                                            int ignore_index,
+                                            MetaTensor* out,
+                                            MetaConfig config = MetaConfig());
+
 }  // namespace phi
diff --git a/paddle/phi/infermeta/multiary.cc b/paddle/phi/infermeta/multiary.cc
index a21f077c09f09dfa493a8f5e6eceb5a71cf99e0a..acce40713b82159e9e6fd902a30c8b269c6c4e52 100644
--- a/paddle/phi/infermeta/multiary.cc
+++ b/paddle/phi/infermeta/multiary.cc
@@ -28,6 +28,86 @@ std::vector<DDim> GetMetaTensorsDim(const std::vector<MetaTensor>& tensors) {
   return dims;
 }
 
+void AucInferMeta(const MetaTensor& input,
+                  const MetaTensor& label,
+                  const MetaTensor& stat_pos,
+                  const MetaTensor& stat_neg,
+                  const std::string& curve,
+                  int num_thresholds,
+                  int slide_steps,
+                  MetaTensor* auc,
+                  MetaTensor* stat_pos_out,
+                  MetaTensor* stat_neg_out,
+                  MetaConfig config) {
+  auto predict_dims = input.dims();
+  auto label_dims = label.dims();
+  PADDLE_ENFORCE_GE(
+      predict_dims.size(),
+      2,
+      phi::errors::InvalidArgument(
+          "The Input(Predict) has not been initialized properly. The "
+          "shape of Input(Predict) = [%s], the shape size must be "
+          "greater_equal 2.",
+          predict_dims));
+  auto predict_width = predict_dims[1];
+  PADDLE_ENFORCE_NE(
+      phi::product(predict_dims),
+      0,
+      phi::errors::InvalidArgument(
+          "The Input(Predict) has not been initialized properly. The "
+          "shape of Input(Predict) = [%s], the shape can not involve 0.",
+          predict_dims));
+  PADDLE_ENFORCE_NE(
+      phi::product(label_dims),
+      0,
+      phi::errors::InvalidArgument(
+          "The Input(Label) has not been initialized properly. The "
+          "shape of Input(Label) = [%s], the shape can not involve 0.",
+          label_dims));
+  if (config.is_runtime) {
+    PADDLE_ENFORCE_LE(
+        predict_width,
+        2,
+        phi::errors::InvalidArgument("Only support binary classification, "
+                                     "prediction dims[1] should be 1 or 2"));
+  }
+  auto predict_height = input.dims()[0];
+  auto label_height = label.dims()[0];
+
+  if (config.is_runtime) {
+    PADDLE_ENFORCE_EQ(
+        predict_height,
+        label_height,
+        phi::errors::InvalidArgument("Out and Label should have the same height."));
+  }
+
+  int num_pred_buckets = num_thresholds + 1;
+
+  PADDLE_ENFORCE_GE(
+      num_pred_buckets,
+      1,
+      phi::errors::InvalidArgument("num_thresholds must be larger than 1"));
+  PADDLE_ENFORCE_GE(
+      slide_steps,
+      0,
+      phi::errors::InvalidArgument("slide_steps must be a natural number"));
+
+  auc->set_dims({1});
+  auc->set_dtype(DataType::INT64);
+
+  if (slide_steps) {
+    stat_pos_out->set_dims({(1 + slide_steps) * num_pred_buckets + 1});
+    stat_pos_out->set_dtype(DataType::INT64);
+    stat_neg_out->set_dims({(1 + slide_steps) * num_pred_buckets + 1});
+    stat_neg_out->set_dtype(DataType::INT64);
+  } else {
+    stat_pos_out->set_dims({1, num_pred_buckets});
+    stat_pos_out->set_dtype(DataType::INT64);
+    stat_neg_out->set_dims({1, num_pred_buckets});
+    stat_neg_out->set_dtype(DataType::INT64);
+  }
+}
+
 void AdamaxInferMeta(const MetaTensor& param,
                      const MetaTensor& grad,
                      const MetaTensor& learning_rate,
diff --git a/paddle/phi/infermeta/multiary.h b/paddle/phi/infermeta/multiary.h
index 8cb6f70481de3160c7eb0a7d6633ba76905a5c41..26bdc62302f18ad011fd3ab74f4b2dd708d4c1ef 100644
--- a/paddle/phi/infermeta/multiary.h
+++ b/paddle/phi/infermeta/multiary.h
@@ -20,6 +20,18 @@ namespace phi {
 
 std::vector<DDim> GetMetaTensorsDim(const std::vector<MetaTensor>& tensors);
 
+void AucInferMeta(const MetaTensor& input,
+                  const MetaTensor& label,
+                  const MetaTensor& stat_pos,
+                  const MetaTensor& stat_neg,
+                  const std::string& curve,
+                  int num_thresholds,
+                  int slide_steps,
+                  MetaTensor* auc,
+                  MetaTensor* stat_pos_out,
+                  MetaTensor* stat_neg_out,
+                  MetaConfig config = MetaConfig());
+
 void BilinearTensorProductInferMeta(const MetaTensor& x,
                                     const MetaTensor& y,
                                     const MetaTensor& weight,
diff --git a/paddle/phi/infermeta/unary.cc b/paddle/phi/infermeta/unary.cc
index b9eb5196b1e8f18382269e8854ab013222269af3..4053cfbc362e3fb4ed5ba74de1666df2d13111fe 100644
--- a/paddle/phi/infermeta/unary.cc
+++ b/paddle/phi/infermeta/unary.cc
@@ -156,6 +156,24 @@ void CreateLikeInferMeta(const MetaTensor& x, DataType dtype, MetaTensor* out) {
   out->set_layout(x.layout());
 }
 
+void CumsumInferMeta(const MetaTensor& x,
+                     int axis,
+                     bool flatten,
+                     bool exclusive,
+                     bool reverse,
+                     MetaTensor* out) {
+  auto x_dims = x.dims();
+  if (flatten) {
+    out->set_dims(phi::make_ddim({phi::product(x_dims)}));
+    out->set_dtype(x.dtype());
+  } else {
+    out->set_dims(x_dims);
+    out->set_dtype(x.dtype());
+  }
+
+  out->share_lod(x);
+}
+
 void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out) {
   PADDLE_ENFORCE_EQ(
       product(x.dims()),
diff --git a/paddle/phi/infermeta/unary.h b/paddle/phi/infermeta/unary.h
index 37b17f6e3d1822e78a93233a937523573d3b1cf2..a679ef8c11af6ae59640b409ae17a93494e14f7d 100644
--- a/paddle/phi/infermeta/unary.h
+++ b/paddle/phi/infermeta/unary.h
@@ -63,6 +63,13 @@ void CopyToInferMeta(const MetaTensor& x,
 
 void CreateLikeInferMeta(const MetaTensor& x, DataType dtype, MetaTensor* out);
 
+void CumsumInferMeta(const MetaTensor& x,
+                     int axis,
+                     bool flatten,
+                     bool exclusive,
+                     bool reverse,
+                     MetaTensor* out);
+
 void IncrementInferMeta(const MetaTensor& x, float value, MetaTensor* out);
 
 void InferMetaFromVecValue(const MetaTensor& x,