diff --git a/paddle/fluid/operators/metrics/auc_op.cc b/paddle/fluid/operators/metrics/auc_op.cc
index 1e21f3b33a095a33586933ac332f729f2f02dcad..7529523becaf8512315c7bd4297a24b7c0c70d8a
--- a/paddle/fluid/operators/metrics/auc_op.cc
+++ b/paddle/fluid/operators/metrics/auc_op.cc
@@ -14,6 +14,7 @@ limitations under the License. */
 #include "paddle/fluid/framework/infershape_utils.h"
 #include "paddle/fluid/framework/op_registry.h"
+#include "paddle/fluid/framework/op_version_registry.h"
 #include "paddle/phi/core/infermeta_utils.h"
 #include "paddle/phi/infermeta/multiary.h"
@@ -47,6 +48,10 @@ class AucOpMaker : public framework::OpProtoAndCheckerMaker {
     // TODO(typhoonzero): support weight input
     AddInput("StatPos", "Statistic value when label = 1");
     AddInput("StatNeg", "Statistic value when label = 0");
+    AddInput("InsTagWeight",
+             "(Tensor, optional) If provided, the AUC op uses it as the "
+             "instance tag weight: 1 means real data, 0 means fake data")
+        .AsDispensable();
     AddOutput("AUC",
               "A scalar representing the "
@@ -91,3 +96,10 @@ REGISTER_OP_WITHOUT_GRADIENT(auc,
                              ops::AucOp,
                              ops::AucOpMaker,
                              AucInferShapeFunctor);
+
+REGISTER_OP_VERSION(auc).AddCheckpoint(
+    R"ROC(
+  Upgrade auc, add a new input [InsTagWeight].
+  )ROC",
+    paddle::framework::compatible::OpVersionDesc().NewInput(
+        "InsTagWeight", "In order to support multi-tag task"));
diff --git a/paddle/phi/api/yaml/legacy_api.yaml b/paddle/phi/api/yaml/legacy_api.yaml
index be8602b74ac8581fe5cee28f74a6cda9fb59ef16..78184cf1da7b20ca6bbda51343200ead1220a0b7 100755
--- a/paddle/phi/api/yaml/legacy_api.yaml
+++ b/paddle/phi/api/yaml/legacy_api.yaml
@@ -300,12 +300,13 @@
 # auc
 - api : auc
-  args : (Tensor x, Tensor label, Tensor stat_pos, Tensor stat_neg, str curve, int num_thresholds, int slide_steps)
+  args : (Tensor x, Tensor label, Tensor stat_pos, Tensor stat_neg, Tensor ins_tag_weight, str curve, int num_thresholds, int slide_steps)
   output : Tensor(auc), Tensor(stat_pos_out), Tensor(stat_neg_out)
   infer_meta :
     func : AucInferMeta
   kernel :
     func : auc
+  optional : ins_tag_weight
 
 #average_accumulates
 - api : average_accumulates_
diff --git a/paddle/phi/infermeta/multiary.cc b/paddle/phi/infermeta/multiary.cc
index f89b13abc5214d190a982419e107be63793dc2e4..70177c05f0bc2aa52481c89b1f262a9c6b8da2b4 100644
--- a/paddle/phi/infermeta/multiary.cc
+++ b/paddle/phi/infermeta/multiary.cc
@@ -358,6 +358,7 @@ void AucInferMeta(const MetaTensor& input,
                   const MetaTensor& label,
                   const MetaTensor& stat_pos,
                   const MetaTensor& stat_neg,
+                  const MetaTensor& ins_tag_weight,
                   const std::string& curve,
                   int num_thresholds,
                   int slide_steps,
@@ -390,6 +391,7 @@ void AucInferMeta(const MetaTensor& input,
           "The Input(Label) has not been initialized properly. The "
           "shape of Input(Label) = [%s], the shape can not involes 0.",
           label_dims));
+
   if (config.is_runtime) {
     PADDLE_ENFORCE_LE(
         predict_width,
diff --git a/paddle/phi/infermeta/multiary.h b/paddle/phi/infermeta/multiary.h
index 98008a3ebd06738cfb43363379230c923473e46a..af9fea2d3ce871e16783e95e98dd3c86a8ad5093 100644
--- a/paddle/phi/infermeta/multiary.h
+++ b/paddle/phi/infermeta/multiary.h
@@ -126,6 +126,7 @@ void AucInferMeta(const MetaTensor& input,
                   const MetaTensor& label,
                   const MetaTensor& stat_pos,
                   const MetaTensor& stat_neg,
+                  const MetaTensor& ins_tag_weight,
                   const std::string& curve,
                   int num_thresholds,
                   int slide_steps,
diff --git a/paddle/phi/kernels/auc_kernel.h b/paddle/phi/kernels/auc_kernel.h
index f58c3ce112bd77069e67a865951cc67e1397b8c1..dd85b409786ebde52ca548f44ca05dce10a1ec73 100644
--- a/paddle/phi/kernels/auc_kernel.h
+++ b/paddle/phi/kernels/auc_kernel.h
@@ -27,6 +27,7 @@ void AucKernel(const Context& dev_ctx,
                const DenseTensor& label,
                const DenseTensor& stat_pos,
                const DenseTensor& stat_neg,
+               const paddle::optional<DenseTensor>& ins_tag_weight,
                const std::string& curve,
                int num_thresholds,
                int slide_steps,
diff --git a/paddle/phi/kernels/cpu/auc_kernel.cc b/paddle/phi/kernels/cpu/auc_kernel.cc
index bc25091de757d029ced9babab9bcc55f1d2a10a6..0cf85348e6a78b3bd58a3ff3e573e69034da0310 100644
--- a/paddle/phi/kernels/cpu/auc_kernel.cc
+++ b/paddle/phi/kernels/cpu/auc_kernel.cc
@@ -13,7 +13,7 @@
 // limitations under the License.
 
 #include "paddle/phi/kernels/auc_kernel.h"
-
+#include
 #include "paddle/phi/backends/cpu/cpu_context.h"
 #include "paddle/phi/core/kernel_registry.h"
@@ -29,7 +29,8 @@ void statAuc(const DenseTensor &label,
              const int num_thresholds,
              const int slide_steps,
              int64_t *origin_stat_pos,
-             int64_t *origin_stat_neg) {
+             int64_t *origin_stat_neg,
+             const bool is_fake_data) {
   size_t batch_size = predict.dims()[0];
   size_t inference_width = predict.dims()[1];
   const T *inference_data = predict.data<T>();
@@ -97,9 +98,13 @@ void statAuc(const DenseTensor &label,
       origin_stat_neg[cur_step_begin + binIdx] += 1;
     }
   }
-  for (int i = 0; i < bucket_length; ++i) {
-    origin_stat_pos[sum_step_begin + i] += origin_stat_pos[cur_step_begin + i];
-    origin_stat_neg[sum_step_begin + i] += origin_stat_neg[cur_step_begin + i];
+  if (!is_fake_data) {
+    for (int i = 0; i < bucket_length; ++i) {
+      origin_stat_pos[sum_step_begin + i] +=
+          origin_stat_pos[cur_step_begin + i];
+      origin_stat_neg[sum_step_begin + i] +=
+          origin_stat_neg[cur_step_begin + i];
+    }
   }
 }
@@ -136,6 +141,7 @@ void AucKernel(const Context &dev_ctx,
                const DenseTensor &label,
                const DenseTensor &stat_pos,
                const DenseTensor &stat_neg,
+               const paddle::optional<DenseTensor> &ins_tag_weight,
                const std::string &curve,
                int num_thresholds,
                int slide_steps,
@@ -153,6 +159,14 @@ void AucKernel(const Context &dev_ctx,
   auto *stat_neg_in_tensor = &stat_neg;
   auto *pos_in_data = stat_pos.data<int64_t>();
   auto *neg_in_data = stat_neg.data<int64_t>();
+  bool is_fake_data = false;
+  if (ins_tag_weight.get_ptr() != nullptr) {
+    const auto *ins_tag_weight_data = ins_tag_weight->data<float>();
+    VLOG(4) << "auc ins_tag_weight = " << ins_tag_weight_data[0];
+    if (ins_tag_weight_data[0] == 0) {
+      is_fake_data = true;
+    }
+  }
   if (stat_pos_in_tensor != stat_pos_out) {
     memcpy(
         origin_stat_pos,
@@ -167,12 +181,18 @@ void AucKernel(const Context &dev_ctx,
         ((1 + slide_steps) * (num_thresholds + 1) +
          (slide_steps > 0 ? 1 : 0)) * sizeof(int64_t));
   }
+
+  // when calculating the global AUC on fake data, just do nothing
+  if (slide_steps == 0 && is_fake_data) {
+    return;
+  }
   statAuc(label,
           input,
           num_thresholds,
           slide_steps,
           origin_stat_pos,
-          origin_stat_neg);
+          origin_stat_neg,
+          is_fake_data);
   int sum_offset = slide_steps * (num_thresholds + 1);
   calcAuc(origin_stat_pos + sum_offset,
diff --git a/paddle/phi/kernels/gpu/auc_kernel.cu b/paddle/phi/kernels/gpu/auc_kernel.cu
index 5a1bb9874fe19a21c46994b06779e6ed2ef5dc4f..44c0df3b52213fdd1ab8f764f7c33c13f14a05f8 100644
--- a/paddle/phi/kernels/gpu/auc_kernel.cu
+++ b/paddle/phi/kernels/gpu/auc_kernel.cu
@@ -123,7 +126,8 @@ void statAuc(const Context &dev_ctx,
              const int num_thresholds,
              const int slide_steps,
              int64_t *origin_stat_pos,
-             int64_t *origin_stat_neg) {
+             int64_t *origin_stat_neg,
+             const bool is_fake_data) {
   size_t batch_size = predict.dims()[0];
   size_t inference_width = predict.dims()[1];
   const T *inference_data = predict.data<T>();
@@ -172,12 +173,14 @@ void statAuc(const Context &dev_ctx,
       origin_stat_neg,
       batch_size,
       slide_steps);
-  UpdateSumDataKernel<<<(bucket_length + PADDLE_CUDA_NUM_THREADS - 1) /
-                            PADDLE_CUDA_NUM_THREADS,
-                        PADDLE_CUDA_NUM_THREADS,
-                        0,
-                        dev_ctx.stream()>>>(
-      origin_stat_pos, origin_stat_neg, bucket_length, slide_steps);
+  if (!is_fake_data) {
+    UpdateSumDataKernel<<<(bucket_length + PADDLE_CUDA_NUM_THREADS - 1) /
+                              PADDLE_CUDA_NUM_THREADS,
+                          PADDLE_CUDA_NUM_THREADS,
+                          0,
+                          dev_ctx.stream()>>>(
+        origin_stat_pos, origin_stat_neg, bucket_length, slide_steps);
+  }
 }
 
 template <typename T, typename Context>
@@ -186,6 +189,7 @@ void AucKernel(const Context &dev_ctx,
                const DenseTensor &label,
                const DenseTensor &stat_pos,
                const DenseTensor &stat_neg,
+               const paddle::optional<DenseTensor> &ins_tag_weight,
                const std::string &curve,
                int num_thresholds,
                int slide_steps,
@@ -202,6 +206,14 @@ void AucKernel(const Context &dev_ctx,
   auto *stat_neg_in_tensor = &stat_neg;
   auto *pos_in_data = stat_pos.data<int64_t>();
   auto *neg_in_data = stat_neg.data<int64_t>();
+  bool is_fake_data = false;
+  if (ins_tag_weight.get_ptr() != nullptr) {
+    const auto *ins_tag_weight_data = ins_tag_weight->data<float>();
+    if (ins_tag_weight_data[0] == 0) {
+      is_fake_data = true;
+    }
+  }
+
 #ifdef PADDLE_WITH_CUDA
   if (stat_pos_in_tensor != stat_pos_out) {
     cudaMemcpy(
@@ -238,13 +250,19 @@
   }
 #endif
 
+  // when calculating the global AUC on fake data, just do nothing
+  if (slide_steps == 0 && is_fake_data) {
+    return;
+  }
+
   statAuc(dev_ctx,
           label,
           input,
           num_thresholds,
           slide_steps,
           origin_stat_pos,
-          origin_stat_neg);
+          origin_stat_neg,
+          is_fake_data);
   int sum_offset = slide_steps * (num_thresholds + 1);
   CalcAucKernel<<<1, 1, 0, dev_ctx.stream()>>>(origin_stat_pos + sum_offset,
                                                origin_stat_neg + sum_offset,
diff --git a/paddle/phi/ops/compat/auc_sig.cc b/paddle/phi/ops/compat/auc_sig.cc
new file mode 100644
index 0000000000000000000000000000000000000000..47ae4c17bede8a306d2e5059693fd948f521725e
--- /dev/null
+++ b/paddle/phi/ops/compat/auc_sig.cc
@@ -0,0 +1,30 @@
+/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include "paddle/phi/core/compat/op_utils.h"
+
+namespace phi {
+
+// we have to return every specific KernelSignature for infrt now
+KernelSignature AucOpArgumentMapping(const ArgumentMappingContext& ctx) {
+  return KernelSignature(
+      "auc",
+      {"Predict", "Label", "StatPos", "StatNeg", "InsTagWeight"},
+      {"curve", "num_thresholds", "slide_steps"},
+      {"AUC", "StatPosOut", "StatNegOut"});
+}
+
+}  // namespace phi
+
+PD_REGISTER_ARG_MAPPING_FN(auc, phi::AucOpArgumentMapping);
diff --git a/python/paddle/fluid/contrib/layers/metric_op.py b/python/paddle/fluid/contrib/layers/metric_op.py
old mode 100644
new mode 100755
index 812f616ef9912b04ba672331adfd4ce4055ae92d..6f72086410a514f7e7732d1c9bb6d09a975597dc
--- a/python/paddle/fluid/contrib/layers/metric_op.py
+++ b/python/paddle/fluid/contrib/layers/metric_op.py
@@ -22,12 +22,12 @@ from paddle.fluid.layer_helper import LayerHelper
 from paddle.fluid.initializer import Normal, Constant
 from paddle.fluid.framework import Variable
 from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.layers import nn
+from paddle.fluid.layers import tensor
 
 __all__ = ['ctr_metric_bundle']
 
 
-def ctr_metric_bundle(input, label):
+def ctr_metric_bundle(input, label, ins_tag_weight=None):
     """
     ctr related metric layer
@@ -42,28 +42,48 @@ def ctr_metric_bundle(input, label):
     number first
 
     Args:
-        input(Variable): A floating-point 2D Variable, values are in the range
+        input(Tensor): A floating-point 2D Tensor, values are in the range
                          [0, 1]. Each row is sorted in descending order. This
                          input should be the output of topk. Typically, this
-                         Variable indicates the probability of each label.
-        label(Variable): A 2D int Variable indicating the label of the training
+                         Tensor indicates the probability of each label.
+        label(Tensor): A 2D int Tensor indicating the label of the training
                          data. The height is batch size and width is always 1.
+        ins_tag_weight(Tensor): A 2D Tensor indicating the instance tag weight of
+                         the training data, 1 means real data, 0 means fake data.
+                         A Tensor with type float32 or float64.
 
     Returns:
-        local_sqrerr(Variable): Local sum of squared error
-        local_abserr(Variable): Local sum of abs error
-        local_prob(Variable): Local sum of predicted ctr
-        local_q(Variable): Local sum of q value
+        local_sqrerr(Tensor): Local sum of squared error
+        local_abserr(Tensor): Local sum of abs error
+        local_prob(Tensor): Local sum of predicted ctr
+        local_q(Tensor): Local sum of q value
 
-    Examples:
+    Examples 1:
        .. code-block:: python
 
-        import paddle.fluid as fluid
-        data = fluid.layers.data(name="data", shape=[32, 32], dtype="float32")
-        label = fluid.layers.data(name="label", shape=[1], dtype="int32")
-        predict = fluid.layers.sigmoid(fluid.layers.fc(input=data, size=1))
-        auc_out = fluid.contrib.layers.ctr_metric_bundle(input=predict, label=label)
+        import paddle
+        paddle.enable_static()
+        data = paddle.static.data(name="data", shape=[32, 32], dtype="float32")
+        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int32")
+        predict = paddle.nn.functional.sigmoid(paddle.static.nn.fc(x=data, size=1))
+        auc_out = paddle.fluid.contrib.layers.ctr_metric_bundle(input=predict, label=label)
+    Examples 2:
+       .. code-block:: python
+
+        import paddle
+        paddle.enable_static()
+        data = paddle.static.data(name="data", shape=[32, 32], dtype="float32")
+        label = paddle.static.data(name="label", shape=[-1, 1], dtype="int32")
+        predict = paddle.nn.functional.sigmoid(paddle.static.nn.fc(x=data, size=1))
+        ins_tag_weight = paddle.static.data(name='ins_tag', shape=[-1,16], lod_level=0, dtype='float32')
+        auc_out = paddle.fluid.contrib.layers.ctr_metric_bundle(input=predict, label=label, ins_tag_weight=ins_tag_weight)
+
     """
+    if ins_tag_weight is None:
+        ins_tag_weight = tensor.fill_constant(shape=[1, 1],
+                                              dtype="float32",
+                                              value=1.0)
+
     assert input.shape == label.shape
     helper = LayerHelper("ctr_metric_bundle", **locals())
@@ -164,12 +184,6 @@ def ctr_metric_bundle(input, label):
     helper.append_op(type="reduce_sum",
                      inputs={"X": [tmp_res_sigmoid]},
                      outputs={"Out": [batch_q]})
-    helper.append_op(type="elementwise_add",
-                     inputs={
-                         "X": [batch_q],
-                         "Y": [local_q]
-                     },
-                     outputs={"Out": [local_q]})
 
     helper.append_op(type="reduce_sum",
                      inputs={"X": [label]},
@@ -192,6 +206,26 @@ def ctr_metric_bundle(input, label):
     helper.append_op(type="reduce_sum",
                      inputs={"X": [tmp_ones]},
                      outputs={"Out": [batch_ins_num]})
+
+    # if the data is fake (ins_tag_weight == 0), scale the batch statistics to 0
+    inputs_slice = {'Input': ins_tag_weight}
+    attrs = {'axes': [0]}
+    attrs['starts'] = [0]
+    attrs['ends'] = [1]
+    helper.append_op(type="slice",
+                     inputs=inputs_slice,
+                     attrs=attrs,
+                     outputs={"Out": ins_tag_weight})
+
+    axis = helper.kwargs.get('axis', 0)
+    helper.append_op(type="elementwise_mul",
+                     inputs={
+                         "X": [batch_ins_num],
+                         "Y": [ins_tag_weight]
+                     },
+                     outputs={"Out": [batch_ins_num]},
+                     attrs={'axis': axis})
+
     helper.append_op(type="elementwise_add",
                      inputs={
                          "X": [batch_ins_num],
@@ -199,4 +233,18 @@ def ctr_metric_bundle(input, label):
                      },
                      outputs={"Out": [local_ins_num]})
 
+    helper.append_op(type="elementwise_mul",
+                     inputs={
+                         "X": [batch_q],
+                         "Y": [ins_tag_weight]
+                     },
+                     outputs={"Out": [batch_q]},
+                     attrs={'axis': axis})
+    helper.append_op(type="elementwise_add",
+                     inputs={
+                         "X": [batch_q],
+                         "Y": [local_q]
+                     },
+                     outputs={"Out": [local_q]})
+
     return local_sqrerr, local_abserr, local_prob, local_q, local_pos_num, local_ins_num
diff --git a/python/paddle/fluid/layers/metric_op.py b/python/paddle/fluid/layers/metric_op.py
old mode 100644
new mode 100755
index 57b8411a54fd6ce0718103b7fc9f944f3cdbd719..7a4cd46dfc6d1399288727a28f44ea9c1fadd704
--- a/python/paddle/fluid/layers/metric_op.py
+++ b/python/paddle/fluid/layers/metric_op.py
@@ -24,6 +24,7 @@ from ..framework import Variable, _non_static_mode, _varbase_creator, _in_legacy
 from .. import core
 from ..param_attr import ParamAttr
 from . import nn
+from . import tensor
 from ..data_feeder import check_variable_and_dtype
 from paddle import _C_ops
@@ -34,48 +35,38 @@ def accuracy(input, label, k=1, correct=None, total=None):
     """
     accuracy layer.
     Refer to the https://en.wikipedia.org/wiki/Precision_and_recall
-
     This function computes the accuracy using the input and label.
     If the correct label occurs in top k predictions, then correct will increment by one.
     Note: the dtype of accuracy is determined by input. the input and label dtype can be different.
-
     Args:
-        input(Variable): The input of accuracy layer, which is the predictions of network. A LoDTensor or Tensor with type float32,float64.
+        input(Tensor): The input of accuracy layer, which is the predictions of network. A Tensor with type float32,float64.
             The shape is ``[sample_number, class_dim]`` .
-        label(Variable): The label of dataset. LoDTensor or Tensor with type int32,int64. The shape is ``[sample_number, 1]`` .
+        label(Tensor): The label of dataset. Tensor with type int32,int64. The shape is ``[sample_number, 1]`` .
         k(int): The top k predictions for each class will be checked. Data type is int64 or int32.
-        correct(Variable): The correct predictions count. A Tensor with type int64 or int32.
-        total(Variable): The total entries count. A tensor with type int64 or int32.
-
+        correct(Tensor): The correct predictions count. A Tensor with type int64 or int32.
+        total(Tensor): The total entries count. A tensor with type int64 or int32.
     Returns:
-        Variable: The correct rate. A Tensor with type float32.
-
+        Tensor: The correct rate. A Tensor with type float32.
     Examples:
         .. code-block:: python
-
            import numpy as np
-
            import paddle
            import paddle.static as static
            import paddle.nn.functional as F
-
            paddle.enable_static()
            data = static.data(name="input", shape=[-1, 32, 32], dtype="float32")
            label = static.data(name="label", shape=[-1,1], dtype="int")
            fc_out = static.nn.fc(x=data, size=10)
            predict = F.softmax(x=fc_out)
            result = static.accuracy(input=predict, label=label, k=5)
-
            place = paddle.CPUPlace()
            exe = static.Executor(place)
-
            exe.run(static.default_startup_program())
            x = np.random.rand(3, 32, 32).astype("float32")
            y = np.array([[1],[0],[1]])
            output= exe.run(feed={"input": x,"label": y},
                            fetch_list=[result[0]])
            print(output)
-
            #[array([0.], dtype=float32)]
     """
     if _non_static_mode():
@@ -133,8 +124,9 @@ def auc(input,
         curve='ROC',
         num_thresholds=2**12 - 1,
         topk=1,
-        slide_steps=1):
-    r"""
+        slide_steps=1,
+        ins_tag_weight=None):
+    """
     **Area Under the Curve (AUC) Layer**
     This implementation computes the AUC according to forward output and
     label.
@@ -150,57 +142,86 @@
     2. PR: Precision Recall
 
     Args:
-        input(Variable): A floating-point 2D Variable, values are in the range
+        input(Tensor): A floating-point 2D Tensor, values are in the range
                          [0, 1]. Each row is sorted in descending order. This
                          input should be the output of topk. Typically, this
-                         Variable indicates the probability of each label.
-                         A LoDTensor or Tensor with type float32,float64.
-        label(Variable): A 2D int Variable indicating the label of the training
+                         Tensor indicates the probability of each label.
+                         A Tensor with type float32,float64.
+        label(Tensor): A 2D int Tensor indicating the label of the training
                          data. The height is batch size and width is always 1.
-                         A LoDTensor or Tensor with type int32,int64.
+                         A Tensor with type int32,int64.
         curve(str): Curve type, can be 'ROC' or 'PR'. Default 'ROC'.
         num_thresholds(int): The number of thresholds to use when discretizing
-                             the roc curve. Default 200.
+                             the roc curve. Default 4095.
        topk(int): only topk number of prediction output will be used for auc.
        slide_steps: when calc batch auc, we can not only use step currently but the previous steps can be used. slide_steps=1 means use the current step, slide_steps=3 means use current step and the previous second steps, slide_steps=0 use all of the steps.
-
+       ins_tag_weight(Tensor): A 2D Tensor indicating the instance tag weight of the data, 1 means real data, 0 means fake data. Default None, in which case it is treated as a tensor of value 1.
+                         A Tensor with type float32 or float64.
     Returns:
-        Variable: A tuple representing the current AUC.
+        Tensor: A tuple representing the current AUC.
        The return tuple is auc_out, batch_auc_out, [
        batch_stat_pos, batch_stat_neg, stat_pos, stat_neg ]
        Data type is Tensor, supporting float32, float64.
-    Examples:
+    Examples 1:
        .. code-block:: python
+            import paddle
             import numpy as np
+            paddle.enable_static()
-            import paddle
-            import paddle.static as static
-            import paddle.nn.functional as F
+            data = paddle.static.data(name="input", shape=[-1, 32,32], dtype="float32")
+            label = paddle.static.data(name="label", shape=[-1], dtype="int")
+            fc_out = paddle.static.nn.fc(x=data, size=2)
+            predict = paddle.nn.functional.softmax(x=fc_out)
+            result=paddle.static.auc(input=predict, label=label)
+
+            place = paddle.CPUPlace()
+            exe = paddle.static.Executor(place)
+
+            exe.run(paddle.static.default_startup_program())
+            x = np.random.rand(3,32,32).astype("float32")
+            y = np.array([1,0,1])
+            output= exe.run(feed={"input": x,"label": y},
+                            fetch_list=[result[0]])
+            print(output)
+            #[array([0.5])]
+    Examples 2:
+        .. code-block:: python
+            import paddle
+            import numpy as np
             paddle.enable_static()
-            data = static.data(name="input", shape=[-1, 32,32], dtype="float32")
-            label = static.data(name="label", shape=[-1], dtype="int")
-            fc_out = static.nn.fc(x=data, size=2)
-            predict = F.softmax(x=fc_out)
-            result = static.auc(input=predict, label=label)
+
+            data = paddle.static.data(name="input", shape=[-1, 32,32], dtype="float32")
+            label = paddle.static.data(name="label", shape=[-1], dtype="int")
+            fc_out = paddle.static.nn.fc(x=data, size=2)
+            predict = paddle.nn.functional.softmax(x=fc_out)
+            ins_tag_weight = paddle.static.data(name='ins_tag', shape=[-1,16], lod_level=0, dtype='float32')
+            result=paddle.static.auc(input=predict, label=label, ins_tag_weight=ins_tag_weight)
             place = paddle.CPUPlace()
-            exe = static.Executor(place)
+            exe = paddle.static.Executor(place)
-            exe.run(static.default_startup_program())
+            exe.run(paddle.static.default_startup_program())
             x = np.random.rand(3,32,32).astype("float32")
             y = np.array([1,0,1])
             output= exe.run(feed={"input": x,"label": y},
-                fetch_list=[result[0]])
+                            fetch_list=[result[0]])
             print(output)
-            #[array([0.])]
+            #[array([0.5])]
     """
     helper = LayerHelper("auc", **locals())
+
+    if ins_tag_weight is None:
+        ins_tag_weight = tensor.fill_constant(shape=[1, 1],
+                                              dtype="float32",
+                                              value=1.0)
     check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'auc')
     check_variable_and_dtype(label, 'label', ['int32', 'int64'], 'auc')
+    check_variable_and_dtype(ins_tag_weight, 'ins_tag_weight',
+                             ['float32', 'float64'], 'auc')
     auc_out = helper.create_variable_for_type_inference(dtype="float64")
     batch_auc_out = helper.create_variable_for_type_inference(dtype="float64")
     # make tp, tn, fp, fn persistable, so that can accumulate all batches.
@@ -233,6 +254,7 @@ def auc(input,
         helper.set_variable_initializer(var, Constant(value=0.0,
                                                       force_cpu=False))
 
+    #"InsTagWeight": [ins_tag_weight]
     # Batch AUC
     helper.append_op(type="auc",
                      inputs={
diff --git a/python/paddle/fluid/tests/unittests/test_auc_op.py b/python/paddle/fluid/tests/unittests/test_auc_op.py
index c2c206905e3ac468340b5d9225cf7c277b478b9e..b966b01249b6704c91558aad3587178951961cc1 100644
--- a/python/paddle/fluid/tests/unittests/test_auc_op.py
+++ b/python/paddle/fluid/tests/unittests/test_auc_op.py
@@ -19,6 +19,7 @@ import numpy as np
 from op_test import OpTest
 from paddle.fluid import metrics
 import paddle.fluid as fluid
+import paddle
 
 
 class TestAucOp(OpTest):
@@ -107,6 +108,38 @@ class TestGlobalAucOp(OpTest):
         self.check_output()
 
 
+class TestAucAPI(unittest.TestCase):
+
+    def test_static(self):
+        paddle.enable_static()
+        data = paddle.static.data(name="input", shape=[-1, 1], dtype="float32")
+        label = paddle.static.data(name="label", shape=[4], dtype="int64")
+        ins_tag_weight = paddle.static.data(name="ins_tag_weight",
+                                            shape=[4],
+                                            dtype="float32")
+        result = paddle.static.auc(input=data,
+                                   label=label,
+                                   ins_tag_weight=ins_tag_weight)
+
+        place = paddle.CPUPlace()
+        exe = paddle.static.Executor(place)
+
+        exe.run(paddle.static.default_startup_program())
+
+        x = np.array([[0.0474], [0.5987], [0.7109], [0.9997]]).astype("float32")
+
+        y = np.array([0, 0, 1, 0]).astype('int64')
+        z = np.array([1, 1, 1, 1]).astype('float32')
+        output = exe.run(feed={
+            "input": x,
+            "label": y,
+            "ins_tag_weight": z
+        },
+                         fetch_list=[result[0]])
+        auc_np = np.array([0.66666667]).astype("float32")
+        self.assertTrue(np.allclose(output, auc_np))
+
+
 class TestAucOpError(unittest.TestCase):
 
     def test_errors(self):
@@ -115,7 +148,12 @@ class TestAucOpError(unittest.TestCase):
         def test_type1():
             data1 = fluid.data(name="input1", shape=[-1, 2], dtype="int")
             label1 = fluid.data(name="label1", shape=[-1], dtype="int")
-            result1 = fluid.layers.auc(input=data1, label=label1)
+            ins_tag_w1 = paddle.static.data(name="ins_tag_w1",
+                                            shape=[-1],
+                                            dtype="int")
+            result1 = paddle.static.auc(input=data1,
+                                        label=label1,
+                                        ins_tag_weight=ins_tag_w1)
 
         self.assertRaises(TypeError, test_type1)
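
Illustrative note, not part of the patch: the sketch below is a minimal NumPy mirror of the gating the kernels above add, using a hypothetical helper name (update_auc_stats). When the optional InsTagWeight marks a batch as fake (its first element is 0) and slide_steps is 0 (global AUC), the accumulated statistics are left untouched; the sliding-window bookkeeping of the real kernels is omitted for brevity.

    import numpy as np

    def update_auc_stats(preds, labels, stat_pos, stat_neg,
                         num_thresholds=4095, slide_steps=0, ins_tag_weight=None):
        # Hypothetical NumPy mirror of the global-AUC path of the AUC kernel.
        is_fake_data = ins_tag_weight is not None and ins_tag_weight[0] == 0
        # Global AUC (slide_steps == 0) on a fake batch: do nothing.
        if slide_steps == 0 and is_fake_data:
            return stat_pos, stat_neg
        for pred, label in zip(preds, labels):
            bin_idx = int(pred * num_thresholds)  # bucket index in [0, num_thresholds]
            if label:
                stat_pos[bin_idx] += 1
            else:
                stat_neg[bin_idx] += 1
        return stat_pos, stat_neg

    # Usage: a fake batch (tag weight 0) leaves the global statistics unchanged.
    stat_pos = np.zeros(4096, dtype=np.int64)
    stat_neg = np.zeros(4096, dtype=np.int64)
    update_auc_stats(np.array([0.7, 0.2]), np.array([1, 0]), stat_pos, stat_neg,
                     ins_tag_weight=np.array([0.0]))
    assert stat_pos.sum() == 0 and stat_neg.sum() == 0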