diff --git a/python/paddle/fluid/metrics.py b/python/paddle/fluid/metrics.py
index a533e430c95986e21ec518ff3d59922b0def541d..43c83e6c68b7f42bb8410d7c1a08c7d4fb1d2134 100644
--- a/python/paddle/fluid/metrics.py
+++ b/python/paddle/fluid/metrics.py
@@ -57,23 +57,48 @@ def _is_number_or_matrix_(var):
 class MetricBase(object):
     """
-    Base Class for all Metrics.
-    MetricBase define a group of interfaces for the
-    model evaluation methods. Metrics accumulate metric states between
-    consecutive minibatches, at every minibatch, use update
-    interface to add current minibatch value to global states.
-    Use eval to compute accumative metric value from last reset()
-    or from scratch on.
-    If you need to custom a new metric, please inherit from MetricBase and
-    custom implementation.
+    Usually, the test data has to be split into mini-batches when evaluating
+    deep neural networks, so we need to collect the evaluation results of each
+    mini-batch and aggregate them into the final result. paddle.fluid.metrics is
+    designed as a convenient toolkit for deep neural network evaluation.
 
-    Args:
-        name(str): The name of metric instance. such as, "accuracy".
-                  It needed if you want to distinct different metrics in a model.
+    The paddle.fluid.metrics module contains several different evaluation metrics,
+    such as precision and recall, and most of them have the following functions:
+
+    1. take the prediction results and the corresponding labels of a mini-batch as
+       input, then compute the evaluation result for that mini-batch.
+
+    2. aggregate the existing evaluation results as the overall performance.
+
+    The class MetricBase is the base class for all classes in paddle.fluid.metrics;
+    it defines the fundamental APIs shared by all metric classes, including:
+
+    1. update(preds, labels): given the prediction results (preds) and the labels
+       (labels) of a mini-batch, compute the evaluation result of that mini-batch
+       and memorize it.
+
+    2. eval(): aggregate all existing evaluation results in the memory, and return
+       the overall performance across different mini-batches.
+
+    3. reset(): empty the memory.
     """
 
     def __init__(self, name):
+        """
+        The constructor of the metric class.
+
+        Args:
+            name(str): The name of the metric instance, such as "accuracy".
+                It can be used to distinguish different metric instances in a model.
+
+        Returns:
+            The constructed metric instance.
+
+        Return types:
+            MetricBase or one of its subclasses.
+
+        """
         self._name = str(name) if name != None else self.__class__.__name__
 
     def __str__(self):
@@ -81,10 +106,17 @@ class MetricBase(object):
 
     def reset(self):
         """
-        reset clear the states of metrics. By default, the states
-        are the members who do not has _ prefix, reset set them to inital states.
-        If you violate the implicit name rule, please also custom the reset
-        interface.
+        The reset function empties the evaluation memory of previous mini-batches.
+
+        Args:
+            None
+
+        Returns:
+            None
+
+        Return types:
+            None
+
         """
         states = {
             attr: value
@@ -110,7 +142,10 @@ class MetricBase(object):
             None
 
         Returns:
-            dict: a dict of metric and states
+            a python dict, which contains the inner states of the metric instance
+
+        Return types:
+            a python dict
         """
         states = {
             attr: value
@@ -123,23 +158,38 @@ class MetricBase(object):
 
     def update(self, preds, labels):
         """
-        Updates the metric states at every minibatch.
-        One user can compute the minibatch metric via pure Python, or
-        via a c++ operator.
+        Given the prediction results (preds) and the labels (labels)
+        of a mini-batch, compute the evaluation result of that mini-batch
+        and memorize it. Please note that update() only memorizes the
+        evaluation result and does not return it. If you want to get the
+        evaluation result, please call the eval() function.
 
         Args:
             preds(numpy.array): the predictions of current minibatch
-            labels(numpy.array): the labels of current minibatch, if the label is one-hot
-                               or soft-label, should custom the corresponding update rule.
+            labels(numpy.array): the labels of current minibatch.
+
+        Returns:
+            None
+
+        Return types:
+            None
+
         """
         raise NotImplementedError(
             "Should not use it directly, please extend it.")
 
     def eval(self):
         """
-        Evalute the current metrics based the accumulated states.
+        Aggregate all existing evaluation results in the memory, and return
+        the overall performance across different mini-batches.
+
+        Args:
+            None
 
         Returns:
+            The overall performance across different mini-batches.
+
+        Return types:
             float|list(float)|numpy.array: the metrics via Python.
         """
         raise NotImplementedError(
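The update/eval/reset contract described above can be made concrete with a short sketch. The MeanAbsoluteError class below is a hypothetical example, not part of this patch; it assumes the default reset() behavior of restoring non-underscore numeric members to zero.

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    class MeanAbsoluteError(fluid.metrics.MetricBase):
        # A hypothetical custom metric built on the MetricBase contract.

        def __init__(self, name=None):
            super(MeanAbsoluteError, self).__init__(name)
            # States without a leading underscore are zeroed by the default reset().
            self.total_error = 0.0
            self.sample_num = 0

        def update(self, preds, labels):
            # Memorize the mini-batch result; update() does not return the score.
            self.total_error += float(np.abs(preds - labels).sum())
            self.sample_num += labels.shape[0]

        def eval(self):
            # Aggregate the memorized results across all mini-batches.
            return self.total_error / self.sample_num if self.sample_num else 0.0

    metric = MeanAbsoluteError()
    metric.update(preds=np.array([[0.5], [1.0]]), labels=np.array([[1.0], [1.0]]))
    print(metric.eval())  # (0.5 + 0.0) / 2 = 0.25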
@@ -148,12 +198,17 @@ class MetricBase(object):
 
 class CompositeMetric(MetricBase):
     """
-    Composite multiple metrics in one instance.
-    for example, merge F1, accuracy, recall into one Metric.
+    This class creates a container that holds a group of metrics.
+    After the metrics are added, calling eval() will compute all of the contained metrics automatically.
+    CAUTION: only metrics with the SAME argument list can be added to a CompositeMetric instance.
+
+    Inherits from: MetricBase
+
+    Args:
+        name (str, optional): Metric name. For details, please refer to :ref:`api_guide_Name`. Default is None.
 
     Examples:
         .. code-block:: python
-
             import paddle.fluid as fluid
             import numpy as np
             preds = [[0.1], [0.7], [0.8], [0.9], [0.2],
@@ -162,16 +217,13 @@ class CompositeMetric(MetricBase):
                      [0], [0], [0], [0], [0]]
             preds = np.array(preds)
             labels = np.array(labels)
-
             comp = fluid.metrics.CompositeMetric()
             precision = fluid.metrics.Precision()
             recall = fluid.metrics.Recall()
             comp.add_metric(precision)
             comp.add_metric(recall)
-
             comp.update(preds=preds, labels=labels)
             numpy_precision, numpy_recall = comp.eval()
-
             print("expect precision: %.2f, got %.2f" % ( 3. / 5, numpy_precision ) )
             print("expect recall: %.2f, got %.2f" % (3. / 4, numpy_recall ) )
     """
@@ -182,10 +234,11 @@ class CompositeMetric(MetricBase):
 
     def add_metric(self, metric):
         """
-        add one metric instance to CompositeMetric.
+        Add a new metric to the container. Note that the argument list
+        of the added metric should be consistent with those of the existing ones.
 
         Args:
-            metric: a instance of MetricBase.
+            metric(MetricBase): an instance of MetricBase
         """
         if not isinstance(metric, MetricBase):
             raise ValueError("SubMetric should be inherit from MetricBase.")
@@ -193,22 +246,22 @@ class CompositeMetric(MetricBase):
 
     def update(self, preds, labels):
         """
-        Update every metrics in sequence.
+        Update all metrics in the container with the current mini-batch.
 
         Args:
-            preds(numpy.array): the predictions of current minibatch
-            labels(numpy.array): the labels of current minibatch, if the label is one-hot
-                               or soft-label, should custom the corresponding update rule.
+            preds(numpy.array): predicted results of the current mini-batch, whose shape and dtype should meet the requirements of the corresponding metrics.
+            labels(numpy.array): ground truth of the current mini-batch, whose shape and dtype should meet the requirements of the corresponding metrics.
""" for m in self._metrics: m.update(preds, labels) def eval(self): """ - Evaluate every metrics in sequence. + Calculate the results of all metrics sequentially. Returns: - list(float|numpy.array): a list of metrics value in Python. + list: results of all added metrics. + The shape and dtype of each result depend on the defination of its metric. """ ans = [] for m in self._metrics: @@ -219,10 +272,13 @@ class CompositeMetric(MetricBase): class Precision(MetricBase): """ Precision (also called positive predictive value) is the fraction of - relevant instances among the retrieved instances. + relevant instances among the retrieved instances. Refer to https://en.wikipedia.org/wiki/Evaluation_of_binary_classifiers - This class mangages the precision score for binary classification task. + Noted that this class mangages the precision score only for binary classification task. + + Args: + name (str, optional): Metric name. For details, please refer to :ref:`api_guide_Name`. Default is None. Examples: .. code-block:: python @@ -246,7 +302,7 @@ class Precision(MetricBase): metric.update(preds=preds, labels=labels) numpy_precision = metric.eval() - print("expct precision: %.2f and got %.2f" % ( 3.0 / 5.0, numpy_precision)) + print("expect precision: %.2f and got %.2f" % ( 3.0 / 5.0, numpy_precision)) """ def __init__(self, name=None): @@ -255,6 +311,17 @@ class Precision(MetricBase): self.fp = 0 # false positive def update(self, preds, labels): + """ + Update the precision based on the current mini-batch prediction results . + + Args: + preds(numpy.ndarray): prediction results of current mini-batch, + the output of two-class sigmoid function. + Shape: [batch_size, 1]. Dtype: 'float64' or 'float32'. + labels(numpy.ndarray): ground truth (labels) of current mini-batch, + the shape should keep the same as preds. + Shape: [batch_size, 1], Dtype: 'int32' or 'int64'. + """ if not _is_numpy_(preds): raise ValueError("The 'preds' must be a numpy ndarray.") if not _is_numpy_(labels): @@ -272,6 +339,12 @@ class Precision(MetricBase): self.fp += 1 def eval(self): + """ + Calculate the final precision. + + Returns: + float: Results of the calculated Precision. Scalar output with float dtype. + """ ap = self.tp + self.fp return float(self.tp) / ap if ap != 0 else .0 @@ -282,9 +355,13 @@ class Recall(MetricBase): relevant instances that have been retrieved over the total amount of relevant instances + Refer to: https://en.wikipedia.org/wiki/Precision_and_recall - This class mangages the recall score for binary classification task. + Noted that this class mangages the recall score only for binary classification task. + + Args: + name (str, optional): Metric name. For details, please refer to :ref:`api_guide_Name`. Default is None. Examples: .. code-block:: python @@ -306,9 +383,9 @@ class Recall(MetricBase): labels = np.array(labels) metric.update(preds=preds, labels=labels) - numpy_precision = metric.eval() + numpy_recall = metric.eval() - print("expct precision: %.2f and got %.2f" % ( 3.0 / 4.0, numpy_precision)) + print("expect recall: %.2f and got %.2f" % ( 3.0 / 4.0, numpy_recall)) """ def __init__(self, name=None): @@ -317,6 +394,17 @@ class Recall(MetricBase): self.fn = 0 # false negtive def update(self, preds, labels): + """ + Update the recall based on the current mini-batch prediction results. + + Args: + preds(numpy.array): prediction results of current mini-batch, + the output of two-class sigmoid function. + Shape: [batch_size, 1]. Dtype: 'float64' or 'float32'. 
+            labels(numpy.array): ground truth (labels) of the current mini-batch,
+                whose shape should keep the same as preds.
+                Shape: [batch_size, 1]. Dtype: 'int32' or 'int64'.
+        """
         if not _is_numpy_(preds):
             raise ValueError("The 'preds' must be a numpy ndarray.")
         if not _is_numpy_(labels):
@@ -334,17 +422,24 @@ class Recall(MetricBase):
                     self.fn += 1
 
     def eval(self):
+        """
+        Calculate the final recall.
+
+        Returns:
+            float: the calculated recall. Scalar output with float dtype.
+        """
         recall = self.tp + self.fn
         return float(self.tp) / recall if recall != 0 else .0
 
 
 class Accuracy(MetricBase):
     """
-    Calculate the mean accuracy over multiple batches.
+    This interface is used to calculate the mean accuracy over multiple batches.
+    An Accuracy object has two states: value and weight. The definition of Accuracy is available at
     https://en.wikipedia.org/wiki/Accuracy_and_precision
 
     Args:
-        name: the metrics name
+        name (str, optional): Metric name. For details, please refer to :ref:`api_guide_Name`. Default is None.
 
     Examples:
         .. code-block:: python
@@ -381,11 +476,15 @@ class Accuracy(MetricBase):
 
     def update(self, value, weight):
         """
-        Update minibatch states.
+        This function takes the minibatch states (value, weight) as input,
+        and accumulates them into the corresponding states of the Accuracy object.
+        The update rule is:
+
+        .. math::
+            \\begin{array}{l}
+            self.value += value \\cdot weight \\\\
+            self.weight += weight
+            \\end{array}
 
         Args:
             value(float|numpy.array): accuracy of one minibatch.
-            weight(int|float): batch size.
+            weight(int|float): minibatch size.
         """
         if not _is_number_or_matrix_(value):
             raise ValueError(
@@ -399,7 +498,11 @@ class Accuracy(MetricBase):
 
     def eval(self):
         """
-        Return the mean accuracy (float or numpy.array) for all accumulated batches.
+        This function returns the mean accuracy (float or numpy.array) for all accumulated minibatches.
+
+        Returns:
+            float or numpy.array: mean accuracy for all accumulated minibatches.
+
         """
         if self.weight == 0:
             raise ValueError("There is no data in Accuracy Metrics. \
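The update rule above makes eval() a weighted mean of per-minibatch accuracies. A quick sketch with made-up numbers, assuming eval() divides the accumulated value by the accumulated weight as the rule implies:

.. code-block:: python

    import paddle.fluid as fluid

    acc = fluid.metrics.Accuracy()
    # Mini-batch 1: accuracy 0.8 over 30 samples; mini-batch 2: accuracy 0.9 over 10.
    acc.update(value=0.8, weight=30)
    acc.update(value=0.9, weight=10)
    # (0.8 * 30 + 0.9 * 10) / (30 + 10) = 0.825
    print(acc.eval())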
@@ -412,11 +515,16 @@ class ChunkEvaluator(MetricBase):
     Accumulate counter numbers output by chunk_eval from mini-batches and
     compute the precision recall and F1-score using the accumulated counter
     numbers.
+    ChunkEvaluator has three states: num_infer_chunks, num_label_chunks and num_correct_chunks,
+    which correspond to the number of inferred chunks, the number of labeled chunks, and the number of correctly identified chunks.
     For some basics of chunking, please refer to
-    `Chunking with Support Vector Machines `_ .
+    `Chunking with Support Vector Machines `_ .
     ChunkEvalEvaluator computes the precision, recall, and F1-score of chunk
     detection, and supports IOB, IOE, IOBES and IO (also known as plain) tagging
     schemes.
 
+    Args:
+        name (str, optional): Metric name. For details, please refer to :ref:`api_guide_Name`. Default is None.
+
     Examples:
         .. code-block:: python
@@ -454,7 +562,11 @@ class ChunkEvaluator(MetricBase):
 
     def update(self, num_infer_chunks, num_label_chunks, num_correct_chunks):
         """
-        Update the states based on the layers.chunk_eval() ouputs.
+        This function takes (num_infer_chunks, num_label_chunks, num_correct_chunks) as input,
+        and accumulates them into the corresponding states of the ChunkEvaluator object.
+        The update rule is:
+
+        .. math::
+            \\begin{array}{l}
+            self.num\\_infer\\_chunks += num\\_infer\\_chunks \\\\
+            self.num\\_label\\_chunks += num\\_label\\_chunks \\\\
+            self.num\\_correct\\_chunks += num\\_correct\\_chunks
+            \\end{array}
 
         Args:
             num_infer_chunks(int|numpy.array): The number of chunks in Inference on the given minibatch.
@@ -479,6 +591,13 @@ class ChunkEvaluator(MetricBase):
         self.num_correct_chunks += num_correct_chunks
 
     def eval(self):
+        """
+        This function returns the mean precision, recall and f1 score for all accumulated minibatches.
+
+        Returns:
+            tuple: mean precision, recall and f1 score.
+
+        """
         precision = float(
             self.num_correct_chunks
         ) / self.num_infer_chunks if self.num_infer_chunks else 0
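The three counters fully determine the scores: precision is num_correct_chunks / num_infer_chunks, recall is num_correct_chunks / num_label_chunks, and F1 is their harmonic mean. A small sketch with made-up counter values (in practice they come from the outputs of fluid.layers.chunk_eval), assuming eval() returns the scores in (precision, recall, f1) order as its docstring describes:

.. code-block:: python

    import paddle.fluid as fluid

    metric = fluid.metrics.ChunkEvaluator()
    # Two mini-batches with made-up chunk counts.
    metric.update(num_infer_chunks=10, num_label_chunks=8, num_correct_chunks=7)
    metric.update(num_infer_chunks=10, num_label_chunks=8, num_correct_chunks=7)
    precision, recall, f1 = metric.eval()
    print(precision)  # 14 / 20 = 0.7
    print(recall)     # 14 / 16 = 0.875
    print(f1)         # 2 * 0.7 * 0.875 / (0.7 + 0.875) ~= 0.778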
@@ -491,21 +610,14 @@ class ChunkEvaluator(MetricBase):
 
 class EditDistance(MetricBase):
     """
-    Edit distance is a way of quantifying how dissimilar two strings
-    (e.g., words) are to each another by counting the minimum number
-    of edit operations (add, remove or replace) required to transform
-    one string into the other.
-    Refer to https://en.wikipedia.org/wiki/Edit_distance
-
-    This EditDistance class takes two inputs by using update function:
-    1. distances: a (batch_size, 1) numpy.array, each element represents the
-       edit distance between two sequences.
-    2. seq_num: a int|float value, standing for the number of sequence pairs.
-
-    and returns the overall edit distance of multiple sequence-pairs.
+    This API is for the management of edit distances.
+    Edit distance is a method to quantify the degree of dissimilarity
+    between two strings, such as words, by counting the minimum number of edit
+    operations (add, remove or replace) required to transform one string into another.
+    Refer to https://en.wikipedia.org/wiki/Edit_distance.
 
     Args:
-        name: the metrics name
+        name (str, optional): Metric name. For details, please refer to :ref:`api_guide_Name`. Default is None.
 
     Examples:
         .. code-block:: python
@@ -556,10 +668,8 @@ class EditDistance(MetricBase):
         Update the overall edit distance
 
         Args:
-            distances: a (batch_size, 1) numpy.array, each element represents the
-                edit distance between two sequences.
-            seq_num: a int|float value, standing for the number of sequence pairs.
-
+            distances(numpy.array): a (batch_size, 1) numpy.array, each element represents the edit distance between two sequences.
+            seq_num(int|float): the number of sequence pairs.
         """
         if not _is_numpy_(distances):
             raise ValueError("The 'distances' must be a numpy ndarray.")
@@ -589,7 +699,7 @@ class EditDistance(MetricBase):
 
 class Auc(MetricBase):
     """
     The auc metric is for binary classification.
-    Refer to https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve
+    Refer to https://en.wikipedia.org/wiki/Receiver_operating_characteristic#Area_under_the_curve.
     Please notice that the auc metric is implemented with python, which may be a little bit slow.
     If you concern the speed, please use the fluid.layers.auc instead.
@@ -602,9 +712,8 @@ class Auc(MetricBase):
     computed using the height of the precision values by the recall.
 
     Args:
-        name: metric name
-        curve: Specifies the name of the curve to be computed, 'ROC' [default] or
-              'PR' for the Precision-Recall-curve.
+        name (str, optional): Metric name. For details, please refer to :ref:`api_guide_Name`. Default is None.
+        curve (str): Specifies the name of the curve to be computed, 'ROC' [default] or 'PR' for the Precision-Recall-curve.
 
     "NOTE: only implement the ROC curve type via Python now."
@@ -645,13 +754,11 @@ class Auc(MetricBase):
 
     def update(self, preds, labels):
         """
-        Update the auc curve with the given predictions and labels
+        Update the auc curve with the given predictions and labels.
 
         Args:
-            preds: an numpy array in the shape of (batch_size, 2), preds[i][j] denotes the probability
-                   of classifying the instance i into the class j.
-            labels: an numpy array in the shape of (batch_size, 1), labels[i] is either o or 1, representing
-                    the label of the instance i.
+            preds (numpy.array): a numpy array in the shape of (batch_size, 2), preds[i][j] denotes the probability of classifying instance i into class j.
+            labels (numpy.array): a numpy array in the shape of (batch_size, 1), labels[i] is either 0 or 1, representing the label of instance i.
         """
         if not _is_numpy_(labels):
             raise ValueError("The 'labels' must be a numpy ndarray.")
@@ -674,6 +781,9 @@ class Auc(MetricBase):
 
     def eval(self):
         """
         Return the area (a float score) under auc curve
+
+        Returns:
+            float: the area under the auc curve
         """
         tot_pos = 0.0
         tot_neg = 0.0
@@ -864,7 +974,6 @@ class DetectionMAP(object):
 
     def reset(self, executor, reset_program=None):
         """
         Reset metric states at the begin of each pass/user specified batch.
-
         Args:
             executor(Executor): a executor for executing the reset_program.
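For the Auc metric documented above, a minimal usage sketch; the probabilities are made up, and the constructor arguments follow the Args section of its docstring:

.. code-block:: python

    import numpy as np
    import paddle.fluid as fluid

    auc = fluid.metrics.Auc(name="auc", curve='ROC')
    # Column 0 holds the negative-class probability, column 1 the positive-class one.
    preds = np.array([[0.2, 0.8], [0.6, 0.4], [0.3, 0.7], [0.9, 0.1]])
    labels = np.array([[1], [0], [1], [0]])
    auc.update(preds=preds, labels=labels)
    print(auc.eval())  # 1.0 here, since every positive outranks every negative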