diff --git a/python/paddle/hapi/model.py b/python/paddle/hapi/model.py
index fa8bd600bb28d8e99f5fbeed54db754274e592bf..6cd879c388c1f6f8d5914e144841e8678119cb45 100644
--- a/python/paddle/hapi/model.py
+++ b/python/paddle/hapi/model.py
@@ -887,10 +887,10 @@ class Model(object):
     AdamW and Momentum optimizer. Before using pure float16 training,
     `multi_precision` could be set to True when creating optimizer, which can
     avoid poor accuracy or slow convergence in a way, and inputs of dtype float
-    should be cast to float16 by users. Users should also use
-    `paddle.static.amp.fp16_guard` API to limit the range of pure float16
-    training, otherwise, 'use_fp16_guard' should be set to False by users.
-    However, limiting the range of is not supported during training using AMP.
+    should be cast to float16 by users. The `paddle.static.amp.fp16_guard` API
+    should also be used to limit the range of pure float16 training; otherwise,
+    'use_fp16_guard' should be set to False by users. However, limiting the
+    range of pure float16 training is not supported when training with AMP.
 
     Args:
         network (paddle.nn.Layer): The network is an instance of
@@ -974,7 +974,7 @@ class Model(object):
                 data = paddle.vision.datasets.MNIST(mode='train', transform=transform)
                 model.fit(data, epochs=2, batch_size=32, verbose=1)
 
-            # mixed precision training is only support on GPU now.
+            # mixed precision training is only supported on GPU now.
             if paddle.is_compiled_with_cuda():
                 run_example_code()
 
@@ -1462,19 +1462,18 @@ class Model(object):
                 float16 training is used, the key 'level' of 'amp_configs'
                 should be set to 'O1' or 'O2' respectively. Otherwise, the
                 value of 'level' defaults to 'O0', which means float32
-                training. In addition to 'level', users could pass in more
-                parameters consistent with mixed precision API. The supported
+                training. In addition to 'level', parameters consistent with
+                the mixed precision API could also be passed in. The supported
                 keys are: 'init_loss_scaling', 'incr_ratio', 'decr_ratio',
                 'incr_every_n_steps', 'decr_every_n_nan_or_inf',
                 'use_dynamic_loss_scaling', 'custom_white_list',
-                'custom_black_list', and 'custom_black_varnames'or
-                'use_fp16_guard' is only supported in static mode. Users could
-                refer to mixed precision API documentations
-                :ref:`api_paddle_amp_auto_cast` and
-                :ref:`api_paddle_amp_GradScaler` for details. For convenience,
-                'amp_configs' could be set to 'O1' or 'O2' if no more
-                parameters are needed. 'amp_configs' could be None in float32
-                training. Default: None.
+                'custom_black_list', and 'custom_black_varnames'.
+                'use_fp16_guard' is only supported in static mode. The mixed
+                precision API documentation :ref:`api_paddle_amp_auto_cast`
+                and :ref:`api_paddle_amp_GradScaler` could be referenced
+                for details. For convenience, 'amp_configs' could be set to
+                'O1' or 'O2' if no more parameters are needed. 'amp_configs'
+                could be None in float32 training. Default: None.
         Returns:
             None
         """
diff --git a/python/paddle/metric/metrics.py b/python/paddle/metric/metrics.py
index 0784775b6695eefb091bf0643a0b5c12d4b4664f..b939f548e9c01d7be836a321a876d2abac7b74e4 100644
--- a/python/paddle/metric/metrics.py
+++ b/python/paddle/metric/metrics.py
@@ -243,7 +243,7 @@ class Accuracy(Metric):
 
     def compute(self, pred, label, *args):
         """
-        Compute the top-k (maxinum value in `topk`) indices.
+        Compute the top-k (maximum value in `topk`) indices.
 
         Args:
             pred (Tensor): The predicted value is a Tensor with dtype
@@ -253,7 +253,7 @@ class Accuracy(Metric):
                 [batch_size, d0, ..., num_classes] in one hot representation.
 
         Return:
-            Tensor: Correct mask, a tensor with shape [batch_size, topk].
+            Tensor: Correct mask, a tensor with shape [batch_size, d0, ..., topk].
         """
         pred = paddle.argsort(pred, descending=True)
         pred = paddle.slice(
@@ -277,7 +277,7 @@ class Accuracy(Metric):
         returns the accuracy of current step.
 
         Args:
-            correct: Correct mask, a tensor with shape [batch_size, topk].
+            correct: Correct mask, a tensor with shape [batch_size, d0, ..., topk].
 
         Return:
             Tensor: the accuracy of current step.
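
Note for reviewers: a minimal sketch of how the 'amp_configs' options documented
above are passed to Model.prepare(). The network, optimizer settings, and key
values here are illustrative assumptions, not part of this patch:

    import paddle
    from paddle.static import InputSpec

    # mixed precision training is only supported on GPU now.
    paddle.set_device('gpu')

    net = paddle.nn.Sequential(paddle.nn.Flatten(), paddle.nn.Linear(784, 10))
    model = paddle.Model(net,
                         InputSpec([None, 1, 28, 28], 'float32', 'image'),
                         InputSpec([None, 1], 'int64', 'label'))

    # For pure float16 (O2) training, the docstring above recommends creating
    # the optimizer with multi_precision=True so a float32 master copy of the
    # parameters is kept.
    optim = paddle.optimizer.Momentum(learning_rate=0.01,
                                      parameters=model.parameters(),
                                      multi_precision=True)

    # 'amp_configs' can be just the level string ('O1'/'O2'), or a dict that
    # also carries the supported keys listed above, e.g. 'init_loss_scaling'.
    model.prepare(optim,
                  paddle.nn.CrossEntropyLoss(),
                  paddle.metric.Accuracy(),
                  amp_configs={'level': 'O1', 'init_loss_scaling': 128})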
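
Similarly, a small sketch of the corrected shape claim in Accuracy.compute():
when `pred` carries an intermediate dimension d0, the correct mask keeps it.
The random inputs are purely illustrative:

    import paddle

    m = paddle.metric.Accuracy(topk=(1,))
    # pred: [batch_size=2, d0=3, num_classes=4]; label holds class indices.
    pred = paddle.rand([2, 3, 4])
    label = paddle.randint(0, 4, [2, 3, 1])

    correct = m.compute(pred, label)
    print(correct.shape)      # [2, 3, 1] -> [batch_size, d0, ..., topk]
    print(m.update(correct))  # accuracy over all 2 * 3 predictions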