Unverified commit 5bfcb501, authored by Ryan, committed by GitHub

[xdoctest][task 125-137] reformat example code with google style in static/* (#56121)

* LGTM

* [Doctest]fix No.125-137, test=docs_preview

* [Doctest]fix No.125-137, test=docs_preview

* fix codestyle

* resolve conversation

* change Example

* fix example format...

* fix my stupid fp16

* fix codestyle
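
For reference, the conversion applied across these files follows the pattern sketched below: a plain code-block example inside a docstring is rewritten as a Google-style doctest, with `>>>` for statements, `...` for continuation lines, and expected output placed directly under the statement that produces it, so the xdoctest-based checks named in the commit title can execute the example. The `scale` function here is only an illustrative placeholder, not code from this PR.

    def scale(x, factor=2.0):
        """Multiply ``x`` by ``factor``.

        Examples:
            .. code-block:: python

                >>> import paddle
                >>> x = paddle.to_tensor([1.0, 2.0])
                >>> y = scale(x, 2.0)
                >>> print(y.numpy())
                [2. 4.]
        """
        return x * factor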
Parent 861a16ce
@@ -35,10 +35,11 @@ class AutoMixedPrecisionListsBF16:
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.enable_static()
            >>> with paddle.static.amp.bf16.bf16_guard():
            ...     paddle.static.amp.bf16.AutoMixedPrecisionListsBF16(custom_fp32_list={'lstm'})
    """
    def __init__(
...
@@ -230,18 +230,18 @@ def bf16_guard():
    Examples:
        .. code-block:: python

            >>> import numpy as np
            >>> import paddle
            >>> import paddle.nn.functional as F
            >>> paddle.enable_static()
            >>> data = paddle.static.data(name='X', shape=[None, 1, 28, 28], dtype='float32')
            >>> conv2d = paddle.static.nn.conv2d(input=data, num_filters=6, filter_size=3)
            >>> with paddle.static.amp.bf16.bf16_guard():
            ...     bn = paddle.static.nn.batch_norm(input=conv2d, act="relu")
            ...     pool = F.max_pool2d(bn, kernel_size=2, stride=2)
            ...     hidden = paddle.static.nn.fc(pool, size=10)
            ...     loss = paddle.mean(hidden)
    """
    with framework.name_scope(prefix=_bf16_guard_pattern):
        yield
...
@@ -135,42 +135,44 @@ class OptimizerWithMixedPrecision:
    Examples:
        .. code-block:: python

            >>> import numpy as np
            >>> import paddle
            >>> import paddle.nn.functional as F
            >>> paddle.enable_static()
            >>> def run_example_code():
            ...     place = paddle.CPUPlace()
            ...     exe = paddle.static.Executor(place)
            ...     data = paddle.static.data(name='X', shape=[None, 1, 28, 28], dtype='float32')
            ...     conv2d = paddle.static.nn.conv2d(input=data, num_filters=6, filter_size=3)
            ...     # 1) Use bf16_guard to control the range of bf16 kernels used.
            ...     with paddle.static.amp.bf16.bf16_guard():
            ...         bn = paddle.static.nn.batch_norm(input=conv2d, act="relu")
            ...         pool = F.max_pool2d(bn, kernel_size=2, stride=2)
            ...         hidden = paddle.static.nn.fc(pool, size=10)
            ...         loss = paddle.mean(hidden)
            ...     # 2) Create the optimizer and set `multi_precision` to True.
            ...     # Setting `multi_precision` to True can avoid the poor accuracy
            ...     # or the slow convergence in a way.
            ...     optimizer = paddle.optimizer.Momentum(learning_rate=0.01, multi_precision=True)
            ...     # 3) These ops in `custom_black_list` will keep in the float32 computation type.
            ...     amp_list = paddle.static.amp.CustomOpLists(
            ...         custom_black_list=['pool2d'])
            ...     # 4) The entry of Paddle AMP.
            ...     # Enable pure bf16 training by setting `use_pure_bf16` to True.
            ...     optimizer = paddle.static.amp.bf16.decorate_bf16(
            ...         optimizer,
            ...         amp_list,
            ...         use_pure_bf16=True)
            ...     # If you don't use the default_startup_program(), you should pass
            ...     # your defined `startup_program` into `minimize`.
            ...     optimizer.minimize(loss)
            ...     exe.run(paddle.static.default_startup_program())
            ...     # 5) Use `amp_init` after FP32 parameters initialization (such as `exe.run(startup_program)`).
            ...     # If you want to perform the testing process, you should pass `test_program` into `amp_init`.
            ...     optimizer.amp_init(place, scope=paddle.static.global_scope())
            >>> run_example_code()
    """
    assert (
@@ -263,63 +265,68 @@ def decorate_bf16(
        An optimizer acting like a normal one but with mixed-precision training
        enabled.

    Examples:
        .. code-block:: python
            :name: example-1

            # fp32&bf16 list based strategy example
            >>> import paddle
            >>> import paddle.static as static
            >>> paddle.enable_static()
            >>> data = static.data(name='X', shape=[None, 1], dtype='float32')
            >>> hidden = static.nn.fc(x=data, size=10)
            >>> loss = paddle.mean(hidden)
            >>> optimizer = paddle.optimizer.Adam(learning_rate=0.001)
            >>> mp_optimizer = static.amp.bf16.decorate_bf16(optimizer=optimizer)
            >>> ops, param_grads = mp_optimizer.minimize(loss)

        .. code-block:: python
            :name: example-2

            # pure bf16 training example
            >>> import numpy as np
            >>> import paddle
            >>> import paddle.nn.functional as F
            >>> def run_example_code():
            ...     place = paddle.CPUPlace()
            ...     exe = paddle.static.Executor(place)
            ...     data = paddle.static.data(name='X', shape=[None, 1, 28, 28], dtype='float32')
            ...     conv2d = paddle.static.nn.conv2d(input=data, num_filters=6, filter_size=3)
            ...     # 1) Use bf16_guard to control the range of bf16 kernels used.
            ...     with paddle.static.amp.bf16.bf16_guard():
            ...         bn = paddle.static.nn.batch_norm(input=conv2d, act="relu")
            ...         pool = F.max_pool2d(bn, kernel_size=2, stride=2)
            ...         hidden = paddle.static.nn.fc(pool, size=10)
            ...         loss = paddle.mean(hidden)
            ...     # 2) Create the optimizer and set `multi_precision` to True.
            ...     # Setting `multi_precision` to True can avoid the poor accuracy
            ...     # or the slow convergence in a way.
            ...     optimizer = paddle.optimizer.Momentum(learning_rate=0.01, multi_precision=True)
            ...     # 3) These ops in `custom_black_list` will keep in the float32 computation type.
            ...     amp_list = paddle.static.amp.CustomOpLists(
            ...         custom_black_list=['pool2d'])
            ...     # 4) The entry of Paddle AMP.
            ...     # Enable pure bf16 training by setting `use_pure_bf16` to True.
            ...     optimizer = paddle.static.amp.bf16.decorate_bf16(
            ...         optimizer,
            ...         amp_list,
            ...         use_pure_bf16=True)
            ...     # If you don't use the default_startup_program(), you should pass
            ...     # your defined `startup_program` into `minimize`.
            ...     optimizer.minimize(loss)
            ...     exe.run(paddle.static.default_startup_program())
            ...     # 5) Use `amp_init` after FP32 parameters initialization (such as `exe.run(startup_program)`).
            ...     # If you want to perform the testing process, you should pass `test_program` into `amp_init`.
            ...     optimizer.amp_init(place, scope=paddle.static.global_scope())
            >>> run_example_code()
    """
    if amp_lists is None:
...
@@ -203,65 +203,64 @@ def collect_operator_stats(program=None, print_subblocks=False):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.enable_static()
            >>> class SimpleConvNet(paddle.nn.Layer):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self.conv = paddle.nn.Conv2D(in_channels=1, out_channels=6, kernel_size=3)
            ...         self.linear = paddle.nn.Linear(in_features=26, out_features=10)
            ...
            ...     def forward(self, x):
            ...         out = self.conv(x)
            ...         out = paddle.nn.functional.relu(out)
            ...         out = self.linear(out)
            ...         out = paddle.nn.functional.softmax(out)
            ...         return out
            >>> main_program = paddle.static.Program()
            >>> startup_program = paddle.static.Program()
            >>> with paddle.utils.unique_name.guard():
            ...     with paddle.static.program_guard(main_program, startup_program):
            ...         model = SimpleConvNet()
            ...         x = paddle.static.data(
            ...             name='input', shape=[None, 1, 28, 28], dtype='float32'
            ...         )
            ...         out = model(x)
            ...         loss = paddle.mean(out)
            ...         optimizer = paddle.optimizer.AdamW()
            ...         optimizer = paddle.static.amp.decorate(optimizer)
            ...         optimizer.minimize(loss)
            >>> paddle.static.amp.debugging.collect_operator_stats(main_program)
            <-------------------------- op list of all blocks --------------------------->
            <---------------------------------- op list ---------------------------------->
            <------- Op Name ------- | - FP16 Calls - | - BF16 Calls - | - FP32 Calls - | - Other Calls ->
            adamw                    |  0 |  0 |  4 |  0
            cast                     |  5 |  0 |  6 |  0
            check_finite_and_unscale |  0 |  0 |  1 |  0
            conv2d                   |  1 |  0 |  0 |  0
            conv2d_grad              |  1 |  0 |  0 |  0
            elementwise_add          |  2 |  0 |  0 |  0
            elementwise_add_grad     |  2 |  0 |  0 |  0
            elementwise_mul          |  0 |  0 |  1 |  0
            elementwise_mul_grad     |  0 |  0 |  1 |  0
            fill_constant            |  0 |  0 |  1 |  0
            matmul_v2                |  1 |  0 |  0 |  0
            matmul_v2_grad           |  1 |  0 |  0 |  0
            memcpy                   |  0 |  0 |  0 |  1
            reduce_mean              |  0 |  0 |  1 |  0
            reduce_mean_grad         |  0 |  0 |  1 |  0
            relu                     |  1 |  0 |  0 |  0
            relu_grad                |  1 |  0 |  0 |  0
            reshape2                 |  0 |  0 |  1 |  0
            reshape2_grad            |  0 |  0 |  1 |  0
            softmax                  |  0 |  0 |  1 |  0
            softmax_grad             |  0 |  0 |  1 |  0
            update_loss_scaling      |  0 |  0 |  1 |  0
            <------------------------------ op count: 22 --------------------------------->
    """
    def _convert_to_list(op_stats_unit_dict):
...
@@ -316,47 +316,48 @@ class OptimizerWithMixedPrecision:
    Examples:
        .. code-block:: python

            >>> import numpy as np
            >>> import paddle
            >>> import paddle.nn.functional as F
            >>> paddle.enable_static()
            >>> # doctest: +REQUIRES(env:GPU)
            >>> def run_example_code():
            ...     place = paddle.CUDAPlace(0)
            ...     exe = paddle.static.Executor(place)
            ...     data = paddle.static.data(name='X', shape=[None, 1, 28, 28], dtype='float32')
            ...     conv2d = paddle.static.nn.conv2d(input=data, num_filters=6, filter_size=3)
            ...     # 1) Use fp16_guard to control the range of fp16 kernels used.
            ...     with paddle.static.amp.fp16_guard():
            ...         bn = paddle.static.nn.batch_norm(input=conv2d, act="relu")
            ...         pool = F.max_pool2d(bn, kernel_size=2, stride=2)
            ...         hidden = paddle.static.nn.fc(pool, size=10)
            ...         loss = paddle.mean(hidden)
            ...     # 2) Create the optimizer and set `multi_precision` to True.
            ...     # Setting `multi_precision` to True can avoid the poor accuracy
            ...     # or the slow convergence in a way.
            ...     optimizer = paddle.optimizer.Momentum(learning_rate=0.01, multi_precision=True)
            ...     # 3) These ops in `custom_black_list` will keep in the float32 computation type.
            ...     amp_list = paddle.static.amp.CustomOpLists(
            ...         custom_black_list=['pool2d'])
            ...     # 4) The entry of Paddle AMP.
            ...     # Enable pure fp16 training by setting `use_pure_fp16` to True.
            ...     optimizer = paddle.static.amp.decorate(
            ...         optimizer,
            ...         amp_list,
            ...         init_loss_scaling=128.0,
            ...         use_dynamic_loss_scaling=True,
            ...         use_pure_fp16=True)
            ...     # If you don't use the default_startup_program(), you should pass
            ...     # your defined `startup_program` into `minimize`.
            ...     optimizer.minimize(loss)
            ...     exe.run(paddle.static.default_startup_program())
            ...     # 5) Use `amp_init` after FP32 parameters initialization (such as `exe.run(startup_program)`).
            ...     # If you want to perform the testing process, you should pass `test_program` into `amp_init`.
            ...     optimizer.amp_init(place, scope=paddle.static.global_scope())
            >>> if paddle.is_compiled_with_cuda() and len(paddle.static.cuda_places()) > 0:
            ...     run_example_code()
    """
    assert (
        self._train_program is not None
@@ -712,70 +713,74 @@ def decorate(
        An optimizer acting like a normal one but with mixed-precision training
        enabled.

    Examples:
        .. code-block:: python
            :name: example-1

            # black&white list based strategy example
            >>> import paddle
            >>> import paddle.static as static
            >>> paddle.enable_static()
            >>> data = static.data(name='X', shape=[None, 1], dtype='float32')
            >>> hidden = static.nn.fc(x=data, size=10)
            >>> loss = paddle.mean(hidden)
            >>> optimizer = paddle.optimizer.Adam(learning_rate=0.001)
            >>> mp_optimizer = static.amp.decorate(
            ...     optimizer=optimizer, init_loss_scaling=8.0)
            >>> ops, param_grads = mp_optimizer.minimize(loss)
            >>> scaled_loss = mp_optimizer.get_scaled_loss()

        .. code-block:: python
            :name: example-2

            # pure fp16 training example
            >>> import numpy as np
            >>> import paddle
            >>> import paddle.nn.functional as F
            >>> paddle.enable_static()
            >>> # doctest: +REQUIRES(env:GPU)
            >>> def run_example_code():
            ...     place = paddle.CUDAPlace(0)
            ...     exe = paddle.static.Executor(place)
            ...     data = paddle.static.data(name='X', shape=[None, 1, 28, 28], dtype='float32')
            ...     conv2d = paddle.static.nn.conv2d(input=data, num_filters=6, filter_size=3)
            ...     # 1) Use fp16_guard to control the range of fp16 kernels used.
            ...     with paddle.static.amp.fp16_guard():
            ...         bn = paddle.static.nn.batch_norm(input=conv2d, act="relu")
            ...         pool = F.max_pool2d(bn, kernel_size=2, stride=2)
            ...         hidden = paddle.static.nn.fc(pool, size=10)
            ...         loss = paddle.mean(hidden)
            ...     # 2) Create the optimizer and set `multi_precision` to True.
            ...     # Setting `multi_precision` to True can avoid the poor accuracy
            ...     # or the slow convergence in a way.
            ...     optimizer = paddle.optimizer.Momentum(learning_rate=0.01, multi_precision=True)
            ...     # 3) These ops in `custom_black_list` will keep in the float32 computation type.
            ...     amp_list = paddle.static.amp.CustomOpLists(
            ...         custom_black_list=['pool2d'])
            ...     # 4) The entry of Paddle AMP.
            ...     # Enable pure fp16 training by setting `use_pure_fp16` to True.
            ...     optimizer = paddle.static.amp.decorate(
            ...         optimizer,
            ...         amp_list,
            ...         init_loss_scaling=128.0,
            ...         use_dynamic_loss_scaling=True,
            ...         use_pure_fp16=True)
            ...     # If you don't use the default_startup_program(), you should pass
            ...     # your defined `startup_program` into `minimize`.
            ...     optimizer.minimize(loss)
            ...     exe.run(paddle.static.default_startup_program())
            ...     # 5) Use `amp_init` after FP32 parameters initialization (such as `exe.run(startup_program)`).
            ...     # If you want to perform the testing process, you should pass `test_program` into `amp_init`.
            ...     optimizer.amp_init(place, scope=paddle.static.global_scope())
            >>> if paddle.is_compiled_with_cuda() and len(paddle.static.cuda_places()) > 0:
            ...     run_example_code()
    """
    amp_dtype = "bfloat16" if use_bf16 else "float16"
    if amp_lists is None:
@@ -859,47 +864,47 @@ def decorate(  # noqa: F811
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> paddle.enable_static()
            >>> # doctest: +REQUIRES(env:GPU)
            >>> class SimpleConvNet(paddle.nn.Layer):
            ...     def __init__(self):
            ...         super().__init__()
            ...         self.conv = paddle.nn.Conv2D(in_channels=1, out_channels=6, kernel_size=3)
            ...         self.linear = paddle.nn.Linear(in_features=26, out_features=10)
            ...
            ...     def forward(self, x):
            ...         out = self.conv(x)
            ...         out = paddle.nn.functional.relu(out)
            ...         out = self.linear(out)
            ...         out = paddle.nn.functional.softmax(out)
            ...         return out
            >>> main_program = paddle.static.Program()
            >>> startup_program = paddle.static.Program()
            >>> with paddle.utils.unique_name.guard():
            ...     with paddle.static.program_guard(main_program, startup_program):
            ...         model = SimpleConvNet()
            ...         x = paddle.static.data(
            ...             name='input', shape=[None, 1, 28, 28], dtype='float32'
            ...         )
            ...         out = model(x)
            ...         loss = paddle.mean(out)
            ...         optimizer = paddle.optimizer.AdamW()
            ...         optimizer = paddle.static.amp.decorate(optimizer, level="O2", dtype="float16")
            ...         optimizer.minimize(loss)
            >>> if paddle.is_compiled_with_cuda() and len(paddle.static.cuda_places()) > 0:
            ...     place = paddle.CUDAPlace(0)
            ...     exe = paddle.static.Executor(place)
            ...     exe.run(startup_program)
            ...
            ...     # Call `amp_init` after FP32 parameters initialization, such as `exe.run(startup_program)`,
            ...     # to convert FP32 parameters to low precision FP16 / BF16.
            ...     optimizer.amp_init(place, scope=paddle.static.global_scope())
    """
    # check amp_level: O0-O2
...
@@ -365,18 +365,18 @@ def fp16_guard():
    Examples:
        .. code-block:: python

            >>> import numpy as np
            >>> import paddle
            >>> import paddle.nn.functional as F
            >>> paddle.enable_static()
            >>> data = paddle.static.data(name='X', shape=[None, 1, 28, 28], dtype='float32')
            >>> conv2d = paddle.static.nn.conv2d(input=data, num_filters=6, filter_size=3)
            >>> with paddle.static.amp.fp16_guard():
            ...     bn = paddle.static.nn.batch_norm(input=conv2d, act="relu")
            ...     pool = F.max_pool2d(bn, kernel_size=2, stride=2)
            ...     hidden = paddle.static.nn.fc(pool, size=10)
            ...     loss = paddle.mean(hidden)
    """
    with framework.name_scope(prefix=_fp16_guard_pattern):
        yield
...
@@ -58,38 +58,44 @@ def data(name, shape, dtype=None, lod_level=0):
    Examples:
        .. code-block:: python

            >>> import numpy as np
            >>> import paddle
            >>> paddle.enable_static()

            # Creates a variable with fixed size [3, 2, 1]
            # User can only feed data of the same shape to x
            # the dtype is not set, so it will set "float32" by
            # paddle.get_default_dtype(). You can use paddle.get_default_dtype() to
            # change the global dtype
            >>> x = paddle.static.data(name='x', shape=[3, 2, 1])

            # Creates a variable with changeable batch size -1.
            # Users can feed data of any batch size into y,
            # but size of each data sample has to be [2, 1]
            >>> y = paddle.static.data(name='y', shape=[-1, 2, 1], dtype='float32')
            >>> z = x + y

            # In this example, we will feed x and y with np-ndarray "1"
            # and fetch z, like implementing "1 + 1 = 2" in PaddlePaddle
            >>> feed_data = np.ones(shape=[3, 2, 1], dtype=np.float32)
            >>> exe = paddle.static.Executor(paddle.framework.CPUPlace())
            >>> out = exe.run(paddle.static.default_main_program(),
            ...               feed={
            ...                   'x': feed_data,
            ...                   'y': feed_data
            ...               },
            ...               fetch_list=[z.name])

            # np-ndarray of shape=[3, 2, 1], dtype=float32, whose elements are 2
            >>> print(out)
            [array([[[2.],
                     [2.]],
                    [[2.],
                     [2.]],
                    [[2.],
                     [2.]]], dtype=float32)]
    """
    helper = LayerHelper('data', **locals())
@@ -171,13 +177,17 @@ class InputSpec:
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle.static import InputSpec

            >>> input = InputSpec([None, 784], 'float32', 'x')
            >>> label = InputSpec([None, 1], 'int64', 'label')

            >>> print(input)
            InputSpec(shape=(-1, 784), dtype=paddle.float32, name=x, stop_gradient=False)

            >>> print(label)
            InputSpec(shape=(-1, 1), dtype=paddle.int64, name=label, stop_gradient=False)
    """
    def __init__(self, shape, dtype='float32', name=None, stop_gradient=False):
...
@@ -217,14 +227,15 @@ class InputSpec:
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> from paddle.static import InputSpec

            >>> paddle.disable_static()

            >>> x = paddle.ones([2, 2], dtype="float32")
            >>> x_spec = InputSpec.from_tensor(x, name='x')
            >>> print(x_spec)
            InputSpec(shape=(2, 2), dtype=paddle.float32, name=x, stop_gradient=False)
    """
    if isinstance(tensor, (Variable, core.eager.Tensor)):
...
@@ -250,12 +261,13 @@ class InputSpec:
    Examples:
        .. code-block:: python

            >>> import numpy as np
            >>> from paddle.static import InputSpec

            >>> x = np.ones([2, 2], np.float32)
            >>> x_spec = InputSpec.from_numpy(x, name='x')
            >>> print(x_spec)
            InputSpec(shape=(2, 2), dtype=paddle.float32, name=x, stop_gradient=False)
    """
    return cls(ndarray.shape, ndarray.dtype, name)
...
@@ -273,11 +285,12 @@ class InputSpec:
    Examples:
        .. code-block:: python

            >>> from paddle.static import InputSpec

            >>> x_spec = InputSpec(shape=[64], dtype='float32', name='x')
            >>> x_spec.batch(4)
            >>> print(x_spec)
            InputSpec(shape=(4, 64), dtype=paddle.float32, name=x, stop_gradient=False)
    """
    if isinstance(batch_size, (list, tuple)):
...
@@ -310,11 +323,12 @@ class InputSpec:
    Examples:
        .. code-block:: python

            >>> from paddle.static import InputSpec

            >>> x_spec = InputSpec(shape=[4, 64], dtype='float32', name='x')
            >>> x_spec.unbatch()
            >>> print(x_spec)
            InputSpec(shape=(64,), dtype=paddle.float32, name=x, stop_gradient=False)
    """
    if len(self.shape) == 0:
...
This diff is collapsed.
@@ -31,10 +31,10 @@ def get_logger(name, level, fmt=None):
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import logging
            >>> logger = paddle.static.log_helper.get_logger(__name__, logging.INFO,
            ...                                              fmt='%(asctime)s-%(levelname)s: %(message)s')
    """
    logger = logging.getLogger(name)
...
@@ -86,44 +86,43 @@ def nce(
    Examples:
        .. code-block:: python

            >>> import paddle
            >>> import numpy as np

            >>> paddle.enable_static()

            >>> window_size = 5
            >>> words = []
            >>> for i in range(window_size):
            ...     words.append(paddle.static.data(
            ...         name='word_{0}'.format(i), shape=[-1, 1], dtype='int64'))

            >>> dict_size = 10000
            >>> label_word = int(window_size / 2) + 1

            >>> embs = []
            >>> for i in range(window_size):
            ...     if i == label_word:
            ...         continue
            ...
            ...     emb = paddle.static.nn.embedding(input=words[i], size=[dict_size, 32],
            ...                                      param_attr='embed', is_sparse=True)
            ...     embs.append(emb)

            >>> embs = paddle.concat(x=embs, axis=1)  # concat from 4 * [(-1, 1, 32)] to (-1, 4, 32)
            >>> embs = paddle.reshape(x=embs, shape=(-1, 4 * 32))  # reshape to (batch_size = -1, dim = 4*32)
            >>> loss = paddle.static.nn.nce(input=embs, label=words[label_word],
            ...                             num_total_classes=dict_size, param_attr='nce.w_0',
            ...                             bias_attr='nce.b_0')

            # or use custom distribution
            >>> dist = np.array([0.05, 0.5, 0.1, 0.3, 0.05])
            >>> loss = paddle.static.nn.nce(input=embs, label=words[label_word],
            ...                             num_total_classes=5, param_attr='nce.w_1',
            ...                             bias_attr='nce.b_1',
            ...                             num_neg_samples=3,
            ...                             sampler="custom_dist",
            ...                             custom_dist=dist)
    """
    helper = LayerHelper('nce', **locals())
    check_variable_and_dtype(input, 'input', ['float32', 'float64'], 'nce')
...
@@ -51,25 +51,27 @@ def accuracy(input, label, k=1, correct=None, total=None):
    Examples:
        .. code-block:: python

            >>> import numpy as np
            >>> import paddle
            >>> import paddle.static as static
            >>> import paddle.nn.functional as F

            >>> paddle.seed(2023)
            >>> paddle.enable_static()
            >>> data = static.data(name="input", shape=[-1, 32, 32], dtype="float32")
            >>> label = static.data(name="label", shape=[-1, 1], dtype="int")
            >>> fc_out = static.nn.fc(x=data, size=10)
            >>> predict = F.softmax(x=fc_out)
            >>> result = static.accuracy(input=predict, label=label, k=5)

            >>> place = paddle.CPUPlace()
            >>> exe = static.Executor(place)

            >>> exe.run(static.default_startup_program())
            >>> np.random.seed(1107)
            >>> x = np.random.rand(3, 32, 32).astype("float32")
            >>> y = np.array([[1], [0], [1]])
            >>> output = exe.run(feed={"input": x, "label": y},
            ...                  fetch_list=[result])
            >>> print(output)
            [array(0.33333334, dtype=float32)]
    """
    if in_dygraph_mode():
@@ -177,51 +179,61 @@ def auc(
    Examples:
        .. code-block:: python
            :name: example-1

            >>> import paddle
            >>> import numpy as np

            >>> paddle.enable_static()
            >>> paddle.seed(2023)
            >>> data = paddle.static.data(name="input", shape=[-1, 32, 32], dtype="float32")
            >>> label = paddle.static.data(name="label", shape=[-1], dtype="int")
            >>> fc_out = paddle.static.nn.fc(x=data, size=2)
            >>> predict = paddle.nn.functional.softmax(x=fc_out)
            >>> result = paddle.static.auc(input=predict, label=label)

            >>> place = paddle.CPUPlace()
            >>> exe = paddle.static.Executor(place)

            >>> exe.run(paddle.static.default_startup_program())
            >>> np.random.seed(1107)
            >>> x = np.random.rand(3, 32, 32).astype("float32")
            >>> y = np.array([1, 0, 1])
            >>> output = exe.run(feed={"input": x, "label": y},
            ...                  fetch_list=[result[0]])
            >>> print(output)
            [array(1.)]

        .. code-block:: python
            :name: example-2

            # you can learn the usage of ins_tag_weight by the following code.
            >>> import paddle
            >>> import numpy as np

            >>> paddle.enable_static()
            >>> paddle.seed(2023)
            >>> data = paddle.static.data(name="input", shape=[-1, 32, 32], dtype="float32")
            >>> label = paddle.static.data(name="label", shape=[-1], dtype="int")
            >>> ins_tag_weight = paddle.static.data(name='ins_tag_weight', shape=[-1, 16], lod_level=0, dtype='float64')
            >>> fc_out = paddle.static.nn.fc(x=data, size=2)
            >>> predict = paddle.nn.functional.softmax(x=fc_out)
            >>> result = paddle.static.auc(input=predict, label=label, ins_tag_weight=ins_tag_weight)

            >>> place = paddle.CPUPlace()
            >>> exe = paddle.static.Executor(place)

            >>> exe.run(paddle.static.default_startup_program())
            >>> np.random.seed(1107)
            >>> x = np.random.rand(3, 32, 32).astype("float32")
            >>> y = np.array([1, 0, 1])
            >>> z = np.array([1, 0, 1]).astype("float64")
            >>> output = exe.run(feed={"input": x, "label": y, "ins_tag_weight": z},
            ...                  fetch_list=[result[0]])
            >>> print(output)
            [array(1.)]
    """
    helper = LayerHelper("auc", **locals())
...
@@ -350,26 +362,27 @@ def ctr_metric_bundle(input, label, ins_tag_weight=None):
        local_prob(Tensor): Local sum of predicted ctr
        local_q(Tensor): Local sum of q value

    Examples:
        .. code-block:: python
            :name: example-1

            >>> import paddle
            >>> paddle.enable_static()
            >>> data = paddle.static.data(name="data", shape=[-1, 32], dtype="float32")
            >>> label = paddle.static.data(name="label", shape=[-1, 1], dtype="int32")
            >>> predict = paddle.nn.functional.sigmoid(paddle.static.nn.fc(x=data, size=1))
            >>> auc_out = paddle.static.ctr_metric_bundle(input=predict, label=label)

        .. code-block:: python
            :name: example-2

            >>> import paddle
            >>> paddle.enable_static()
            >>> data = paddle.static.data(name="data", shape=[-1, 32], dtype="float32")
            >>> label = paddle.static.data(name="label", shape=[-1, 1], dtype="int32")
            >>> predict = paddle.nn.functional.sigmoid(paddle.static.nn.fc(x=data, size=1))
            >>> ins_tag_weight = paddle.static.data(name='ins_tag_weight', shape=[-1, 1], lod_level=0, dtype='int64')
            >>> auc_out = paddle.static.ctr_metric_bundle(input=predict, label=label, ins_tag_weight=ins_tag_weight)
    """
    if ins_tag_weight is None:
        ins_tag_weight = paddle.tensor.fill_constant(
...
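
The converted `>>>` examples can be run directly through xdoctest's Python API; the snippet below is only a minimal sketch, with `paddle.static.input` chosen purely for illustration and under the assumption that the standard `xdoctest.doctest_module` entry point is available.

    import xdoctest

    # Collect and execute every ">>> " example found in the module's docstrings.
    xdoctest.doctest_module('paddle.static.input', command='all')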