From 529e74e41a56dba4bf14885af9d317e53b75d30a Mon Sep 17 00:00:00 2001 From: Roc <30228238+sljlp@users.noreply.github.com> Date: Thu, 1 Dec 2022 17:52:07 +0800 Subject: [PATCH] [Clean Fluid] replace accuracy and auc and remove get_places, distributions(#48554) * mv accuracy and auc * rm distributions * rm get_places * replace metric --- .../distributed/fleet/metrics/metric.py | 6 +- .../contrib/slim/tests/test_imperative_ptq.py | 4 +- .../contrib/slim/tests/test_imperative_qat.py | 10 +- .../slim/tests/test_imperative_qat_amp.py | 8 +- .../slim/tests/test_imperative_qat_lsq.py | 6 +- .../tests/test_image_classification_fp16.py | 2 +- .../incubate/fleet/tests/fleet_deep_ctr.py | 4 +- .../fluid/incubate/fleet/utils/fleet_util.py | 6 +- python/paddle/fluid/layers/__init__.py | 8 - python/paddle/fluid/layers/device.py | 43 - python/paddle/fluid/layers/distributions.py | 721 ---------------- python/paddle/fluid/metrics.py | 2 +- .../tests/book/notest_understand_sentiment.py | 4 +- .../tests/book/test_image_classification.py | 2 +- .../fluid/tests/book/test_recognize_digits.py | 2 +- .../tests/unittests/check_nan_inf_base.py | 2 +- .../fleet/dist_mnist_gradient_merge.py | 2 +- .../collective/fleet/pipeline_mnist.py | 2 +- .../fleet/pipeline_mnist_multi_device.py | 2 +- .../fleet/pipeline_mnist_one_device.py | 2 +- .../tests/unittests/dist_allreduce_op.py | 2 +- .../paddle/fluid/tests/unittests/dist_ctr.py | 4 +- .../fluid/tests/unittests/dist_fleet_ctr.py | 4 +- .../dist_fleet_raw_program_optimizer.py | 2 +- ...et_raw_program_optimizer_fuse_allreduce.py | 2 +- .../dist_fleet_sparse_embedding_ctr.py | 4 +- .../fluid/tests/unittests/dist_mnist.py | 2 +- .../tests/unittests/dist_mnist_batch_merge.py | 2 +- .../unittests/dist_mnist_fp16_allreduce.py | 2 +- .../fluid/tests/unittests/dist_mnist_lars.py | 2 +- .../fluid/tests/unittests/dist_se_resnext.py | 4 +- .../unittests/dist_text_classification.py | 2 +- .../distribution/test_distribution.py | 162 ---- .../dygraph_to_static/bert_dygraph_model.py | 2 +- .../unittests/dygraph_to_static/test_mnist.py | 2 +- .../dygraph_to_static/test_mobile_net.py | 4 +- .../dygraph_to_static/test_resnet.py | 4 +- .../dygraph_to_static/test_resnet_amp.py | 4 +- .../test_resnet_pure_fp16.py | 4 +- .../dygraph_to_static/test_se_resnet.py | 4 +- .../dygraph_to_static/test_sentiment.py | 8 +- .../unittests/dygraph_to_static/test_tsm.py | 4 +- .../unittests/mlu/test_accuracy_op_mlu.py | 6 +- .../fluid/tests/unittests/test_accuracy_op.py | 6 +- .../test_async_ssa_graph_executor_mnist.py | 2 +- .../fluid/tests/unittests/test_auc_op.py | 2 +- .../fluid/tests/unittests/test_desc_clone.py | 2 +- .../tests/unittests/test_distributions.py | 799 ------------------ .../tests/unittests/test_get_places_op.py | 48 -- .../unittests/test_inference_model_io.py | 4 +- .../fluid/tests/unittests/test_layers.py | 14 +- .../fluid/tests/unittests/test_profiler.py | 2 +- .../unittests/test_program_prune_backward.py | 2 +- python/paddle/static/__init__.py | 5 +- .../metric_op.py => static/nn/metric.py} | 18 +- 55 files changed, 93 insertions(+), 1885 deletions(-) delete mode 100644 python/paddle/fluid/layers/device.py delete mode 100644 python/paddle/fluid/layers/distributions.py delete mode 100644 python/paddle/fluid/tests/unittests/test_get_places_op.py rename python/paddle/{fluid/layers/metric_op.py => static/nn/metric.py} (97%) mode change 100755 => 100644 diff --git a/python/paddle/distributed/fleet/metrics/metric.py b/python/paddle/distributed/fleet/metrics/metric.py index 
aaf1115af8..ba2acd5ee3 100644 --- a/python/paddle/distributed/fleet/metrics/metric.py +++ b/python/paddle/distributed/fleet/metrics/metric.py @@ -148,8 +148,8 @@ def auc(stat_pos, stat_neg, scope=None, util=None): distributed auc in fleet Args: - stat_pos(numpy.array|Variable|string): stat_pos in output of fluid.layers.auc - stat_neg(numpy.array|Variable|string): stat_neg in output of fluid.layers.auc + stat_pos(numpy.array|Variable|string): stat_pos in output of paddle.static.auc + stat_neg(numpy.array|Variable|string): stat_neg in output of paddle.static.auc scope(Scope): specific scope Returns: @@ -163,7 +163,7 @@ def auc(stat_pos, stat_neg, scope=None, util=None): binary_predict = fluid.layers.concat( input=[fluid.layers.elementwise_sub(fluid.layers.ceil(similarity_norm), similarity_norm), similarity_norm], axis=1) self.auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, stat_neg] = - fluid.layers.auc(input=binary_predict, label=label, curve='ROC', num_thresholds=4096) + paddle.static.auc(input=binary_predict, label=label, curve='ROC', num_thresholds=4096) # in train.py, after train or infer pos = np.array(scope.find_var(stat_pos.name).get_tensor()) diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py index 759e74907e..df182f6c9c 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_ptq.py @@ -150,8 +150,8 @@ class TestImperativePTQ(unittest.TestCase): label = paddle.to_tensor(y_data) out = model(img) - acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1) - acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5) + acc_top1 = paddle.static.accuracy(input=out, label=label, k=1) + acc_top5 = paddle.static.accuracy(input=out, label=label, k=5) eval_acc_top1_list.append(float(acc_top1.numpy())) if batch_id % 50 == 0: diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py index aff07fb397..0e0fbd752b 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat.py @@ -129,7 +129,7 @@ class TestImperativeQat(unittest.TestCase): img = fluid.dygraph.to_variable(x_data) label = fluid.dygraph.to_variable(y_data) out = lenet(img) - acc = fluid.layers.accuracy(out, label) + acc = paddle.static.accuracy(out, label) loss = fluid.layers.cross_entropy(out, label) avg_loss = paddle.mean(loss) avg_loss.backward() @@ -160,10 +160,10 @@ class TestImperativeQat(unittest.TestCase): label = fluid.dygraph.to_variable(y_data) out = lenet(img) - acc_top1 = fluid.layers.accuracy( + acc_top1 = paddle.static.accuracy( input=out, label=label, k=1 ) - acc_top5 = fluid.layers.accuracy( + acc_top5 = paddle.static.accuracy( input=out, label=label, k=5 ) @@ -200,7 +200,7 @@ class TestImperativeQat(unittest.TestCase): label = fluid.dygraph.to_variable(y_data) lenet.eval() fp32_out = lenet(test_img) - fp32_acc = fluid.layers.accuracy(fp32_out, label).numpy() + fp32_acc = paddle.static.accuracy(fp32_out, label).numpy() with tempfile.TemporaryDirectory(prefix="qat_save_path_") as tmpdir: # save inference quantized model @@ -237,7 +237,7 @@ class TestImperativeQat(unittest.TestCase): ) paddle.disable_static() quant_out = fluid.dygraph.to_variable(quant_out) - quant_acc = fluid.layers.accuracy(quant_out, label).numpy() + quant_acc = paddle.static.accuracy(quant_out, label).numpy() 
paddle.enable_static() delta_value = fp32_acc - quant_acc self.assertLessEqual(delta_value, self.diff_threshold) diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py index ee0edb445e..d01fc2e63c 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_amp.py @@ -118,7 +118,7 @@ class TestImperativeQatAmp(unittest.TestCase): if use_amp: with paddle.amp.auto_cast(): out = model(img) - acc = fluid.layers.accuracy(out, label) + acc = paddle.static.accuracy(out, label) loss = fluid.layers.cross_entropy(out, label) avg_loss = paddle.mean(loss) scaled_loss = scaler.scale(avg_loss) @@ -128,7 +128,7 @@ class TestImperativeQatAmp(unittest.TestCase): adam.clear_gradients() else: out = model(img) - acc = fluid.layers.accuracy(out, label) + acc = paddle.static.accuracy(out, label) loss = fluid.layers.cross_entropy(out, label) avg_loss = paddle.mean(loss) avg_loss.backward() @@ -167,8 +167,8 @@ class TestImperativeQatAmp(unittest.TestCase): with paddle.amp.auto_cast(use_amp): out = model(img) - acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1) - acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5) + acc_top1 = paddle.static.accuracy(input=out, label=label, k=1) + acc_top5 = paddle.static.accuracy(input=out, label=label, k=5) acc_top1_list.append(float(acc_top1.numpy())) if batch_id % 100 == 0: diff --git a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py index 1b54a5b55b..2b06ee5bf0 100644 --- a/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py +++ b/python/paddle/fluid/contrib/slim/tests/test_imperative_qat_lsq.py @@ -170,7 +170,7 @@ class TestImperativeQatLSQ(unittest.TestCase): img = fluid.dygraph.to_variable(x_data) label = fluid.dygraph.to_variable(y_data) out = lenet(img) - acc = fluid.layers.accuracy(out, label) + acc = paddle.static.accuracy(out, label) loss = fluid.layers.cross_entropy(out, label) avg_loss = paddle.mean(loss) @@ -202,10 +202,10 @@ class TestImperativeQatLSQ(unittest.TestCase): label = fluid.dygraph.to_variable(y_data) out = lenet(img) - acc_top1 = fluid.layers.accuracy( + acc_top1 = paddle.static.accuracy( input=out, label=label, k=1 ) - acc_top5 = fluid.layers.accuracy( + acc_top5 = paddle.static.accuracy( input=out, label=label, k=5 ) diff --git a/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py b/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py index 7edaeb2760..ab9ebfa719 100644 --- a/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py +++ b/python/paddle/fluid/contrib/tests/test_image_classification_fp16.py @@ -131,7 +131,7 @@ def train(net_type, use_cuda, save_dirname, is_local): logits, label, return_softmax=True ) avg_cost = paddle.mean(cost) - acc = fluid.layers.accuracy(input=predict, label=label) + acc = paddle.static.accuracy(input=predict, label=label) # Test program test_program = train_program.clone(for_test=True) diff --git a/python/paddle/fluid/incubate/fleet/tests/fleet_deep_ctr.py b/python/paddle/fluid/incubate/fleet/tests/fleet_deep_ctr.py index d2c843ca4d..3c034d60f3 100644 --- a/python/paddle/fluid/incubate/fleet/tests/fleet_deep_ctr.py +++ b/python/paddle/fluid/incubate/fleet/tests/fleet_deep_ctr.py @@ -146,8 +146,8 @@ def model(): merge_layer = fluid.layers.concat(input=[dnn_out, lr_pool], axis=1) 
predict = fluid.layers.fc(input=merge_layer, size=2, act='softmax') - acc = fluid.layers.accuracy(input=predict, label=label) - auc_var, batch_auc_var, auc_states = fluid.layers.auc( + acc = paddle.static.accuracy(input=predict, label=label) + auc_var, batch_auc_var, auc_states = paddle.static.auc( input=predict, label=label ) cost = fluid.layers.cross_entropy(input=predict, label=label) diff --git a/python/paddle/fluid/incubate/fleet/utils/fleet_util.py b/python/paddle/fluid/incubate/fleet/utils/fleet_util.py index 8d5203f201..9678914b50 100644 --- a/python/paddle/fluid/incubate/fleet/utils/fleet_util.py +++ b/python/paddle/fluid/incubate/fleet/utils/fleet_util.py @@ -192,7 +192,7 @@ class FleetUtil: fluid.layers.ceil(similarity_norm), similarity_norm),\ similarity_norm], axis=1) auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \ - stat_neg] = fluid.layers.auc(input=binary_predict,\ + stat_neg] = paddle.static.auc(input=binary_predict,\ label=label, curve='ROC',\ num_thresholds=4096) @@ -1381,7 +1381,7 @@ class FleetUtil: fluid.layers.ceil(similarity_norm), similarity_norm),\ similarity_norm], axis=1) auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \ - stat_neg] = fluid.layers.auc(input=binary_predict,\ + stat_neg] = paddle.static.auc(input=binary_predict,\ label=label, curve='ROC',\ num_thresholds=4096) local_sqrerr, local_abserr, local_prob, local_q, local_pos_ins,\ @@ -1581,7 +1581,7 @@ class FleetUtil: fluid.layers.ceil(similarity_norm), similarity_norm),\ similarity_norm], axis=1) auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, \ - stat_neg] = fluid.layers.auc(input=binary_predict,\ + stat_neg] = paddle.static.auc(input=binary_predict,\ label=label, curve='ROC',\ num_thresholds=4096) local_sqrerr, local_abserr, local_prob, local_q, local_pos_ins, \ diff --git a/python/paddle/fluid/layers/__init__.py b/python/paddle/fluid/layers/__init__.py index 9ce0c0afeb..6a49ad0117 100644 --- a/python/paddle/fluid/layers/__init__.py +++ b/python/paddle/fluid/layers/__init__.py @@ -20,19 +20,14 @@ from . import tensor from .tensor import * from . import control_flow from .control_flow import * -from . import device -from .device import * from . import math_op_patch from .math_op_patch import * from . import loss from .loss import * from . import detection from .detection import * -from . import metric_op -from .metric_op import * from .learning_rate_scheduler import * from .collective import * -from .distributions import * from .sequence_lod import * from . import rnn @@ -41,11 +36,8 @@ __all__ += nn.__all__ __all__ += io.__all__ __all__ += tensor.__all__ __all__ += control_flow.__all__ -__all__ += device.__all__ __all__ += detection.__all__ -__all__ += metric_op.__all__ __all__ += learning_rate_scheduler.__all__ -__all__ += distributions.__all__ __all__ += sequence_lod.__all__ __all__ += loss.__all__ __all__ += rnn.__all__ diff --git a/python/paddle/fluid/layers/device.py b/python/paddle/fluid/layers/device.py deleted file mode 100644 index ac352ef52b..0000000000 --- a/python/paddle/fluid/layers/device.py +++ /dev/null @@ -1,43 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -""" -All util layers. -""" - -from .layer_function_generator import autodoc -from ..framework import unique_name -from ..layer_helper import LayerHelper -from paddle.utils import deprecated - -__all__ = [] - - -@deprecated(since='0.15.0', update_to="paddle.fluid.ParallelExecutor") -@autodoc() -def get_places(device_count=None, device_type=None): - helper = LayerHelper('get_places', **locals()) - out_places = helper.create_variable( - name=unique_name.generate_with_ignorable_key(helper.name + ".out") - ) - attrs = dict() - if device_count is not None: - attrs['device_count'] = int(device_count) - if device_type is not None: - attrs['device_type'] = str(device_type) - - helper.append_op( - type='get_places', outputs={"Out": [out_places]}, attrs=attrs - ) - - return out_places diff --git a/python/paddle/fluid/layers/distributions.py b/python/paddle/fluid/layers/distributions.py deleted file mode 100644 index a54403013c..0000000000 --- a/python/paddle/fluid/layers/distributions.py +++ /dev/null @@ -1,721 +0,0 @@ -# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -from . import control_flow -from . import tensor -from . import nn -import math -import numpy as np -import warnings -import paddle - -from ..data_feeder import ( - convert_dtype, - check_variable_and_dtype, - check_type, - check_dtype, -) - -__all__ = ['Uniform', 'Normal', 'Categorical', 'MultivariateNormalDiag'] - - -class Distribution: - """ - Distribution is the abstract base class for probability distributions. 
- """ - - def sample(self): - """Sampling from the distribution.""" - raise NotImplementedError - - def entropy(self): - """The entropy of the distribution.""" - raise NotImplementedError - - def kl_divergence(self, other): - """The KL-divergence between self distributions and other.""" - raise NotImplementedError - - def log_prob(self, value): - """Log probability density/mass function.""" - raise NotImplementedError - - def _validate_args(self, *args): - """ - Argument validation for distribution args - Args: - value (float, list, numpy.ndarray, Variable) - Raises - ValueError: if one argument is Variable, all arguments should be Variable - """ - is_variable = False - is_number = False - for arg in args: - if isinstance(arg, tensor.Variable): - is_variable = True - else: - is_number = True - - if is_variable and is_number: - raise ValueError( - 'if one argument is Variable, all arguments should be Variable' - ) - - return is_variable - - def _to_variable(self, *args): - """ - Argument convert args to Variable - - Args: - value (float, list, numpy.ndarray, Variable) - Returns: - Variable of args. - """ - numpy_args = [] - variable_args = [] - tmp = 0.0 - - for arg in args: - valid_arg = False - for cls in [float, list, np.ndarray, tensor.Variable]: - if isinstance(arg, cls): - valid_arg = True - break - assert ( - valid_arg - ), "type of input args must be float, list, numpy.ndarray or Variable." - if isinstance(arg, float): - arg = np.zeros(1) + arg - arg_np = np.array(arg) - arg_dtype = arg_np.dtype - if str(arg_dtype) not in ['float32']: - warnings.warn( - "data type of argument only support float32, your argument will be convert to float32." - ) - arg_np = arg_np.astype('float32') - tmp = tmp + arg_np - numpy_args.append(arg_np) - - dtype = tmp.dtype - for arg in numpy_args: - arg_broadcasted, _ = np.broadcast_arrays(arg, tmp) - arg_variable = tensor.create_tensor(dtype=dtype) - tensor.assign(arg_broadcasted, arg_variable) - variable_args.append(arg_variable) - - return tuple(variable_args) - - -class Uniform(Distribution): - r"""Uniform distribution with `low` and `high` parameters. - - Mathematical Details - - The probability density function (pdf) is, - - .. 
math:: - - pdf(x; a, b) = \\frac{1}{Z}, \ a <= x < b - - Z = b - a - [...] - while not np.all(scale_np > 0): - scale_np = np.random.randn(batch_size, dims).astype('float32') - while not np.all(other_scale_np > 0): - other_scale_np = np.random.randn(batch_size, dims).astype('float32') - return ( - loc_np, - other_loc_np, - loc_float, - scale_float, - other_loc_float, - other_scale_float, - scale_np, - other_scale_np, - values_np, - ) - - def test_normal_distribution(self, batch_size=2, dims=3, tolerance=1e-6): - test_program = fluid.Program() - ( - loc_np, - other_loc_np, - loc_float, - scale_float, - other_loc_float, - other_scale_float, - scale_np, - other_scale_np, - values_np, - ) = self.get_normal_random_input(batch_size, dims) - - feed_vars, fetch_list = self.build_normal_program( - test_program, - batch_size, - dims, - loc_float, - scale_float, - other_loc_float, - other_scale_float, - scale_np, - other_scale_np, - loc_np, - other_loc_np, - values_np, - ) - self.executor.run(fluid.default_startup_program()) - - np_normal_float = NormalNumpy(loc_float, scale_float) - np_other_normal_float = NormalNumpy(other_loc_float, other_scale_float) - np_normal_float_np_broadcast = NormalNumpy(loc_float, scale_np) - np_other_normal_float_np_broadcast = NormalNumpy( - other_loc_float, other_scale_np - ) - np_normal = NormalNumpy(loc_np, scale_np) - np_other_normal = NormalNumpy(other_loc_np, other_scale_np) - - gt_sample_float = np_normal_float.sample([batch_size, dims]) - gt_sample_float_np_broadcast = np_normal_float_np_broadcast.sample( - [batch_size, dims] - ) - gt_sample_np = np_normal.sample([batch_size, dims]) - gt_entropy_float = np_normal_float.entropy() - gt_entropy_float_np_broadcast = np_normal_float_np_broadcast.entropy() - gt_entropy = np_normal.entropy() - gt_lp_float_np_broadcast = np_normal_float_np_broadcast.log_prob( - values_np - ) - gt_lp = np_normal.log_prob(values_np) - gt_kl_float = np_normal_float.kl_divergence(np_other_normal_float) - gt_kl_float_np_broadcast = np_normal_float_np_broadcast.kl_divergence( - np_other_normal_float_np_broadcast - ) - gt_kl = np_normal.kl_divergence(np_other_normal) - - [ - output_sample_float, - output_sample_float_np_broadcast, - output_sample_np, - output_sample_variable, - output_entropy_float, - output_entropy_float_np_broadcast, - output_entropy_np, - output_entropy_variable, - output_lp_float_np_broadcast, - output_lp_np, - output_lp_variable, - output_kl_float, - output_kl_float_np_broadcast, - output_kl_np, - output_kl_variable, - ] = self.executor.run( - program=test_program, feed=feed_vars, fetch_list=fetch_list - ) - - np.testing.assert_allclose( - output_sample_float.shape, - gt_sample_float.shape, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_sample_float_np_broadcast.shape, - gt_sample_float_np_broadcast.shape, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_sample_np.shape, - gt_sample_np.shape, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_sample_variable.shape, - gt_sample_np.shape, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_entropy_float, - gt_entropy_float, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_entropy_float_np_broadcast, - gt_entropy_float_np_broadcast, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_entropy_np, gt_entropy, rtol=tolerance, atol=tolerance - ) - np.testing.assert_allclose( - output_entropy_variable, gt_entropy, rtol=tolerance, atol=tolerance - ) - 
np.testing.assert_allclose( - output_lp_float_np_broadcast, - gt_lp_float_np_broadcast, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_lp_np, gt_lp, rtol=tolerance, atol=tolerance - ) - np.testing.assert_allclose( - output_lp_variable, gt_lp, rtol=tolerance, atol=tolerance - ) - np.testing.assert_allclose( - output_kl_float, gt_kl_float, rtol=tolerance, atol=tolerance - ) - np.testing.assert_allclose( - output_kl_float_np_broadcast, - gt_kl_float_np_broadcast, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_kl_np, gt_kl, rtol=tolerance, atol=tolerance - ) - np.testing.assert_allclose( - output_kl_variable, gt_kl, rtol=tolerance, atol=tolerance - ) - - def build_uniform_program( - self, - test_program, - batch_size, - dims, - low_float, - high_float, - high_np, - low_np, - values_np, - ): - with fluid.program_guard(test_program): - low = layers.data(name='low', shape=[dims], dtype='float32') - high = layers.data(name='high', shape=[dims], dtype='float32') - - values = layers.data(name='values', shape=[dims], dtype='float32') - - uniform_float = Uniform(low_float, high_float) - uniform_float_np_broadcast = Uniform(low_float, high_np) - uniform_np = Uniform(low_np, high_np) - uniform_variable = Uniform(low, high) - - sample_float = uniform_float.sample([batch_size, dims]) - sample_float_np_broadcast = uniform_float_np_broadcast.sample( - [batch_size, dims] - ) - sample_np = uniform_np.sample([batch_size, dims]) - sample_variable = uniform_variable.sample([batch_size, dims]) - - entropy_float = uniform_float.entropy() - entropy_float_np_broadcast = uniform_float_np_broadcast.entropy() - entropy_np = uniform_np.entropy() - entropy_variable = uniform_variable.entropy() - - lp_float_np_broadcast = uniform_float_np_broadcast.log_prob(values) - lp_np = uniform_np.log_prob(values) - lp_variable = uniform_variable.log_prob(values) - - fetch_list = [ - sample_float, - sample_float_np_broadcast, - sample_np, - sample_variable, - entropy_float, - entropy_float_np_broadcast, - entropy_np, - entropy_variable, - lp_float_np_broadcast, - lp_np, - lp_variable, - ] - feed_vars = {'low': low_np, 'high': high_np, 'values': values_np} - return feed_vars, fetch_list - - def test_uniform_distribution(self, batch_size=2, dims=3, tolerance=1e-6): - test_program = fluid.Program() - - low_np = np.random.randn(batch_size, dims).astype('float32') - low_float = np.random.uniform(-2, 1) - high_float = np.random.uniform(1, 3) - high_np = np.random.uniform(-5.0, 5.0, (batch_size, dims)).astype( - 'float32' - ) - values_np = np.random.randn(batch_size, dims).astype('float32') - - feed_vars, fetch_list = self.build_uniform_program( - test_program, - batch_size, - dims, - low_float, - high_float, - high_np, - low_np, - values_np, - ) - - self.executor.run(fluid.default_startup_program()) - - np_uniform_float = UniformNumpy(low_float, high_float) - np_uniform_float_np_broadcast = UniformNumpy(low_float, high_np) - np_uniform = UniformNumpy(low_np, high_np) - - gt_sample_float = np_uniform_float.sample([batch_size, dims]) - gt_sample_float_np_broadcast = np_uniform_float_np_broadcast.sample( - [batch_size, dims] - ) - gt_sample_np = np_uniform.sample([batch_size, dims]) - gt_entropy_float = np_uniform_float.entropy() - gt_entropy_float_np_broadcast = np_uniform_float_np_broadcast.entropy() - gt_entropy = np_uniform.entropy() - gt_lp_float_np_broadcast = np_uniform_float_np_broadcast.log_prob( - values_np - ) - gt_lp = np_uniform.log_prob(values_np) - - # result 
calculated by paddle - [ - output_sample_float, - output_sample_float_np_broadcast, - output_sample_np, - output_sample_variable, - output_entropy_float, - output_entropy_float_np_broadcast, - output_entropy_np, - output_entropy_variable, - output_lp_float_np_broadcast, - output_lp_np, - output_lp_variable, - ] = self.executor.run( - program=test_program, feed=feed_vars, fetch_list=fetch_list - ) - - np.testing.assert_allclose( - output_sample_float.shape, - gt_sample_float.shape, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_sample_float_np_broadcast.shape, - gt_sample_float_np_broadcast.shape, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_sample_np.shape, - gt_sample_np.shape, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_sample_variable.shape, - gt_sample_np.shape, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_entropy_float, - gt_entropy_float, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_entropy_float_np_broadcast, - gt_entropy_float_np_broadcast, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_entropy_np, gt_entropy, rtol=tolerance, atol=tolerance - ) - np.testing.assert_allclose( - output_entropy_variable, gt_entropy, rtol=tolerance, atol=tolerance - ) - np.testing.assert_allclose( - output_lp_float_np_broadcast, - gt_lp_float_np_broadcast, - rtol=tolerance, - atol=tolerance, - ) - np.testing.assert_allclose( - output_lp_np, gt_lp, rtol=tolerance, atol=tolerance - ) - np.testing.assert_allclose( - output_lp_variable, gt_lp, rtol=tolerance, atol=tolerance - ) - - def test_categorical_distribution( - self, batch_size=2, dims=3, tolerance=1e-6 - ): - test_program = fluid.Program() - - logits_np = np.random.randn(batch_size, dims).astype('float32') - other_logits_np = np.random.randn(batch_size, dims).astype('float32') - - with fluid.program_guard(test_program): - logits = layers.data(name='logits', shape=[dims], dtype='float32') - other_logits = layers.data( - name='other_logits', shape=[dims], dtype='float32' - ) - - categorical_np = Categorical(logits_np) - other_categorical_np = Categorical(other_logits_np) - - entropy_np = categorical_np.entropy() - kl_np = categorical_np.kl_divergence(other_categorical_np) - - self.executor.run(fluid.default_main_program()) - - np_categorical = CategoricalNumpy(logits_np) - np_other_categorical = CategoricalNumpy(other_logits_np) - gt_entropy_np = np_categorical.entropy() - gt_kl_np = np_categorical.kl_divergence(np_other_categorical) - - # result calculated by paddle - [output_entropy_np, output_kl_np] = self.executor.run( - program=test_program, - feed={'logits': logits_np}, - fetch_list=[entropy_np, kl_np], - ) - np.testing.assert_allclose( - output_entropy_np, gt_entropy_np, rtol=tolerance, atol=tolerance - ) - np.testing.assert_allclose( - output_kl_np, gt_kl_np, rtol=tolerance, atol=tolerance - ) - - def test_multivariateNormalDiag_distribution( - self, batch_size=2, tolerance=1e-6 - ): - test_program = fluid.Program() - - loc_np = np.random.random( - batch_size, - ).astype('float32') - scale_np = np.diag( - np.random.random( - batch_size, - ) - ).astype('float32') - other_loc_np = np.random.random( - batch_size, - ).astype('float32') - other_scale_np = np.diag( - np.random.random( - batch_size, - ) - ).astype('float32') - - with fluid.program_guard(test_program): - loc = layers.data( - name='loc', - shape=[ - batch_size, - ], - dtype='float32', - 
append_batch_size=False, - ) - scale = layers.data( - name='scale', - shape=[batch_size, batch_size], - dtype='float32', - append_batch_size=False, - ) - other_loc = layers.data( - name='other_loc', - shape=[ - batch_size, - ], - dtype='float32', - append_batch_size=False, - ) - other_scale = layers.data( - name='other_scale', - shape=[batch_size, batch_size], - dtype='float32', - append_batch_size=False, - ) - - multivariate_np = MultivariateNormalDiag(loc, scale) - other_multivariate_np = MultivariateNormalDiag( - other_loc, other_scale - ) - - entropy_np = multivariate_np.entropy() - other_entropy_np = other_multivariate_np.entropy() - kl_np = multivariate_np.kl_divergence(other_multivariate_np) - - self.executor.run(fluid.default_main_program()) - - np_multivariate = MultivariateNormalDiagNumpy(loc_np, scale_np) - np_other_multivariate = MultivariateNormalDiagNumpy( - other_loc_np, other_scale_np - ) - gt_entropy_np = np_multivariate.entropy() - gt_kl_np = np_multivariate.kl_divergence(np_other_multivariate) - - # result calculated by paddle - [output_entropy_np, output_kl_np] = self.executor.run( - program=test_program, - feed={ - 'loc': loc_np, - 'scale': scale_np, - 'other_loc': other_loc_np, - 'other_scale': other_scale_np, - }, - fetch_list=[entropy_np, kl_np], - ) - np.testing.assert_allclose( - output_entropy_np, gt_entropy_np, rtol=tolerance, atol=tolerance - ) - np.testing.assert_allclose( - output_kl_np, gt_kl_np, rtol=tolerance, atol=tolerance - ) - - -class DistributionTestError(unittest.TestCase): - def test_normal_error(self): - loc = int(1) - scale = int(1) - - # type of loc and scale must be float, list, numpy.ndarray, Variable - self.assertRaises(TypeError, Normal, loc, 1.0) - self.assertRaises(TypeError, Normal, 1.0, scale) - - normal = Normal(0.0, 1.0) - - value = [1.0, 2.0] - # type of value must be variable - self.assertRaises(TypeError, normal.log_prob, value) - - shape = 1.0 - # type of shape must be list - self.assertRaises(TypeError, normal.sample, shape) - - seed = 1.0 - # type of seed must be int - self.assertRaises(TypeError, normal.sample, [2, 3], seed) - - normal_other = Uniform(1.0, 2.0) - # type of other must be an instance of Normal - self.assertRaises(TypeError, normal.kl_divergence, normal_other) - - def test_uniform_error(self): - low = int(1) - high = int(1) - - # type of loc and scale must be float, list, numpy.ndarray, Variable - self.assertRaises(TypeError, Uniform, low, 1.0) - self.assertRaises(TypeError, Uniform, 1.0, high) - - uniform = Uniform(0.0, 1.0) - - value = [1.0, 2.0] - # type of value must be variable - self.assertRaises(TypeError, uniform.log_prob, value) - - shape = 1.0 - # type of shape must be list - self.assertRaises(TypeError, uniform.sample, shape) - - seed = 1.0 - # type of seed must be int - self.assertRaises(TypeError, uniform.sample, [2, 3], seed) - - def test_categorical_error(self): - logit = 1.0 - - # type of loc and scale must be list, numpy.ndarray, Variable - self.assertRaises(TypeError, Categorical, logit) - - categorical = Categorical([-0.602, -0.602]) - - categorical_other = Normal(1.0, 2.0) - # type of other must be an instance of Normal - self.assertRaises( - TypeError, categorical.kl_divergence, categorical_other - ) - - def test_multivariate_normal_diag_error(self): - loc = 1.0 - scale = 1.0 - - # type of loc and scale must be list, numpy.ndarray, Variable - self.assertRaises(TypeError, MultivariateNormalDiag, loc, [1.0]) - self.assertRaises(TypeError, MultivariateNormalDiag, [1.0], scale) - - mnd = 
MultivariateNormalDiag([0.3, 0.5], [[0.4, 0], [0, 0.5]]) - - categorical_other = Normal(1.0, 2.0) - # type of other must be an instance of Normal - self.assertRaises(TypeError, mnd.kl_divergence, categorical_other) - - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_get_places_op.py b/python/paddle/fluid/tests/unittests/test_get_places_op.py deleted file mode 100644 index 03f32c78b5..0000000000 --- a/python/paddle/fluid/tests/unittests/test_get_places_op.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import unittest - -from decorator_helper import prog_scope - -import paddle.fluid as fluid -import paddle.fluid.core as core -from paddle.fluid.layers.device import get_places - - -class TestGetPlaces(unittest.TestCase): - @prog_scope() - def check_get_cpu_places(self): - places = get_places() - cpu = fluid.CPUPlace() - exe = fluid.Executor(cpu) - exe.run(fluid.default_main_program()) - self.assertEqual(places.type, fluid.core.VarDesc.VarType.PLACE_LIST) - - @prog_scope() - def check_get_gpu_places(self): - places = get_places(device_type='CUDA') - gpu = fluid.CUDAPlace(0) - exe = fluid.Executor(gpu) - exe.run(fluid.default_main_program()) - self.assertEqual(places.type, fluid.core.VarDesc.VarType.PLACE_LIST) - - def test_main(self): - if core.is_compiled_with_cuda(): - self.check_get_gpu_places() - self.check_get_cpu_places() - - -if __name__ == '__main__': - unittest.main() diff --git a/python/paddle/fluid/tests/unittests/test_inference_model_io.py b/python/paddle/fluid/tests/unittests/test_inference_model_io.py index b7aa6e7ba0..9cf82e16f7 100644 --- a/python/paddle/fluid/tests/unittests/test_inference_model_io.py +++ b/python/paddle/fluid/tests/unittests/test_inference_model_io.py @@ -174,8 +174,8 @@ class TestSaveInferenceModel(unittest.TestCase): x = layers.data(name='x', shape=[2], dtype='float32') y = layers.data(name='y', shape=[1], dtype='int32') predict = fluid.layers.fc(input=x, size=2, act='softmax') - acc = fluid.layers.accuracy(input=predict, label=y) - auc_var, batch_auc_var, auc_states = fluid.layers.auc( + acc = paddle.static.accuracy(input=predict, label=y) + auc_var, batch_auc_var, auc_states = paddle.static.auc( input=predict, label=y ) cost = fluid.layers.cross_entropy(input=predict, label=y) diff --git a/python/paddle/fluid/tests/unittests/test_layers.py b/python/paddle/fluid/tests/unittests/test_layers.py index 912382f49a..8807b77664 100644 --- a/python/paddle/fluid/tests/unittests/test_layers.py +++ b/python/paddle/fluid/tests/unittests/test_layers.py @@ -34,7 +34,6 @@ from paddle.fluid.framework import ( program_guard, ) from paddle.fluid.initializer import Constant -from paddle.fluid.layers.device import get_places from paddle.fluid.param_attr import ParamAttr from paddle.tensor import random @@ -2895,7 +2894,7 @@ class TestLayer(LayerTest): label = fluid.data(name="label", shape=[-1, 1], dtype="int") fc_out = 
fluid.layers.fc(input=data, size=10) predict = fluid.layers.softmax(input=fc_out) - result = fluid.layers.accuracy(input=predict, label=label, k=5) + result = paddle.static.accuracy(input=predict, label=label, k=5) place = fluid.CPUPlace() exe = fluid.Executor(place) @@ -2911,7 +2910,9 @@ class TestLayer(LayerTest): label = base.to_variable(y) fc_out = fluid.layers.fc(data, size=10) predict = fluid.layers.softmax(fc_out) - dynamic_out = fluid.layers.accuracy(input=predict, label=label, k=5) + dynamic_out = paddle.static.accuracy( + input=predict, label=label, k=5 + ) np.testing.assert_array_equal(static_out[0], dynamic_out.numpy()) @@ -2954,7 +2955,6 @@ class TestBook(LayerTest): ) else: - assert method.__name__ in ('make_get_places') continue if method.__name__ in self.only_static_set: continue @@ -3201,12 +3201,6 @@ class TestBook(LayerTest): hid = layers.fc(input=data, size=20) return layers.softmax(hid, axis=1) - def make_get_places(self): - with program_guard( - fluid.default_main_program(), fluid.default_startup_program() - ): - get_places(device_count=1) - @prog_scope() def make_nce(self): window_size = 5 diff --git a/python/paddle/fluid/tests/unittests/test_profiler.py b/python/paddle/fluid/tests/unittests/test_profiler.py index 5fbedfaaa7..3303481165 100644 --- a/python/paddle/fluid/tests/unittests/test_profiler.py +++ b/python/paddle/fluid/tests/unittests/test_profiler.py @@ -60,7 +60,7 @@ class TestProfiler(unittest.TestCase): cost = fluid.layers.cross_entropy(input=predict, label=label) avg_cost = paddle.mean(cost) batch_size = fluid.layers.create_tensor(dtype='int64') - batch_acc = fluid.layers.accuracy( + batch_acc = paddle.static.accuracy( input=predict, label=label, total=batch_size ) diff --git a/python/paddle/fluid/tests/unittests/test_program_prune_backward.py b/python/paddle/fluid/tests/unittests/test_program_prune_backward.py index 04c707b320..d751fd4b90 100755 --- a/python/paddle/fluid/tests/unittests/test_program_prune_backward.py +++ b/python/paddle/fluid/tests/unittests/test_program_prune_backward.py @@ -76,7 +76,7 @@ def simple_fc_net_with_accuracy(use_feed): prediction = fluid.layers.fc(hidden, size=10, act='softmax') loss = fluid.layers.cross_entropy(input=prediction, label=label) loss = paddle.mean(loss) - accuracy_out = fluid.layers.accuracy(input=prediction, label=label, k=5) + accuracy_out = paddle.static.accuracy(input=prediction, label=label, k=5) return loss diff --git a/python/paddle/static/__init__.py b/python/paddle/static/__init__.py index 78ad1cfabc..f527b5a1c3 100644 --- a/python/paddle/static/__init__.py +++ b/python/paddle/static/__init__.py @@ -68,11 +68,12 @@ from ..fluid.io import batch # noqa: F401 from ..fluid.layers import create_parameter # noqa: F401 from ..fluid.layers import create_global_var # noqa: F401 -from ..fluid.layers.metric_op import auc # noqa: F401 -from ..fluid.layers.metric_op import accuracy # noqa: F401 from ..fluid.contrib.layers import ctr_metric_bundle # noqa: F401 from ..fluid.layers import exponential_decay # noqa: F401 +from paddle.static.nn.metric import auc # noqa: F401 +from paddle.static.nn.metric import accuracy # noqa: F401 + __all__ = [ # noqa 'append_backward', 'gradients', diff --git a/python/paddle/fluid/layers/metric_op.py b/python/paddle/static/nn/metric.py old mode 100755 new mode 100644 similarity index 97% rename from python/paddle/fluid/layers/metric_op.py rename to python/paddle/static/nn/metric.py index 3179f5d568..948b100bce --- a/python/paddle/fluid/layers/metric_op.py +++ 
b/python/paddle/static/nn/metric.py @@ -15,22 +15,16 @@ All layers just related to metric. """ -import warnings -from ..layer_helper import LayerHelper -from ..initializer import Normal, Constant -from ..framework import ( +from paddle.fluid.layer_helper import LayerHelper +from paddle.fluid.initializer import Constant +from paddle.fluid.framework import ( Variable, _non_static_mode, _varbase_creator, - _in_legacy_dygraph, - in_dygraph_mode, ) -from .. import core -from ..param_attr import ParamAttr -from . import nn -from . import tensor -from ..data_feeder import check_variable_and_dtype -from paddle import _C_ops, _legacy_C_ops +from paddle.fluid.layers import tensor +from paddle.fluid.data_feeder import check_variable_and_dtype +from paddle import _legacy_C_ops __all__ = ['accuracy', 'auc'] -- GitLab
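
Reviewer note: the sketch below summarizes the call-site migration this patch applies throughout, i.e. fluid.layers.accuracy/auc becoming paddle.static.accuracy/auc. It is illustrative only — the network, layer sizes, and feed data are hypothetical placeholders, not taken from any file in this diff — and assumes Paddle 2.x with static-graph mode enabled.

# Minimal sketch of the migrated API usage (assumptions noted above).
import numpy as np
import paddle

paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name='x', shape=[-1, 32], dtype='float32')
    label = paddle.static.data(name='label', shape=[-1, 1], dtype='int64')
    predict = paddle.static.nn.fc(x, size=2, activation='softmax')

    # was: fluid.layers.accuracy(input=predict, label=label, k=1)
    acc = paddle.static.accuracy(input=predict, label=label, k=1)
    # was: fluid.layers.auc(input=predict, label=label, ...)
    # returns (auc, batch_auc, [batch_stat_pos, batch_stat_neg, stat_pos, stat_neg])
    auc_var, batch_auc_var, auc_states = paddle.static.auc(
        input=predict, label=label, curve='ROC', num_thresholds=4096
    )

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)  # initializes fc weights and the persistable AUC state vars
acc_np, auc_np = exe.run(
    main_prog,
    feed={
        'x': np.random.random((8, 32)).astype('float32'),
        'label': np.random.randint(0, 2, size=(8, 1)).astype('int64'),
    },
    fetch_list=[acc, auc_var],
)
print('accuracy:', acc_np, 'auc:', auc_np)

Only the import path changes; argument names (input, label, k, curve, num_thresholds) and return structure are unchanged, which is why the diff can rewrite call sites mechanically.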